-"
-empty,
-int,2
-deallocate q;
--- special cases
-\pset expanded off
-select 'comma,comma' as comma, 'semi;semi' as semi;
-comma,semi
-"comma,comma",semi;semi
-\pset csv_fieldsep ';'
-select 'comma,comma' as comma, 'semi;semi' as semi;
-comma;semi
-comma,comma;"semi;semi"
-select '\.' as data;
-data
-"\."
-\pset csv_fieldsep '.'
-select '\' as d1, '' as d2;
-"d1"."d2"
-"\".""
--- illegal csv separators
-\pset csv_fieldsep ''
-\pset: csv_fieldsep must be a single one-byte character
-\pset csv_fieldsep '\0'
-\pset: csv_fieldsep must be a single one-byte character
-\pset csv_fieldsep '\n'
-\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return
-\pset csv_fieldsep '\r'
-\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return
-\pset csv_fieldsep '"'
-\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return
-\pset csv_fieldsep ',,'
-\pset: csv_fieldsep must be a single one-byte character
-\pset csv_fieldsep ','
--- test html output format
-\pset format html
-\pset border 1
-\pset expanded off
-\d psql_serial_tab_id_seq
-
- Sequence "public.psql_serial_tab_id_seq"
-
- Type |
- Start |
- Minimum |
- Maximum |
- Increment |
- Cycles? |
- Cache |
-
-
- integer |
- 1 |
- 1 |
- 2147483647 |
- 1 |
- no |
- 1 |
-
-
-Owned by: public.psql_serial_tab.id
-
-\pset tuples_only true
-\df exp
-
-
- pg_catalog |
- exp |
- double precision |
- double precision |
- func |
-
-
- pg_catalog |
- exp |
- numeric |
- numeric |
- func |
-
-
-
-\pset tuples_only false
-\pset expanded on
-\d psql_serial_tab_id_seq
-
- Sequence "public.psql_serial_tab_id_seq"
-
- Record 1 |
-
- Type |
- integer |
-
-
- Start |
- 1 |
-
-
- Minimum |
- 1 |
-
-
- Maximum |
- 2147483647 |
-
-
- Increment |
- 1 |
-
-
- Cycles? |
- no |
-
-
- Cache |
- 1 |
-
-
-Owned by: public.psql_serial_tab.id
-
-\pset tuples_only true
-\df exp
-
-
- |
-
- Schema |
- pg_catalog |
-
-
- Name |
- exp |
-
-
- Result data type |
- double precision |
-
-
- Argument data types |
- double precision |
-
-
- Type |
- func |
-
-
- |
-
- Schema |
- pg_catalog |
-
-
- Name |
- exp |
-
-
- Result data type |
- numeric |
-
-
- Argument data types |
- numeric |
-
-
- Type |
- func |
-
-
-
-\pset tuples_only false
-prepare q as
- select 'some"text' as "a&title", E' \n' as "junk",
- ' ' as "empty", n as int
- from generate_series(1,2) as n;
-\pset expanded off
-\pset border 0
-execute q;
-
-
- a&title |
- junk |
- empty |
- int |
-
-
- some"text |
- <foo>
-<bar> |
- |
- 1 |
-
-
- some"text |
- <foo>
-<bar> |
- |
- 2 |
-
-
-(2 rows)
-
-\pset border 1
-execute q;
-
-
- a&title |
- junk |
- empty |
- int |
-
-
- some"text |
- <foo>
-<bar> |
- |
- 1 |
-
-
- some"text |
- <foo>
-<bar> |
- |
- 2 |
-
-
-(2 rows)
-
-\pset tableattr foobar
-execute q;
-
-
- a&title |
- junk |
- empty |
- int |
-
-
- some"text |
- <foo>
-<bar> |
- |
- 1 |
-
-
- some"text |
- <foo>
-<bar> |
- |
- 2 |
-
-
-(2 rows)
-
-\pset tableattr
-\pset expanded on
-\pset border 0
-execute q;
-
-
- Record 1 |
-
- a&title |
- some"text |
-
-
- junk |
- <foo>
-<bar> |
-
-
- empty |
- |
-
-
- int |
- 1 |
-
-
- Record 2 |
-
- a&title |
- some"text |
-
-
- junk |
- <foo>
-<bar> |
-
-
- empty |
- |
-
-
- int |
- 2 |
-
-
-
-\pset border 1
-execute q;
-
-
- Record 1 |
-
- a&title |
- some"text |
-
-
- junk |
- <foo>
-<bar> |
-
-
- empty |
- |
-
-
- int |
- 1 |
-
-
- Record 2 |
-
- a&title |
- some"text |
-
-
- junk |
- <foo>
-<bar> |
-
-
- empty |
- |
-
-
- int |
- 2 |
-
-
-
-\pset tableattr foobar
-execute q;
-
-
- Record 1 |
-
- a&title |
- some"text |
-
-
- junk |
- <foo>
-<bar> |
-
-
- empty |
- |
-
-
- int |
- 1 |
-
-
- Record 2 |
-
- a&title |
- some"text |
-
-
- junk |
- <foo>
-<bar> |
-
-
- empty |
- |
-
-
- int |
- 2 |
-
-
-
-\pset tableattr
-deallocate q;
--- test latex output format
-\pset format latex
-\pset border 1
-\pset expanded off
-\d psql_serial_tab_id_seq
-\begin{center}
-Sequence "public.psql\_serial\_tab\_id\_seq"
-\end{center}
-
-\begin{tabular}{l | r | r | r | r | l | r}
-\textit{Type} & \textit{Start} & \textit{Minimum} & \textit{Maximum} & \textit{Increment} & \textit{Cycles?} & \textit{Cache} \\
-\hline
-integer & 1 & 1 & 2147483647 & 1 & no & 1 \\
-\end{tabular}
-
-\noindent Owned by: public.psql\_serial\_tab.id \\
-
-\pset tuples_only true
-\df exp
-\begin{tabular}{l | l | l | l | l}
-pg\_catalog & exp & double precision & double precision & func \\
-pg\_catalog & exp & numeric & numeric & func \\
-\end{tabular}
-
-\noindent
-\pset tuples_only false
-\pset expanded on
-\d psql_serial_tab_id_seq
-\begin{center}
-Sequence "public.psql\_serial\_tab\_id\_seq"
-\end{center}
-
-\begin{tabular}{c|l}
-\multicolumn{2}{c}{\textit{Record 1}} \\
-\hline
-Type & integer \\
-Start & 1 \\
-Minimum & 1 \\
-Maximum & 2147483647 \\
-Increment & 1 \\
-Cycles? & no \\
-Cache & 1 \\
-\end{tabular}
-
-\noindent Owned by: public.psql\_serial\_tab.id \\
-
-\pset tuples_only true
-\df exp
-\begin{tabular}{c|l}
-\hline
-Schema & pg\_catalog \\
-Name & exp \\
-Result data type & double precision \\
-Argument data types & double precision \\
-Type & func \\
-\hline
-Schema & pg\_catalog \\
-Name & exp \\
-Result data type & numeric \\
-Argument data types & numeric \\
-Type & func \\
-\end{tabular}
-
-\noindent
-\pset tuples_only false
-prepare q as
- select 'some\more_text' as "a$title", E' #%&^~|\n{bar}' as "junk",
- ' ' as "empty", n as int
- from generate_series(1,2) as n;
-\pset expanded off
-\pset border 0
-execute q;
-\begin{tabular}{lllr}
-\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\
-\end{tabular}
-
-\noindent (2 rows) \\
-
-\pset border 1
-execute q;
-\begin{tabular}{l | l | l | r}
-\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\
-\end{tabular}
-
-\noindent (2 rows) \\
-
-\pset border 2
-execute q;
-\begin{tabular}{| l | l | l | r |}
-\hline
-\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\
-\hline
-\end{tabular}
-
-\noindent (2 rows) \\
-
-\pset border 3
-execute q;
-\begin{tabular}{| l | l | l | r |}
-\hline
-\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\
-\hline
-some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\
-\hline
-\end{tabular}
-
-\noindent (2 rows) \\
-
-\pset expanded on
-\pset border 0
-execute q;
-\begin{tabular}{cl}
-\multicolumn{2}{c}{\textit{Record 1}} \\
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\multicolumn{2}{c}{\textit{Record 2}} \\
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 2 \\
-\end{tabular}
-
-\noindent
-\pset border 1
-execute q;
-\begin{tabular}{c|l}
-\multicolumn{2}{c}{\textit{Record 1}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\multicolumn{2}{c}{\textit{Record 2}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 2 \\
-\end{tabular}
-
-\noindent
-\pset border 2
-execute q;
-\begin{tabular}{|c|l|}
-\hline
-\multicolumn{2}{|c|}{\textit{Record 1}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\hline
-\multicolumn{2}{|c|}{\textit{Record 2}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 2 \\
-\hline
-\end{tabular}
-
-\noindent
-\pset border 3
-execute q;
-\begin{tabular}{|c|l|}
-\hline
-\multicolumn{2}{|c|}{\textit{Record 1}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\hline
-\multicolumn{2}{|c|}{\textit{Record 2}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 2 \\
-\hline
-\end{tabular}
-
-\noindent
-deallocate q;
--- test latex-longtable output format
-\pset format latex-longtable
-\pset border 1
-\pset expanded off
-\d psql_serial_tab_id_seq
-\begin{longtable}{l | r | r | r | r | l | r}
-\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\
-\midrule
-\endfirsthead
-\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\
-\midrule
-\endhead
-\caption[Sequence "public.psql\_serial\_tab\_id\_seq" (Continued)]{Sequence "public.psql\_serial\_tab\_id\_seq"}
-\endfoot
-\caption[Sequence "public.psql\_serial\_tab\_id\_seq"]{Sequence "public.psql\_serial\_tab\_id\_seq"}
-\endlastfoot
-\raggedright{integer}
-&
-\raggedright{1}
-&
-\raggedright{1}
-&
-\raggedright{2147483647}
-&
-\raggedright{1}
-&
-\raggedright{no}
-&
-\raggedright{1} \tabularnewline
-\end{longtable}
-\pset tuples_only true
-\df exp
-\begin{longtable}{l | l | l | l | l}
-\raggedright{pg\_catalog}
-&
-\raggedright{exp}
-&
-\raggedright{double precision}
-&
-\raggedright{double precision}
-&
-\raggedright{func} \tabularnewline
-\raggedright{pg\_catalog}
-&
-\raggedright{exp}
-&
-\raggedright{numeric}
-&
-\raggedright{numeric}
-&
-\raggedright{func} \tabularnewline
-\end{longtable}
-\pset tuples_only false
-\pset expanded on
-\d psql_serial_tab_id_seq
-\begin{center}
-Sequence "public.psql\_serial\_tab\_id\_seq"
-\end{center}
-
-\begin{tabular}{c|l}
-\multicolumn{2}{c}{\textit{Record 1}} \\
-\hline
-Type & integer \\
-Start & 1 \\
-Minimum & 1 \\
-Maximum & 2147483647 \\
-Increment & 1 \\
-Cycles? & no \\
-Cache & 1 \\
-\end{tabular}
-
-\noindent Owned by: public.psql\_serial\_tab.id \\
-
-\pset tuples_only true
-\df exp
-\begin{tabular}{c|l}
-\hline
-Schema & pg\_catalog \\
-Name & exp \\
-Result data type & double precision \\
-Argument data types & double precision \\
-Type & func \\
-\hline
-Schema & pg\_catalog \\
-Name & exp \\
-Result data type & numeric \\
-Argument data types & numeric \\
-Type & func \\
-\end{tabular}
-
-\noindent
-\pset tuples_only false
-prepare q as
- select 'some\more_text' as "a$title", E' #%&^~|\n{bar}' as "junk",
- ' ' as "empty", n as int
- from generate_series(1,2) as n;
-\pset expanded off
-\pset border 0
-execute q;
-\begin{longtable}{lllr}
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\midrule
-\endfirsthead
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\midrule
-\endhead
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{1} \tabularnewline
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{2} \tabularnewline
-\end{longtable}
-\pset border 1
-execute q;
-\begin{longtable}{l | l | l | r}
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\midrule
-\endfirsthead
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\midrule
-\endhead
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{1} \tabularnewline
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{2} \tabularnewline
-\end{longtable}
-\pset border 2
-execute q;
-\begin{longtable}{| l | l | l | r |}
-\toprule
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\midrule
-\endfirsthead
-\toprule
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\midrule
-\endhead
-\bottomrule
-\endfoot
-\bottomrule
-\endlastfoot
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{1} \tabularnewline
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{2} \tabularnewline
-\end{longtable}
-\pset border 3
-execute q;
-\begin{longtable}{| l | l | l | r |}
-\toprule
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\midrule
-\endfirsthead
-\toprule
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\endhead
-\bottomrule
-\endfoot
-\bottomrule
-\endlastfoot
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{1} \tabularnewline
- \hline
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{2} \tabularnewline
- \hline
-\end{longtable}
-\pset tableattr lr
-execute q;
-\begin{longtable}{| p{lr\textwidth} | p{lr\textwidth} | p{lr\textwidth} | r |}
-\toprule
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\midrule
-\endfirsthead
-\toprule
-\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\
-\endhead
-\bottomrule
-\endfoot
-\bottomrule
-\endlastfoot
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{1} \tabularnewline
- \hline
-\raggedright{some\textbackslash{}more\_text}
-&
-\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}}
-&
-\raggedright{ }
-&
-\raggedright{2} \tabularnewline
- \hline
-\end{longtable}
-\pset tableattr
-\pset expanded on
-\pset border 0
-execute q;
-\begin{tabular}{cl}
-\multicolumn{2}{c}{\textit{Record 1}} \\
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\multicolumn{2}{c}{\textit{Record 2}} \\
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 2 \\
-\end{tabular}
-
-\noindent
-\pset border 1
-execute q;
-\begin{tabular}{c|l}
-\multicolumn{2}{c}{\textit{Record 1}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\multicolumn{2}{c}{\textit{Record 2}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 2 \\
-\end{tabular}
-
-\noindent
-\pset border 2
-execute q;
-\begin{tabular}{|c|l|}
-\hline
-\multicolumn{2}{|c|}{\textit{Record 1}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\hline
-\multicolumn{2}{|c|}{\textit{Record 2}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 2 \\
-\hline
-\end{tabular}
-
-\noindent
-\pset border 3
-execute q;
-\begin{tabular}{|c|l|}
-\hline
-\multicolumn{2}{|c|}{\textit{Record 1}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\hline
-\multicolumn{2}{|c|}{\textit{Record 2}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 2 \\
-\hline
-\end{tabular}
-
-\noindent
-\pset tableattr lr
-execute q;
-\begin{tabular}{|c|l|}
-\hline
-\multicolumn{2}{|c|}{\textit{Record 1}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 1 \\
-\hline
-\multicolumn{2}{|c|}{\textit{Record 2}} \\
-\hline
-a\$title & some\textbackslash{}more\_text \\
-junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\
-empty & \\
-int & 2 \\
-\hline
-\end{tabular}
-
-\noindent
-\pset tableattr
-deallocate q;
--- test troff-ms output format
-\pset format troff-ms
-\pset border 1
-\pset expanded off
-\d psql_serial_tab_id_seq
-.LP
-.DS C
-Sequence "public.psql_serial_tab_id_seq"
-.DE
-.LP
-.TS
-center;
-l | r | r | r | r | l | r.
-\fIType\fP \fIStart\fP \fIMinimum\fP \fIMaximum\fP \fIIncrement\fP \fICycles?\fP \fICache\fP
-_
-integer 1 1 2147483647 1 no 1
-.TE
-.DS L
-Owned by: public.psql_serial_tab.id
-.DE
-\pset tuples_only true
-\df exp
-.LP
-.TS
-center;
-l | l | l | l | l.
-pg_catalog exp double precision double precision func
-pg_catalog exp numeric numeric func
-.TE
-.DS L
-.DE
-\pset tuples_only false
-\pset expanded on
-\d psql_serial_tab_id_seq
-.LP
-.DS C
-Sequence "public.psql_serial_tab_id_seq"
-.DE
-.LP
-.TS
-center;
-c s.
-\fIRecord 1\fP
-_
-.T&
-c | l.
-Type integer
-Start 1
-Minimum 1
-Maximum 2147483647
-Increment 1
-Cycles? no
-Cache 1
-.TE
-.DS L
-Owned by: public.psql_serial_tab.id
-.DE
-\pset tuples_only true
-\df exp
-.LP
-.TS
-center;
-c l;
-_
-Schema pg_catalog
-Name exp
-Result data type double precision
-Argument data types double precision
-Type func
-_
-Schema pg_catalog
-Name exp
-Result data type numeric
-Argument data types numeric
-Type func
-.TE
-.DS L
-.DE
-\pset tuples_only false
-prepare q as
- select 'some\text' as "a\title", E' \n' as "junk",
- ' ' as "empty", n as int
- from generate_series(1,2) as n;
-\pset expanded off
-\pset border 0
-execute q;
-.LP
-.TS
-center;
-lllr.
-\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP
-_
-some\(rstext
- 1
-some\(rstext
- 2
-.TE
-.DS L
-(2 rows)
-.DE
-\pset border 1
-execute q;
-.LP
-.TS
-center;
-l | l | l | r.
-\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP
-_
-some\(rstext
- 1
-some\(rstext
- 2
-.TE
-.DS L
-(2 rows)
-.DE
-\pset border 2
-execute q;
-.LP
-.TS
-center box;
-l | l | l | r.
-\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP
-_
-some\(rstext
- 1
-some\(rstext
- 2
-.TE
-.DS L
-(2 rows)
-.DE
-\pset expanded on
-\pset border 0
-execute q;
-.LP
-.TS
-center;
-c s.
-\fIRecord 1\fP
-.T&
-c l.
-a\(rstitle some\(rstext
-junk
-
-empty
-int 1
-.T&
-c s.
-\fIRecord 2\fP
-.T&
-c l.
-a\(rstitle some\(rstext
-junk
-
-empty
-int 2
-.TE
-.DS L
-.DE
-\pset border 1
-execute q;
-.LP
-.TS
-center;
-c s.
-\fIRecord 1\fP
-_
-.T&
-c | l.
-a\(rstitle some\(rstext
-junk
-
-empty
-int 1
-.T&
-c s.
-\fIRecord 2\fP
-_
-.T&
-c | l.
-a\(rstitle some\(rstext
-junk
-
-empty
-int 2
-.TE
-.DS L
-.DE
-\pset border 2
-execute q;
-.LP
-.TS
-center box;
-c s.
-\fIRecord 1\fP
-_
-.T&
-c l.
-a\(rstitle some\(rstext
-junk
-
-empty
-int 1
-_
-.T&
-c s.
-\fIRecord 2\fP
-_
-.T&
-c l.
-a\(rstitle some\(rstext
-junk
-
-empty
-int 2
-.TE
-.DS L
-.DE
-deallocate q;
--- check ambiguous format requests
-\pset format a
-\pset: ambiguous abbreviation "a" matches both "aligned" and "asciidoc"
-\pset format l
--- clean up after output format tests
-drop table psql_serial_tab;
-\pset format aligned
-\pset expanded off
-\pset border 1
--- \echo and allied features
-\echo this is a test
-this is a test
-\echo -n without newline
-without newline\echo with -n newline
-with -n newline
-\echo '-n' with newline
--n with newline
-\set foo bar
-\echo foo = :foo
-foo = bar
-\qecho this is a test
-this is a test
-\qecho foo = :foo
-foo = bar
-\warn this is a test
-this is a test
-\warn foo = :foo
-foo = bar
--- tests for \if ... \endif
-\if true
- select 'okay';
- ?column?
-----------
- okay
-(1 row)
-
- select 'still okay';
- ?column?
-------------
- still okay
-(1 row)
-
-\else
- not okay;
- still not okay
-\endif
--- at this point query buffer should still have last valid line
-\g
- ?column?
-------------
- still okay
-(1 row)
-
--- \if should work okay on part of a query
-select
- \if true
- 42
- \else
- (bogus
- \endif
- forty_two;
- forty_two
------------
- 42
-(1 row)
-
-select \if false \\ (bogus \else \\ 42 \endif \\ forty_two;
- forty_two
------------
- 42
-(1 row)
-
--- test a large nested if using a variety of true-equivalents
-\if true
- \if 1
- \if yes
- \if on
- \echo 'all true'
-all true
- \else
- \echo 'should not print #1-1'
- \endif
- \else
- \echo 'should not print #1-2'
- \endif
- \else
- \echo 'should not print #1-3'
- \endif
-\else
- \echo 'should not print #1-4'
-\endif
--- test a variety of false-equivalents in an if/elif/else structure
-\if false
- \echo 'should not print #2-1'
-\elif 0
- \echo 'should not print #2-2'
-\elif no
- \echo 'should not print #2-3'
-\elif off
- \echo 'should not print #2-4'
-\else
- \echo 'all false'
-all false
-\endif
--- test true-false elif after initial true branch
-\if true
- \echo 'should print #2-5'
-should print #2-5
-\elif true
- \echo 'should not print #2-6'
-\elif false
- \echo 'should not print #2-7'
-\else
- \echo 'should not print #2-8'
-\endif
--- test simple true-then-else
-\if true
- \echo 'first thing true'
-first thing true
-\else
- \echo 'should not print #3-1'
-\endif
--- test simple false-true-else
-\if false
- \echo 'should not print #4-1'
-\elif true
- \echo 'second thing true'
-second thing true
-\else
- \echo 'should not print #5-1'
-\endif
--- invalid boolean expressions are false
-\if invalid boolean expression
-unrecognized value "invalid boolean expression" for "\if expression": Boolean expected
- \echo 'will not print #6-1'
-\else
- \echo 'will print anyway #6-2'
-will print anyway #6-2
-\endif
--- test un-matched endif
-\endif
-\endif: no matching \if
--- test un-matched else
-\else
-\else: no matching \if
--- test un-matched elif
-\elif
-\elif: no matching \if
--- test double-else error
-\if true
-\else
-\else
-\else: cannot occur after \else
-\endif
--- test elif out-of-order
-\if false
-\else
-\elif
-\elif: cannot occur after \else
-\endif
--- test if-endif matching in a false branch
-\if false
- \if false
- \echo 'should not print #7-1'
- \else
- \echo 'should not print #7-2'
- \endif
- \echo 'should not print #7-3'
-\else
- \echo 'should print #7-4'
-should print #7-4
-\endif
--- show that vars and backticks are not expanded when ignoring extra args
-\set foo bar
-\echo :foo :'foo' :"foo"
-bar 'bar' "bar"
-\pset fieldsep | `nosuchcommand` :foo :'foo' :"foo"
-\pset: extra argument "nosuchcommand" ignored
-\pset: extra argument ":foo" ignored
-\pset: extra argument ":'foo'" ignored
-\pset: extra argument ":"foo"" ignored
--- show that vars and backticks are not expanded and commands are ignored
--- when in a false if-branch
-\set try_to_quit '\\q'
-\if false
- :try_to_quit
- \echo `nosuchcommand` :foo :'foo' :"foo"
- \pset fieldsep | `nosuchcommand` :foo :'foo' :"foo"
- \a
- SELECT $1 \bind 1 \g
- \bind_named stmt1 1 2 \g
- \C arg1
- \c arg1 arg2 arg3 arg4
- \cd arg1
- \close stmt1
- \conninfo
- \copy arg1 arg2 arg3 arg4 arg5 arg6
- \copyright
- SELECT 1 as one, 2, 3 \crosstabview
- \dt arg1
- \e arg1 arg2
- \ef whole_line
- \ev whole_line
- \echo arg1 arg2 arg3 arg4 arg5
- \echo arg1
- \encoding arg1
- \errverbose
- \f arg1
- \g arg1
- \gx arg1
- \gexec
- SELECT 1 AS one \gset
- \h
- \?
- \html
- \i arg1
- \ir arg1
- \l arg1
- \lo arg1 arg2
-invalid command \lo
- \lo_list
- \o arg1
- \p
- SELECT 1 \parse
- \password arg1
- \prompt arg1 arg2
- \pset arg1 arg2
- \q
- \reset
- \s arg1
- \set arg1 arg2 arg3 arg4 arg5 arg6 arg7
- \setenv arg1 arg2
- \sf whole_line
- \sv whole_line
- \t arg1
- \T arg1
- \timing arg1
- \unset arg1
- \w arg1
- \watch arg1 arg2
- \x arg1
- -- \else here is eaten as part of OT_FILEPIPE argument
- \w |/no/such/file \else
- -- \endif here is eaten as part of whole-line argument
- \! whole_line \endif
- \z
-\else
- \echo 'should print #8-1'
-should print #8-1
-\endif
--- :{?...} defined variable test
-\set i 1
-\if :{?i}
- \echo '#9-1 ok, variable i is defined'
-#9-1 ok, variable i is defined
-\else
- \echo 'should not print #9-2'
-\endif
-\if :{?no_such_variable}
- \echo 'should not print #10-1'
-\else
- \echo '#10-2 ok, variable no_such_variable is not defined'
-#10-2 ok, variable no_such_variable is not defined
-\endif
-SELECT :{?i} AS i_is_defined;
- i_is_defined
---------------
- t
-(1 row)
-
-SELECT NOT :{?no_such_var} AS no_such_var_is_not_defined;
- no_such_var_is_not_defined
-----------------------------
- t
-(1 row)
-
--- SHOW_CONTEXT
-\set SHOW_CONTEXT never
-do $$
-begin
- raise notice 'foo';
- raise exception 'bar';
-end $$;
-NOTICE: foo
-ERROR: bar
-\set SHOW_CONTEXT errors
-do $$
-begin
- raise notice 'foo';
- raise exception 'bar';
-end $$;
-NOTICE: foo
-ERROR: bar
-CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE
-\set SHOW_CONTEXT always
-do $$
-begin
- raise notice 'foo';
- raise exception 'bar';
-end $$;
-NOTICE: foo
-CONTEXT: PL/pgSQL function inline_code_block line 3 at RAISE
-ERROR: bar
-CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE
--- test printing and clearing the query buffer
-SELECT 1;
- ?column?
-----------
- 1
-(1 row)
-
-\p
-SELECT 1;
-SELECT 2 \r
-\p
-SELECT 1;
-SELECT 3 \p
-SELECT 3
-UNION SELECT 4 \p
-SELECT 3
-UNION SELECT 4
-UNION SELECT 5
-ORDER BY 1;
- ?column?
-----------
- 3
- 4
- 5
-(3 rows)
-
-\r
-\p
-SELECT 3
-UNION SELECT 4
-UNION SELECT 5
-ORDER BY 1;
--- tests for special result variables
--- working query, 2 rows selected
-SELECT 1 AS stuff UNION SELECT 2;
- stuff
--------
- 1
- 2
-(2 rows)
-
-\echo 'error:' :ERROR
-error: false
-\echo 'error code:' :SQLSTATE
-error code: 00000
-\echo 'number of rows:' :ROW_COUNT
-number of rows: 2
--- syntax error
-SELECT 1 UNION;
-ERROR: syntax error at or near ";"
-LINE 1: SELECT 1 UNION;
- ^
-\echo 'error:' :ERROR
-error: true
-\echo 'error code:' :SQLSTATE
-error code: 42601
-\echo 'number of rows:' :ROW_COUNT
-number of rows: 0
-\echo 'last error message:' :LAST_ERROR_MESSAGE
-last error message: syntax error at or near ";"
-\echo 'last error code:' :LAST_ERROR_SQLSTATE
-last error code: 42601
--- empty query
-;
-\echo 'error:' :ERROR
-error: false
-\echo 'error code:' :SQLSTATE
-error code: 00000
-\echo 'number of rows:' :ROW_COUNT
-number of rows: 0
--- must have kept previous values
-\echo 'last error message:' :LAST_ERROR_MESSAGE
-last error message: syntax error at or near ";"
-\echo 'last error code:' :LAST_ERROR_SQLSTATE
-last error code: 42601
--- other query error
-DROP TABLE this_table_does_not_exist;
-ERROR: table "this_table_does_not_exist" does not exist
-\echo 'error:' :ERROR
-error: true
-\echo 'error code:' :SQLSTATE
-error code: 42P01
-\echo 'number of rows:' :ROW_COUNT
-number of rows: 0
-\echo 'last error message:' :LAST_ERROR_MESSAGE
-last error message: table "this_table_does_not_exist" does not exist
-\echo 'last error code:' :LAST_ERROR_SQLSTATE
-last error code: 42P01
--- nondefault verbosity error settings (except verbose, which is too unstable)
-\set VERBOSITY terse
-SELECT 1 UNION;
-ERROR: syntax error at or near ";" at character 15
-\echo 'error:' :ERROR
-error: true
-\echo 'error code:' :SQLSTATE
-error code: 42601
-\echo 'last error message:' :LAST_ERROR_MESSAGE
-last error message: syntax error at or near ";"
-\set VERBOSITY sqlstate
-SELECT 1/0;
-ERROR: 22012
-\echo 'error:' :ERROR
-error: true
-\echo 'error code:' :SQLSTATE
-error code: 22012
-\echo 'last error message:' :LAST_ERROR_MESSAGE
-last error message: division by zero
-\set VERBOSITY default
--- working \gdesc
-SELECT 3 AS three, 4 AS four \gdesc
- Column | Type
---------+---------
- three | integer
- four | integer
-(2 rows)
-
-\echo 'error:' :ERROR
-error: false
-\echo 'error code:' :SQLSTATE
-error code: 00000
-\echo 'number of rows:' :ROW_COUNT
-number of rows: 2
--- \gdesc with an error
-SELECT 4 AS \gdesc
-ERROR: syntax error at end of input
-LINE 1: SELECT 4 AS
- ^
-\echo 'error:' :ERROR
-error: true
-\echo 'error code:' :SQLSTATE
-error code: 42601
-\echo 'number of rows:' :ROW_COUNT
-number of rows: 0
-\echo 'last error message:' :LAST_ERROR_MESSAGE
-last error message: syntax error at end of input
-\echo 'last error code:' :LAST_ERROR_SQLSTATE
-last error code: 42601
--- check row count for a query with chunked results
-\set FETCH_COUNT 10
-select unique2 from tenk1 order by unique2 limit 19;
- unique2
----------
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
-(19 rows)
-
-\echo 'error:' :ERROR
-error: false
-\echo 'error code:' :SQLSTATE
-error code: 00000
-\echo 'number of rows:' :ROW_COUNT
-number of rows: 19
--- chunked results with an error after the first chunk
--- (we must disable parallel query here, else the behavior is timing-dependent)
-set debug_parallel_query = off;
-select 1/(15-unique2) from tenk1 order by unique2 limit 19;
- ?column?
-----------
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
-ERROR: division by zero
-\echo 'error:' :ERROR
-error: true
-\echo 'error code:' :SQLSTATE
-error code: 22012
-\echo 'number of rows:' :ROW_COUNT
-number of rows: 0
-\echo 'last error message:' :LAST_ERROR_MESSAGE
-last error message: division by zero
-\echo 'last error code:' :LAST_ERROR_SQLSTATE
-last error code: 22012
-reset debug_parallel_query;
-\unset FETCH_COUNT
-create schema testpart;
-create role regress_partitioning_role;
-alter schema testpart owner to regress_partitioning_role;
-set role to regress_partitioning_role;
--- run test inside own schema and hide other partitions
-set search_path to testpart;
-create table testtable_apple(logdate date);
-create table testtable_orange(logdate date);
-create index testtable_apple_index on testtable_apple(logdate);
-create index testtable_orange_index on testtable_orange(logdate);
-create table testpart_apple(logdate date) partition by range(logdate);
-create table testpart_orange(logdate date) partition by range(logdate);
-create index testpart_apple_index on testpart_apple(logdate);
-create index testpart_orange_index on testpart_orange(logdate);
--- only partition related object should be displayed
-\dP test*apple*
- List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table
-----------+----------------------+---------------------------+-------------------+-------------+----------------
- testpart | testpart_apple | regress_partitioning_role | partitioned table | |
- testpart | testpart_apple_index | regress_partitioning_role | partitioned index | | testpart_apple
-(2 rows)
-
-\dPt test*apple*
- List of partitioned tables
- Schema | Name | Owner | Parent name
-----------+----------------+---------------------------+-------------
- testpart | testpart_apple | regress_partitioning_role |
-(1 row)
-
-\dPi test*apple*
- List of partitioned indexes
- Schema | Name | Owner | Parent name | Table
-----------+----------------------+---------------------------+-------------+----------------
- testpart | testpart_apple_index | regress_partitioning_role | | testpart_apple
-(1 row)
-
-drop table testtable_apple;
-drop table testtable_orange;
-drop table testpart_apple;
-drop table testpart_orange;
-create table parent_tab (id int) partition by range (id);
-create index parent_index on parent_tab (id);
-create table child_0_10 partition of parent_tab
- for values from (0) to (10);
-create table child_10_20 partition of parent_tab
- for values from (10) to (20);
-create table child_20_30 partition of parent_tab
- for values from (20) to (30);
-insert into parent_tab values (generate_series(0,29));
-create table child_30_40 partition of parent_tab
-for values from (30) to (40)
- partition by range(id);
-create table child_30_35 partition of child_30_40
- for values from (30) to (35);
-create table child_35_40 partition of child_30_40
- for values from (35) to (40);
-insert into parent_tab values (generate_series(30,39));
-\dPt
- List of partitioned tables
- Schema | Name | Owner
-----------+------------+---------------------------
- testpart | parent_tab | regress_partitioning_role
-(1 row)
-
-\dPi
- List of partitioned indexes
- Schema | Name | Owner | Table
-----------+--------------+---------------------------+------------
- testpart | parent_index | regress_partitioning_role | parent_tab
-(1 row)
-
-\dP testpart.*
- List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table
-----------+--------------------+---------------------------+-------------------+--------------+-------------
- testpart | parent_tab | regress_partitioning_role | partitioned table | |
- testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab |
- testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab
- testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40
-(4 rows)
-
-\dP
- List of partitioned relations
- Schema | Name | Owner | Type | Table
-----------+--------------+---------------------------+-------------------+------------
- testpart | parent_tab | regress_partitioning_role | partitioned table |
- testpart | parent_index | regress_partitioning_role | partitioned index | parent_tab
-(2 rows)
-
-\dPtn
- List of partitioned tables
- Schema | Name | Owner | Parent name
-----------+-------------+---------------------------+-------------
- testpart | parent_tab | regress_partitioning_role |
- testpart | child_30_40 | regress_partitioning_role | parent_tab
-(2 rows)
-
-\dPin
- List of partitioned indexes
- Schema | Name | Owner | Parent name | Table
-----------+--------------------+---------------------------+--------------+-------------
- testpart | parent_index | regress_partitioning_role | | parent_tab
- testpart | child_30_40_id_idx | regress_partitioning_role | parent_index | child_30_40
-(2 rows)
-
-\dPn
- List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table
-----------+--------------------+---------------------------+-------------------+--------------+-------------
- testpart | parent_tab | regress_partitioning_role | partitioned table | |
- testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab |
- testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab
- testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40
-(4 rows)
-
-\dPn testpart.*
- List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table
-----------+--------------------+---------------------------+-------------------+--------------+-------------
- testpart | parent_tab | regress_partitioning_role | partitioned table | |
- testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab |
- testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab
- testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40
-(4 rows)
-
-drop table parent_tab cascade;
-drop schema testpart;
-set search_path to default;
-set role to default;
-drop role regress_partitioning_role;
--- \d on toast table (use pg_statistic's toast table, which has a known name)
-\d pg_toast.pg_toast_2619
-TOAST table "pg_toast.pg_toast_2619"
- Column | Type
-------------+---------
- chunk_id | oid
- chunk_seq | integer
- chunk_data | bytea
-Owning table: "pg_catalog.pg_statistic"
-Indexes:
- "pg_toast_2619_index" PRIMARY KEY, btree (chunk_id, chunk_seq)
-
--- check printing info about access methods
-\dA
-List of access methods
- Name | Type
---------+-------
- brin | Index
- btree | Index
- gin | Index
- gist | Index
- hash | Index
- heap | Table
- heap2 | Table
- spgist | Index
-(8 rows)
-
-\dA *
-List of access methods
- Name | Type
---------+-------
- brin | Index
- btree | Index
- gin | Index
- gist | Index
- hash | Index
- heap | Table
- heap2 | Table
- spgist | Index
-(8 rows)
-
-\dA h*
-List of access methods
- Name | Type
--------+-------
- hash | Index
- heap | Table
- heap2 | Table
-(3 rows)
-
-\dA foo
-List of access methods
- Name | Type
-------+------
-(0 rows)
-
-\dA foo bar
-List of access methods
- Name | Type
-------+------
-(0 rows)
-
-\dA: extra argument "bar" ignored
-\dA+
- List of access methods
- Name | Type | Handler | Description
---------+-------+----------------------+----------------------------------------
- brin | Index | brinhandler | block range index (BRIN) access method
- btree | Index | bthandler | b-tree index access method
- gin | Index | ginhandler | GIN index access method
- gist | Index | gisthandler | GiST index access method
- hash | Index | hashhandler | hash index access method
- heap | Table | heap_tableam_handler | heap table access method
- heap2 | Table | heap_tableam_handler |
- spgist | Index | spghandler | SP-GiST index access method
-(8 rows)
-
-\dA+ *
- List of access methods
- Name | Type | Handler | Description
---------+-------+----------------------+----------------------------------------
- brin | Index | brinhandler | block range index (BRIN) access method
- btree | Index | bthandler | b-tree index access method
- gin | Index | ginhandler | GIN index access method
- gist | Index | gisthandler | GiST index access method
- hash | Index | hashhandler | hash index access method
- heap | Table | heap_tableam_handler | heap table access method
- heap2 | Table | heap_tableam_handler |
- spgist | Index | spghandler | SP-GiST index access method
-(8 rows)
-
-\dA+ h*
- List of access methods
- Name | Type | Handler | Description
--------+-------+----------------------+--------------------------
- hash | Index | hashhandler | hash index access method
- heap | Table | heap_tableam_handler | heap table access method
- heap2 | Table | heap_tableam_handler |
-(3 rows)
-
-\dA+ foo
- List of access methods
- Name | Type | Handler | Description
-------+------+---------+-------------
-(0 rows)
-
-\dAc brin pg*.oid*
- List of operator classes
- AM | Input type | Storage type | Operator class | Default?
-------+------------+--------------+----------------------+----------
- brin | oid | | oid_bloom_ops | no
- brin | oid | | oid_minmax_multi_ops | no
- brin | oid | | oid_minmax_ops | yes
-(3 rows)
-
-\dAf spgist
- List of operator families
- AM | Operator family | Applicable types
---------+-----------------+------------------
- spgist | box_ops | box
- spgist | kd_point_ops | point
- spgist | network_ops | inet
- spgist | poly_ops | polygon
- spgist | quad_point_ops | point
- spgist | range_ops | anyrange
- spgist | text_ops | text
-(7 rows)
-
-\dAf btree int4
- List of operator families
- AM | Operator family | Applicable types
--------+-----------------+---------------------------
- btree | integer_ops | smallint, integer, bigint
-(1 row)
-
-\dAo+ btree float_ops
- List of operators of operator families
- AM | Operator family | Operator | Strategy | Purpose | Sort opfamily
--------+-----------------+---------------------------------------+----------+---------+---------------
- btree | float_ops | <(double precision,double precision) | 1 | search |
- btree | float_ops | <=(double precision,double precision) | 2 | search |
- btree | float_ops | =(double precision,double precision) | 3 | search |
- btree | float_ops | >=(double precision,double precision) | 4 | search |
- btree | float_ops | >(double precision,double precision) | 5 | search |
- btree | float_ops | <(real,real) | 1 | search |
- btree | float_ops | <=(real,real) | 2 | search |
- btree | float_ops | =(real,real) | 3 | search |
- btree | float_ops | >=(real,real) | 4 | search |
- btree | float_ops | >(real,real) | 5 | search |
- btree | float_ops | <(double precision,real) | 1 | search |
- btree | float_ops | <=(double precision,real) | 2 | search |
- btree | float_ops | =(double precision,real) | 3 | search |
- btree | float_ops | >=(double precision,real) | 4 | search |
- btree | float_ops | >(double precision,real) | 5 | search |
- btree | float_ops | <(real,double precision) | 1 | search |
- btree | float_ops | <=(real,double precision) | 2 | search |
- btree | float_ops | =(real,double precision) | 3 | search |
- btree | float_ops | >=(real,double precision) | 4 | search |
- btree | float_ops | >(real,double precision) | 5 | search |
-(20 rows)
-
-\dAo * pg_catalog.jsonb_path_ops
- List of operators of operator families
- AM | Operator family | Operator | Strategy | Purpose
------+-----------------+--------------------+----------+---------
- gin | jsonb_path_ops | @>(jsonb,jsonb) | 7 | search
- gin | jsonb_path_ops | @?(jsonb,jsonpath) | 15 | search
- gin | jsonb_path_ops | @@(jsonb,jsonpath) | 16 | search
-(3 rows)
-
-\dAp+ btree float_ops
- List of support functions of operator families
- AM | Operator family | Registered left type | Registered right type | Number | Function
--------+-----------------+----------------------+-----------------------+--------+------------------------------------------------------------------------------
- btree | float_ops | double precision | double precision | 1 | btfloat8cmp(double precision,double precision)
- btree | float_ops | double precision | double precision | 2 | btfloat8sortsupport(internal)
- btree | float_ops | double precision | double precision | 3 | in_range(double precision,double precision,double precision,boolean,boolean)
- btree | float_ops | real | real | 1 | btfloat4cmp(real,real)
- btree | float_ops | real | real | 2 | btfloat4sortsupport(internal)
- btree | float_ops | double precision | real | 1 | btfloat84cmp(double precision,real)
- btree | float_ops | real | double precision | 1 | btfloat48cmp(real,double precision)
- btree | float_ops | real | double precision | 3 | in_range(real,real,double precision,boolean,boolean)
-(8 rows)
-
-\dAp * pg_catalog.uuid_ops
- List of support functions of operator families
- AM | Operator family | Registered left type | Registered right type | Number | Function
--------+-----------------+----------------------+-----------------------+--------+--------------------
- btree | uuid_ops | uuid | uuid | 1 | uuid_cmp
- btree | uuid_ops | uuid | uuid | 2 | uuid_sortsupport
- btree | uuid_ops | uuid | uuid | 4 | btequalimage
- hash | uuid_ops | uuid | uuid | 1 | uuid_hash
- hash | uuid_ops | uuid | uuid | 2 | uuid_hash_extended
-(5 rows)
-
--- check \dconfig
-set work_mem = 10240;
-\dconfig work_mem
-List of configuration parameters
- Parameter | Value
------------+-------
- work_mem | 10MB
-(1 row)
-
-\dconfig+ work*
- List of configuration parameters
- Parameter | Value | Type | Context | Access privileges
------------+-------+---------+---------+-------------------
- work_mem | 10MB | integer | user |
-(1 row)
-
-reset work_mem;
--- check \df, \do with argument specifications
-\df *sqrt
- List of functions
- Schema | Name | Result data type | Argument data types | Type
-------------+--------------+------------------+---------------------+------
- pg_catalog | dsqrt | double precision | double precision | func
- pg_catalog | numeric_sqrt | numeric | numeric | func
- pg_catalog | sqrt | double precision | double precision | func
- pg_catalog | sqrt | numeric | numeric | func
-(4 rows)
-
-\df *sqrt num*
- List of functions
- Schema | Name | Result data type | Argument data types | Type
-------------+--------------+------------------+---------------------+------
- pg_catalog | numeric_sqrt | numeric | numeric | func
- pg_catalog | sqrt | numeric | numeric | func
-(2 rows)
-
-\df int*pl
- List of functions
- Schema | Name | Result data type | Argument data types | Type
-------------+-------------+------------------+---------------------+------
- pg_catalog | int24pl | integer | smallint, integer | func
- pg_catalog | int28pl | bigint | smallint, bigint | func
- pg_catalog | int2pl | smallint | smallint, smallint | func
- pg_catalog | int42pl | integer | integer, smallint | func
- pg_catalog | int48pl | bigint | integer, bigint | func
- pg_catalog | int4pl | integer | integer, integer | func
- pg_catalog | int82pl | bigint | bigint, smallint | func
- pg_catalog | int84pl | bigint | bigint, integer | func
- pg_catalog | int8pl | bigint | bigint, bigint | func
- pg_catalog | interval_pl | interval | interval, interval | func
-(10 rows)
-
-\df int*pl int4
- List of functions
- Schema | Name | Result data type | Argument data types | Type
-------------+---------+------------------+---------------------+------
- pg_catalog | int42pl | integer | integer, smallint | func
- pg_catalog | int48pl | bigint | integer, bigint | func
- pg_catalog | int4pl | integer | integer, integer | func
-(3 rows)
-
-\df int*pl * pg_catalog.int8
- List of functions
- Schema | Name | Result data type | Argument data types | Type
-------------+---------+------------------+---------------------+------
- pg_catalog | int28pl | bigint | smallint, bigint | func
- pg_catalog | int48pl | bigint | integer, bigint | func
- pg_catalog | int8pl | bigint | bigint, bigint | func
-(3 rows)
-
-\df acl* aclitem[]
- List of functions
- Schema | Name | Result data type | Argument data types | Type
-------------+-------------+------------------+----------------------------------------------------------------------------------------------------+------
- pg_catalog | aclcontains | boolean | aclitem[], aclitem | func
- pg_catalog | aclexplode | SETOF record | acl aclitem[], OUT grantor oid, OUT grantee oid, OUT privilege_type text, OUT is_grantable boolean | func
- pg_catalog | aclinsert | aclitem[] | aclitem[], aclitem | func
- pg_catalog | aclremove | aclitem[] | aclitem[], aclitem | func
-(4 rows)
-
-\df has_database_privilege oid text
- List of functions
- Schema | Name | Result data type | Argument data types | Type
-------------+------------------------+------------------+---------------------+------
- pg_catalog | has_database_privilege | boolean | oid, text | func
- pg_catalog | has_database_privilege | boolean | oid, text, text | func
-(2 rows)
-
-\df has_database_privilege oid text -
- List of functions
- Schema | Name | Result data type | Argument data types | Type
-------------+------------------------+------------------+---------------------+------
- pg_catalog | has_database_privilege | boolean | oid, text | func
-(1 row)
-
-\dfa bit* small*
- List of functions
- Schema | Name | Result data type | Argument data types | Type
-------------+---------+------------------+---------------------+------
- pg_catalog | bit_and | smallint | smallint | agg
- pg_catalog | bit_or | smallint | smallint | agg
- pg_catalog | bit_xor | smallint | smallint | agg
-(3 rows)
-
-\df *._pg_expandarray
- List of functions
- Schema | Name | Result data type | Argument data types | Type
---------------------+-----------------+------------------+-------------------------------------------+------
- information_schema | _pg_expandarray | SETOF record | anyarray, OUT x anyelement, OUT n integer | func
-(1 row)
-
-\do - pg_catalog.int4
- List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description
-------------+------+---------------+----------------+-------------+-------------
- pg_catalog | - | | integer | integer | negate
-(1 row)
-
-\do && anyarray *
- List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description
-------------+------+---------------+----------------+-------------+-------------
- pg_catalog | && | anyarray | anyarray | boolean | overlaps
-(1 row)
-
--- check \df+
--- we have to use functions with a predictable owner name, so make a role
-create role regress_psql_user superuser;
-begin;
-set session authorization regress_psql_user;
-create function psql_df_internal (float8)
- returns float8
- language internal immutable parallel safe strict
- as 'dsin';
-create function psql_df_sql (x integer)
- returns integer
- security definer
- begin atomic select x + 1; end;
-create function psql_df_plpgsql ()
- returns void
- language plpgsql
- as $$ begin return; end; $$;
-comment on function psql_df_plpgsql () is 'some comment';
-\df+ psql_df_*
- List of functions
- Schema | Name | Result data type | Argument data types | Type | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description
---------+------------------+------------------+---------------------+------+------------+----------+-------------------+----------+-------------------+----------+---------------+--------------
- public | psql_df_internal | double precision | double precision | func | immutable | safe | regress_psql_user | invoker | | internal | dsin |
- public | psql_df_plpgsql | void | | func | volatile | unsafe | regress_psql_user | invoker | | plpgsql | | some comment
- public | psql_df_sql | integer | x integer | func | volatile | unsafe | regress_psql_user | definer | | sql | |
-(3 rows)
-
-rollback;
-drop role regress_psql_user;
--- check \sf
-\sf information_schema._pg_index_position
-CREATE OR REPLACE FUNCTION information_schema._pg_index_position(oid, smallint)
- RETURNS integer
- LANGUAGE sql
- STABLE STRICT
-BEGIN ATOMIC
- SELECT (ss.a).n AS n
- FROM ( SELECT information_schema._pg_expandarray(pg_index.indkey) AS a
- FROM pg_index
- WHERE (pg_index.indexrelid = $1)) ss
- WHERE ((ss.a).x = $2);
-END
-\sf+ information_schema._pg_index_position
- CREATE OR REPLACE FUNCTION information_schema._pg_index_position(oid, smallint)
- RETURNS integer
- LANGUAGE sql
- STABLE STRICT
-1 BEGIN ATOMIC
-2 SELECT (ss.a).n AS n
-3 FROM ( SELECT information_schema._pg_expandarray(pg_index.indkey) AS a
-4 FROM pg_index
-5 WHERE (pg_index.indexrelid = $1)) ss
-6 WHERE ((ss.a).x = $2);
-7 END
-\sf+ interval_pl_time
- CREATE OR REPLACE FUNCTION pg_catalog.interval_pl_time(interval, time without time zone)
- RETURNS time without time zone
- LANGUAGE sql
- IMMUTABLE PARALLEL SAFE STRICT COST 1
-1 RETURN ($2 + $1)
-\sf ts_debug(text);
-CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[])
- RETURNS SETOF record
- LANGUAGE sql
- STABLE PARALLEL SAFE STRICT
-BEGIN ATOMIC
- SELECT ts_debug.alias,
- ts_debug.description,
- ts_debug.token,
- ts_debug.dictionaries,
- ts_debug.dictionary,
- ts_debug.lexemes
- FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes);
-END
-\sf+ ts_debug(text)
- CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[])
- RETURNS SETOF record
- LANGUAGE sql
- STABLE PARALLEL SAFE STRICT
-1 BEGIN ATOMIC
-2 SELECT ts_debug.alias,
-3 ts_debug.description,
-4 ts_debug.token,
-5 ts_debug.dictionaries,
-6 ts_debug.dictionary,
-7 ts_debug.lexemes
-8 FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes);
-9 END
--- AUTOCOMMIT
-CREATE TABLE ac_test (a int);
-\set AUTOCOMMIT off
-INSERT INTO ac_test VALUES (1);
-COMMIT;
-SELECT * FROM ac_test;
- a
----
- 1
-(1 row)
-
-COMMIT;
-INSERT INTO ac_test VALUES (2);
-ROLLBACK;
-SELECT * FROM ac_test;
- a
----
- 1
-(1 row)
-
-COMMIT;
-BEGIN;
-INSERT INTO ac_test VALUES (3);
-COMMIT;
-SELECT * FROM ac_test;
- a
----
- 1
- 3
-(2 rows)
-
-COMMIT;
-BEGIN;
-INSERT INTO ac_test VALUES (4);
-ROLLBACK;
-SELECT * FROM ac_test;
- a
----
- 1
- 3
-(2 rows)
-
-COMMIT;
-\set AUTOCOMMIT on
-DROP TABLE ac_test;
-SELECT * FROM ac_test; -- should be gone now
-ERROR: relation "ac_test" does not exist
-LINE 1: SELECT * FROM ac_test;
- ^
--- ON_ERROR_ROLLBACK
-\set ON_ERROR_ROLLBACK on
-CREATE TABLE oer_test (a int);
-BEGIN;
-INSERT INTO oer_test VALUES (1);
-INSERT INTO oer_test VALUES ('foo');
-ERROR: invalid input syntax for type integer: "foo"
-LINE 1: INSERT INTO oer_test VALUES ('foo');
- ^
-INSERT INTO oer_test VALUES (3);
-COMMIT;
-SELECT * FROM oer_test;
- a
----
- 1
- 3
-(2 rows)
-
-BEGIN;
-INSERT INTO oer_test VALUES (4);
-ROLLBACK;
-SELECT * FROM oer_test;
- a
----
- 1
- 3
-(2 rows)
-
-BEGIN;
-INSERT INTO oer_test VALUES (5);
-COMMIT AND CHAIN;
-INSERT INTO oer_test VALUES (6);
-COMMIT;
-SELECT * FROM oer_test;
- a
----
- 1
- 3
- 5
- 6
-(4 rows)
-
-DROP TABLE oer_test;
-\set ON_ERROR_ROLLBACK off
--- ECHO errors
-\set ECHO errors
-ERROR: relation "notexists" does not exist
-LINE 1: SELECT * FROM notexists;
- ^
-STATEMENT: SELECT * FROM notexists;
---
--- combined queries
---
-CREATE FUNCTION warn(msg TEXT) RETURNS BOOLEAN LANGUAGE plpgsql
-AS $$
- BEGIN RAISE NOTICE 'warn %', msg ; RETURN TRUE ; END
-$$;
--- show both
-SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ;
-NOTICE: warn 1.5
-CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE
- one
------
- 1
-(1 row)
-
- warn
-------
- t
-(1 row)
-
- two
------
- 2
-(1 row)
-
--- \gset applies to last query only
-SELECT 3 AS three \; SELECT warn('3.5') \; SELECT 4 AS four \gset
-NOTICE: warn 3.5
-CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE
- three
--------
- 3
-(1 row)
-
- warn
-------
- t
-(1 row)
-
-\echo :three :four
-:three 4
--- syntax error stops all processing
-SELECT 5 \; SELECT 6 + \; SELECT warn('6.5') \; SELECT 7 ;
-ERROR: syntax error at or near ";"
-LINE 1: SELECT 5 ; SELECT 6 + ; SELECT warn('6.5') ; SELECT 7 ;
- ^
--- with aborted transaction, stop on first error
-BEGIN \; SELECT 8 AS eight \; SELECT 9/0 AS nine \; ROLLBACK \; SELECT 10 AS ten ;
- eight
--------
- 8
-(1 row)
-
-ERROR: division by zero
--- close previously aborted transaction
-ROLLBACK;
--- miscellaneous SQL commands
--- (non SELECT output is sent to stderr, thus is not shown in expected results)
-SELECT 'ok' AS "begin" \;
-CREATE TABLE psql_comics(s TEXT) \;
-INSERT INTO psql_comics VALUES ('Calvin'), ('hobbes') \;
-COPY psql_comics FROM STDIN \;
-UPDATE psql_comics SET s = 'Hobbes' WHERE s = 'hobbes' \;
-DELETE FROM psql_comics WHERE s = 'Moe' \;
-COPY psql_comics TO STDOUT \;
-TRUNCATE psql_comics \;
-DROP TABLE psql_comics \;
-SELECT 'ok' AS "done" ;
- begin
--------
- ok
-(1 row)
-
-Calvin
-Susie
-Hobbes
- done
-------
- ok
-(1 row)
-
-\set SHOW_ALL_RESULTS off
-SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ;
-NOTICE: warn 1.5
-CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE
- two
------
- 2
-(1 row)
-
-\set SHOW_ALL_RESULTS on
-DROP FUNCTION warn(TEXT);
---
--- \g with file
---
-\getenv abs_builddir PG_ABS_BUILDDIR
-\set g_out_file :abs_builddir '/results/psql-output1'
-CREATE TEMPORARY TABLE reload_output(
- lineno int NOT NULL GENERATED ALWAYS AS IDENTITY,
- line text
-);
-SELECT 1 AS a \g :g_out_file
-COPY reload_output(line) FROM :'g_out_file';
-SELECT 2 AS b\; SELECT 3 AS c\; SELECT 4 AS d \g :g_out_file
-COPY reload_output(line) FROM :'g_out_file';
-COPY (SELECT 'foo') TO STDOUT \; COPY (SELECT 'bar') TO STDOUT \g :g_out_file
-COPY reload_output(line) FROM :'g_out_file';
-SELECT line FROM reload_output ORDER BY lineno;
- line
----------
- a
- ---
- 1
- (1 row)
-
- b
- ---
- 2
- (1 row)
-
- c
- ---
- 3
- (1 row)
-
- d
- ---
- 4
- (1 row)
-
- foo
- bar
-(22 rows)
-
-TRUNCATE TABLE reload_output;
---
--- \o with file
---
-\set o_out_file :abs_builddir '/results/psql-output2'
-\o :o_out_file
-SELECT max(unique1) FROM onek;
-SELECT 1 AS a\; SELECT 2 AS b\; SELECT 3 AS c;
--- COPY TO file
--- The data goes to :g_out_file and the status to :o_out_file
-\set QUIET false
-COPY (SELECT unique1 FROM onek ORDER BY unique1 LIMIT 10) TO :'g_out_file';
--- DML command status
-UPDATE onek SET unique1 = unique1 WHERE false;
-\set QUIET true
-\o
--- Check the contents of the files generated.
-COPY reload_output(line) FROM :'g_out_file';
-SELECT line FROM reload_output ORDER BY lineno;
- line
-------
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
-(10 rows)
-
-TRUNCATE TABLE reload_output;
-COPY reload_output(line) FROM :'o_out_file';
-SELECT line FROM reload_output ORDER BY lineno;
- line
-----------
- max
- -----
- 999
- (1 row)
-
- a
- ---
- 1
- (1 row)
-
- b
- ---
- 2
- (1 row)
-
- c
- ---
- 3
- (1 row)
-
- COPY 10
- UPDATE 0
-(22 rows)
-
-TRUNCATE TABLE reload_output;
--- Multiple COPY TO STDOUT with output file
-\o :o_out_file
--- The data goes to :o_out_file with no status generated.
-COPY (SELECT 'foo1') TO STDOUT \; COPY (SELECT 'bar1') TO STDOUT;
--- Combination of \o and \g file with multiple COPY queries.
-COPY (SELECT 'foo2') TO STDOUT \; COPY (SELECT 'bar2') TO STDOUT \g :g_out_file
-\o
--- Check the contents of the files generated.
-COPY reload_output(line) FROM :'g_out_file';
-SELECT line FROM reload_output ORDER BY lineno;
- line
-------
- foo2
- bar2
-(2 rows)
-
-TRUNCATE TABLE reload_output;
-COPY reload_output(line) FROM :'o_out_file';
-SELECT line FROM reload_output ORDER BY lineno;
- line
-------
- foo1
- bar1
-(2 rows)
-
-DROP TABLE reload_output;
---
--- AUTOCOMMIT and combined queries
---
-\set AUTOCOMMIT off
-\echo '# AUTOCOMMIT:' :AUTOCOMMIT
-# AUTOCOMMIT: off
--- BEGIN is now implicit
-CREATE TABLE foo(s TEXT) \;
-ROLLBACK;
-CREATE TABLE foo(s TEXT) \;
-INSERT INTO foo(s) VALUES ('hello'), ('world') \;
-COMMIT;
-DROP TABLE foo \;
-ROLLBACK;
--- table foo is still there
-SELECT * FROM foo ORDER BY 1 \;
-DROP TABLE foo \;
-COMMIT;
- s
--------
- hello
- world
-(2 rows)
-
-\set AUTOCOMMIT on
-\echo '# AUTOCOMMIT:' :AUTOCOMMIT
-# AUTOCOMMIT: on
--- BEGIN now explicit for multi-statement transactions
-BEGIN \;
-CREATE TABLE foo(s TEXT) \;
-INSERT INTO foo(s) VALUES ('hello'), ('world') \;
-COMMIT;
-BEGIN \;
-DROP TABLE foo \;
-ROLLBACK \;
--- implicit transactions
-SELECT * FROM foo ORDER BY 1 \;
-DROP TABLE foo;
- s
--------
- hello
- world
-(2 rows)
-
---
--- test ON_ERROR_ROLLBACK and combined queries
---
-CREATE FUNCTION psql_error(msg TEXT) RETURNS BOOLEAN AS $$
- BEGIN
- RAISE EXCEPTION 'error %', msg;
- END;
-$$ LANGUAGE plpgsql;
-\set ON_ERROR_ROLLBACK on
-\echo '# ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK
-# ON_ERROR_ROLLBACK: on
-\echo '# AUTOCOMMIT:' :AUTOCOMMIT
-# AUTOCOMMIT: on
-BEGIN;
-CREATE TABLE bla(s NO_SUCH_TYPE); -- fails
-ERROR: type "no_such_type" does not exist
-LINE 1: CREATE TABLE bla(s NO_SUCH_TYPE);
- ^
-CREATE TABLE bla(s TEXT); -- succeeds
-SELECT psql_error('oops!'); -- fails
-ERROR: error oops!
-CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE
-INSERT INTO bla VALUES ('Calvin'), ('Hobbes');
-COMMIT;
-SELECT * FROM bla ORDER BY 1;
- s
---------
- Calvin
- Hobbes
-(2 rows)
-
-BEGIN;
-INSERT INTO bla VALUES ('Susie'); -- succeeds
--- now with combined queries
-INSERT INTO bla VALUES ('Rosalyn') \; -- will rollback
-SELECT 'before error' AS show \; -- will show nevertheless!
- SELECT psql_error('boum!') \; -- failure
- SELECT 'after error' AS noshow; -- hidden by preceding error
- show
---------------
- before error
-(1 row)
-
-ERROR: error boum!
-CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE
-INSERT INTO bla(s) VALUES ('Moe') \; -- will rollback
- SELECT psql_error('bam!');
-ERROR: error bam!
-CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE
-INSERT INTO bla VALUES ('Miss Wormwood'); -- succeeds
-COMMIT;
-SELECT * FROM bla ORDER BY 1;
- s
----------------
- Calvin
- Hobbes
- Miss Wormwood
- Susie
-(4 rows)
-
--- some with autocommit off
-\set AUTOCOMMIT off
-\echo '# AUTOCOMMIT:' :AUTOCOMMIT
-# AUTOCOMMIT: off
--- implicit BEGIN
-INSERT INTO bla VALUES ('Dad'); -- succeeds
-SELECT psql_error('bad!'); -- implicit partial rollback
-ERROR: error bad!
-CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE
-INSERT INTO bla VALUES ('Mum') \; -- will rollback
-SELECT COUNT(*) AS "#mum"
-FROM bla WHERE s = 'Mum' \; -- but be counted here
-SELECT psql_error('bad!'); -- implicit partial rollback
- #mum
-------
- 1
-(1 row)
-
-ERROR: error bad!
-CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE
-COMMIT;
-SELECT COUNT(*) AS "#mum"
-FROM bla WHERE s = 'Mum' \; -- no mum here
-SELECT * FROM bla ORDER BY 1;
- #mum
-------
- 0
-(1 row)
-
- s
----------------
- Calvin
- Dad
- Hobbes
- Miss Wormwood
- Susie
-(5 rows)
-
-COMMIT;
--- reset all
-\set AUTOCOMMIT on
-\set ON_ERROR_ROLLBACK off
-\echo '# final ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK
-# final ON_ERROR_ROLLBACK: off
-DROP TABLE bla;
-DROP FUNCTION psql_error;
--- check describing invalid multipart names
-\dA regression.heap
-improper qualified name (too many dotted names): regression.heap
-\dA nonesuch.heap
-improper qualified name (too many dotted names): nonesuch.heap
-\dt host.regression.pg_catalog.pg_class
-improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class
-\dt |.pg_catalog.pg_class
-cross-database references are not implemented: |.pg_catalog.pg_class
-\dt nonesuch.pg_catalog.pg_class
-cross-database references are not implemented: nonesuch.pg_catalog.pg_class
-\da host.regression.pg_catalog.sum
-improper qualified name (too many dotted names): host.regression.pg_catalog.sum
-\da +.pg_catalog.sum
-cross-database references are not implemented: +.pg_catalog.sum
-\da nonesuch.pg_catalog.sum
-cross-database references are not implemented: nonesuch.pg_catalog.sum
-\dAc nonesuch.brin
-improper qualified name (too many dotted names): nonesuch.brin
-\dAc regression.brin
-improper qualified name (too many dotted names): regression.brin
-\dAf nonesuch.brin
-improper qualified name (too many dotted names): nonesuch.brin
-\dAf regression.brin
-improper qualified name (too many dotted names): regression.brin
-\dAo nonesuch.brin
-improper qualified name (too many dotted names): nonesuch.brin
-\dAo regression.brin
-improper qualified name (too many dotted names): regression.brin
-\dAp nonesuch.brin
-improper qualified name (too many dotted names): nonesuch.brin
-\dAp regression.brin
-improper qualified name (too many dotted names): regression.brin
-\db nonesuch.pg_default
-improper qualified name (too many dotted names): nonesuch.pg_default
-\db regression.pg_default
-improper qualified name (too many dotted names): regression.pg_default
-\dc host.regression.public.conversion
-improper qualified name (too many dotted names): host.regression.public.conversion
-\dc (.public.conversion
-cross-database references are not implemented: (.public.conversion
-\dc nonesuch.public.conversion
-cross-database references are not implemented: nonesuch.public.conversion
-\dC host.regression.pg_catalog.int8
-improper qualified name (too many dotted names): host.regression.pg_catalog.int8
-\dC ).pg_catalog.int8
-cross-database references are not implemented: ).pg_catalog.int8
-\dC nonesuch.pg_catalog.int8
-cross-database references are not implemented: nonesuch.pg_catalog.int8
-\dd host.regression.pg_catalog.pg_class
-improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class
-\dd [.pg_catalog.pg_class
-cross-database references are not implemented: [.pg_catalog.pg_class
-\dd nonesuch.pg_catalog.pg_class
-cross-database references are not implemented: nonesuch.pg_catalog.pg_class
-\dD host.regression.public.gtestdomain1
-improper qualified name (too many dotted names): host.regression.public.gtestdomain1
-\dD ].public.gtestdomain1
-cross-database references are not implemented: ].public.gtestdomain1
-\dD nonesuch.public.gtestdomain1
-cross-database references are not implemented: nonesuch.public.gtestdomain1
-\ddp host.regression.pg_catalog.pg_class
-improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class
-\ddp {.pg_catalog.pg_class
-cross-database references are not implemented: {.pg_catalog.pg_class
-\ddp nonesuch.pg_catalog.pg_class
-cross-database references are not implemented: nonesuch.pg_catalog.pg_class
-\dE host.regression.public.ft
-improper qualified name (too many dotted names): host.regression.public.ft
-\dE }.public.ft
-cross-database references are not implemented: }.public.ft
-\dE nonesuch.public.ft
-cross-database references are not implemented: nonesuch.public.ft
-\di host.regression.public.tenk1_hundred
-improper qualified name (too many dotted names): host.regression.public.tenk1_hundred
-\di ..public.tenk1_hundred
-improper qualified name (too many dotted names): ..public.tenk1_hundred
-\di nonesuch.public.tenk1_hundred
-cross-database references are not implemented: nonesuch.public.tenk1_hundred
-\dm host.regression.public.mvtest_bb
-improper qualified name (too many dotted names): host.regression.public.mvtest_bb
-\dm ^.public.mvtest_bb
-cross-database references are not implemented: ^.public.mvtest_bb
-\dm nonesuch.public.mvtest_bb
-cross-database references are not implemented: nonesuch.public.mvtest_bb
-\ds host.regression.public.check_seq
-improper qualified name (too many dotted names): host.regression.public.check_seq
-\ds regression|mydb.public.check_seq
-cross-database references are not implemented: regression|mydb.public.check_seq
-\ds nonesuch.public.check_seq
-cross-database references are not implemented: nonesuch.public.check_seq
-\dt host.regression.public.b_star
-improper qualified name (too many dotted names): host.regression.public.b_star
-\dt regres+ion.public.b_star
-cross-database references are not implemented: regres+ion.public.b_star
-\dt nonesuch.public.b_star
-cross-database references are not implemented: nonesuch.public.b_star
-\dv host.regression.public.shoe
-improper qualified name (too many dotted names): host.regression.public.shoe
-\dv regress(ion).public.shoe
-cross-database references are not implemented: regress(ion).public.shoe
-\dv nonesuch.public.shoe
-cross-database references are not implemented: nonesuch.public.shoe
-\des nonesuch.server
-improper qualified name (too many dotted names): nonesuch.server
-\des regression.server
-improper qualified name (too many dotted names): regression.server
-\des nonesuch.server
-improper qualified name (too many dotted names): nonesuch.server
-\des regression.server
-improper qualified name (too many dotted names): regression.server
-\des nonesuch.username
-improper qualified name (too many dotted names): nonesuch.username
-\des regression.username
-improper qualified name (too many dotted names): regression.username
-\dew nonesuch.fdw
-improper qualified name (too many dotted names): nonesuch.fdw
-\dew regression.fdw
-improper qualified name (too many dotted names): regression.fdw
-\df host.regression.public.namelen
-improper qualified name (too many dotted names): host.regression.public.namelen
-\df regres[qrstuv]ion.public.namelen
-cross-database references are not implemented: regres[qrstuv]ion.public.namelen
-\df nonesuch.public.namelen
-cross-database references are not implemented: nonesuch.public.namelen
-\dF host.regression.pg_catalog.arabic
-improper qualified name (too many dotted names): host.regression.pg_catalog.arabic
-\dF regres{1,2}ion.pg_catalog.arabic
-cross-database references are not implemented: regres{1,2}ion.pg_catalog.arabic
-\dF nonesuch.pg_catalog.arabic
-cross-database references are not implemented: nonesuch.pg_catalog.arabic
-\dFd host.regression.pg_catalog.arabic_stem
-improper qualified name (too many dotted names): host.regression.pg_catalog.arabic_stem
-\dFd regres?ion.pg_catalog.arabic_stem
-cross-database references are not implemented: regres?ion.pg_catalog.arabic_stem
-\dFd nonesuch.pg_catalog.arabic_stem
-cross-database references are not implemented: nonesuch.pg_catalog.arabic_stem
-\dFp host.regression.pg_catalog.default
-improper qualified name (too many dotted names): host.regression.pg_catalog.default
-\dFp ^regression.pg_catalog.default
-cross-database references are not implemented: ^regression.pg_catalog.default
-\dFp nonesuch.pg_catalog.default
-cross-database references are not implemented: nonesuch.pg_catalog.default
-\dFt host.regression.pg_catalog.ispell
-improper qualified name (too many dotted names): host.regression.pg_catalog.ispell
-\dFt regression$.pg_catalog.ispell
-cross-database references are not implemented: regression$.pg_catalog.ispell
-\dFt nonesuch.pg_catalog.ispell
-cross-database references are not implemented: nonesuch.pg_catalog.ispell
-\dg nonesuch.pg_database_owner
-improper qualified name (too many dotted names): nonesuch.pg_database_owner
-\dg regression.pg_database_owner
-improper qualified name (too many dotted names): regression.pg_database_owner
-\dL host.regression.plpgsql
-improper qualified name (too many dotted names): host.regression.plpgsql
-\dL *.plpgsql
-cross-database references are not implemented: *.plpgsql
-\dL nonesuch.plpgsql
-cross-database references are not implemented: nonesuch.plpgsql
-\dn host.regression.public
-improper qualified name (too many dotted names): host.regression.public
-\dn """".public
-cross-database references are not implemented: """".public
-\dn nonesuch.public
-cross-database references are not implemented: nonesuch.public
-\do host.regression.public.!=-
-improper qualified name (too many dotted names): host.regression.public.!=-
-\do "regression|mydb".public.!=-
-cross-database references are not implemented: "regression|mydb".public.!=-
-\do nonesuch.public.!=-
-cross-database references are not implemented: nonesuch.public.!=-
-\dO host.regression.pg_catalog.POSIX
-improper qualified name (too many dotted names): host.regression.pg_catalog.POSIX
-\dO .pg_catalog.POSIX
-cross-database references are not implemented: .pg_catalog.POSIX
-\dO nonesuch.pg_catalog.POSIX
-cross-database references are not implemented: nonesuch.pg_catalog.POSIX
-\dp host.regression.public.a_star
-improper qualified name (too many dotted names): host.regression.public.a_star
-\dp "regres+ion".public.a_star
-cross-database references are not implemented: "regres+ion".public.a_star
-\dp nonesuch.public.a_star
-cross-database references are not implemented: nonesuch.public.a_star
-\dP host.regression.public.mlparted
-improper qualified name (too many dotted names): host.regression.public.mlparted
-\dP "regres(sion)".public.mlparted
-cross-database references are not implemented: "regres(sion)".public.mlparted
-\dP nonesuch.public.mlparted
-cross-database references are not implemented: nonesuch.public.mlparted
-\drds nonesuch.lc_messages
-improper qualified name (too many dotted names): nonesuch.lc_messages
-\drds regression.lc_messages
-improper qualified name (too many dotted names): regression.lc_messages
-\dRp public.mypub
-improper qualified name (too many dotted names): public.mypub
-\dRp regression.mypub
-improper qualified name (too many dotted names): regression.mypub
-\dRs public.mysub
-improper qualified name (too many dotted names): public.mysub
-\dRs regression.mysub
-improper qualified name (too many dotted names): regression.mysub
-\dT host.regression.public.widget
-improper qualified name (too many dotted names): host.regression.public.widget
-\dT "regression{1,2}".public.widget
-cross-database references are not implemented: "regression{1,2}".public.widget
-\dT nonesuch.public.widget
-cross-database references are not implemented: nonesuch.public.widget
-\dx regression.plpgsql
-improper qualified name (too many dotted names): regression.plpgsql
-\dx nonesuch.plpgsql
-improper qualified name (too many dotted names): nonesuch.plpgsql
-\dX host.regression.public.func_deps_stat
-improper qualified name (too many dotted names): host.regression.public.func_deps_stat
-\dX "^regression$".public.func_deps_stat
-cross-database references are not implemented: "^regression$".public.func_deps_stat
-\dX nonesuch.public.func_deps_stat
-cross-database references are not implemented: nonesuch.public.func_deps_stat
-\dy regression.myevt
-improper qualified name (too many dotted names): regression.myevt
-\dy nonesuch.myevt
-improper qualified name (too many dotted names): nonesuch.myevt
--- check that dots within quoted name segments are not counted
-\dA "no.such.access.method"
-List of access methods
- Name | Type
-------+------
-(0 rows)
-
-\dt "no.such.table.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\da "no.such.aggregate.function"
- List of aggregate functions
- Schema | Name | Result data type | Argument data types | Description
---------+------+------------------+---------------------+-------------
-(0 rows)
-
-\dAc "no.such.operator.class"
- List of operator classes
- AM | Input type | Storage type | Operator class | Default?
-----+------------+--------------+----------------+----------
-(0 rows)
-
-\dAf "no.such.operator.family"
- List of operator families
- AM | Operator family | Applicable types
-----+-----------------+------------------
-(0 rows)
-
-\dAo "no.such.operator.of.operator.family"
- List of operators of operator families
- AM | Operator family | Operator | Strategy | Purpose
-----+-----------------+----------+----------+---------
-(0 rows)
-
-\dAp "no.such.operator.support.function.of.operator.family"
- List of support functions of operator families
- AM | Operator family | Registered left type | Registered right type | Number | Function
-----+-----------------+----------------------+-----------------------+--------+----------
-(0 rows)
-
-\db "no.such.tablespace"
- List of tablespaces
- Name | Owner | Location
-------+-------+----------
-(0 rows)
-
-\dc "no.such.conversion"
- List of conversions
- Schema | Name | Source | Destination | Default?
---------+------+--------+-------------+----------
-(0 rows)
-
-\dC "no.such.cast"
- List of casts
- Source type | Target type | Function | Implicit?
--------------+-------------+----------+-----------
-(0 rows)
-
-\dd "no.such.object.description"
- Object descriptions
- Schema | Name | Object | Description
---------+------+--------+-------------
-(0 rows)
-
-\dD "no.such.domain"
- List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check
---------+------+------+-----------+----------+---------+-------
-(0 rows)
-
-\ddp "no.such.default.access.privilege"
- Default access privileges
- Owner | Schema | Type | Access privileges
--------+--------+------+-------------------
-(0 rows)
-
-\di "no.such.index.relation"
- List of relations
- Schema | Name | Type | Owner | Table
---------+------+------+-------+-------
-(0 rows)
-
-\dm "no.such.materialized.view"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\ds "no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\dt "no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\dv "no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\des "no.such.foreign.server"
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+-------+----------------------
-(0 rows)
-
-\dew "no.such.foreign.data.wrapper"
- List of foreign-data wrappers
- Name | Owner | Handler | Validator
-------+-------+---------+-----------
-(0 rows)
-
-\df "no.such.function"
- List of functions
- Schema | Name | Result data type | Argument data types | Type
---------+------+------------------+---------------------+------
-(0 rows)
-
-\dF "no.such.text.search.configuration"
-List of text search configurations
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFd "no.such.text.search.dictionary"
-List of text search dictionaries
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFp "no.such.text.search.parser"
- List of text search parsers
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFt "no.such.text.search.template"
-List of text search templates
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dg "no.such.role"
- List of roles
- Role name | Attributes
------------+------------
-
-\dL "no.such.language"
- List of languages
- Name | Owner | Trusted | Description
-------+-------+---------+-------------
-(0 rows)
-
-\dn "no.such.schema"
-List of schemas
- Name | Owner
-------+-------
-(0 rows)
-
-\do "no.such.operator"
- List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description
---------+------+---------------+----------------+-------------+-------------
-(0 rows)
-
-\dO "no.such.collation"
- List of collations
- Schema | Name | Provider | Collate | Ctype | Locale | ICU Rules | Deterministic?
---------+------+----------+---------+-------+--------+-----------+----------------
-(0 rows)
-
-\dp "no.such.access.privilege"
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+------+------+-------------------+-------------------+----------
-(0 rows)
-
-\dP "no.such.partitioned.relation"
- List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table
---------+------+-------+------+-------------+-------
-(0 rows)
-
-\drds "no.such.setting"
- List of settings
- Role | Database | Settings
-------+----------+----------
-(0 rows)
-
-\dRp "no.such.publication"
- List of publications
- Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
-------+-------+------------+---------+---------+---------+-----------+-------------------+----------
-(0 rows)
-
-\dRs "no.such.subscription"
- List of subscriptions
- Name | Owner | Enabled | Publication
-------+-------+---------+-------------
-(0 rows)
-
-\dT "no.such.data.type"
- List of data types
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dx "no.such.installed.extension"
- List of installed extensions
- Name | Version | Schema | Description
-------+---------+--------+-------------
-(0 rows)
-
-\dX "no.such.extended.statistics"
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+------+------------+-----------+--------------+-----
-(0 rows)
-
-\dy "no.such.event.trigger"
- List of event triggers
- Name | Event | Owner | Enabled | Function | Tags
-------+-------+-------+---------+----------+------
-(0 rows)
-
--- again, but with dotted schema qualifications.
-\dA "no.such.schema"."no.such.access.method"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.access.method"
-\dt "no.such.schema"."no.such.table.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\da "no.such.schema"."no.such.aggregate.function"
- List of aggregate functions
- Schema | Name | Result data type | Argument data types | Description
---------+------+------------------+---------------------+-------------
-(0 rows)
-
-\dAc "no.such.schema"."no.such.operator.class"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.class"
-\dAf "no.such.schema"."no.such.operator.family"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.family"
-\dAo "no.such.schema"."no.such.operator.of.operator.family"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.of.operator.family"
-\dAp "no.such.schema"."no.such.operator.support.function.of.operator.family"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.support.function.of.operator.family"
-\db "no.such.schema"."no.such.tablespace"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.tablespace"
-\dc "no.such.schema"."no.such.conversion"
- List of conversions
- Schema | Name | Source | Destination | Default?
---------+------+--------+-------------+----------
-(0 rows)
-
-\dC "no.such.schema"."no.such.cast"
- List of casts
- Source type | Target type | Function | Implicit?
--------------+-------------+----------+-----------
-(0 rows)
-
-\dd "no.such.schema"."no.such.object.description"
- Object descriptions
- Schema | Name | Object | Description
---------+------+--------+-------------
-(0 rows)
-
-\dD "no.such.schema"."no.such.domain"
- List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check
---------+------+------+-----------+----------+---------+-------
-(0 rows)
-
-\ddp "no.such.schema"."no.such.default.access.privilege"
- Default access privileges
- Owner | Schema | Type | Access privileges
--------+--------+------+-------------------
-(0 rows)
-
-\di "no.such.schema"."no.such.index.relation"
- List of relations
- Schema | Name | Type | Owner | Table
---------+------+------+-------+-------
-(0 rows)
-
-\dm "no.such.schema"."no.such.materialized.view"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\ds "no.such.schema"."no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\dt "no.such.schema"."no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\dv "no.such.schema"."no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\des "no.such.schema"."no.such.foreign.server"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.server"
-\dew "no.such.schema"."no.such.foreign.data.wrapper"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.data.wrapper"
-\df "no.such.schema"."no.such.function"
- List of functions
- Schema | Name | Result data type | Argument data types | Type
---------+------+------------------+---------------------+------
-(0 rows)
-
-\dF "no.such.schema"."no.such.text.search.configuration"
-List of text search configurations
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFd "no.such.schema"."no.such.text.search.dictionary"
-List of text search dictionaries
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFp "no.such.schema"."no.such.text.search.parser"
- List of text search parsers
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFt "no.such.schema"."no.such.text.search.template"
-List of text search templates
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dg "no.such.schema"."no.such.role"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.role"
-\dL "no.such.schema"."no.such.language"
-cross-database references are not implemented: "no.such.schema"."no.such.language"
-\do "no.such.schema"."no.such.operator"
- List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description
---------+------+---------------+----------------+-------------+-------------
-(0 rows)
-
-\dO "no.such.schema"."no.such.collation"
- List of collations
- Schema | Name | Provider | Collate | Ctype | Locale | ICU Rules | Deterministic?
---------+------+----------+---------+-------+--------+-----------+----------------
-(0 rows)
-
-\dp "no.such.schema"."no.such.access.privilege"
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+------+------+-------------------+-------------------+----------
-(0 rows)
-
-\dP "no.such.schema"."no.such.partitioned.relation"
- List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table
---------+------+-------+------+-------------+-------
-(0 rows)
-
-\drds "no.such.schema"."no.such.setting"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.setting"
-\dRp "no.such.schema"."no.such.publication"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.publication"
-\dRs "no.such.schema"."no.such.subscription"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.subscription"
-\dT "no.such.schema"."no.such.data.type"
- List of data types
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dx "no.such.schema"."no.such.installed.extension"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.installed.extension"
-\dX "no.such.schema"."no.such.extended.statistics"
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+------+------------+-----------+--------------+-----
-(0 rows)
-
-\dy "no.such.schema"."no.such.event.trigger"
-improper qualified name (too many dotted names): "no.such.schema"."no.such.event.trigger"
--- again, but with current database and dotted schema qualifications.
-\dt regression."no.such.schema"."no.such.table.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\da regression."no.such.schema"."no.such.aggregate.function"
- List of aggregate functions
- Schema | Name | Result data type | Argument data types | Description
---------+------+------------------+---------------------+-------------
-(0 rows)
-
-\dc regression."no.such.schema"."no.such.conversion"
- List of conversions
- Schema | Name | Source | Destination | Default?
---------+------+--------+-------------+----------
-(0 rows)
-
-\dC regression."no.such.schema"."no.such.cast"
- List of casts
- Source type | Target type | Function | Implicit?
--------------+-------------+----------+-----------
-(0 rows)
-
-\dd regression."no.such.schema"."no.such.object.description"
- Object descriptions
- Schema | Name | Object | Description
---------+------+--------+-------------
-(0 rows)
-
-\dD regression."no.such.schema"."no.such.domain"
- List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check
---------+------+------+-----------+----------+---------+-------
-(0 rows)
-
-\di regression."no.such.schema"."no.such.index.relation"
- List of relations
- Schema | Name | Type | Owner | Table
---------+------+------+-------+-------
-(0 rows)
-
-\dm regression."no.such.schema"."no.such.materialized.view"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\ds regression."no.such.schema"."no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\dt regression."no.such.schema"."no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\dv regression."no.such.schema"."no.such.relation"
- List of relations
- Schema | Name | Type | Owner
---------+------+------+-------
-(0 rows)
-
-\df regression."no.such.schema"."no.such.function"
- List of functions
- Schema | Name | Result data type | Argument data types | Type
---------+------+------------------+---------------------+------
-(0 rows)
-
-\dF regression."no.such.schema"."no.such.text.search.configuration"
-List of text search configurations
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFd regression."no.such.schema"."no.such.text.search.dictionary"
-List of text search dictionaries
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFp regression."no.such.schema"."no.such.text.search.parser"
- List of text search parsers
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dFt regression."no.such.schema"."no.such.text.search.template"
-List of text search templates
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\do regression."no.such.schema"."no.such.operator"
- List of operators
- Schema | Name | Left arg type | Right arg type | Result type | Description
---------+------+---------------+----------------+-------------+-------------
-(0 rows)
-
-\dO regression."no.such.schema"."no.such.collation"
- List of collations
- Schema | Name | Provider | Collate | Ctype | Locale | ICU Rules | Deterministic?
---------+------+----------+---------+-------+--------+-----------+----------------
-(0 rows)
-
-\dp regression."no.such.schema"."no.such.access.privilege"
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+------+------+-------------------+-------------------+----------
-(0 rows)
-
-\dP regression."no.such.schema"."no.such.partitioned.relation"
- List of partitioned relations
- Schema | Name | Owner | Type | Parent name | Table
---------+------+-------+------+-------------+-------
-(0 rows)
-
-\dT regression."no.such.schema"."no.such.data.type"
- List of data types
- Schema | Name | Description
---------+------+-------------
-(0 rows)
-
-\dX regression."no.such.schema"."no.such.extended.statistics"
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+------+------------+-----------+--------------+-----
-(0 rows)
-
--- again, but with dotted database and dotted schema qualifications.
-\dt "no.such.database"."no.such.schema"."no.such.table.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.table.relation"
-\da "no.such.database"."no.such.schema"."no.such.aggregate.function"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.aggregate.function"
-\dc "no.such.database"."no.such.schema"."no.such.conversion"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.conversion"
-\dC "no.such.database"."no.such.schema"."no.such.cast"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.cast"
-\dd "no.such.database"."no.such.schema"."no.such.object.description"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.object.description"
-\dD "no.such.database"."no.such.schema"."no.such.domain"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.domain"
-\ddp "no.such.database"."no.such.schema"."no.such.default.access.privilege"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.default.access.privilege"
-\di "no.such.database"."no.such.schema"."no.such.index.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.index.relation"
-\dm "no.such.database"."no.such.schema"."no.such.materialized.view"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.materialized.view"
-\ds "no.such.database"."no.such.schema"."no.such.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation"
-\dt "no.such.database"."no.such.schema"."no.such.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation"
-\dv "no.such.database"."no.such.schema"."no.such.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation"
-\df "no.such.database"."no.such.schema"."no.such.function"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.function"
-\dF "no.such.database"."no.such.schema"."no.such.text.search.configuration"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.configuration"
-\dFd "no.such.database"."no.such.schema"."no.such.text.search.dictionary"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.dictionary"
-\dFp "no.such.database"."no.such.schema"."no.such.text.search.parser"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.parser"
-\dFt "no.such.database"."no.such.schema"."no.such.text.search.template"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.template"
-\do "no.such.database"."no.such.schema"."no.such.operator"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.operator"
-\dO "no.such.database"."no.such.schema"."no.such.collation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.collation"
-\dp "no.such.database"."no.such.schema"."no.such.access.privilege"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.access.privilege"
-\dP "no.such.database"."no.such.schema"."no.such.partitioned.relation"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.partitioned.relation"
-\dT "no.such.database"."no.such.schema"."no.such.data.type"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.data.type"
-\dX "no.such.database"."no.such.schema"."no.such.extended.statistics"
-cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.extended.statistics"
--- check \drg and \du
-CREATE ROLE regress_du_role0;
-CREATE ROLE regress_du_role1;
-CREATE ROLE regress_du_role2;
-CREATE ROLE regress_du_admin;
-GRANT regress_du_role0 TO regress_du_admin WITH ADMIN TRUE;
-GRANT regress_du_role1 TO regress_du_admin WITH ADMIN TRUE;
-GRANT regress_du_role2 TO regress_du_admin WITH ADMIN TRUE;
-GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN TRUE, INHERIT TRUE, SET TRUE GRANTED BY regress_du_admin;
-GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN TRUE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_admin;
-GRANT regress_du_role1 TO regress_du_role2 WITH ADMIN TRUE , INHERIT FALSE, SET TRUE GRANTED BY regress_du_admin;
-GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT TRUE, SET FALSE GRANTED BY regress_du_role1;
-GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT TRUE , SET TRUE GRANTED BY regress_du_role1;
-GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT FALSE, SET TRUE GRANTED BY regress_du_role2;
-GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_role2;
-\drg regress_du_role*
- List of role grants
- Role name | Member of | Options | Grantor
-------------------+------------------+---------------------+------------------
- regress_du_role1 | regress_du_role0 | ADMIN, INHERIT, SET | regress_du_admin
- regress_du_role1 | regress_du_role0 | INHERIT | regress_du_role1
- regress_du_role1 | regress_du_role0 | SET | regress_du_role2
- regress_du_role2 | regress_du_role0 | ADMIN | regress_du_admin
- regress_du_role2 | regress_du_role0 | INHERIT, SET | regress_du_role1
- regress_du_role2 | regress_du_role0 | | regress_du_role2
- regress_du_role2 | regress_du_role1 | ADMIN, SET | regress_du_admin
-(7 rows)
-
-\du regress_du_role*
- List of roles
- Role name | Attributes
-------------------+--------------
- regress_du_role0 | Cannot login
- regress_du_role1 | Cannot login
- regress_du_role2 | Cannot login
-
-DROP ROLE regress_du_role0;
-DROP ROLE regress_du_role1;
-DROP ROLE regress_du_role2;
-DROP ROLE regress_du_admin;
--- Test display of empty privileges.
-BEGIN;
--- Create an owner for tested objects because output contains owner name.
-CREATE ROLE regress_zeropriv_owner;
-SET LOCAL ROLE regress_zeropriv_owner;
-CREATE DOMAIN regress_zeropriv_domain AS int;
-REVOKE ALL ON DOMAIN regress_zeropriv_domain FROM CURRENT_USER, PUBLIC;
-\dD+ regress_zeropriv_domain
- List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check | Access privileges | Description
---------+-------------------------+---------+-----------+----------+---------+-------+-------------------+-------------
- public | regress_zeropriv_domain | integer | | | | | (none) |
-(1 row)
-
-CREATE PROCEDURE regress_zeropriv_proc() LANGUAGE sql AS '';
-REVOKE ALL ON PROCEDURE regress_zeropriv_proc() FROM CURRENT_USER, PUBLIC;
-\df+ regress_zeropriv_proc
- List of functions
- Schema | Name | Result data type | Argument data types | Type | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description
---------+-----------------------+------------------+---------------------+------+------------+----------+------------------------+----------+-------------------+----------+---------------+-------------
- public | regress_zeropriv_proc | | | proc | volatile | unsafe | regress_zeropriv_owner | invoker | (none) | sql | |
-(1 row)
-
-CREATE TABLE regress_zeropriv_tbl (a int);
-REVOKE ALL ON TABLE regress_zeropriv_tbl FROM CURRENT_USER;
-\dp regress_zeropriv_tbl
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+----------------------+-------+-------------------+-------------------+----------
- public | regress_zeropriv_tbl | table | (none) | |
-(1 row)
-
-CREATE TYPE regress_zeropriv_type AS (a int);
-REVOKE ALL ON TYPE regress_zeropriv_type FROM CURRENT_USER, PUBLIC;
-\dT+ regress_zeropriv_type
- List of data types
- Schema | Name | Internal name | Size | Elements | Owner | Access privileges | Description
---------+-----------------------+-----------------------+-------+----------+------------------------+-------------------+-------------
- public | regress_zeropriv_type | regress_zeropriv_type | tuple | | regress_zeropriv_owner | (none) |
-(1 row)
-
-ROLLBACK;
--- Test display of default privileges with \pset null.
-CREATE TABLE defprivs (a int);
-\pset null '(default)'
-\z defprivs
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+----------+-------+-------------------+-------------------+----------
- public | defprivs | table | (default) | |
-(1 row)
-
-\pset null ''
-DROP TABLE defprivs;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/psql_crosstab.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/psql_crosstab.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/psql_crosstab.out 2024-11-15 02:50:52.490049049 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/psql_crosstab.out 2024-11-15 02:59:16.845115183 +0000
@@ -1,216 +1,2 @@
---
--- \crosstabview
---
-CREATE TABLE ctv_data (v, h, c, i, d) AS
-VALUES
- ('v1','h2','foo', 3, '2015-04-01'::date),
- ('v2','h1','bar', 3, '2015-01-02'),
- ('v1','h0','baz', NULL, '2015-07-12'),
- ('v0','h4','qux', 4, '2015-07-15'),
- ('v0','h4','dbl', -3, '2014-12-15'),
- ('v0',NULL,'qux', 5, '2014-07-15'),
- ('v1','h2','quux',7, '2015-04-04');
--- make plans more stable
-ANALYZE ctv_data;
--- running \crosstabview after query uses query in buffer
-SELECT v, EXTRACT(year FROM d), count(*)
- FROM ctv_data
- GROUP BY 1, 2
- ORDER BY 1, 2;
- v | extract | count
-----+---------+-------
- v0 | 2014 | 2
- v0 | 2015 | 1
- v1 | 2015 | 3
- v2 | 2015 | 1
-(4 rows)
-
--- basic usage with 3 columns
- \crosstabview
- v | 2014 | 2015
-----+------+------
- v0 | 2 | 1
- v1 | | 3
- v2 | | 1
-(3 rows)
-
--- ordered months in horizontal header, quoted column name
-SELECT v, to_char(d, 'Mon') AS "month name", EXTRACT(month FROM d) AS num,
- count(*) FROM ctv_data GROUP BY 1,2,3 ORDER BY 1
- \crosstabview v "month name" 4 num
- v | Jan | Apr | Jul | Dec
-----+-----+-----+-----+-----
- v0 | | | 2 | 1
- v1 | | 2 | 1 |
- v2 | 1 | | |
-(3 rows)
-
--- ordered months in vertical header, ordered years in horizontal header
-SELECT EXTRACT(year FROM d) AS year, to_char(d,'Mon') AS """month"" name",
- EXTRACT(month FROM d) AS month,
- format('sum=%s avg=%s', sum(i), avg(i)::numeric(2,1))
- FROM ctv_data
- GROUP BY EXTRACT(year FROM d), to_char(d,'Mon'), EXTRACT(month FROM d)
-ORDER BY month
-\crosstabview """month"" name" year format year
- "month" name | 2014 | 2015
---------------+-----------------+----------------
- Jan | | sum=3 avg=3.0
- Apr | | sum=10 avg=5.0
- Jul | sum=5 avg=5.0 | sum=4 avg=4.0
- Dec | sum=-3 avg=-3.0 |
-(4 rows)
-
--- combine contents vertically into the same cell (V/H duplicates)
-SELECT v, h, string_agg(c, E'\n') FROM ctv_data GROUP BY v, h ORDER BY 1,2,3
- \crosstabview 1 2 3
- v | h4 | | h0 | h2 | h1
-----+-----+-----+-----+------+-----
- v0 | qux+| qux | | |
- | dbl | | | |
- v1 | | | baz | foo +|
- | | | | quux |
- v2 | | | | | bar
-(3 rows)
-
--- horizontal ASC order from window function
-SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h) AS r
-FROM ctv_data GROUP BY v, h ORDER BY 1,3,2
- \crosstabview v h c r
- v | h0 | h1 | h2 | h4 |
-----+-----+-----+------+-----+-----
- v0 | | | | qux+| qux
- | | | | dbl |
- v1 | baz | | foo +| |
- | | | quux | |
- v2 | | bar | | |
-(3 rows)
-
--- horizontal DESC order from window function
-SELECT v, h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h DESC) AS r
-FROM ctv_data GROUP BY v, h ORDER BY 1,3,2
- \crosstabview v h c r
- v | | h4 | h2 | h1 | h0
-----+-----+-----+------+-----+-----
- v0 | qux | qux+| | |
- | | dbl | | |
- v1 | | | foo +| | baz
- | | | quux | |
- v2 | | | | bar |
-(3 rows)
-
--- horizontal ASC order from window function, NULLs pushed rightmost
-SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h NULLS LAST) AS r
-FROM ctv_data GROUP BY v, h ORDER BY 1,3,2
- \crosstabview v h c r
- v | h0 | h1 | h2 | h4 |
-----+-----+-----+------+-----+-----
- v0 | | | | qux+| qux
- | | | | dbl |
- v1 | baz | | foo +| |
- | | | quux | |
- v2 | | bar | | |
-(3 rows)
-
--- only null, no column name, 2 columns: error
-SELECT null,null \crosstabview
-\crosstabview: query must return at least three columns
--- only null, no column name, 3 columns: works
-SELECT null,null,null \crosstabview
- ?column? |
-----------+--
- |
-(1 row)
-
--- null display
-\pset null '#null#'
-SELECT v,h, string_agg(i::text, E'\n') AS i FROM ctv_data
-GROUP BY v, h ORDER BY h,v
- \crosstabview v h i
- v | h0 | h1 | h2 | h4 | #null#
-----+--------+----+----+----+--------
- v1 | #null# | | 3 +| |
- | | | 7 | |
- v2 | | 3 | | |
- v0 | | | | 4 +| 5
- | | | | -3 |
-(3 rows)
-
-\pset null ''
--- refer to columns by position
-SELECT v,h,string_agg(i::text, E'\n'), string_agg(c, E'\n')
-FROM ctv_data GROUP BY v, h ORDER BY h,v
- \crosstabview 2 1 4
- h | v1 | v2 | v0
-----+------+-----+-----
- h0 | baz | |
- h1 | | bar |
- h2 | foo +| |
- | quux | |
- h4 | | | qux+
- | | | dbl
- | | | qux
-(5 rows)
-
--- refer to columns by positions and names mixed
-SELECT v,h, string_agg(i::text, E'\n') AS i, string_agg(c, E'\n') AS c
-FROM ctv_data GROUP BY v, h ORDER BY h,v
- \crosstabview 1 "h" 4
- v | h0 | h1 | h2 | h4 |
-----+-----+-----+------+-----+-----
- v1 | baz | | foo +| |
- | | | quux | |
- v2 | | bar | | |
- v0 | | | | qux+| qux
- | | | | dbl |
-(3 rows)
-
--- refer to columns by quoted names, check downcasing of unquoted name
-SELECT 1 as "22", 2 as b, 3 as "Foo"
- \crosstabview "22" B "Foo"
- 22 | 2
-----+---
- 1 | 3
-(1 row)
-
--- error: bad column name
-SELECT v,h,c,i FROM ctv_data
- \crosstabview v h j
-\crosstabview: column name not found: "j"
--- error: need to quote name
-SELECT 1 as "22", 2 as b, 3 as "Foo"
- \crosstabview 1 2 Foo
-\crosstabview: column name not found: "foo"
--- error: need to not quote name
-SELECT 1 as "22", 2 as b, 3 as "Foo"
- \crosstabview 1 "B" "Foo"
-\crosstabview: column name not found: "B"
--- error: bad column number
-SELECT v,h,i,c FROM ctv_data
- \crosstabview 2 1 5
-\crosstabview: column number 5 is out of range 1..4
--- error: same H and V columns
-SELECT v,h,i,c FROM ctv_data
- \crosstabview 2 h 4
-\crosstabview: vertical and horizontal headers must be different columns
--- error: too many columns
-SELECT a,a,1 FROM generate_series(1,3000) AS a
- \crosstabview
-\crosstabview: maximum number of columns (1600) exceeded
--- error: only one column
-SELECT 1 \crosstabview
-\crosstabview: query must return at least three columns
-DROP TABLE ctv_data;
--- check error reporting (bug #14476)
-CREATE TABLE ctv_data (x int, y int, v text);
-INSERT INTO ctv_data SELECT 1, x, '*' || x FROM generate_series(1,10) x;
-SELECT * FROM ctv_data \crosstabview
- x | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10
----+----+----+----+----+----+----+----+----+----+-----
- 1 | *1 | *2 | *3 | *4 | *5 | *6 | *7 | *8 | *9 | *10
-(1 row)
-
-INSERT INTO ctv_data VALUES (1, 10, '*'); -- duplicate data to cause error
-SELECT * FROM ctv_data \crosstabview
-\crosstabview: query result contains multiple data values for row "1", column "10"
-DROP TABLE ctv_data;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/amutils.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/amutils.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/amutils.out 2024-11-15 02:50:52.418167543 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/amutils.out 2024-11-15 02:59:16.845115183 +0000
@@ -1,254 +1,2 @@
---
--- Test index AM property-reporting functions
---
-select prop,
- pg_indexam_has_property(a.oid, prop) as "AM",
- pg_index_has_property('onek_hundred'::regclass, prop) as "Index",
- pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as "Column"
- from pg_am a,
- unnest(array['asc', 'desc', 'nulls_first', 'nulls_last',
- 'orderable', 'distance_orderable', 'returnable',
- 'search_array', 'search_nulls',
- 'clusterable', 'index_scan', 'bitmap_scan',
- 'backward_scan',
- 'can_order', 'can_unique', 'can_multi_col',
- 'can_exclude', 'can_include',
- 'bogus']::text[])
- with ordinality as u(prop,ord)
- where a.amname = 'btree'
- order by ord;
- prop | AM | Index | Column
---------------------+----+-------+--------
- asc | | | t
- desc | | | f
- nulls_first | | | f
- nulls_last | | | t
- orderable | | | t
- distance_orderable | | | f
- returnable | | | t
- search_array | | | t
- search_nulls | | | t
- clusterable | | t |
- index_scan | | t |
- bitmap_scan | | t |
- backward_scan | | t |
- can_order | t | |
- can_unique | t | |
- can_multi_col | t | |
- can_exclude | t | |
- can_include | t | |
- bogus | | |
-(19 rows)
-
-select prop,
- pg_indexam_has_property(a.oid, prop) as "AM",
- pg_index_has_property('gcircleind'::regclass, prop) as "Index",
- pg_index_column_has_property('gcircleind'::regclass, 1, prop) as "Column"
- from pg_am a,
- unnest(array['asc', 'desc', 'nulls_first', 'nulls_last',
- 'orderable', 'distance_orderable', 'returnable',
- 'search_array', 'search_nulls',
- 'clusterable', 'index_scan', 'bitmap_scan',
- 'backward_scan',
- 'can_order', 'can_unique', 'can_multi_col',
- 'can_exclude', 'can_include',
- 'bogus']::text[])
- with ordinality as u(prop,ord)
- where a.amname = 'gist'
- order by ord;
- prop | AM | Index | Column
---------------------+----+-------+--------
- asc | | | f
- desc | | | f
- nulls_first | | | f
- nulls_last | | | f
- orderable | | | f
- distance_orderable | | | t
- returnable | | | f
- search_array | | | f
- search_nulls | | | t
- clusterable | | t |
- index_scan | | t |
- bitmap_scan | | t |
- backward_scan | | f |
- can_order | f | |
- can_unique | f | |
- can_multi_col | t | |
- can_exclude | t | |
- can_include | t | |
- bogus | | |
-(19 rows)
-
-select prop,
- pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as btree,
- pg_index_column_has_property('hash_i4_index'::regclass, 1, prop) as hash,
- pg_index_column_has_property('gcircleind'::regclass, 1, prop) as gist,
- pg_index_column_has_property('sp_radix_ind'::regclass, 1, prop) as spgist_radix,
- pg_index_column_has_property('sp_quad_ind'::regclass, 1, prop) as spgist_quad,
- pg_index_column_has_property('botharrayidx'::regclass, 1, prop) as gin,
- pg_index_column_has_property('brinidx'::regclass, 1, prop) as brin
- from unnest(array['asc', 'desc', 'nulls_first', 'nulls_last',
- 'orderable', 'distance_orderable', 'returnable',
- 'search_array', 'search_nulls',
- 'bogus']::text[])
- with ordinality as u(prop,ord)
- order by ord;
- prop | btree | hash | gist | spgist_radix | spgist_quad | gin | brin
---------------------+-------+------+------+--------------+-------------+-----+------
- asc | t | f | f | f | f | f | f
- desc | f | f | f | f | f | f | f
- nulls_first | f | f | f | f | f | f | f
- nulls_last | t | f | f | f | f | f | f
- orderable | t | f | f | f | f | f | f
- distance_orderable | f | f | t | f | t | f | f
- returnable | t | f | f | t | t | f | f
- search_array | t | f | f | f | f | f | f
- search_nulls | t | f | t | t | t | f | t
- bogus | | | | | | |
-(10 rows)
-
-select prop,
- pg_index_has_property('onek_hundred'::regclass, prop) as btree,
- pg_index_has_property('hash_i4_index'::regclass, prop) as hash,
- pg_index_has_property('gcircleind'::regclass, prop) as gist,
- pg_index_has_property('sp_radix_ind'::regclass, prop) as spgist,
- pg_index_has_property('botharrayidx'::regclass, prop) as gin,
- pg_index_has_property('brinidx'::regclass, prop) as brin
- from unnest(array['clusterable', 'index_scan', 'bitmap_scan',
- 'backward_scan',
- 'bogus']::text[])
- with ordinality as u(prop,ord)
- order by ord;
- prop | btree | hash | gist | spgist | gin | brin
----------------+-------+------+------+--------+-----+------
- clusterable | t | f | t | f | f | f
- index_scan | t | t | t | t | f | f
- bitmap_scan | t | t | t | t | t | t
- backward_scan | t | t | f | f | f | f
- bogus | | | | | |
-(5 rows)
-
-select amname, prop, pg_indexam_has_property(a.oid, prop) as p
- from pg_am a,
- unnest(array['can_order', 'can_unique', 'can_multi_col',
- 'can_exclude', 'can_include', 'bogus']::text[])
- with ordinality as u(prop,ord)
- where amtype = 'i'
- order by amname, ord;
- amname | prop | p
---------+---------------+---
- brin | can_order | f
- brin | can_unique | f
- brin | can_multi_col | t
- brin | can_exclude | f
- brin | can_include | f
- brin | bogus |
- btree | can_order | t
- btree | can_unique | t
- btree | can_multi_col | t
- btree | can_exclude | t
- btree | can_include | t
- btree | bogus |
- gin | can_order | f
- gin | can_unique | f
- gin | can_multi_col | t
- gin | can_exclude | f
- gin | can_include | f
- gin | bogus |
- gist | can_order | f
- gist | can_unique | f
- gist | can_multi_col | t
- gist | can_exclude | t
- gist | can_include | t
- gist | bogus |
- hash | can_order | f
- hash | can_unique | f
- hash | can_multi_col | f
- hash | can_exclude | t
- hash | can_include | f
- hash | bogus |
- spgist | can_order | f
- spgist | can_unique | f
- spgist | can_multi_col | f
- spgist | can_exclude | t
- spgist | can_include | t
- spgist | bogus |
-(36 rows)
-
---
--- additional checks for pg_index_column_has_property
---
-CREATE TEMP TABLE foo (f1 int, f2 int, f3 int, f4 int);
-CREATE INDEX fooindex ON foo (f1 desc, f2 asc, f3 nulls first, f4 nulls last);
-select col, prop, pg_index_column_has_property(o, col, prop)
- from (values ('fooindex'::regclass)) v1(o),
- (values (1,'orderable'),(2,'asc'),(3,'desc'),
- (4,'nulls_first'),(5,'nulls_last'),
- (6, 'bogus')) v2(idx,prop),
- generate_series(1,4) col
- order by col, idx;
- col | prop | pg_index_column_has_property
------+-------------+------------------------------
- 1 | orderable | t
- 1 | asc | f
- 1 | desc | t
- 1 | nulls_first | t
- 1 | nulls_last | f
- 1 | bogus |
- 2 | orderable | t
- 2 | asc | t
- 2 | desc | f
- 2 | nulls_first | f
- 2 | nulls_last | t
- 2 | bogus |
- 3 | orderable | t
- 3 | asc | t
- 3 | desc | f
- 3 | nulls_first | t
- 3 | nulls_last | f
- 3 | bogus |
- 4 | orderable | t
- 4 | asc | t
- 4 | desc | f
- 4 | nulls_first | f
- 4 | nulls_last | t
- 4 | bogus |
-(24 rows)
-
-CREATE INDEX foocover ON foo (f1) INCLUDE (f2,f3);
-select col, prop, pg_index_column_has_property(o, col, prop)
- from (values ('foocover'::regclass)) v1(o),
- (values (1,'orderable'),(2,'asc'),(3,'desc'),
- (4,'nulls_first'),(5,'nulls_last'),
- (6,'distance_orderable'),(7,'returnable'),
- (8, 'bogus')) v2(idx,prop),
- generate_series(1,3) col
- order by col, idx;
- col | prop | pg_index_column_has_property
------+--------------------+------------------------------
- 1 | orderable | t
- 1 | asc | t
- 1 | desc | f
- 1 | nulls_first | f
- 1 | nulls_last | t
- 1 | distance_orderable | f
- 1 | returnable | t
- 1 | bogus |
- 2 | orderable | f
- 2 | asc |
- 2 | desc |
- 2 | nulls_first |
- 2 | nulls_last |
- 2 | distance_orderable | f
- 2 | returnable | t
- 2 | bogus |
- 3 | orderable | f
- 3 | asc |
- 3 | desc |
- 3 | nulls_first |
- 3 | nulls_last |
- 3 | distance_orderable | f
- 3 | returnable | t
- 3 | bogus |
-(24 rows)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/stats_ext.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/stats_ext.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/stats_ext.out 2024-11-15 02:50:52.506022717 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/stats_ext.out 2024-11-15 02:59:16.837115173 +0000
@@ -1,3335 +1,2 @@
--- Generic extended statistics support
---
--- Note: tables for which we check estimated row counts should be created
--- with autovacuum_enabled = off, so that we don't have unstable results
--- from auto-analyze happening when we didn't expect it.
---
--- check the number of estimated/actual rows in the top node
-create function check_estimated_rows(text) returns table (estimated int, actual int)
-language plpgsql as
-$$
-declare
- ln text;
- tmp text[];
- first_row bool := true;
-begin
- for ln in
- execute format('explain analyze %s', $1)
- loop
- if first_row then
- first_row := false;
- tmp := regexp_match(ln, 'rows=(\d*) .* rows=(\d*)');
- return query select tmp[1]::int, tmp[2]::int;
- end if;
- end loop;
-end;
-$$;
--- Verify failures
-CREATE TABLE ext_stats_test (x text, y int, z int);
-CREATE STATISTICS tst;
-ERROR: syntax error at or near ";"
-LINE 1: CREATE STATISTICS tst;
- ^
-CREATE STATISTICS tst ON a, b;
-ERROR: syntax error at or near ";"
-LINE 1: CREATE STATISTICS tst ON a, b;
- ^
-CREATE STATISTICS tst FROM sometab;
-ERROR: syntax error at or near "FROM"
-LINE 1: CREATE STATISTICS tst FROM sometab;
- ^
-CREATE STATISTICS tst ON a, b FROM nonexistent;
-ERROR: relation "nonexistent" does not exist
-CREATE STATISTICS tst ON a, b FROM ext_stats_test;
-ERROR: column "a" does not exist
-CREATE STATISTICS tst ON x, x, y FROM ext_stats_test;
-ERROR: duplicate column name in statistics definition
-CREATE STATISTICS tst ON x, x, y, x, x, y, x, x, y FROM ext_stats_test;
-ERROR: cannot have more than 8 columns in statistics
-CREATE STATISTICS tst ON x, x, y, x, x, (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test;
-ERROR: cannot have more than 8 columns in statistics
-CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test;
-ERROR: cannot have more than 8 columns in statistics
-CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), y FROM ext_stats_test;
-ERROR: duplicate expression in statistics definition
-CREATE STATISTICS tst (unrecognized) ON x, y FROM ext_stats_test;
-ERROR: unrecognized statistics kind "unrecognized"
--- incorrect expressions
-CREATE STATISTICS tst ON (y) FROM ext_stats_test; -- single column reference
-ERROR: extended statistics require at least 2 columns
-CREATE STATISTICS tst ON y + z FROM ext_stats_test; -- missing parentheses
-ERROR: syntax error at or near "+"
-LINE 1: CREATE STATISTICS tst ON y + z FROM ext_stats_test;
- ^
-CREATE STATISTICS tst ON (x, y) FROM ext_stats_test; -- tuple expression
-ERROR: syntax error at or near ","
-LINE 1: CREATE STATISTICS tst ON (x, y) FROM ext_stats_test;
- ^
-DROP TABLE ext_stats_test;
--- Ensure stats are dropped sanely, and test IF NOT EXISTS while at it
-CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER);
-CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1;
-COMMENT ON STATISTICS ab1_a_b_stats IS 'new comment';
-CREATE ROLE regress_stats_ext;
-SET SESSION AUTHORIZATION regress_stats_ext;
-COMMENT ON STATISTICS ab1_a_b_stats IS 'changed comment';
-ERROR: must be owner of statistics object ab1_a_b_stats
-DROP STATISTICS ab1_a_b_stats;
-ERROR: must be owner of statistics object ab1_a_b_stats
-ALTER STATISTICS ab1_a_b_stats RENAME TO ab1_a_b_stats_new;
-ERROR: must be owner of statistics object ab1_a_b_stats
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_stats_ext;
-CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1;
-NOTICE: statistics object "ab1_a_b_stats" already exists, skipping
-DROP STATISTICS ab1_a_b_stats;
-CREATE SCHEMA regress_schema_2;
-CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1;
--- Let's also verify the pg_get_statisticsobjdef output looks sane.
-SELECT pg_get_statisticsobjdef(oid) FROM pg_statistic_ext WHERE stxname = 'ab1_a_b_stats';
- pg_get_statisticsobjdef
--------------------------------------------------------------------
- CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1
-(1 row)
-
-DROP STATISTICS regress_schema_2.ab1_a_b_stats;
--- Ensure statistics are dropped when columns are
-CREATE STATISTICS ab1_b_c_stats ON b, c FROM ab1;
-CREATE STATISTICS ab1_a_b_c_stats ON a, b, c FROM ab1;
-CREATE STATISTICS ab1_b_a_stats ON b, a FROM ab1;
-ALTER TABLE ab1 DROP COLUMN a;
-\d ab1
- Table "public.ab1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- b | integer | | |
- c | integer | | |
-Statistics objects:
- "public.ab1_b_c_stats" ON b, c FROM ab1
-
--- Ensure statistics are dropped when table is
-SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%';
- stxname
----------------
- ab1_b_c_stats
-(1 row)
-
-DROP TABLE ab1;
-SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%';
- stxname
----------
-(0 rows)
-
--- Ensure things work sanely with SET STATISTICS 0
-CREATE TABLE ab1 (a INTEGER, b INTEGER);
-ALTER TABLE ab1 ALTER a SET STATISTICS 0;
-INSERT INTO ab1 SELECT a, a%23 FROM generate_series(1, 1000) a;
-CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1;
-ANALYZE ab1;
-WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1"
-ALTER TABLE ab1 ALTER a SET STATISTICS -1;
--- setting statistics target 0 skips the statistics, without printing any message, so check catalog
-ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0;
-\d ab1
- Table "public.ab1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Statistics objects:
- "public.ab1_a_b_stats" ON a, b FROM ab1; STATISTICS 0
-
-ANALYZE ab1;
-SELECT stxname, stxdndistinct, stxddependencies, stxdmcv, stxdinherit
- FROM pg_statistic_ext s LEFT JOIN pg_statistic_ext_data d ON (d.stxoid = s.oid)
- WHERE s.stxname = 'ab1_a_b_stats';
- stxname | stxdndistinct | stxddependencies | stxdmcv | stxdinherit
----------------+---------------+------------------+---------+-------------
- ab1_a_b_stats | | | |
-(1 row)
-
-ALTER STATISTICS ab1_a_b_stats SET STATISTICS -1;
-\d+ ab1
- Table "public.ab1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a | integer | | | | plain | |
- b | integer | | | | plain | |
-Statistics objects:
- "public.ab1_a_b_stats" ON a, b FROM ab1
-
--- partial analyze doesn't build stats either
-ANALYZE ab1 (a);
-WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1"
-ANALYZE ab1;
-DROP TABLE ab1;
-ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0;
-ERROR: statistics object "ab1_a_b_stats" does not exist
-ALTER STATISTICS IF EXISTS ab1_a_b_stats SET STATISTICS 0;
-NOTICE: statistics object "ab1_a_b_stats" does not exist, skipping
--- Ensure we can build statistics for tables with inheritance.
-CREATE TABLE ab1 (a INTEGER, b INTEGER);
-CREATE TABLE ab1c () INHERITS (ab1);
-INSERT INTO ab1 VALUES (1,1);
-CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1;
-ANALYZE ab1;
-DROP TABLE ab1 CASCADE;
-NOTICE: drop cascades to table ab1c
--- Tests for stats with inheritance
-CREATE TABLE stxdinh(a int, b int);
-CREATE TABLE stxdinh1() INHERITS(stxdinh);
-CREATE TABLE stxdinh2() INHERITS(stxdinh);
-INSERT INTO stxdinh SELECT mod(a,50), mod(a,100) FROM generate_series(0, 1999) a;
-INSERT INTO stxdinh1 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a;
-INSERT INTO stxdinh2 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a;
-VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2;
--- Ensure non-inherited stats are not applied to inherited query
--- Without stats object, it looks like this
-SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2');
- estimated | actual
------------+--------
- 400 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
- 3 | 40
-(1 row)
-
-CREATE STATISTICS stxdinh ON a, b FROM stxdinh;
-VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2;
--- See if the extended stats affect the estimates
-SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2');
- estimated | actual
------------+--------
- 150 | 150
-(1 row)
-
--- Dependencies are applied at individual relations (within append), so
--- this estimate changes a bit because we improve estimates for the parent
-SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
- 22 | 40
-(1 row)
-
--- Ensure correct (non-inherited) stats are applied to inherited query
-SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh GROUP BY 1, 2');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
- 20 | 20
-(1 row)
-
-DROP TABLE stxdinh, stxdinh1, stxdinh2;
--- Ensure inherited stats ARE applied to inherited query in partitioned table
-CREATE TABLE stxdinp(i int, a int, b int) PARTITION BY RANGE (i);
-CREATE TABLE stxdinp1 PARTITION OF stxdinp FOR VALUES FROM (1) TO (100);
-INSERT INTO stxdinp SELECT 1, a/100, a/100 FROM generate_series(1, 999) a;
-CREATE STATISTICS stxdinp ON (a + 1), a, b FROM stxdinp;
-VACUUM ANALYZE stxdinp; -- partitions are processed recursively
-SELECT 1 FROM pg_statistic_ext WHERE stxrelid = 'stxdinp'::regclass;
- ?column?
-----------
- 1
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinp GROUP BY 1, 2');
- estimated | actual
------------+--------
- 10 | 10
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT a + 1, b FROM ONLY stxdinp GROUP BY 1, 2');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-DROP TABLE stxdinp;
--- basic test for statistics on expressions
-CREATE TABLE ab1 (a INTEGER, b INTEGER, c TIMESTAMP, d TIMESTAMPTZ);
--- expression stats may be built on a single expression column
-CREATE STATISTICS ab1_exprstat_1 ON (a+b) FROM ab1;
--- with a single expression, we only enable expression statistics
-CREATE STATISTICS ab1_exprstat_2 ON (a+b) FROM ab1;
-SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_2';
- stxkind
----------
- {e}
-(1 row)
-
--- adding anything to the expression builds all statistics kinds
-CREATE STATISTICS ab1_exprstat_3 ON (a+b), a FROM ab1;
-SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_3';
- stxkind
------------
- {d,f,m,e}
-(1 row)
-
--- date_trunc on timestamptz is not immutable, but that should not matter
-CREATE STATISTICS ab1_exprstat_4 ON date_trunc('day', d) FROM ab1;
--- date_trunc on timestamp is immutable
-CREATE STATISTICS ab1_exprstat_5 ON date_trunc('day', c) FROM ab1;
--- check use of a boolean-returning expression
-CREATE STATISTICS ab1_exprstat_6 ON
- (case a when 1 then true else false end), b FROM ab1;
--- insert some data and run analyze, to test that these cases build properly
-INSERT INTO ab1
-SELECT x / 10, x / 3,
- '2020-10-01'::timestamp + x * interval '1 day',
- '2020-10-01'::timestamptz + x * interval '1 day'
-FROM generate_series(1, 100) x;
-ANALYZE ab1;
--- apply some stats
-SELECT * FROM check_estimated_rows('SELECT * FROM ab1 WHERE (case a when 1 then true else false end) AND b=2');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-DROP TABLE ab1;
--- Verify supported object types for extended statistics
-CREATE schema tststats;
-CREATE TABLE tststats.t (a int, b int, c text);
-CREATE INDEX ti ON tststats.t (a, b);
-CREATE SEQUENCE tststats.s;
-CREATE VIEW tststats.v AS SELECT * FROM tststats.t;
-CREATE MATERIALIZED VIEW tststats.mv AS SELECT * FROM tststats.t;
-CREATE TYPE tststats.ty AS (a int, b int, c text);
-CREATE FOREIGN DATA WRAPPER extstats_dummy_fdw;
-CREATE SERVER extstats_dummy_srv FOREIGN DATA WRAPPER extstats_dummy_fdw;
-CREATE FOREIGN TABLE tststats.f (a int, b int, c text) SERVER extstats_dummy_srv;
-CREATE TABLE tststats.pt (a int, b int, c text) PARTITION BY RANGE (a, b);
-CREATE TABLE tststats.pt1 PARTITION OF tststats.pt FOR VALUES FROM (-10, -10) TO (10, 10);
-CREATE STATISTICS tststats.s1 ON a, b FROM tststats.t;
-CREATE STATISTICS tststats.s2 ON a, b FROM tststats.ti;
-ERROR: cannot define statistics for relation "ti"
-DETAIL: This operation is not supported for indexes.
-CREATE STATISTICS tststats.s3 ON a, b FROM tststats.s;
-ERROR: cannot define statistics for relation "s"
-DETAIL: This operation is not supported for sequences.
-CREATE STATISTICS tststats.s4 ON a, b FROM tststats.v;
-ERROR: cannot define statistics for relation "v"
-DETAIL: This operation is not supported for views.
-CREATE STATISTICS tststats.s5 ON a, b FROM tststats.mv;
-CREATE STATISTICS tststats.s6 ON a, b FROM tststats.ty;
-ERROR: cannot define statistics for relation "ty"
-DETAIL: This operation is not supported for composite types.
-CREATE STATISTICS tststats.s7 ON a, b FROM tststats.f;
-CREATE STATISTICS tststats.s8 ON a, b FROM tststats.pt;
-CREATE STATISTICS tststats.s9 ON a, b FROM tststats.pt1;
-DO $$
-DECLARE
- relname text := reltoastrelid::regclass FROM pg_class WHERE oid = 'tststats.t'::regclass;
-BEGIN
- EXECUTE 'CREATE STATISTICS tststats.s10 ON a, b FROM ' || relname;
-EXCEPTION WHEN wrong_object_type THEN
- RAISE NOTICE 'stats on toast table not created';
-END;
-$$;
-NOTICE: stats on toast table not created
-DROP SCHEMA tststats CASCADE;
-NOTICE: drop cascades to 7 other objects
-DETAIL: drop cascades to table tststats.t
-drop cascades to sequence tststats.s
-drop cascades to view tststats.v
-drop cascades to materialized view tststats.mv
-drop cascades to type tststats.ty
-drop cascades to foreign table tststats.f
-drop cascades to table tststats.pt
-DROP FOREIGN DATA WRAPPER extstats_dummy_fdw CASCADE;
-NOTICE: drop cascades to server extstats_dummy_srv
--- n-distinct tests
-CREATE TABLE ndistinct (
- filler1 TEXT,
- filler2 NUMERIC,
- a INT,
- b INT,
- filler3 DATE,
- c INT,
- d INT
-)
-WITH (autovacuum_enabled = off);
--- over-estimates when using only per-column statistics
-INSERT INTO ndistinct (a, b, c, filler1)
- SELECT i/100, i/100, i/100, (i/100) || ' dollars and zero cents'
- FROM generate_series(1,1000) s(i);
-ANALYZE ndistinct;
--- Group Aggregate, due to over-estimate of the number of groups
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d');
- estimated | actual
------------+--------
- 200 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d');
- estimated | actual
------------+--------
- 200 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 11
-(1 row)
-
--- correct command
-CREATE STATISTICS s10 ON a, b, c FROM ndistinct;
-ANALYZE ndistinct;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
----------+-----------------------------------------------------
- {d,f,m} | {"3, 4": 11, "3, 6": 11, "4, 6": 11, "3, 4, 6": 11}
-(1 row)
-
--- minor improvement, make sure the ctid does not break the matching
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY ctid, a, b');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
--- Hash Aggregate, thanks to estimates improved by the statistic
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
--- partial improvement (match on attributes)
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
--- expressions - no improvement
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 11 | 11
-(1 row)
-
--- last two plans keep using Group Aggregate, because 'd' is not covered
--- by the statistic and while it's NULL-only we assume 200 values for it
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d');
- estimated | actual
------------+--------
- 200 | 11
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d');
- estimated | actual
------------+--------
- 200 | 11
-(1 row)
-
-TRUNCATE TABLE ndistinct;
--- under-estimates when using only per-column statistics
-INSERT INTO ndistinct (a, b, c, filler1)
- SELECT mod(i,13), mod(i,17), mod(i,19),
- mod(i,23) || ' dollars and zero cents'
- FROM generate_series(1,1000) s(i);
-ANALYZE ndistinct;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
----------+----------------------------------------------------------
- {d,f,m} | {"3, 4": 221, "3, 6": 247, "4, 6": 323, "3, 4, 6": 1000}
-(1 row)
-
--- correct estimates
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d');
- estimated | actual
------------+--------
- 323 | 323
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d');
- estimated | actual
------------+--------
- 200 | 13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-DROP STATISTICS s10;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
----------+---------------
-(0 rows)
-
--- dropping the statistics results in under-estimates
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c');
- estimated | actual
------------+--------
- 100 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d');
- estimated | actual
------------+--------
- 200 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d');
- estimated | actual
------------+--------
- 200 | 323
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d');
- estimated | actual
------------+--------
- 200 | 13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 100 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
--- ndistinct estimates with statistics on expressions
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 100 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-CREATE STATISTICS s10 (ndistinct) ON (a+1), (b+100), (2*c) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
----------+-------------------------------------------------------------------
- {d,e} | {"-1, -2": 221, "-1, -3": 247, "-2, -3": 323, "-1, -2, -3": 1000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-DROP STATISTICS s10;
--- a mix of attributes and expressions
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 100 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (2*c)');
- estimated | actual
------------+--------
- 100 | 247
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)');
- estimated | actual
------------+--------
- 100 | 1000
-(1 row)
-
-CREATE STATISTICS s10 (ndistinct) ON a, b, (2*c) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT s.stxkind, d.stxdndistinct
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxrelid = 'ndistinct'::regclass
- AND d.stxoid = s.oid;
- stxkind | stxdndistinct
----------+-------------------------------------------------------------
- {d,e} | {"3, 4": 221, "3, -1": 247, "4, -1": 323, "3, 4, -1": 1000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 221 | 221
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (2*c)');
- estimated | actual
------------+--------
- 247 | 247
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-DROP STATISTICS s10;
--- combination of multiple ndistinct statistics, with/without expressions
-TRUNCATE ndistinct;
--- two mostly independent groups of columns
-INSERT INTO ndistinct (a, b, c, d)
- SELECT mod(i,3), mod(i,9), mod(i,5), mod(i,20)
- FROM generate_series(1,1000) s(i);
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 27 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 27 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 27 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 27 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 100 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 100 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- basic statistics on both attributes (no expressions)
-CREATE STATISTICS s11 (ndistinct) ON a, b FROM ndistinct;
-CREATE STATISTICS s12 (ndistinct) ON c, d FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- replace the second statistics by statistics on expressions
-DROP STATISTICS s12;
-CREATE STATISTICS s12 (ndistinct) ON (c * 10), (d - 1) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- replace the second statistics by statistics on both attributes and expressions
-DROP STATISTICS s12;
-CREATE STATISTICS s12 (ndistinct) ON c, d, (c * 10), (d - 1) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- replace the other statistics by statistics on both attributes and expressions
-DROP STATISTICS s11;
-CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
--- replace statistics by somewhat overlapping ones (this expected to get worse estimate
--- because the first statistics shall be applied to 3 columns, and the second one can't
--- be really applied)
-DROP STATISTICS s11;
-DROP STATISTICS s12;
-CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct;
-CREATE STATISTICS s12 (ndistinct) ON a, (b+1), (c * 10) FROM ndistinct;
-ANALYZE ndistinct;
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)');
- estimated | actual
------------+--------
- 9 | 9
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c');
- estimated | actual
------------+--------
- 45 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)');
- estimated | actual
------------+--------
- 100 | 45
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)');
- estimated | actual
------------+--------
- 100 | 180
-(1 row)
-
-DROP STATISTICS s11;
-DROP STATISTICS s12;
--- functional dependencies tests
-CREATE TABLE functional_dependencies (
- filler1 TEXT,
- filler2 NUMERIC,
- a INT,
- b TEXT,
- filler3 DATE,
- c INT,
- d TEXT
-)
-WITH (autovacuum_enabled = off);
-CREATE INDEX fdeps_ab_idx ON functional_dependencies (a, b);
-CREATE INDEX fdeps_abc_idx ON functional_dependencies (a, b, c);
--- random data (no functional dependencies)
-INSERT INTO functional_dependencies (a, b, c, filler1)
- SELECT mod(i, 5), mod(i, 7), mod(i, 11), i FROM generate_series(1,1000) s(i);
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 29 | 29
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 3 | 3
-(1 row)
-
--- create statistics
-CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies;
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 29 | 29
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 3 | 3
-(1 row)
-
--- a => b, a => c, b => c
-TRUNCATE functional_dependencies;
-DROP STATISTICS func_deps_stat;
--- now do the same thing, but with expressions
-INSERT INTO functional_dependencies (a, b, c, filler1)
- SELECT i, i, i, i FROM generate_series(1,5000) s(i);
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1');
- estimated | actual
------------+--------
- 1 | 35
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1');
- estimated | actual
------------+--------
- 1 | 5
-(1 row)
-
--- create statistics
-CREATE STATISTICS func_deps_stat (dependencies) ON (mod(a,11)), (mod(b::int, 13)), (mod(c, 7)) FROM functional_dependencies;
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1');
- estimated | actual
------------+--------
- 35 | 35
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1');
- estimated | actual
------------+--------
- 5 | 5
-(1 row)
-
--- a => b, a => c, b => c
-TRUNCATE functional_dependencies;
-DROP STATISTICS func_deps_stat;
-INSERT INTO functional_dependencies (a, b, c, filler1)
- SELECT mod(i,100), mod(i,50), mod(i,25), i FROM generate_series(1,5000) s(i);
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1''');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 4 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1''');
- estimated | actual
------------+--------
- 4 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)');
- estimated | actual
------------+--------
- 3 | 400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1''');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
- 4 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
--- OR clauses referencing different attributes
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1''');
- estimated | actual
------------+--------
- 3 | 100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1''');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 4 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
- 3 | 400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1''');
- estimated | actual
------------+--------
- 2472 | 2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1441 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 3909 | 2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
--- create statistics
-CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies;
-ANALYZE functional_dependencies;
--- print the detected dependencies
-SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat';
- dependencies
-------------------------------------------------------------------------------------------------------------
- {"3 => 4": 1.000000, "3 => 6": 1.000000, "4 => 6": 1.000000, "3, 4 => 6": 1.000000, "3, 6 => 4": 1.000000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)');
- estimated | actual
------------+--------
- 400 | 400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1''');
- estimated | actual
------------+--------
- 99 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
- 99 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')');
- estimated | actual
------------+--------
- 197 | 200
-(1 row)
-
--- OR clauses referencing different attributes are incompatible
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1''');
- estimated | actual
------------+--------
- 3 | 100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
- 400 | 400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1''');
- estimated | actual
------------+--------
- 2472 | 2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1441 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 3909 | 2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
--- changing the type of column c causes all its stats to be dropped, reverting
--- to default estimates without any statistics, i.e. 0.5% selectivity for each
--- condition
-ALTER TABLE functional_dependencies ALTER COLUMN c TYPE numeric;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-ANALYZE functional_dependencies;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-DROP STATISTICS func_deps_stat;
--- now try functional dependencies with expressions
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)');
- estimated | actual
------------+--------
- 1 | 400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
--- OR clauses referencing different attributes
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = 2');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])');
- estimated | actual
------------+--------
- 1 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])');
- estimated | actual
------------+--------
- 1 | 400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
--- the estimates however improve thanks to having expression statistics
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1''');
- estimated | actual
------------+--------
- 926 | 2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1543 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 2229 | 2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
--- create statistics on expressions
-CREATE STATISTICS func_deps_stat (dependencies) ON (a * 2), upper(b), (c + 1) FROM functional_dependencies;
-ANALYZE functional_dependencies;
--- print the detected dependencies
-SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat';
- dependencies
-------------------------------------------------------------------------------------------------------------------------
- {"-1 => -2": 1.000000, "-1 => -3": 1.000000, "-2 => -3": 1.000000, "-1, -2 => -3": 1.000000, "-1, -3 => -2": 1.000000}
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
--- IN
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)');
- estimated | actual
------------+--------
- 400 | 400
-(1 row)
-
--- OR clauses referencing the same attribute
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 99 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
- 99 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')');
- estimated | actual
------------+--------
- 197 | 200
-(1 row)
-
--- OR clauses referencing different attributes
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 3 | 100
-(1 row)
-
--- ANY
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ''1''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = 2');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])');
- estimated | actual
------------+--------
- 400 | 400
-(1 row)
-
--- ANY with inequalities should not benefit from functional dependencies
--- the estimates however improve thanks to having expression statistics
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1''');
- estimated | actual
------------+--------
- 2472 | 2400
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1441 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 3909 | 2550
-(1 row)
-
--- ALL (should not benefit from functional dependencies)
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])');
- estimated | actual
------------+--------
- 2 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
--- check the ability to use multiple functional dependencies
-CREATE TABLE functional_dependencies_multi (
- a INTEGER,
- b INTEGER,
- c INTEGER,
- d INTEGER
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO functional_dependencies_multi (a, b, c, d)
- SELECT
- mod(i,7),
- mod(i,7),
- mod(i,11),
- mod(i,11)
- FROM generate_series(1,5000) s(i);
-ANALYZE functional_dependencies_multi;
--- estimates without any functional dependencies
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
- 102 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b');
- estimated | actual
------------+--------
- 102 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
- 41 | 454
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
- 1 | 64
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0');
- estimated | actual
------------+--------
- 1 | 64
-(1 row)
-
--- create separate functional dependencies
-CREATE STATISTICS functional_dependencies_multi_1 (dependencies) ON a, b FROM functional_dependencies_multi;
-CREATE STATISTICS functional_dependencies_multi_2 (dependencies) ON c, d FROM functional_dependencies_multi;
-ANALYZE functional_dependencies_multi;
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
- 714 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b');
- estimated | actual
------------+--------
- 714 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
- 454 | 454
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
- 65 | 64
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0');
- estimated | actual
------------+--------
- 65 | 64
-(1 row)
-
-DROP TABLE functional_dependencies_multi;
--- MCV lists
-CREATE TABLE mcv_lists (
- filler1 TEXT,
- filler2 NUMERIC,
- a INT,
- b VARCHAR,
- filler3 DATE,
- c INT,
- d TEXT,
- ia INT[]
-)
-WITH (autovacuum_enabled = off);
--- random data (no MCV list)
-INSERT INTO mcv_lists (a, b, c, filler1)
- SELECT mod(i,37), mod(i,41), mod(i,43), mod(i,47) FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 3 | 4
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 1
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 3 | 4
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 1
-(1 row)
-
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
--- random data (no MCV list), but with expression
-INSERT INTO mcv_lists (a, b, c, filler1)
- SELECT i, i, i, i FROM generate_series(1,1000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1');
- estimated | actual
------------+--------
- 1 | 13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1');
- estimated | actual
------------+--------
- 1 | 1
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,7)), (mod(b::int,11)), (mod(c,13)) FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1');
- estimated | actual
------------+--------
- 13 | 13
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1');
- estimated | actual
------------+--------
- 1 | 1
-(1 row)
-
--- 100 distinct combinations, all in the MCV list
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, ia, filler1)
- SELECT mod(i,100), mod(i,50), mod(i,25), array[mod(i,25)], i
- FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1');
- estimated | actual
------------+--------
- 343 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 343 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])');
- estimated | actual
------------+--------
- 8 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')');
- estimated | actual
------------+--------
- 26 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')');
- estimated | actual
------------+--------
- 26 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 10 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])');
- estimated | actual
------------+--------
- 10 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
- estimated | actual
------------+--------
- 4 | 50
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c, ia FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')');
- estimated | actual
------------+--------
- 150 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')');
- estimated | actual
------------+--------
- 150 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)');
- estimated | actual
------------+--------
- 4 | 50
-(1 row)
-
--- check change of unrelated column type does not reset the MCV statistics
-ALTER TABLE mcv_lists ALTER COLUMN d TYPE VARCHAR(64);
-SELECT d.stxdmcv IS NOT NULL
- FROM pg_statistic_ext s, pg_statistic_ext_data d
- WHERE s.stxname = 'mcv_lists_stats'
- AND d.stxoid = s.oid;
- ?column?
-----------
- t
-(1 row)
-
--- check change of column type resets the MCV statistics
-ALTER TABLE mcv_lists ALTER COLUMN c TYPE numeric;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
--- 100 distinct combinations, all in the MCV list, but with expressions
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, filler1)
- SELECT i, i, i, i FROM generate_series(1,1000) s(i);
-ANALYZE mcv_lists;
--- without any stats on the expressions, we have to use default selectivities, which
--- is why the estimates here are different from the pre-computed case above
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1');
- estimated | actual
------------+--------
- 111 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)');
- estimated | actual
------------+--------
- 111 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 15 | 120
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)');
- estimated | actual
------------+--------
- 11 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
--- create statistics with expressions only (we create three separate stats, in order not to build more complex extended stats)
-CREATE STATISTICS mcv_lists_stats_1 ON (mod(a,20)) FROM mcv_lists;
-CREATE STATISTICS mcv_lists_stats_2 ON (mod(b::int,10)) FROM mcv_lists;
-CREATE STATISTICS mcv_lists_stats_3 ON (mod(c,5)) FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1');
- estimated | actual
------------+--------
- 5 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)');
- estimated | actual
------------+--------
- 5 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1');
- estimated | actual
------------+--------
- 5 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)');
- estimated | actual
------------+--------
- 5 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 149 | 120
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)');
- estimated | actual
------------+--------
- 20 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
- 20 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)');
- estimated | actual
------------+--------
- 116 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 12 | 100
-(1 row)
-
-DROP STATISTICS mcv_lists_stats_1;
-DROP STATISTICS mcv_lists_stats_2;
-DROP STATISTICS mcv_lists_stats_3;
--- create statistics with both MCV and expressions
-CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,20)), (mod(b::int,10)), (mod(c,5)) FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 105 | 120
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)');
- estimated | actual
------------+--------
- 150 | 150
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
--- we can't use the statistic for OR clauses that are not fully covered (missing 'd' attribute)
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,5) = 1 OR d IS NOT NULL');
- estimated | actual
------------+--------
- 200 | 200
-(1 row)
-
--- 100 distinct combinations with NULL values, all in the MCV list
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, filler1)
- SELECT
- (CASE WHEN mod(i,100) = 1 THEN NULL ELSE mod(i,100) END),
- (CASE WHEN mod(i,50) = 1 THEN NULL ELSE mod(i,50) END),
- (CASE WHEN mod(i,25) = 1 THEN NULL ELSE mod(i,25) END),
- i
- FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL');
- estimated | actual
------------+--------
- 49 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL');
- estimated | actual
------------+--------
- 95 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists;
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
--- test pg_mcv_list_items with a very simple (single item) MCV list
-TRUNCATE mcv_lists;
-INSERT INTO mcv_lists (a, b, c) SELECT 1, 2, 3 FROM generate_series(1,1000) s(i);
-ANALYZE mcv_lists;
-SELECT m.*
- FROM pg_statistic_ext s, pg_statistic_ext_data d,
- pg_mcv_list_items(d.stxdmcv) m
- WHERE s.stxname = 'mcv_lists_stats'
- AND d.stxoid = s.oid;
- index | values | nulls | frequency | base_frequency
--------+---------+---------+-----------+----------------
- 0 | {1,2,3} | {f,f,f} | 1 | 1
-(1 row)
-
--- 2 distinct combinations with NULL values, all in the MCV list
-TRUNCATE mcv_lists;
-DROP STATISTICS mcv_lists_stats;
-INSERT INTO mcv_lists (a, b, c, d)
- SELECT
- NULL, -- always NULL
- (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 'x' END),
- (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 0 END),
- (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 'x' END)
- FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x''');
- estimated | actual
------------+--------
- 3750 | 2500
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x''');
- estimated | actual
------------+--------
- 3750 | 2500
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')');
- estimated | actual
------------+--------
- 3750 | 2500
-(1 row)
-
--- create statistics
-CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, d FROM mcv_lists;
-ANALYZE mcv_lists;
--- test pg_mcv_list_items with MCV list containing variable-length data and NULLs
-SELECT m.*
- FROM pg_statistic_ext s, pg_statistic_ext_data d,
- pg_mcv_list_items(d.stxdmcv) m
- WHERE s.stxname = 'mcv_lists_stats'
- AND d.stxoid = s.oid;
- index | values | nulls | frequency | base_frequency
--------+------------------+---------+-----------+----------------
- 0 | {NULL,x,x} | {t,f,f} | 0.5 | 0.25
- 1 | {NULL,NULL,NULL} | {t,t,t} | 0.5 | 0.25
-(2 rows)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x''');
- estimated | actual
------------+--------
- 2500 | 2500
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x''');
- estimated | actual
------------+--------
- 2500 | 2500
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')');
- estimated | actual
------------+--------
- 2500 | 2500
-(1 row)
-
--- mcv with pass-by-ref fixlen types, e.g. uuid
-CREATE TABLE mcv_lists_uuid (
- a UUID,
- b UUID,
- c UUID
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO mcv_lists_uuid (a, b, c)
- SELECT
- fipshash(mod(i,100)::text)::uuid,
- fipshash(mod(i,50)::text)::uuid,
- fipshash(mod(i,25)::text)::uuid
- FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists_uuid;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
- estimated | actual
------------+--------
- 1 | 50
-(1 row)
-
-CREATE STATISTICS mcv_lists_uuid_stats (mcv) ON a, b, c
- FROM mcv_lists_uuid;
-ANALYZE mcv_lists_uuid;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f''');
- estimated | actual
------------+--------
- 50 | 50
-(1 row)
-
-DROP TABLE mcv_lists_uuid;
--- mcv with arrays
-CREATE TABLE mcv_lists_arrays (
- a TEXT[],
- b NUMERIC[],
- c INT[]
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO mcv_lists_arrays (a, b, c)
- SELECT
- ARRAY[fipshash((i/100)::text), fipshash((i/100-1)::text), fipshash((i/100+1)::text)],
- ARRAY[(i/100-1)::numeric/1000, (i/100)::numeric/1000, (i/100+1)::numeric/1000],
- ARRAY[(i/100-1), i/100, (i/100+1)]
- FROM generate_series(1,5000) s(i);
-CREATE STATISTICS mcv_lists_arrays_stats (mcv) ON a, b, c
- FROM mcv_lists_arrays;
-ANALYZE mcv_lists_arrays;
--- mcv with bool
-CREATE TABLE mcv_lists_bool (
- a BOOL,
- b BOOL,
- c BOOL
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO mcv_lists_bool (a, b, c)
- SELECT
- (mod(i,2) = 0), (mod(i,4) = 0), (mod(i,8) = 0)
- FROM generate_series(1,10000) s(i);
-ANALYZE mcv_lists_bool;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c');
- estimated | actual
------------+--------
- 156 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c');
- estimated | actual
------------+--------
- 156 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c');
- estimated | actual
------------+--------
- 469 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c');
- estimated | actual
------------+--------
- 1094 | 0
-(1 row)
-
-CREATE STATISTICS mcv_lists_bool_stats (mcv) ON a, b, c
- FROM mcv_lists_bool;
-ANALYZE mcv_lists_bool;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c');
- estimated | actual
------------+--------
- 1250 | 1250
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
--- mcv covering just a small fraction of data
-CREATE TABLE mcv_lists_partial (
- a INT,
- b INT,
- c INT
-);
--- 10 frequent groups, each with 100 elements
-INSERT INTO mcv_lists_partial (a, b, c)
- SELECT
- mod(i,10),
- mod(i,10),
- mod(i,10)
- FROM generate_series(0,999) s(i);
--- 100 groups that will make it to the MCV list (includes the 10 frequent ones)
-INSERT INTO mcv_lists_partial (a, b, c)
- SELECT
- i,
- i,
- i
- FROM generate_series(0,99) s(i);
--- 4000 groups in total, most of which won't make it (just a single item)
-INSERT INTO mcv_lists_partial (a, b, c)
- SELECT
- i,
- i,
- i
- FROM generate_series(0,3999) s(i);
-ANALYZE mcv_lists_partial;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0');
- estimated | actual
------------+--------
- 1 | 102
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0');
- estimated | actual
------------+--------
- 300 | 102
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10');
- estimated | actual
------------+--------
- 1 | 2
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10');
- estimated | actual
------------+--------
- 6 | 2
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10');
- estimated | actual
------------+--------
- 204 | 104
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)');
- estimated | actual
------------+--------
- 1 | 306
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)');
- estimated | actual
------------+--------
- 6 | 102
-(1 row)
-
-CREATE STATISTICS mcv_lists_partial_stats (mcv) ON a, b, c
- FROM mcv_lists_partial;
-ANALYZE mcv_lists_partial;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0');
- estimated | actual
------------+--------
- 102 | 102
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0');
- estimated | actual
------------+--------
- 96 | 102
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10');
- estimated | actual
------------+--------
- 2 | 2
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10');
- estimated | actual
------------+--------
- 2 | 2
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10');
- estimated | actual
------------+--------
- 102 | 104
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)');
- estimated | actual
------------+--------
- 306 | 306
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)');
- estimated | actual
------------+--------
- 108 | 102
-(1 row)
-
-DROP TABLE mcv_lists_partial;
--- check the ability to use multiple MCV lists
-CREATE TABLE mcv_lists_multi (
- a INTEGER,
- b INTEGER,
- c INTEGER,
- d INTEGER
-)
-WITH (autovacuum_enabled = off);
-INSERT INTO mcv_lists_multi (a, b, c, d)
- SELECT
- mod(i,5),
- mod(i,5),
- mod(i,7),
- mod(i,7)
- FROM generate_series(1,5000) s(i);
-ANALYZE mcv_lists_multi;
--- estimates without any mcv statistics
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
- 200 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
- 102 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0');
- estimated | actual
------------+--------
- 143 | 142
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0');
- estimated | actual
------------+--------
- 1571 | 1572
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
- 4 | 142
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)');
- estimated | actual
------------+--------
- 298 | 1572
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0');
- estimated | actual
------------+--------
- 2649 | 1572
-(1 row)
-
--- create separate MCV statistics
-CREATE STATISTICS mcv_lists_multi_1 (mcv) ON a, b FROM mcv_lists_multi;
-CREATE STATISTICS mcv_lists_multi_2 (mcv) ON c, d FROM mcv_lists_multi;
-ANALYZE mcv_lists_multi;
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0');
- estimated | actual
------------+--------
- 1000 | 1000
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0');
- estimated | actual
------------+--------
- 714 | 714
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0');
- estimated | actual
------------+--------
- 143 | 142
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0');
- estimated | actual
------------+--------
- 1571 | 1572
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0');
- estimated | actual
------------+--------
- 143 | 142
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)');
- estimated | actual
------------+--------
- 1571 | 1572
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0');
- estimated | actual
------------+--------
- 1571 | 1572
-(1 row)
-
-DROP TABLE mcv_lists_multi;
--- statistics on integer expressions
-CREATE TABLE expr_stats (a int, b int, c int);
-INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i);
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-CREATE STATISTICS expr_stats_1 (mcv) ON (a+b), (a-b), (2*a), (3*b) FROM expr_stats;
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-DROP STATISTICS expr_stats_1;
-DROP TABLE expr_stats;
--- statistics on a mix columns and expressions
-CREATE TABLE expr_stats (a int, b int, c int);
-INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i);
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0');
- estimated | actual
------------+--------
- 1 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (2*a), (3*b), (a+b), (a-b) FROM expr_stats;
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0');
- estimated | actual
------------+--------
- 1 | 0
-(1 row)
-
-DROP TABLE expr_stats;
--- statistics on expressions with different data types
-CREATE TABLE expr_stats (a int, b name, c text);
-INSERT INTO expr_stats SELECT mod(i,10), fipshash(mod(i,10)::text), fipshash(mod(i,10)::text) FROM generate_series(1,1000) s(i);
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0''');
- estimated | actual
------------+--------
- 11 | 100
-(1 row)
-
-CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (b || c), (c || b) FROM expr_stats;
-ANALYZE expr_stats;
-SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0''');
- estimated | actual
------------+--------
- 100 | 100
-(1 row)
-
-DROP TABLE expr_stats;
--- test handling of a mix of compatible and incompatible expressions
-CREATE TABLE expr_stats_incompatible_test (
- c0 double precision,
- c1 boolean NOT NULL
-);
-CREATE STATISTICS expr_stat_comp_1 ON c0, c1 FROM expr_stats_incompatible_test;
-INSERT INTO expr_stats_incompatible_test VALUES (1234,false), (5678,true);
-ANALYZE expr_stats_incompatible_test;
-SELECT c0 FROM ONLY expr_stats_incompatible_test WHERE
-(
- upper('x') LIKE ('x'||('[0,1]'::int4range))
- AND
- (c0 IN (0, 1) OR c1)
-);
- c0
-----
-(0 rows)
-
-DROP TABLE expr_stats_incompatible_test;
--- Permission tests. Users should not be able to see specific data values in
--- the extended statistics, if they lack permission to see those values in
--- the underlying table.
---
--- Currently this is only relevant for MCV stats.
-CREATE SCHEMA tststats;
-CREATE TABLE tststats.priv_test_tbl (
- a int,
- b int
-);
-INSERT INTO tststats.priv_test_tbl
- SELECT mod(i,5), mod(i,10) FROM generate_series(1,100) s(i);
-CREATE STATISTICS tststats.priv_test_stats (mcv) ON a, b
- FROM tststats.priv_test_tbl;
-ANALYZE tststats.priv_test_tbl;
--- Check printing info about extended statistics by \dX
-create table stts_t1 (a int, b int);
-create statistics (ndistinct) on a, b from stts_t1;
-create statistics (ndistinct, dependencies) on a, b from stts_t1;
-create statistics (ndistinct, dependencies, mcv) on a, b from stts_t1;
-create table stts_t2 (a int, b int, c int);
-create statistics on b, c from stts_t2;
-create table stts_t3 (col1 int, col2 int, col3 int);
-create statistics stts_hoge on col1, col2, col3 from stts_t3;
-create schema stts_s1;
-create schema stts_s2;
-create statistics stts_s1.stts_foo on col1, col2 from stts_t3;
-create statistics stts_s2.stts_yama (dependencies, mcv) on col1, col3 from stts_t3;
-insert into stts_t1 select i,i from generate_series(1,100) i;
-analyze stts_t1;
-set search_path to public, stts_s1, stts_s2, tststats;
-\dX
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
-----------+------------------------+------------------------------------------------------------------+-----------+--------------+---------
- public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined |
- public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined
- public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined
- public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined
- public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
- public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
- public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
- public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
- stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined
- stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined
- tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined
-(12 rows)
-
-\dX stts_t*
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+-------------------+-------------------+-----------+--------------+---------
- public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
- public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
- public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
-(4 rows)
-
-\dX *stts_hoge
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+-----------+-------------------------------+-----------+--------------+---------
- public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
-(1 row)
-
-\dX+
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
-----------+------------------------+------------------------------------------------------------------+-----------+--------------+---------
- public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined |
- public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined
- public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined
- public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined
- public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
- public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
- public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
- public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
- stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined
- stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined
- tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined
-(12 rows)
-
-\dX+ stts_t*
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+-------------------+-------------------+-----------+--------------+---------
- public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
- public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
- public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
-(4 rows)
-
-\dX+ *stts_hoge
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+-----------+-------------------------------+-----------+--------------+---------
- public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
-(1 row)
-
-\dX+ stts_s2.stts_yama
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
----------+-----------+-------------------------+-----------+--------------+---------
- stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined
-(1 row)
-
-create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1;
-create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1;
-create statistics (mcv) ON (a+b), (a-b) FROM stts_t1;
-\dX stts_t*expr*
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+-----------------------------+-------------------------------------+-----------+--------------+---------
- public | stts_t1_a_b_expr_expr_stat | a, b, (a + b), (a - b) FROM stts_t1 | | | defined
- public | stts_t1_a_b_expr_expr_stat1 | a, b, (a + b), (a - b) FROM stts_t1 | | | defined
- public | stts_t1_expr_expr_stat | (a + b), (a - b) FROM stts_t1 | | | defined
-(3 rows)
-
-drop statistics stts_t1_a_b_expr_expr_stat;
-drop statistics stts_t1_a_b_expr_expr_stat1;
-drop statistics stts_t1_expr_expr_stat;
-set search_path to public, stts_s1;
-\dX
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
----------+------------------------+------------------------------------------------------------------+-----------+--------------+---------
- public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined |
- public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined
- public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined
- public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined
- public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
- public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
- public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
- public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
- stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined
-(10 rows)
-
-create role regress_stats_ext nosuperuser;
-set role regress_stats_ext;
-\dX
- List of extended statistics
- Schema | Name | Definition | Ndistinct | Dependencies | MCV
---------+------------------------+------------------------------------------------------------------+-----------+--------------+---------
- public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined |
- public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined
- public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined
- public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined
- public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined
- public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | |
- public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined |
- public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined
- public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined
-(9 rows)
-
-reset role;
-drop table stts_t1, stts_t2, stts_t3;
-drop schema stts_s1, stts_s2 cascade;
-drop user regress_stats_ext;
-reset search_path;
--- User with no access
-CREATE USER regress_stats_user1;
-GRANT USAGE ON SCHEMA tststats TO regress_stats_user1;
-SET SESSION AUTHORIZATION regress_stats_user1;
-SELECT * FROM tststats.priv_test_tbl; -- Permission denied
-ERROR: permission denied for table priv_test_tbl
--- Check individual columns if we don't have table privilege
-SELECT * FROM tststats.priv_test_tbl
- WHERE a = 1 and tststats.priv_test_tbl.* > (1, 1) is not null;
-ERROR: permission denied for table priv_test_tbl
--- Attempt to gain access using a leaky operator
-CREATE FUNCTION op_leak(int, int) RETURNS bool
- AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END'
- LANGUAGE plpgsql;
-CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int,
- restrict = scalarltsel);
-SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
-ERROR: permission denied for table priv_test_tbl
-DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
-ERROR: permission denied for table priv_test_tbl
--- Grant access via a security barrier view, but hide all data
-RESET SESSION AUTHORIZATION;
-CREATE VIEW tststats.priv_test_view WITH (security_barrier=true)
- AS SELECT * FROM tststats.priv_test_tbl WHERE false;
-GRANT SELECT, DELETE ON tststats.priv_test_view TO regress_stats_user1;
--- Should now have access via the view, but see nothing and leak nothing
-SET SESSION AUTHORIZATION regress_stats_user1;
-SELECT * FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak
- a | b
----+---
-(0 rows)
-
-DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak
--- Grant table access, but hide all data with RLS
-RESET SESSION AUTHORIZATION;
-ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY;
-GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1;
--- Should now have direct table access, but see nothing and leak nothing
-SET SESSION AUTHORIZATION regress_stats_user1;
-SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
- a | b
----+---
-(0 rows)
-
-DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
--- privilege checks for pg_stats_ext and pg_stats_ext_exprs
-RESET SESSION AUTHORIZATION;
-CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT);
-INSERT INTO stats_ext_tbl (col) VALUES ('secret'), ('secret'), ('very secret');
-CREATE STATISTICS s_col ON id, col FROM stats_ext_tbl;
-CREATE STATISTICS s_expr ON mod(id, 2), lower(col) FROM stats_ext_tbl;
-ANALYZE stats_ext_tbl;
--- unprivileged role should not have access
-SET SESSION AUTHORIZATION regress_stats_user1;
-SELECT statistics_name, most_common_vals FROM pg_stats_ext x
- WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*);
- statistics_name | most_common_vals
------------------+------------------
-(0 rows)
-
-SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x
- WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*);
- statistics_name | most_common_vals
------------------+------------------
-(0 rows)
-
--- give unprivileged role ownership of table
-RESET SESSION AUTHORIZATION;
-ALTER TABLE stats_ext_tbl OWNER TO regress_stats_user1;
--- unprivileged role should now have access
-SET SESSION AUTHORIZATION regress_stats_user1;
-SELECT statistics_name, most_common_vals FROM pg_stats_ext x
- WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*);
- statistics_name | most_common_vals
------------------+-------------------------------------------
- s_col | {{1,secret},{2,secret},{3,"very secret"}}
- s_expr | {{0,secret},{1,secret},{1,"very secret"}}
-(2 rows)
-
-SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x
- WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*);
- statistics_name | most_common_vals
------------------+------------------
- s_expr | {secret}
- s_expr | {1}
-(2 rows)
-
--- Tidy up
-DROP OPERATOR <<< (int, int);
-DROP FUNCTION op_leak(int, int);
-RESET SESSION AUTHORIZATION;
-DROP TABLE stats_ext_tbl;
-DROP SCHEMA tststats CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table tststats.priv_test_tbl
-drop cascades to view tststats.priv_test_view
-DROP USER regress_stats_user1;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/collate.linux.utf8_1.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/collate.linux.utf8.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/collate.linux.utf8_1.out 2024-11-15 02:50:52.422160960 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/collate.linux.utf8.out 2024-11-15 02:59:16.841115178 +0000
@@ -1,11 +1,2 @@
-/*
- * This test is for Linux/glibc systems and assumes that a full set of
- * locales is installed. It must be run in a database with UTF-8 encoding,
- * because other encodings don't support all the characters used.
- */
-SELECT getdatabaseencoding() <> 'UTF8' OR
- (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE', 'tr_TR') AND collencoding = pg_char_to_encoding('UTF8')) <> 4 OR
- version() !~ 'linux-gnu'
- AS skip_test \gset
-\if :skip_test
-\quit
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/collate.windows.win1252_1.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/collate.windows.win1252.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/collate.windows.win1252_1.out 2024-11-15 02:50:52.422160960 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/collate.windows.win1252.out 2024-11-15 02:59:16.837115173 +0000
@@ -1,13 +1,2 @@
-/*
- * This test is meant to run on Windows systems that has successfully
- * run pg_import_system_collations(). Also, the database must have
- * WIN1252 encoding, because of the locales' own encodings. Because
- * of this, some test are lost from UTF-8 version, such as Turkish
- * dotted and undotted 'i'.
- */
-SELECT getdatabaseencoding() <> 'WIN1252' OR
- (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE') AND collencoding = pg_char_to_encoding('WIN1252')) <> 3 OR
- (version() !~ 'Visual C\+\+' AND version() !~ 'mingw32' AND version() !~ 'windows')
- AS skip_test \gset
-\if :skip_test
-\quit
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/select_parallel.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/select_parallel.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/select_parallel.out 2024-11-15 02:50:52.498035883 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/select_parallel.out 2024-11-15 02:59:16.961115342 +0000
@@ -1,1436 +1,2 @@
---
--- PARALLEL
---
--- Save parallel worker stats, used for comparison at the end
-select pg_stat_force_next_flush();
- pg_stat_force_next_flush
---------------------------
-
-(1 row)
-
-select parallel_workers_to_launch as parallel_workers_to_launch_before,
- parallel_workers_launched as parallel_workers_launched_before
- from pg_stat_database
- where datname = current_database() \gset
-create function sp_parallel_restricted(int) returns int as
- $$begin return $1; end$$ language plpgsql parallel restricted;
-begin;
--- encourage use of parallel plans
-set parallel_setup_cost=0;
-set parallel_tuple_cost=0;
-set min_parallel_table_scan_size=0;
-set max_parallel_workers_per_gather=4;
--- Parallel Append with partial-subplans
-explain (costs off)
- select round(avg(aa)), sum(aa) from a_star;
- QUERY PLAN
---------------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 3
- -> Partial Aggregate
- -> Parallel Append
- -> Parallel Seq Scan on d_star a_star_4
- -> Parallel Seq Scan on f_star a_star_6
- -> Parallel Seq Scan on e_star a_star_5
- -> Parallel Seq Scan on b_star a_star_2
- -> Parallel Seq Scan on c_star a_star_3
- -> Parallel Seq Scan on a_star a_star_1
-(11 rows)
-
-select round(avg(aa)), sum(aa) from a_star a1;
- round | sum
--------+-----
- 14 | 355
-(1 row)
-
--- Parallel Append with both partial and non-partial subplans
-alter table c_star set (parallel_workers = 0);
-alter table d_star set (parallel_workers = 0);
-explain (costs off)
- select round(avg(aa)), sum(aa) from a_star;
- QUERY PLAN
---------------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 3
- -> Partial Aggregate
- -> Parallel Append
- -> Seq Scan on d_star a_star_4
- -> Seq Scan on c_star a_star_3
- -> Parallel Seq Scan on f_star a_star_6
- -> Parallel Seq Scan on e_star a_star_5
- -> Parallel Seq Scan on b_star a_star_2
- -> Parallel Seq Scan on a_star a_star_1
-(11 rows)
-
-select round(avg(aa)), sum(aa) from a_star a2;
- round | sum
--------+-----
- 14 | 355
-(1 row)
-
--- Parallel Append with only non-partial subplans
-alter table a_star set (parallel_workers = 0);
-alter table b_star set (parallel_workers = 0);
-alter table e_star set (parallel_workers = 0);
-alter table f_star set (parallel_workers = 0);
-explain (costs off)
- select round(avg(aa)), sum(aa) from a_star;
- QUERY PLAN
------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 3
- -> Partial Aggregate
- -> Parallel Append
- -> Seq Scan on d_star a_star_4
- -> Seq Scan on f_star a_star_6
- -> Seq Scan on e_star a_star_5
- -> Seq Scan on b_star a_star_2
- -> Seq Scan on c_star a_star_3
- -> Seq Scan on a_star a_star_1
-(11 rows)
-
-select round(avg(aa)), sum(aa) from a_star a3;
- round | sum
--------+-----
- 14 | 355
-(1 row)
-
--- Disable Parallel Append
-alter table a_star reset (parallel_workers);
-alter table b_star reset (parallel_workers);
-alter table c_star reset (parallel_workers);
-alter table d_star reset (parallel_workers);
-alter table e_star reset (parallel_workers);
-alter table f_star reset (parallel_workers);
-set enable_parallel_append to off;
-explain (costs off)
- select round(avg(aa)), sum(aa) from a_star;
- QUERY PLAN
---------------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 1
- -> Partial Aggregate
- -> Append
- -> Parallel Seq Scan on a_star a_star_1
- -> Parallel Seq Scan on b_star a_star_2
- -> Parallel Seq Scan on c_star a_star_3
- -> Parallel Seq Scan on d_star a_star_4
- -> Parallel Seq Scan on e_star a_star_5
- -> Parallel Seq Scan on f_star a_star_6
-(11 rows)
-
-select round(avg(aa)), sum(aa) from a_star a4;
- round | sum
--------+-----
- 14 | 355
-(1 row)
-
-reset enable_parallel_append;
--- Parallel Append that runs serially
-create function sp_test_func() returns setof text as
-$$ select 'foo'::varchar union all select 'bar'::varchar $$
-language sql stable;
-select sp_test_func() order by 1;
- sp_test_func
---------------
- bar
- foo
-(2 rows)
-
--- Parallel Append is not to be used when the subpath depends on the outer param
-create table part_pa_test(a int, b int) partition by range(a);
-create table part_pa_test_p1 partition of part_pa_test for values from (minvalue) to (0);
-create table part_pa_test_p2 partition of part_pa_test for values from (0) to (maxvalue);
-explain (costs off)
- select (select max((select pa1.b from part_pa_test pa1 where pa1.a = pa2.a)))
- from part_pa_test pa2;
- QUERY PLAN
---------------------------------------------------------------
- Aggregate
- -> Gather
- Workers Planned: 3
- -> Parallel Append
- -> Parallel Seq Scan on part_pa_test_p1 pa2_1
- -> Parallel Seq Scan on part_pa_test_p2 pa2_2
- SubPlan 2
- -> Result
- SubPlan 1
- -> Append
- -> Seq Scan on part_pa_test_p1 pa1_1
- Filter: (a = pa2.a)
- -> Seq Scan on part_pa_test_p2 pa1_2
- Filter: (a = pa2.a)
-(14 rows)
-
-drop table part_pa_test;
--- test with leader participation disabled
-set parallel_leader_participation = off;
-explain (costs off)
- select count(*) from tenk1 where stringu1 = 'GRAAAA';
- QUERY PLAN
----------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Seq Scan on tenk1
- Filter: (stringu1 = 'GRAAAA'::name)
-(6 rows)
-
-select count(*) from tenk1 where stringu1 = 'GRAAAA';
- count
--------
- 15
-(1 row)
-
--- test with leader participation disabled, but no workers available (so
--- the leader will have to run the plan despite the setting)
-set max_parallel_workers = 0;
-explain (costs off)
- select count(*) from tenk1 where stringu1 = 'GRAAAA';
- QUERY PLAN
----------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Seq Scan on tenk1
- Filter: (stringu1 = 'GRAAAA'::name)
-(6 rows)
-
-select count(*) from tenk1 where stringu1 = 'GRAAAA';
- count
--------
- 15
-(1 row)
-
-reset max_parallel_workers;
-reset parallel_leader_participation;
--- test that parallel_restricted function doesn't run in worker
-alter table tenk1 set (parallel_workers = 4);
-explain (verbose, costs off)
-select sp_parallel_restricted(unique1) from tenk1
- where stringu1 = 'GRAAAA' order by 1;
- QUERY PLAN
----------------------------------------------------------
- Sort
- Output: (sp_parallel_restricted(unique1))
- Sort Key: (sp_parallel_restricted(tenk1.unique1))
- -> Gather
- Output: sp_parallel_restricted(unique1)
- Workers Planned: 4
- -> Parallel Seq Scan on public.tenk1
- Output: unique1
- Filter: (tenk1.stringu1 = 'GRAAAA'::name)
-(9 rows)
-
--- test parallel plan when group by expression is in target list.
-explain (costs off)
- select length(stringu1) from tenk1 group by length(stringu1);
- QUERY PLAN
----------------------------------------------------
- Finalize HashAggregate
- Group Key: (length((stringu1)::text))
- -> Gather
- Workers Planned: 4
- -> Partial HashAggregate
- Group Key: length((stringu1)::text)
- -> Parallel Seq Scan on tenk1
-(7 rows)
-
-select length(stringu1) from tenk1 group by length(stringu1);
- length
---------
- 6
-(1 row)
-
-explain (costs off)
- select stringu1, count(*) from tenk1 group by stringu1 order by stringu1;
- QUERY PLAN
-----------------------------------------------------
- Sort
- Sort Key: stringu1
- -> Finalize HashAggregate
- Group Key: stringu1
- -> Gather
- Workers Planned: 4
- -> Partial HashAggregate
- Group Key: stringu1
- -> Parallel Seq Scan on tenk1
-(9 rows)
-
--- test that parallel plan for aggregates is not selected when
--- target list contains parallel restricted clause.
-explain (costs off)
- select sum(sp_parallel_restricted(unique1)) from tenk1
- group by(sp_parallel_restricted(unique1));
- QUERY PLAN
--------------------------------------------------------------------
- HashAggregate
- Group Key: sp_parallel_restricted(unique1)
- -> Gather
- Workers Planned: 4
- -> Parallel Index Only Scan using tenk1_unique1 on tenk1
-(5 rows)
-
--- test prepared statement
-prepare tenk1_count(integer) As select count((unique1)) from tenk1 where hundred > $1;
-explain (costs off) execute tenk1_count(1);
- QUERY PLAN
-----------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Seq Scan on tenk1
- Filter: (hundred > 1)
-(6 rows)
-
-execute tenk1_count(1);
- count
--------
- 9800
-(1 row)
-
-deallocate tenk1_count;
--- test parallel plans for queries containing un-correlated subplans.
-alter table tenk2 set (parallel_workers = 0);
-explain (costs off)
- select count(*) from tenk1 where (two, four) not in
- (select hundred, thousand from tenk2 where thousand > 100);
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Seq Scan on tenk1
- Filter: (NOT (ANY ((two = (hashed SubPlan 1).col1) AND (four = (hashed SubPlan 1).col2))))
- SubPlan 1
- -> Seq Scan on tenk2
- Filter: (thousand > 100)
-(9 rows)
-
-select count(*) from tenk1 where (two, four) not in
- (select hundred, thousand from tenk2 where thousand > 100);
- count
--------
- 10000
-(1 row)
-
--- this is not parallel-safe due to use of random() within SubLink's testexpr:
-explain (costs off)
- select * from tenk1 where (unique1 + random())::integer not in
- (select ten from tenk2);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------
- Seq Scan on tenk1
- Filter: (NOT (ANY ((((unique1)::double precision + random()))::integer = (hashed SubPlan 1).col1)))
- SubPlan 1
- -> Seq Scan on tenk2
-(4 rows)
-
-alter table tenk2 reset (parallel_workers);
--- test parallel plan for a query containing initplan.
-set enable_indexscan = off;
-set enable_indexonlyscan = off;
-set enable_bitmapscan = off;
-alter table tenk2 set (parallel_workers = 2);
-explain (costs off)
- select count(*) from tenk1
- where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2);
- QUERY PLAN
-------------------------------------------------------
- Aggregate
- InitPlan 1
- -> Finalize Aggregate
- -> Gather
- Workers Planned: 2
- -> Partial Aggregate
- -> Parallel Seq Scan on tenk2
- -> Gather
- Workers Planned: 4
- -> Parallel Seq Scan on tenk1
- Filter: (unique1 = (InitPlan 1).col1)
-(11 rows)
-
-select count(*) from tenk1
- where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2);
- count
--------
- 1
-(1 row)
-
-reset enable_indexscan;
-reset enable_indexonlyscan;
-reset enable_bitmapscan;
-alter table tenk2 reset (parallel_workers);
--- test parallel index scans.
-set enable_seqscan to off;
-set enable_bitmapscan to off;
-set random_page_cost = 2;
-explain (costs off)
- select count((unique1)) from tenk1 where hundred > 1;
- QUERY PLAN
---------------------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Index Scan using tenk1_hundred on tenk1
- Index Cond: (hundred > 1)
-(6 rows)
-
-select count((unique1)) from tenk1 where hundred > 1;
- count
--------
- 9800
-(1 row)
-
--- Parallel ScalarArrayOp index scan
-explain (costs off)
- select count((unique1)) from tenk1
- where hundred = any ((select array_agg(i) from generate_series(1, 100, 15) i)::int[]);
- QUERY PLAN
----------------------------------------------------------------------
- Finalize Aggregate
- InitPlan 1
- -> Aggregate
- -> Function Scan on generate_series i
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Index Scan using tenk1_hundred on tenk1
- Index Cond: (hundred = ANY ((InitPlan 1).col1))
-(9 rows)
-
-select count((unique1)) from tenk1
-where hundred = any ((select array_agg(i) from generate_series(1, 100, 15) i)::int[]);
- count
--------
- 700
-(1 row)
-
--- test parallel index-only scans.
-explain (costs off)
- select count(*) from tenk1 where thousand > 95;
- QUERY PLAN
---------------------------------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1
- Index Cond: (thousand > 95)
-(6 rows)
-
-select count(*) from tenk1 where thousand > 95;
- count
--------
- 9040
-(1 row)
-
--- test rescan cases too
-set enable_material = false;
-explain (costs off)
-select * from
- (select count(unique1) from tenk1 where hundred > 10) ss
- right join (values (1),(2),(3)) v(x) on true;
- QUERY PLAN
---------------------------------------------------------------------------
- Nested Loop Left Join
- -> Values Scan on "*VALUES*"
- -> Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Index Scan using tenk1_hundred on tenk1
- Index Cond: (hundred > 10)
-(8 rows)
-
-select * from
- (select count(unique1) from tenk1 where hundred > 10) ss
- right join (values (1),(2),(3)) v(x) on true;
- count | x
--------+---
- 8900 | 1
- 8900 | 2
- 8900 | 3
-(3 rows)
-
-explain (costs off)
-select * from
- (select count(*) from tenk1 where thousand > 99) ss
- right join (values (1),(2),(3)) v(x) on true;
- QUERY PLAN
---------------------------------------------------------------------------------------
- Nested Loop Left Join
- -> Values Scan on "*VALUES*"
- -> Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1
- Index Cond: (thousand > 99)
-(8 rows)
-
-select * from
- (select count(*) from tenk1 where thousand > 99) ss
- right join (values (1),(2),(3)) v(x) on true;
- count | x
--------+---
- 9000 | 1
- 9000 | 2
- 9000 | 3
-(3 rows)
-
--- test rescans for a Limit node with a parallel node beneath it.
-reset enable_seqscan;
-set enable_indexonlyscan to off;
-set enable_indexscan to off;
-alter table tenk1 set (parallel_workers = 0);
-alter table tenk2 set (parallel_workers = 1);
-explain (costs off)
-select count(*) from tenk1
- left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
- on tenk1.unique1 < ss.unique1 + 1
- where tenk1.unique1 < 2;
- QUERY PLAN
-------------------------------------------------------------
- Aggregate
- -> Nested Loop Left Join
- Join Filter: (tenk1.unique1 < (tenk2.unique1 + 1))
- -> Seq Scan on tenk1
- Filter: (unique1 < 2)
- -> Limit
- -> Gather Merge
- Workers Planned: 1
- -> Sort
- Sort Key: tenk2.unique1
- -> Parallel Seq Scan on tenk2
-(11 rows)
-
-select count(*) from tenk1
- left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
- on tenk1.unique1 < ss.unique1 + 1
- where tenk1.unique1 < 2;
- count
--------
- 1999
-(1 row)
-
---reset the value of workers for each table as it was before this test.
-alter table tenk1 set (parallel_workers = 4);
-alter table tenk2 reset (parallel_workers);
-reset enable_material;
-reset enable_bitmapscan;
-reset enable_indexonlyscan;
-reset enable_indexscan;
--- test parallel bitmap heap scan.
-set enable_seqscan to off;
-set enable_indexscan to off;
-set enable_hashjoin to off;
-set enable_mergejoin to off;
-set enable_material to off;
--- test prefetching, if the platform allows it
-DO $$
-BEGIN
- SET effective_io_concurrency = 50;
-EXCEPTION WHEN invalid_parameter_value THEN
-END $$;
-set work_mem='64kB'; --set small work mem to force lossy pages
-explain (costs off)
- select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0;
- QUERY PLAN
-------------------------------------------------------------
- Aggregate
- -> Nested Loop
- -> Gather
- Workers Planned: 4
- -> Parallel Seq Scan on tenk2
- Disabled: true
- Filter: (thousand = 0)
- -> Gather
- Workers Planned: 4
- -> Parallel Bitmap Heap Scan on tenk1
- Recheck Cond: (hundred > 1)
- -> Bitmap Index Scan on tenk1_hundred
- Index Cond: (hundred > 1)
-(13 rows)
-
-select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0;
- count
--------
- 98000
-(1 row)
-
-create table bmscantest (a int, t text);
-insert into bmscantest select r, 'fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' FROM generate_series(1,100000) r;
-create index i_bmtest ON bmscantest(a);
-select count(*) from bmscantest where a>1;
- count
--------
- 99999
-(1 row)
-
--- test accumulation of stats for parallel nodes
-reset enable_seqscan;
-alter table tenk2 set (parallel_workers = 0);
-explain (analyze, timing off, summary off, costs off)
- select count(*) from tenk1, tenk2 where tenk1.hundred > 1
- and tenk2.thousand=0;
- QUERY PLAN
---------------------------------------------------------------------------
- Aggregate (actual rows=1 loops=1)
- -> Nested Loop (actual rows=98000 loops=1)
- -> Seq Scan on tenk2 (actual rows=10 loops=1)
- Filter: (thousand = 0)
- Rows Removed by Filter: 9990
- -> Gather (actual rows=9800 loops=10)
- Workers Planned: 4
- Workers Launched: 4
- -> Parallel Seq Scan on tenk1 (actual rows=1960 loops=50)
- Filter: (hundred > 1)
- Rows Removed by Filter: 40
-(11 rows)
-
-alter table tenk2 reset (parallel_workers);
-reset work_mem;
-create function explain_parallel_sort_stats() returns setof text
-language plpgsql as
-$$
-declare ln text;
-begin
- for ln in
- explain (analyze, timing off, summary off, costs off)
- select * from
- (select ten from tenk1 where ten < 100 order by ten) ss
- right join (values (1),(2),(3)) v(x) on true
- loop
- ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx');
- return next ln;
- end loop;
-end;
-$$;
-select * from explain_parallel_sort_stats();
- explain_parallel_sort_stats
---------------------------------------------------------------------------
- Nested Loop Left Join (actual rows=30000 loops=1)
- -> Values Scan on "*VALUES*" (actual rows=3 loops=1)
- -> Gather Merge (actual rows=10000 loops=3)
- Workers Planned: 4
- Workers Launched: 4
- -> Sort (actual rows=2000 loops=15)
- Sort Key: tenk1.ten
- Sort Method: quicksort Memory: xxx
- Worker 0: Sort Method: quicksort Memory: xxx
- Worker 1: Sort Method: quicksort Memory: xxx
- Worker 2: Sort Method: quicksort Memory: xxx
- Worker 3: Sort Method: quicksort Memory: xxx
- -> Parallel Seq Scan on tenk1 (actual rows=2000 loops=15)
- Filter: (ten < 100)
-(14 rows)
-
-reset enable_indexscan;
-reset enable_hashjoin;
-reset enable_mergejoin;
-reset enable_material;
-reset effective_io_concurrency;
-drop table bmscantest;
-drop function explain_parallel_sort_stats();
--- test parallel merge join path.
-set enable_hashjoin to off;
-set enable_nestloop to off;
-explain (costs off)
- select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1;
- QUERY PLAN
--------------------------------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Merge Join
- Merge Cond: (tenk1.unique1 = tenk2.unique1)
- -> Parallel Index Only Scan using tenk1_unique1 on tenk1
- -> Index Only Scan using tenk2_unique1 on tenk2
-(8 rows)
-
-select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1;
- count
--------
- 10000
-(1 row)
-
-reset enable_hashjoin;
-reset enable_nestloop;
--- test parallel nestloop join path with materialization of the inner path
-alter table tenk2 set (parallel_workers = 0);
-explain (costs off)
-select * from tenk1 t1, tenk2 t2 where t1.two > t2.two;
- QUERY PLAN
--------------------------------------------
- Gather
- Workers Planned: 4
- -> Nested Loop
- Join Filter: (t1.two > t2.two)
- -> Parallel Seq Scan on tenk1 t1
- -> Materialize
- -> Seq Scan on tenk2 t2
-(7 rows)
-
--- test that parallel nestloop join is not generated if the inner path is
--- not parallel-safe
-explain (costs off)
-select * from tenk1 t1
- left join lateral
- (select t1.unique1 as x, * from tenk2 t2 order by 1) t2
- on true
-where t1.two > t2.two;
- QUERY PLAN
--------------------------------------------
- Nested Loop
- -> Gather
- Workers Planned: 4
- -> Parallel Seq Scan on tenk1 t1
- -> Subquery Scan on t2
- Filter: (t1.two > t2.two)
- -> Seq Scan on tenk2 t2_1
-(7 rows)
-
-alter table tenk2 reset (parallel_workers);
--- test gather merge
-set enable_hashagg = false;
-explain (costs off)
- select count(*) from tenk1 group by twenty;
- QUERY PLAN
-----------------------------------------------------
- Finalize GroupAggregate
- Group Key: twenty
- -> Gather Merge
- Workers Planned: 4
- -> Partial GroupAggregate
- Group Key: twenty
- -> Sort
- Sort Key: twenty
- -> Parallel Seq Scan on tenk1
-(9 rows)
-
-select count(*) from tenk1 group by twenty;
- count
--------
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
-(20 rows)
-
---test expressions in targetlist are pushed down for gather merge
-create function sp_simple_func(var1 integer) returns integer
-as $$
-begin
- return var1 + 10;
-end;
-$$ language plpgsql PARALLEL SAFE;
-explain (costs off, verbose)
- select ten, sp_simple_func(ten) from tenk1 where ten < 100 order by ten;
- QUERY PLAN
------------------------------------------------------
- Gather Merge
- Output: ten, (sp_simple_func(ten))
- Workers Planned: 4
- -> Result
- Output: ten, sp_simple_func(ten)
- -> Sort
- Output: ten
- Sort Key: tenk1.ten
- -> Parallel Seq Scan on public.tenk1
- Output: ten
- Filter: (tenk1.ten < 100)
-(11 rows)
-
-drop function sp_simple_func(integer);
--- test handling of SRFs in targetlist (bug in 10.0)
-explain (costs off)
- select count(*), generate_series(1,2) from tenk1 group by twenty;
- QUERY PLAN
-----------------------------------------------------------
- ProjectSet
- -> Finalize GroupAggregate
- Group Key: twenty
- -> Gather Merge
- Workers Planned: 4
- -> Partial GroupAggregate
- Group Key: twenty
- -> Sort
- Sort Key: twenty
- -> Parallel Seq Scan on tenk1
-(10 rows)
-
-select count(*), generate_series(1,2) from tenk1 group by twenty;
- count | generate_series
--------+-----------------
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
- 500 | 1
- 500 | 2
-(40 rows)
-
--- test gather merge with parallel leader participation disabled
-set parallel_leader_participation = off;
-explain (costs off)
- select count(*) from tenk1 group by twenty;
- QUERY PLAN
-----------------------------------------------------
- Finalize GroupAggregate
- Group Key: twenty
- -> Gather Merge
- Workers Planned: 4
- -> Partial GroupAggregate
- Group Key: twenty
- -> Sort
- Sort Key: twenty
- -> Parallel Seq Scan on tenk1
-(9 rows)
-
-select count(*) from tenk1 group by twenty;
- count
--------
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
- 500
-(20 rows)
-
-reset parallel_leader_participation;
---test rescan behavior of gather merge
-set enable_material = false;
-explain (costs off)
-select * from
- (select string4, count(unique2)
- from tenk1 group by string4 order by string4) ss
- right join (values (1),(2),(3)) v(x) on true;
- QUERY PLAN
-----------------------------------------------------------
- Nested Loop Left Join
- -> Values Scan on "*VALUES*"
- -> Finalize GroupAggregate
- Group Key: tenk1.string4
- -> Gather Merge
- Workers Planned: 4
- -> Partial GroupAggregate
- Group Key: tenk1.string4
- -> Sort
- Sort Key: tenk1.string4
- -> Parallel Seq Scan on tenk1
-(11 rows)
-
-select * from
- (select string4, count(unique2)
- from tenk1 group by string4 order by string4) ss
- right join (values (1),(2),(3)) v(x) on true;
- string4 | count | x
----------+-------+---
- AAAAxx | 2500 | 1
- HHHHxx | 2500 | 1
- OOOOxx | 2500 | 1
- VVVVxx | 2500 | 1
- AAAAxx | 2500 | 2
- HHHHxx | 2500 | 2
- OOOOxx | 2500 | 2
- VVVVxx | 2500 | 2
- AAAAxx | 2500 | 3
- HHHHxx | 2500 | 3
- OOOOxx | 2500 | 3
- VVVVxx | 2500 | 3
-(12 rows)
-
-reset enable_material;
-reset enable_hashagg;
--- check parallelized int8 aggregate (bug #14897)
-explain (costs off)
-select avg(unique1::int8) from tenk1;
- QUERY PLAN
--------------------------------------------------------------------------
- Finalize Aggregate
- -> Gather
- Workers Planned: 4
- -> Partial Aggregate
- -> Parallel Index Only Scan using tenk1_unique1 on tenk1
-(5 rows)
-
-select avg(unique1::int8) from tenk1;
- avg
------------------------
- 4999.5000000000000000
-(1 row)
-
--- gather merge test with a LIMIT
-explain (costs off)
- select fivethous from tenk1 order by fivethous limit 4;
- QUERY PLAN
-----------------------------------------------
- Limit
- -> Gather Merge
- Workers Planned: 4
- -> Sort
- Sort Key: fivethous
- -> Parallel Seq Scan on tenk1
-(6 rows)
-
-select fivethous from tenk1 order by fivethous limit 4;
- fivethous
------------
- 0
- 0
- 1
- 1
-(4 rows)
-
--- gather merge test with 0 worker
-set max_parallel_workers = 0;
-explain (costs off)
- select string4 from tenk1 order by string4 limit 5;
- QUERY PLAN
-----------------------------------------------
- Limit
- -> Gather Merge
- Workers Planned: 4
- -> Sort
- Sort Key: string4
- -> Parallel Seq Scan on tenk1
-(6 rows)
-
-select string4 from tenk1 order by string4 limit 5;
- string4
----------
- AAAAxx
- AAAAxx
- AAAAxx
- AAAAxx
- AAAAxx
-(5 rows)
-
--- gather merge test with 0 workers, with parallel leader
--- participation disabled (the leader will have to run the plan
--- despite the setting)
-set parallel_leader_participation = off;
-explain (costs off)
- select string4 from tenk1 order by string4 limit 5;
- QUERY PLAN
-----------------------------------------------
- Limit
- -> Gather Merge
- Workers Planned: 4
- -> Sort
- Sort Key: string4
- -> Parallel Seq Scan on tenk1
-(6 rows)
-
-select string4 from tenk1 order by string4 limit 5;
- string4
----------
- AAAAxx
- AAAAxx
- AAAAxx
- AAAAxx
- AAAAxx
-(5 rows)
-
-reset parallel_leader_participation;
-reset max_parallel_workers;
-create function parallel_safe_volatile(a int) returns int as
- $$ begin return a; end; $$ parallel safe volatile language plpgsql;
--- Test gather merge atop of a sort of a partial path
-explain (costs off)
-select * from tenk1 where four = 2
-order by four, hundred, parallel_safe_volatile(thousand);
- QUERY PLAN
----------------------------------------------------------------
- Gather Merge
- Workers Planned: 4
- -> Sort
- Sort Key: hundred, (parallel_safe_volatile(thousand))
- -> Parallel Seq Scan on tenk1
- Filter: (four = 2)
-(6 rows)
-
--- Test gather merge atop of an incremental sort a of partial path
-set min_parallel_index_scan_size = 0;
-set enable_seqscan = off;
-explain (costs off)
-select * from tenk1 where four = 2
-order by four, hundred, parallel_safe_volatile(thousand);
- QUERY PLAN
----------------------------------------------------------------
- Gather Merge
- Workers Planned: 4
- -> Incremental Sort
- Sort Key: hundred, (parallel_safe_volatile(thousand))
- Presorted Key: hundred
- -> Parallel Index Scan using tenk1_hundred on tenk1
- Filter: (four = 2)
-(7 rows)
-
-reset min_parallel_index_scan_size;
-reset enable_seqscan;
--- Test GROUP BY with a gather merge path atop of a sort of a partial path
-explain (costs off)
-select count(*) from tenk1
-group by twenty, parallel_safe_volatile(two);
- QUERY PLAN
---------------------------------------------------------------------
- Finalize GroupAggregate
- Group Key: twenty, (parallel_safe_volatile(two))
- -> Gather Merge
- Workers Planned: 4
- -> Sort
- Sort Key: twenty, (parallel_safe_volatile(two))
- -> Partial HashAggregate
- Group Key: twenty, parallel_safe_volatile(two)
- -> Parallel Seq Scan on tenk1
-(9 rows)
-
-drop function parallel_safe_volatile(int);
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-explain (costs off)
- select stringu1::int2 from tenk1 where unique1 = 1;
- QUERY PLAN
------------------------------------------------
- Gather
- Workers Planned: 1
- Single Copy: true
- -> Index Scan using tenk1_unique1 on tenk1
- Index Cond: (unique1 = 1)
-(5 rows)
-
-ROLLBACK TO SAVEPOINT settings;
--- exercise record typmod remapping between backends
-CREATE FUNCTION make_record(n int)
- RETURNS RECORD LANGUAGE plpgsql PARALLEL SAFE AS
-$$
-BEGIN
- RETURN CASE n
- WHEN 1 THEN ROW(1)
- WHEN 2 THEN ROW(1, 2)
- WHEN 3 THEN ROW(1, 2, 3)
- WHEN 4 THEN ROW(1, 2, 3, 4)
- ELSE ROW(1, 2, 3, 4, 5)
- END;
-END;
-$$;
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-SELECT make_record(x) FROM (SELECT generate_series(1, 5) x) ss ORDER BY x;
- make_record
--------------
- (1)
- (1,2)
- (1,2,3)
- (1,2,3,4)
- (1,2,3,4,5)
-(5 rows)
-
-ROLLBACK TO SAVEPOINT settings;
-DROP function make_record(n int);
--- test the sanity of parallel query after the active role is dropped.
-drop role if exists regress_parallel_worker;
-NOTICE: role "regress_parallel_worker" does not exist, skipping
-create role regress_parallel_worker;
-set role regress_parallel_worker;
-reset session authorization;
-drop role regress_parallel_worker;
-set debug_parallel_query = 1;
-select count(*) from tenk1;
- count
--------
- 10000
-(1 row)
-
-reset debug_parallel_query;
-reset role;
--- Window function calculation can't be pushed to workers.
-explain (costs off, verbose)
- select count(*) from tenk1 a where (unique1, two) in
- (select unique1, row_number() over() from tenk1 b);
- QUERY PLAN
-----------------------------------------------------------------------------------------
- Aggregate
- Output: count(*)
- -> Hash Right Semi Join
- Hash Cond: ((b.unique1 = a.unique1) AND ((row_number() OVER (?)) = a.two))
- -> WindowAgg
- Output: b.unique1, row_number() OVER (?)
- -> Gather
- Output: b.unique1
- Workers Planned: 4
- -> Parallel Index Only Scan using tenk1_unique1 on public.tenk1 b
- Output: b.unique1
- -> Hash
- Output: a.unique1, a.two
- -> Gather
- Output: a.unique1, a.two
- Workers Planned: 4
- -> Parallel Seq Scan on public.tenk1 a
- Output: a.unique1, a.two
-(18 rows)
-
--- LIMIT/OFFSET within sub-selects can't be pushed to workers.
-explain (costs off)
- select * from tenk1 a where two in
- (select two from tenk1 b where stringu1 like '%AAAA' limit 3);
- QUERY PLAN
----------------------------------------------------------------
- Hash Semi Join
- Hash Cond: (a.two = b.two)
- -> Gather
- Workers Planned: 4
- -> Parallel Seq Scan on tenk1 a
- -> Hash
- -> Limit
- -> Gather
- Workers Planned: 4
- -> Parallel Seq Scan on tenk1 b
- Filter: (stringu1 ~~ '%AAAA'::text)
-(11 rows)
-
--- to increase the parallel query test coverage
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1;
- QUERY PLAN
--------------------------------------------------------------
- Gather (actual rows=10000 loops=1)
- Workers Planned: 4
- Workers Launched: 4
- -> Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
-(4 rows)
-
-ROLLBACK TO SAVEPOINT settings;
--- provoke error in worker
--- (make the error message long enough to require multiple bufferloads)
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-select (stringu1 || repeat('abcd', 5000))::int2 from tenk1 where unique1 = 1;
-ERROR: invalid input syntax for type smallint: "BAAAAAabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
-CONTEXT: parallel worker
-ROLLBACK TO SAVEPOINT settings;
--- test interaction with set-returning functions
-SAVEPOINT settings;
--- multiple subqueries under a single Gather node
--- must set parallel_setup_cost > 0 to discourage multiple Gather nodes
-SET LOCAL parallel_setup_cost = 10;
-EXPLAIN (COSTS OFF)
-SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1
-UNION ALL
-SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1;
- QUERY PLAN
-----------------------------------------------------
- Gather
- Workers Planned: 4
- -> Parallel Append
- -> Parallel Seq Scan on tenk1
- Filter: (fivethous = (tenthous + 1))
- -> Parallel Seq Scan on tenk1 tenk1_1
- Filter: (fivethous = (tenthous + 1))
-(7 rows)
-
-ROLLBACK TO SAVEPOINT settings;
--- can't use multiple subqueries under a single Gather node due to initPlans
-EXPLAIN (COSTS OFF)
-SELECT unique1 FROM tenk1 WHERE fivethous =
- (SELECT unique1 FROM tenk1 WHERE fivethous = 1 LIMIT 1)
-UNION ALL
-SELECT unique1 FROM tenk1 WHERE fivethous =
- (SELECT unique2 FROM tenk1 WHERE fivethous = 1 LIMIT 1)
-ORDER BY 1;
- QUERY PLAN
---------------------------------------------------------------------
- Sort
- Sort Key: tenk1.unique1
- -> Append
- -> Gather
- Workers Planned: 4
- InitPlan 1
- -> Limit
- -> Gather
- Workers Planned: 4
- -> Parallel Seq Scan on tenk1 tenk1_2
- Filter: (fivethous = 1)
- -> Parallel Seq Scan on tenk1
- Filter: (fivethous = (InitPlan 1).col1)
- -> Gather
- Workers Planned: 4
- InitPlan 2
- -> Limit
- -> Gather
- Workers Planned: 4
- -> Parallel Seq Scan on tenk1 tenk1_3
- Filter: (fivethous = 1)
- -> Parallel Seq Scan on tenk1 tenk1_1
- Filter: (fivethous = (InitPlan 2).col1)
-(23 rows)
-
--- test interaction with SRFs
-SELECT * FROM information_schema.foreign_data_wrapper_options
-ORDER BY 1, 2, 3;
- foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value
-------------------------------+---------------------------+-------------+--------------
-(0 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT generate_series(1, two), array(select generate_series(1, two))
- FROM tenk1 ORDER BY tenthous;
- QUERY PLAN
----------------------------------------------------------------------------
- ProjectSet
- Output: generate_series(1, tenk1.two), ARRAY(SubPlan 1), tenk1.tenthous
- -> Gather Merge
- Output: tenk1.two, tenk1.tenthous
- Workers Planned: 4
- -> Result
- Output: tenk1.two, tenk1.tenthous
- -> Sort
- Output: tenk1.tenthous, tenk1.two
- Sort Key: tenk1.tenthous
- -> Parallel Seq Scan on public.tenk1
- Output: tenk1.tenthous, tenk1.two
- SubPlan 1
- -> ProjectSet
- Output: generate_series(1, tenk1.two)
- -> Result
-(16 rows)
-
--- must disallow pushing sort below gather when pathkey contains an SRF
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT unnest(ARRAY[]::integer[]) + 1 AS pathkey
- FROM tenk1 t1 JOIN tenk1 t2 ON TRUE
- ORDER BY pathkey;
- QUERY PLAN
------------------------------------------------------------------------------------------------------
- Sort
- Output: (((unnest('{}'::integer[])) + 1))
- Sort Key: (((unnest('{}'::integer[])) + 1))
- -> Result
- Output: ((unnest('{}'::integer[])) + 1)
- -> ProjectSet
- Output: unnest('{}'::integer[])
- -> Nested Loop
- -> Gather
- Workers Planned: 4
- -> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t1
- -> Materialize
- -> Gather
- Workers Planned: 4
- -> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t2
-(15 rows)
-
--- test passing expanded-value representations to workers
-CREATE FUNCTION make_some_array(int,int) returns int[] as
-$$declare x int[];
- begin
- x[1] := $1;
- x[2] := $2;
- return x;
- end$$ language plpgsql parallel safe;
-CREATE TABLE fooarr(f1 text, f2 int[], f3 text);
-INSERT INTO fooarr VALUES('1', ARRAY[1,2], 'one');
-PREPARE pstmt(text, int[]) AS SELECT * FROM fooarr WHERE f1 = $1 AND f2 = $2;
-EXPLAIN (COSTS OFF) EXECUTE pstmt('1', make_some_array(1,2));
- QUERY PLAN
-------------------------------------------------------------------
- Gather
- Workers Planned: 3
- -> Parallel Seq Scan on fooarr
- Filter: ((f1 = '1'::text) AND (f2 = '{1,2}'::integer[]))
-(4 rows)
-
-EXECUTE pstmt('1', make_some_array(1,2));
- f1 | f2 | f3
-----+-------+-----
- 1 | {1,2} | one
-(1 row)
-
-DEALLOCATE pstmt;
--- test interaction between subquery and partial_paths
-CREATE VIEW tenk1_vw_sec WITH (security_barrier) AS SELECT * FROM tenk1;
-EXPLAIN (COSTS OFF)
-SELECT 1 FROM tenk1_vw_sec
- WHERE (SELECT sum(f1) FROM int4_tbl WHERE f1 < unique1) < 100;
- QUERY PLAN
--------------------------------------------------------------------
- Subquery Scan on tenk1_vw_sec
- Filter: ((SubPlan 1) < 100)
- -> Gather
- Workers Planned: 4
- -> Parallel Index Only Scan using tenk1_unique1 on tenk1
- SubPlan 1
- -> Aggregate
- -> Seq Scan on int4_tbl
- Filter: (f1 < tenk1_vw_sec.unique1)
-(9 rows)
-
-rollback;
--- test that a newly-created session role propagates to workers.
-begin;
-create role regress_parallel_worker;
-set session authorization regress_parallel_worker;
-select current_setting('session_authorization');
- current_setting
--------------------------
- regress_parallel_worker
-(1 row)
-
-set debug_parallel_query = 1;
-select current_setting('session_authorization');
- current_setting
--------------------------
- regress_parallel_worker
-(1 row)
-
-rollback;
--- test that function option SET ROLE works in parallel workers.
-create role regress_parallel_worker;
-create function set_and_report_role() returns text as
- $$ select current_setting('role') $$ language sql parallel safe
- set role = regress_parallel_worker;
-create function set_role_and_error(int) returns int as
- $$ select 1 / $1 $$ language sql parallel safe
- set role = regress_parallel_worker;
-set debug_parallel_query = 0;
-select set_and_report_role();
- set_and_report_role
--------------------------
- regress_parallel_worker
-(1 row)
-
-select set_role_and_error(0);
-ERROR: division by zero
-CONTEXT: SQL function "set_role_and_error" statement 1
-set debug_parallel_query = 1;
-select set_and_report_role();
- set_and_report_role
--------------------------
- regress_parallel_worker
-(1 row)
-
-select set_role_and_error(0);
-ERROR: division by zero
-CONTEXT: SQL function "set_role_and_error" statement 1
-parallel worker
-reset debug_parallel_query;
-drop function set_and_report_role();
-drop function set_role_and_error(int);
-drop role regress_parallel_worker;
--- don't freeze in ParallelFinish while holding an LWLock
-BEGIN;
-CREATE FUNCTION my_cmp (int4, int4)
-RETURNS int LANGUAGE sql AS
-$$
- SELECT
- CASE WHEN $1 < $2 THEN -1
- WHEN $1 > $2 THEN 1
- ELSE 0
- END;
-$$;
-CREATE TABLE parallel_hang (i int4);
-INSERT INTO parallel_hang
- (SELECT * FROM generate_series(1, 400) gs);
-CREATE OPERATOR CLASS int4_custom_ops FOR TYPE int4 USING btree AS
- OPERATOR 1 < (int4, int4), OPERATOR 2 <= (int4, int4),
- OPERATOR 3 = (int4, int4), OPERATOR 4 >= (int4, int4),
- OPERATOR 5 > (int4, int4), FUNCTION 1 my_cmp(int4, int4);
-CREATE UNIQUE INDEX parallel_hang_idx
- ON parallel_hang
- USING btree (i int4_custom_ops);
-SET debug_parallel_query = on;
-DELETE FROM parallel_hang WHERE 380 <= i AND i <= 420;
-ROLLBACK;
--- Check parallel worker stats
-select pg_stat_force_next_flush();
- pg_stat_force_next_flush
---------------------------
-
-(1 row)
-
-select parallel_workers_to_launch > :'parallel_workers_to_launch_before' AS wrk_to_launch,
- parallel_workers_launched > :'parallel_workers_launched_before' AS wrk_launched
- from pg_stat_database
- where datname = current_database();
- wrk_to_launch | wrk_launched
----------------+--------------
- t | t
-(1 row)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/write_parallel.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/write_parallel.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/write_parallel.out 2024-11-15 02:50:52.521996385 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/write_parallel.out 2024-11-15 02:59:16.985115376 +0000
@@ -1,80 +1,2 @@
---
--- PARALLEL
---
-begin;
--- encourage use of parallel plans
-set parallel_setup_cost=0;
-set parallel_tuple_cost=0;
-set min_parallel_table_scan_size=0;
-set max_parallel_workers_per_gather=4;
---
--- Test write operations that has an underlying query that is eligible
--- for parallel plans
---
-explain (costs off) create table parallel_write as
- select length(stringu1) from tenk1 group by length(stringu1);
- QUERY PLAN
----------------------------------------------------
- Finalize HashAggregate
- Group Key: (length((stringu1)::text))
- -> Gather
- Workers Planned: 4
- -> Partial HashAggregate
- Group Key: length((stringu1)::text)
- -> Parallel Seq Scan on tenk1
-(7 rows)
-
-create table parallel_write as
- select length(stringu1) from tenk1 group by length(stringu1);
-drop table parallel_write;
-explain (costs off) select length(stringu1) into parallel_write
- from tenk1 group by length(stringu1);
- QUERY PLAN
----------------------------------------------------
- Finalize HashAggregate
- Group Key: (length((stringu1)::text))
- -> Gather
- Workers Planned: 4
- -> Partial HashAggregate
- Group Key: length((stringu1)::text)
- -> Parallel Seq Scan on tenk1
-(7 rows)
-
-select length(stringu1) into parallel_write
- from tenk1 group by length(stringu1);
-drop table parallel_write;
-explain (costs off) create materialized view parallel_mat_view as
- select length(stringu1) from tenk1 group by length(stringu1);
- QUERY PLAN
----------------------------------------------------
- Finalize HashAggregate
- Group Key: (length((stringu1)::text))
- -> Gather
- Workers Planned: 4
- -> Partial HashAggregate
- Group Key: length((stringu1)::text)
- -> Parallel Seq Scan on tenk1
-(7 rows)
-
-create materialized view parallel_mat_view as
- select length(stringu1) from tenk1 group by length(stringu1);
-create unique index on parallel_mat_view(length);
-refresh materialized view parallel_mat_view;
-refresh materialized view concurrently parallel_mat_view;
-drop materialized view parallel_mat_view;
-prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1);
-explain (costs off) create table parallel_write as execute prep_stmt;
- QUERY PLAN
----------------------------------------------------
- Finalize HashAggregate
- Group Key: (length((stringu1)::text))
- -> Gather
- Workers Planned: 4
- -> Partial HashAggregate
- Group Key: length((stringu1)::text)
- -> Parallel Seq Scan on tenk1
-(7 rows)
-
-create table parallel_write as execute prep_stmt;
-drop table parallel_write;
-rollback;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/vacuum_parallel.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/vacuum_parallel.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/vacuum_parallel.out 2024-11-15 02:50:52.518002968 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/vacuum_parallel.out 2024-11-15 02:59:17.037115445 +0000
@@ -1,49 +1,2 @@
-SET max_parallel_maintenance_workers TO 4;
-SET min_parallel_index_scan_size TO '128kB';
--- Bug #17245: Make sure that we don't totally fail to VACUUM individual indexes that
--- happen to be below min_parallel_index_scan_size during parallel VACUUM:
-CREATE TABLE parallel_vacuum_table (a int) WITH (autovacuum_enabled = off);
-INSERT INTO parallel_vacuum_table SELECT i from generate_series(1, 10000) i;
--- Parallel VACUUM will never be used unless there are at least two indexes
--- that exceed min_parallel_index_scan_size. Create two such indexes, and
--- a third index that is smaller than min_parallel_index_scan_size.
-CREATE INDEX regular_sized_index ON parallel_vacuum_table(a);
-CREATE INDEX typically_sized_index ON parallel_vacuum_table(a);
--- Note: vacuum_in_leader_small_index can apply deduplication, making it ~3x
--- smaller than the other indexes
-CREATE INDEX vacuum_in_leader_small_index ON parallel_vacuum_table((1));
--- Verify (as best we can) that the cost model for parallel VACUUM
--- will make our VACUUM run in parallel, while always leaving it up to the
--- parallel leader to handle the vacuum_in_leader_small_index index:
-SELECT EXISTS (
-SELECT 1
-FROM pg_class
-WHERE oid = 'vacuum_in_leader_small_index'::regclass AND
- pg_relation_size(oid) <
- pg_size_bytes(current_setting('min_parallel_index_scan_size'))
-) as leader_will_handle_small_index;
- leader_will_handle_small_index
---------------------------------
- t
-(1 row)
-
-SELECT count(*) as trigger_parallel_vacuum_nindexes
-FROM pg_class
-WHERE oid in ('regular_sized_index'::regclass, 'typically_sized_index'::regclass) AND
- pg_relation_size(oid) >=
- pg_size_bytes(current_setting('min_parallel_index_scan_size'));
- trigger_parallel_vacuum_nindexes
-----------------------------------
- 2
-(1 row)
-
--- Parallel VACUUM with B-Tree page deletions, ambulkdelete calls:
-DELETE FROM parallel_vacuum_table;
-VACUUM (PARALLEL 4, INDEX_CLEANUP ON) parallel_vacuum_table;
--- Since vacuum_in_leader_small_index uses deduplication, we expect an
--- assertion failure with bug #17245 (in the absence of bugfix):
-INSERT INTO parallel_vacuum_table SELECT i FROM generate_series(1, 10000) i;
-RESET max_parallel_maintenance_workers;
-RESET min_parallel_index_scan_size;
--- Deliberately don't drop table, to get further coverage from tools like
--- pg_amcheck in some testing scenarios
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/publication.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/publication.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/publication.out 2024-11-15 02:50:52.490049049 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/publication.out 2024-11-15 02:59:17.105115536 +0000
@@ -1,1853 +1,2 @@
---
--- PUBLICATION
---
-CREATE ROLE regress_publication_user LOGIN SUPERUSER;
-CREATE ROLE regress_publication_user2;
-CREATE ROLE regress_publication_user_dummy LOGIN NOSUPERUSER;
-SET SESSION AUTHORIZATION 'regress_publication_user';
--- suppress warning that depends on wal_level
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_default;
-RESET client_min_messages;
-COMMENT ON PUBLICATION testpub_default IS 'test publication';
-SELECT obj_description(p.oid, 'pg_publication') FROM pg_publication p;
- obj_description
-------------------
- test publication
-(1 row)
-
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpib_ins_trunct WITH (publish = insert);
-RESET client_min_messages;
-ALTER PUBLICATION testpub_default SET (publish = update);
--- error cases
-CREATE PUBLICATION testpub_xxx WITH (foo);
-ERROR: unrecognized publication parameter: "foo"
-CREATE PUBLICATION testpub_xxx WITH (publish = 'cluster, vacuum');
-ERROR: unrecognized value for publication option "publish": "cluster"
-CREATE PUBLICATION testpub_xxx WITH (publish_via_partition_root = 'true', publish_via_partition_root = '0');
-ERROR: conflicting or redundant options
-LINE 1: ...ub_xxx WITH (publish_via_partition_root = 'true', publish_vi...
- ^
-CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = 'true', publish_generated_columns = '0');
-ERROR: conflicting or redundant options
-LINE 1: ...pub_xxx WITH (publish_generated_columns = 'true', publish_ge...
- ^
-CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = 'foo');
-ERROR: publish_generated_columns requires a Boolean value
-\dRp
- List of publications
- Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------+--------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- testpib_ins_trunct | regress_publication_user | f | t | f | f | f | f | f
- testpub_default | regress_publication_user | f | f | t | f | f | f | f
-(2 rows)
-
-ALTER PUBLICATION testpub_default SET (publish = 'insert, update, delete');
-\dRp
- List of publications
- Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------+--------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- testpib_ins_trunct | regress_publication_user | f | t | f | f | f | f | f
- testpub_default | regress_publication_user | f | t | t | t | f | f | f
-(2 rows)
-
---- adding tables
-CREATE SCHEMA pub_test;
-CREATE TABLE testpub_tbl1 (id serial primary key, data text);
-CREATE TABLE pub_test.testpub_nopk (foo int, bar int);
-CREATE VIEW testpub_view AS SELECT 1;
-CREATE TABLE testpub_parted (a int) PARTITION BY LIST (a);
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_foralltables FOR ALL TABLES WITH (publish = 'insert');
-RESET client_min_messages;
-ALTER PUBLICATION testpub_foralltables SET (publish = 'insert, update');
-CREATE TABLE testpub_tbl2 (id serial primary key, data text);
--- fail - can't add to for all tables publication
-ALTER PUBLICATION testpub_foralltables ADD TABLE testpub_tbl2;
-ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications.
--- fail - can't drop from all tables publication
-ALTER PUBLICATION testpub_foralltables DROP TABLE testpub_tbl2;
-ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications.
--- fail - can't add to for all tables publication
-ALTER PUBLICATION testpub_foralltables SET TABLE pub_test.testpub_nopk;
-ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications.
--- fail - can't add schema to 'FOR ALL TABLES' publication
-ALTER PUBLICATION testpub_foralltables ADD TABLES IN SCHEMA pub_test;
-ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL: Schemas cannot be added to or dropped from FOR ALL TABLES publications.
--- fail - can't drop schema from 'FOR ALL TABLES' publication
-ALTER PUBLICATION testpub_foralltables DROP TABLES IN SCHEMA pub_test;
-ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL: Schemas cannot be added to or dropped from FOR ALL TABLES publications.
--- fail - can't set schema to 'FOR ALL TABLES' publication
-ALTER PUBLICATION testpub_foralltables SET TABLES IN SCHEMA pub_test;
-ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES
-DETAIL: Schemas cannot be added to or dropped from FOR ALL TABLES publications.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_fortable FOR TABLE testpub_tbl1;
-RESET client_min_messages;
--- should be able to add schema to 'FOR TABLE' publication
-ALTER PUBLICATION testpub_fortable ADD TABLES IN SCHEMA pub_test;
-\dRp+ testpub_fortable
- Publication testpub_fortable
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.testpub_tbl1"
-Tables from schemas:
- "pub_test"
-
--- should be able to drop schema from 'FOR TABLE' publication
-ALTER PUBLICATION testpub_fortable DROP TABLES IN SCHEMA pub_test;
-\dRp+ testpub_fortable
- Publication testpub_fortable
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.testpub_tbl1"
-
--- should be able to set schema to 'FOR TABLE' publication
-ALTER PUBLICATION testpub_fortable SET TABLES IN SCHEMA pub_test;
-\dRp+ testpub_fortable
- Publication testpub_fortable
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test"
-
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pub_test;
--- should be able to create publication with schema and table of the same
--- schema
-CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA pub_test, TABLE pub_test.testpub_nopk;
-RESET client_min_messages;
-\dRp+ testpub_for_tbl_schema
- Publication testpub_for_tbl_schema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "pub_test.testpub_nopk"
-Tables from schemas:
- "pub_test"
-
--- weird parser corner case
-CREATE PUBLICATION testpub_parsertst FOR TABLE pub_test.testpub_nopk, CURRENT_SCHEMA;
-ERROR: invalid table name
-LINE 1: ...estpub_parsertst FOR TABLE pub_test.testpub_nopk, CURRENT_SC...
- ^
-CREATE PUBLICATION testpub_parsertst FOR TABLES IN SCHEMA foo, test.foo;
-ERROR: invalid schema name
-LINE 1: ...CATION testpub_parsertst FOR TABLES IN SCHEMA foo, test.foo;
- ^
--- should be able to add a table of the same schema to the schema publication
-ALTER PUBLICATION testpub_forschema ADD TABLE pub_test.testpub_nopk;
-\dRp+ testpub_forschema
- Publication testpub_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "pub_test.testpub_nopk"
-Tables from schemas:
- "pub_test"
-
--- should be able to drop the table
-ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk;
-\dRp+ testpub_forschema
- Publication testpub_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test"
-
--- fail - can't drop a table from the schema publication which isn't in the
--- publication
-ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk;
-ERROR: relation "testpub_nopk" is not part of the publication
--- should be able to set table to schema publication
-ALTER PUBLICATION testpub_forschema SET TABLE pub_test.testpub_nopk;
-\dRp+ testpub_forschema
- Publication testpub_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "pub_test.testpub_nopk"
-
-SELECT pubname, puballtables FROM pg_publication WHERE pubname = 'testpub_foralltables';
- pubname | puballtables
-----------------------+--------------
- testpub_foralltables | t
-(1 row)
-
-\d+ testpub_tbl2
- Table "public.testpub_tbl2"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+------------------------------------------+----------+--------------+-------------
- id | integer | | not null | nextval('testpub_tbl2_id_seq'::regclass) | plain | |
- data | text | | | | extended | |
-Indexes:
- "testpub_tbl2_pkey" PRIMARY KEY, btree (id)
-Publications:
- "testpub_foralltables"
-Not-null constraints:
- "testpub_tbl2_id_not_null" NOT NULL "id"
-
-\dRp+ testpub_foralltables
- Publication testpub_foralltables
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | t | t | t | f | f | f | f
-(1 row)
-
-DROP TABLE testpub_tbl2;
-DROP PUBLICATION testpub_foralltables, testpub_fortable, testpub_forschema, testpub_for_tbl_schema;
-CREATE TABLE testpub_tbl3 (a int);
-CREATE TABLE testpub_tbl3a (b text) INHERITS (testpub_tbl3);
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub3 FOR TABLE testpub_tbl3;
-CREATE PUBLICATION testpub4 FOR TABLE ONLY testpub_tbl3;
-RESET client_min_messages;
-\dRp+ testpub3
- Publication testpub3
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.testpub_tbl3"
- "public.testpub_tbl3a"
-
-\dRp+ testpub4
- Publication testpub4
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.testpub_tbl3"
-
-DROP TABLE testpub_tbl3, testpub_tbl3a;
-DROP PUBLICATION testpub3, testpub4;
--- Tests for partitioned tables
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_forparted;
-CREATE PUBLICATION testpub_forparted1;
-RESET client_min_messages;
-CREATE TABLE testpub_parted1 (LIKE testpub_parted);
-CREATE TABLE testpub_parted2 (LIKE testpub_parted);
-ALTER PUBLICATION testpub_forparted1 SET (publish='insert');
-ALTER TABLE testpub_parted ATTACH PARTITION testpub_parted1 FOR VALUES IN (1);
-ALTER TABLE testpub_parted ATTACH PARTITION testpub_parted2 FOR VALUES IN (2);
--- works despite missing REPLICA IDENTITY, because updates are not replicated
-UPDATE testpub_parted1 SET a = 1;
--- only parent is listed as being in publication, not the partition
-ALTER PUBLICATION testpub_forparted ADD TABLE testpub_parted;
-\dRp+ testpub_forparted
- Publication testpub_forparted
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.testpub_parted"
-
--- works despite missing REPLICA IDENTITY, because no actual update happened
-UPDATE testpub_parted SET a = 1 WHERE false;
--- should now fail, because parent's publication replicates updates
-UPDATE testpub_parted1 SET a = 1;
-ERROR: cannot update table "testpub_parted1" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER TABLE testpub_parted DETACH PARTITION testpub_parted1;
--- works again, because parent's publication is no longer considered
-UPDATE testpub_parted1 SET a = 1;
-ALTER PUBLICATION testpub_forparted SET (publish_via_partition_root = true);
-\dRp+ testpub_forparted
- Publication testpub_forparted
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | t
-Tables:
- "public.testpub_parted"
-
--- still fail, because parent's publication replicates updates
-UPDATE testpub_parted2 SET a = 2;
-ERROR: cannot update table "testpub_parted2" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER PUBLICATION testpub_forparted DROP TABLE testpub_parted;
--- works again, because update is no longer replicated
-UPDATE testpub_parted2 SET a = 2;
-DROP TABLE testpub_parted1, testpub_parted2;
-DROP PUBLICATION testpub_forparted, testpub_forparted1;
--- Tests for row filters
-CREATE TABLE testpub_rf_tbl1 (a integer, b text);
-CREATE TABLE testpub_rf_tbl2 (c text, d integer);
-CREATE TABLE testpub_rf_tbl3 (e integer);
-CREATE TABLE testpub_rf_tbl4 (g text);
-CREATE TABLE testpub_rf_tbl5 (a xml);
-CREATE SCHEMA testpub_rf_schema1;
-CREATE TABLE testpub_rf_schema1.testpub_rf_tbl5 (h integer);
-CREATE SCHEMA testpub_rf_schema2;
-CREATE TABLE testpub_rf_schema2.testpub_rf_tbl6 (i integer);
-SET client_min_messages = 'ERROR';
--- Firstly, test using the option publish='insert' because the row filter
--- validation of referenced columns is less strict than for delete/update.
-CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert');
-RESET client_min_messages;
-\dRp+ testpub5
- Publication testpub5
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | f | f | f | f | f
-Tables:
- "public.testpub_rf_tbl1"
- "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5))
-
-\d testpub_rf_tbl3
- Table "public.testpub_rf_tbl3"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- e | integer | | |
-
-ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000);
-\dRp+ testpub5
- Publication testpub5
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | f | f | f | f | f
-Tables:
- "public.testpub_rf_tbl1"
- "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5))
- "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000))
-
-\d testpub_rf_tbl3
- Table "public.testpub_rf_tbl3"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- e | integer | | |
-Publications:
- "testpub5" WHERE ((e > 1000) AND (e < 2000))
-
-ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2;
-\dRp+ testpub5
- Publication testpub5
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | f | f | f | f | f
-Tables:
- "public.testpub_rf_tbl1"
- "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000))
-
--- remove testpub_rf_tbl1 and add testpub_rf_tbl3 again (another WHERE expression)
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500);
-\dRp+ testpub5
- Publication testpub5
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | f | f | f | f | f
-Tables:
- "public.testpub_rf_tbl3" WHERE ((e > 300) AND (e < 500))
-
-\d testpub_rf_tbl3
- Table "public.testpub_rf_tbl3"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- e | integer | | |
-Publications:
- "testpub5" WHERE ((e > 300) AND (e < 500))
-
--- test \d (now it displays filter information)
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_rf_yes FOR TABLE testpub_rf_tbl1 WHERE (a > 1) WITH (publish = 'insert');
-CREATE PUBLICATION testpub_rf_no FOR TABLE testpub_rf_tbl1;
-RESET client_min_messages;
-\d testpub_rf_tbl1
- Table "public.testpub_rf_tbl1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | text | | |
-Publications:
- "testpub_rf_no"
- "testpub_rf_yes" WHERE (a > 1)
-
-DROP PUBLICATION testpub_rf_yes, testpub_rf_no;
--- some more syntax tests to exercise other parser pathways
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_syntax1 FOR TABLE testpub_rf_tbl1, ONLY testpub_rf_tbl3 WHERE (e < 999) WITH (publish = 'insert');
-RESET client_min_messages;
-\dRp+ testpub_syntax1
- Publication testpub_syntax1
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | f | f | f | f | f
-Tables:
- "public.testpub_rf_tbl1"
- "public.testpub_rf_tbl3" WHERE (e < 999)
-
-DROP PUBLICATION testpub_syntax1;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_syntax2 FOR TABLE testpub_rf_tbl1, testpub_rf_schema1.testpub_rf_tbl5 WHERE (h < 999) WITH (publish = 'insert');
-RESET client_min_messages;
-\dRp+ testpub_syntax2
- Publication testpub_syntax2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | f | f | f | f | f
-Tables:
- "public.testpub_rf_tbl1"
- "testpub_rf_schema1.testpub_rf_tbl5" WHERE (h < 999)
-
-DROP PUBLICATION testpub_syntax2;
--- fail - schemas don't allow WHERE clause
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1 WHERE (a = 123);
-ERROR: syntax error at or near "WHERE"
-LINE 1: ...b_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1 WHERE (a =...
- ^
-CREATE PUBLICATION testpub_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1, testpub_rf_schema1 WHERE (a = 123);
-ERROR: WHERE clause not allowed for schema
-LINE 1: ..._syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1, testpub_rf...
- ^
-RESET client_min_messages;
--- fail - duplicate tables are not allowed if that table has any WHERE clause
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1 WHERE (a = 1), testpub_rf_tbl1 WITH (publish = 'insert');
-ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1"
-CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1, testpub_rf_tbl1 WHERE (a = 2) WITH (publish = 'insert');
-ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1"
-RESET client_min_messages;
--- fail - publication WHERE clause must be boolean
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234);
-ERROR: argument of PUBLICATION WHERE must be type boolean, not type integer
-LINE 1: ...PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234);
- ^
--- fail - aggregate functions not allowed in WHERE clause
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e));
-ERROR: aggregate functions are not allowed in WHERE
-LINE 1: ...ATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e));
- ^
--- fail - user-defined operators are not allowed
-CREATE FUNCTION testpub_rf_func1(integer, integer) RETURNS boolean AS $$ SELECT hashint4($1) > $2 $$ LANGUAGE SQL;
-CREATE OPERATOR =#> (PROCEDURE = testpub_rf_func1, LEFTARG = integer, RIGHTARG = integer);
-CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27);
-ERROR: invalid publication WHERE expression
-LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27);
- ^
-DETAIL: User-defined operators are not allowed.
--- fail - user-defined functions are not allowed
-CREATE FUNCTION testpub_rf_func2() RETURNS integer AS $$ BEGIN RETURN 123; END; $$ LANGUAGE plpgsql;
-ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf_func2());
-ERROR: invalid publication WHERE expression
-LINE 1: ...ON testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf...
- ^
-DETAIL: User-defined or built-in mutable functions are not allowed.
--- fail - non-immutable functions are not allowed. random() is volatile.
-ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random());
-ERROR: invalid publication WHERE expression
-LINE 1: ...ION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random());
- ^
-DETAIL: User-defined or built-in mutable functions are not allowed.
--- fail - user-defined collations are not allowed
-CREATE COLLATION user_collation FROM "C";
-ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' COLLATE user_collation);
-ERROR: invalid publication WHERE expression
-LINE 1: ...ICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' CO...
- ^
-DETAIL: User-defined collations are not allowed.
--- ok - NULLIF is allowed
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1,2) = a);
--- ok - built-in operators are allowed
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS NULL);
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a > 5) IS FALSE);
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS DISTINCT FROM 5);
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a, a + 1) < (2, 3));
--- ok - built-in type coercions between two binary compatible datatypes are allowed
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (b::varchar < '2');
--- ok - immutable built-in functions are allowed
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl4 WHERE (length(g) < 6);
--- fail - user-defined types are not allowed
-CREATE TYPE rf_bug_status AS ENUM ('new', 'open', 'closed');
-CREATE TABLE rf_bug (id serial, description text, status rf_bug_status);
-CREATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = 'open') WITH (publish = 'insert');
-ERROR: invalid publication WHERE expression
-LINE 1: ...EATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = '...
- ^
-DETAIL: User-defined types are not allowed.
-DROP TABLE rf_bug;
-DROP TYPE rf_bug_status;
--- fail - row filter expression is not simple
-CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELECT generate_series(1,5)));
-ERROR: invalid publication WHERE expression
-LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELE...
- ^
-DETAIL: Only columns, constants, built-in operators, built-in data types, built-in collations, and immutable built-in functions are allowed.
--- fail - system columns are not allowed
-CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid);
-ERROR: invalid publication WHERE expression
-LINE 1: ...tpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid);
- ^
-DETAIL: System columns are not allowed.
--- ok - conditional expressions are allowed
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (a IS DOCUMENT);
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (xmlexists('//foo[text() = ''bar'']' PASSING BY VALUE a));
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1, 2) = a);
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (CASE a WHEN 5 THEN true ELSE false END);
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (COALESCE(b, 'foo') = 'foo');
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (GREATEST(a, 10) > 10);
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IN (2, 4, 6));
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ARRAY[a] <@ ARRAY[2, 4, 6]);
-ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ROW(a, 2) IS NULL);
--- fail - WHERE not allowed in DROP
-ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl1 WHERE (e < 27);
-ERROR: cannot use a WHERE clause when removing a table from a publication
--- fail - cannot ALTER SET table which is a member of a pre-existing schema
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub6 FOR TABLES IN SCHEMA testpub_rf_schema2;
--- should be able to set publication with schema and table of the same schema
-ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpub_rf_schema2.testpub_rf_tbl6 WHERE (i < 99);
-RESET client_min_messages;
-\dRp+ testpub6
- Publication testpub6
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "testpub_rf_schema2.testpub_rf_tbl6" WHERE (i < 99)
-Tables from schemas:
- "testpub_rf_schema2"
-
-DROP TABLE testpub_rf_tbl1;
-DROP TABLE testpub_rf_tbl2;
-DROP TABLE testpub_rf_tbl3;
-DROP TABLE testpub_rf_tbl4;
-DROP TABLE testpub_rf_tbl5;
-DROP TABLE testpub_rf_schema1.testpub_rf_tbl5;
-DROP TABLE testpub_rf_schema2.testpub_rf_tbl6;
-DROP SCHEMA testpub_rf_schema1;
-DROP SCHEMA testpub_rf_schema2;
-DROP PUBLICATION testpub5;
-DROP PUBLICATION testpub6;
-DROP OPERATOR =#>(integer, integer);
-DROP FUNCTION testpub_rf_func1(integer, integer);
-DROP FUNCTION testpub_rf_func2();
-DROP COLLATION user_collation;
--- ======================================================
--- More row filter tests for validating column references
-CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int);
-CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b));
-CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a);
-CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY);
-ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10);
--- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing)
--- 1a. REPLICA IDENTITY is DEFAULT and table has a PK.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk WHERE (a > 99);
-RESET client_min_messages;
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (b > 99);
--- ok - "b" is a PK col
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99);
--- fail - "c" is not part of the PK
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (d > 99);
--- fail - "d" is not part of the PK
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
--- 1b. REPLICA IDENTITY is DEFAULT and table has no PK
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99);
--- fail - "a" is not part of REPLICA IDENTITY
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_nopk"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
--- Case 2. REPLICA IDENTITY FULL
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL;
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99);
--- ok - "c" is in REPLICA IDENTITY now even though not in PK
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99);
--- ok - "a" is in REPLICA IDENTITY now
-UPDATE rf_tbl_abcd_nopk SET a = 1;
--- Case 3. REPLICA IDENTITY NOTHING
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING;
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99);
--- fail - "a" is in PK but it is not part of REPLICA IDENTITY NOTHING
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99);
--- fail - "c" is not in PK and not in REPLICA IDENTITY NOTHING
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99);
--- fail - "a" is not in REPLICA IDENTITY NOTHING
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_nopk"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
--- Case 4. REPLICA IDENTITY INDEX
-ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL;
-CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c);
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c;
-ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL;
-CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c);
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99);
--- fail - "a" is in PK but it is not part of REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99);
--- ok - "c" is not in PK but it is part of REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99);
--- fail - "a" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_nopk"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (c > 99);
--- ok - "c" is part of REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_nopk SET a = 1;
--- Tests for partitioned table
--- set PUBLISH_VIA_PARTITION_ROOT to false and test row filter for partitioned
--- table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- fail - cannot use row filter for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99);
-ERROR: cannot use publication WHERE clause for relation "rf_tbl_abcd_part_pk"
-DETAIL: WHERE clause cannot be used for a partitioned table when publish_via_partition_root is false.
--- ok - can use row filter for partition
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (a > 99);
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
--- set PUBLISH_VIA_PARTITION_ROOT to true and test row filter for partitioned
--- table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1);
--- ok - can use row filter for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99);
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
--- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any row filter is
--- used for partitioned table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
-ERROR: cannot set parameter "publish_via_partition_root" to false for publication "testpub6"
-DETAIL: The publication contains a WHERE clause for partitioned table "rf_tbl_abcd_part_pk", which is not allowed when "publish_via_partition_root" is false.
--- remove partitioned table's row filter
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk;
--- ok - we don't have row filter for partitioned table.
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- Now change the root filter to use a column "b"
--- (which is not in the replica identity)
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (b > 99);
--- ok - we don't have row filter for partitioned table.
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- fail - "b" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_part_pk_1"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
--- set PUBLISH_VIA_PARTITION_ROOT to true
--- can use row filter for partitioned table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1);
--- ok - can use row filter for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (b > 99);
--- fail - "b" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_part_pk_1"
-DETAIL: Column used in the publication WHERE expression is not part of the replica identity.
-DROP PUBLICATION testpub6;
-DROP TABLE rf_tbl_abcd_pk;
-DROP TABLE rf_tbl_abcd_nopk;
-DROP TABLE rf_tbl_abcd_part_pk;
--- ======================================================
--- fail - duplicate tables are not allowed if that table has any column lists
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_dups FOR TABLE testpub_tbl1 (a), testpub_tbl1 WITH (publish = 'insert');
-ERROR: conflicting or redundant column lists for table "testpub_tbl1"
-CREATE PUBLICATION testpub_dups FOR TABLE testpub_tbl1, testpub_tbl1 (a) WITH (publish = 'insert');
-ERROR: conflicting or redundant column lists for table "testpub_tbl1"
-RESET client_min_messages;
--- test for column lists
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_fortable FOR TABLE testpub_tbl1;
-CREATE PUBLICATION testpub_fortable_insert WITH (publish = 'insert');
-RESET client_min_messages;
-CREATE TABLE testpub_tbl5 (a int PRIMARY KEY, b text, c text,
- d int generated always as (a + length(b)) stored);
--- error: column "x" does not exist
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, x);
-ERROR: column "x" of relation "testpub_tbl5" does not exist
--- error: replica identity "a" not included in the column list
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c);
-UPDATE testpub_tbl5 SET a = 1;
-ERROR: cannot update table "testpub_tbl5"
-DETAIL: Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5;
--- error: system attribute "ctid" not allowed in column list
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, ctid);
-ERROR: cannot use system column "ctid" in publication column list
-ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl1 (id, ctid);
-ERROR: cannot use system column "ctid" in publication column list
--- error: duplicates not allowed in column list
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, a);
-ERROR: duplicate column "a" in publication column list
-ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl5 (a, a);
-ERROR: duplicate column "a" in publication column list
--- ok
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c);
-ALTER TABLE testpub_tbl5 DROP COLUMN c; -- no dice
-ERROR: cannot drop column c of table testpub_tbl5 because other objects depend on it
-DETAIL: publication of table testpub_tbl5 in publication testpub_fortable depends on column c of table testpub_tbl5
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
--- ok: for insert-only publication, any column list is acceptable
-ALTER PUBLICATION testpub_fortable_insert ADD TABLE testpub_tbl5 (b, c);
-/* not all replica identities are good enough */
-CREATE UNIQUE INDEX testpub_tbl5_b_key ON testpub_tbl5 (b, c);
-ALTER TABLE testpub_tbl5 ALTER b SET NOT NULL, ALTER c SET NOT NULL;
-ALTER TABLE testpub_tbl5 REPLICA IDENTITY USING INDEX testpub_tbl5_b_key;
--- error: replica identity (b,c) is not covered by column list (a, c)
-UPDATE testpub_tbl5 SET a = 1;
-ERROR: cannot update table "testpub_tbl5"
-DETAIL: Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5;
--- ok: generated column "d" can be in the list too
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, d);
-ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5;
--- error: change the replica identity to "b", and column list to (a, c)
--- then update fails, because (a, c) does not cover replica identity
-ALTER TABLE testpub_tbl5 REPLICA IDENTITY USING INDEX testpub_tbl5_b_key;
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c);
-UPDATE testpub_tbl5 SET a = 1;
-ERROR: cannot update table "testpub_tbl5"
-DETAIL: Column list used by the publication does not cover the replica identity.
-/* But if upd/del are not published, it works OK */
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_table_ins WITH (publish = 'insert, truncate');
-RESET client_min_messages;
-ALTER PUBLICATION testpub_table_ins ADD TABLE testpub_tbl5 (a); -- ok
-\dRp+ testpub_table_ins
- Publication testpub_table_ins
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | f | f | t | f | f
-Tables:
- "public.testpub_tbl5" (a)
-
--- error: cannot work with deferrable primary keys
-CREATE TABLE testpub_tbl5d (a int PRIMARY KEY DEFERRABLE);
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5d;
-UPDATE testpub_tbl5d SET a = 1;
-ERROR: cannot update table "testpub_tbl5d" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-/* but works fine with FULL replica identity */
-ALTER TABLE testpub_tbl5d REPLICA IDENTITY FULL;
-UPDATE testpub_tbl5d SET a = 1;
-DROP TABLE testpub_tbl5d;
--- tests with REPLICA IDENTITY FULL
-CREATE TABLE testpub_tbl6 (a int, b text, c text);
-ALTER TABLE testpub_tbl6 REPLICA IDENTITY FULL;
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl6 (a, b, c);
-UPDATE testpub_tbl6 SET a = 1;
-ERROR: cannot update table "testpub_tbl6"
-DETAIL: Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl6;
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl6; -- ok
-UPDATE testpub_tbl6 SET a = 1;
--- make sure changing the column list is propagated to the catalog
-CREATE TABLE testpub_tbl7 (a int primary key, b text, c text);
-ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl7 (a, b);
-\d+ testpub_tbl7
- Table "public.testpub_tbl7"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- a | integer | | not null | | plain | |
- b | text | | | | extended | |
- c | text | | | | extended | |
-Indexes:
- "testpub_tbl7_pkey" PRIMARY KEY, btree (a)
-Publications:
- "testpub_fortable" (a, b)
-Not-null constraints:
- "testpub_tbl7_a_not_null" NOT NULL "a"
-
--- ok: the column list is the same, we should skip this table (or at least not fail)
-ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, b);
-\d+ testpub_tbl7
- Table "public.testpub_tbl7"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- a | integer | | not null | | plain | |
- b | text | | | | extended | |
- c | text | | | | extended | |
-Indexes:
- "testpub_tbl7_pkey" PRIMARY KEY, btree (a)
-Publications:
- "testpub_fortable" (a, b)
-Not-null constraints:
- "testpub_tbl7_a_not_null" NOT NULL "a"
-
--- ok: the column list changes, make sure the catalog gets updated
-ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, c);
-\d+ testpub_tbl7
- Table "public.testpub_tbl7"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- a | integer | | not null | | plain | |
- b | text | | | | extended | |
- c | text | | | | extended | |
-Indexes:
- "testpub_tbl7_pkey" PRIMARY KEY, btree (a)
-Publications:
- "testpub_fortable" (a, c)
-Not-null constraints:
- "testpub_tbl7_a_not_null" NOT NULL "a"
-
--- column list for partitioned tables has to cover replica identities for
--- all child relations
-CREATE TABLE testpub_tbl8 (a int, b text, c text) PARTITION BY HASH (a);
--- first partition has replica identity "a"
-CREATE TABLE testpub_tbl8_0 PARTITION OF testpub_tbl8 FOR VALUES WITH (modulus 2, remainder 0);
-ALTER TABLE testpub_tbl8_0 ADD PRIMARY KEY (a);
-ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY USING INDEX testpub_tbl8_0_pkey;
--- second partition has replica identity "b"
-CREATE TABLE testpub_tbl8_1 PARTITION OF testpub_tbl8 FOR VALUES WITH (modulus 2, remainder 1);
-ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (b);
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey;
--- ok: column list covers both "a" and "b"
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_col_list FOR TABLE testpub_tbl8 (a, b) WITH (publish_via_partition_root = 'true');
-RESET client_min_messages;
--- ok: the same thing, but try plain ADD TABLE
-ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8;
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b);
-UPDATE testpub_tbl8 SET a = 1;
--- failure: column list does not cover replica identity for the second partition
-ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8;
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, c);
-UPDATE testpub_tbl8 SET a = 1;
-ERROR: cannot update table "testpub_tbl8_1"
-DETAIL: Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8;
--- failure: one of the partitions has REPLICA IDENTITY FULL
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY FULL;
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, c);
-UPDATE testpub_tbl8 SET a = 1;
-ERROR: cannot update table "testpub_tbl8_1"
-DETAIL: Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8;
--- add table and then try changing replica identity
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey;
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b);
--- failure: replica identity full can't be used with a column list
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY FULL;
-UPDATE testpub_tbl8 SET a = 1;
-ERROR: cannot update table "testpub_tbl8_1"
-DETAIL: Column list used by the publication does not cover the replica identity.
--- failure: replica identity has to be covered by the column list
-ALTER TABLE testpub_tbl8_1 DROP CONSTRAINT testpub_tbl8_1_pkey;
-ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (c);
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey;
-UPDATE testpub_tbl8 SET a = 1;
-ERROR: cannot update table "testpub_tbl8_1"
-DETAIL: Column list used by the publication does not cover the replica identity.
-DROP TABLE testpub_tbl8;
--- column list for partitioned tables has to cover replica identities for
--- all child relations
-CREATE TABLE testpub_tbl8 (a int, b text, c text) PARTITION BY HASH (a);
-ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b);
--- first partition has replica identity "a"
-CREATE TABLE testpub_tbl8_0 (a int, b text, c text);
-ALTER TABLE testpub_tbl8_0 ADD PRIMARY KEY (a);
-ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY USING INDEX testpub_tbl8_0_pkey;
--- second partition has replica identity "b"
-CREATE TABLE testpub_tbl8_1 (a int, b text, c text);
-ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (c);
-ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey;
--- ok: attaching first partition works, because (a) is in column list
-ALTER TABLE testpub_tbl8 ATTACH PARTITION testpub_tbl8_0 FOR VALUES WITH (modulus 2, remainder 0);
--- failure: second partition has replica identity (c), which is not in column list
-ALTER TABLE testpub_tbl8 ATTACH PARTITION testpub_tbl8_1 FOR VALUES WITH (modulus 2, remainder 1);
-UPDATE testpub_tbl8 SET a = 1;
-ERROR: cannot update table "testpub_tbl8_1"
-DETAIL: Column list used by the publication does not cover the replica identity.
--- failure: changing replica identity to FULL for partition fails, because
--- of the column list on the parent
-ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY FULL;
-UPDATE testpub_tbl8 SET a = 1;
-ERROR: cannot update table "testpub_tbl8_0"
-DETAIL: Column list used by the publication does not cover the replica identity.
--- test that using column list for table is disallowed if any schemas are
--- part of the publication
-SET client_min_messages = 'ERROR';
--- failure - cannot use column list and schema together
-CREATE PUBLICATION testpub_tbl9 FOR TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a);
-ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9"
-DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
--- ok - only publish schema
-CREATE PUBLICATION testpub_tbl9 FOR TABLES IN SCHEMA public;
--- failure - add a table with column list when there is already a schema in the
--- publication
-ALTER PUBLICATION testpub_tbl9 ADD TABLE public.testpub_tbl7(a);
-ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9"
-DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
--- ok - only publish table with column list
-ALTER PUBLICATION testpub_tbl9 SET TABLE public.testpub_tbl7(a);
--- failure - specify a schema when there is already a column list in the
--- publication
-ALTER PUBLICATION testpub_tbl9 ADD TABLES IN SCHEMA public;
-ERROR: cannot add schema to publication "testpub_tbl9"
-DETAIL: Schemas cannot be added if any tables that specify a column list are already part of the publication.
--- failure - cannot SET column list and schema together
-ALTER PUBLICATION testpub_tbl9 SET TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a);
-ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9"
-DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
--- ok - drop table
-ALTER PUBLICATION testpub_tbl9 DROP TABLE public.testpub_tbl7;
--- failure - cannot ADD column list and schema together
-ALTER PUBLICATION testpub_tbl9 ADD TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a);
-ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9"
-DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
-RESET client_min_messages;
-DROP TABLE testpub_tbl5, testpub_tbl6, testpub_tbl7, testpub_tbl8, testpub_tbl8_1;
-DROP PUBLICATION testpub_table_ins, testpub_fortable, testpub_fortable_insert, testpub_col_list, testpub_tbl9;
--- ======================================================
--- Test combination of column list and row filter
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_both_filters;
-RESET client_min_messages;
-CREATE TABLE testpub_tbl_both_filters (a int, b int, c int, PRIMARY KEY (a,c));
-ALTER TABLE testpub_tbl_both_filters REPLICA IDENTITY USING INDEX testpub_tbl_both_filters_pkey;
-ALTER PUBLICATION testpub_both_filters ADD TABLE testpub_tbl_both_filters (a,c) WHERE (c != 1);
-\dRp+ testpub_both_filters
- Publication testpub_both_filters
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.testpub_tbl_both_filters" (a, c) WHERE (c <> 1)
-
-\d+ testpub_tbl_both_filters
- Table "public.testpub_tbl_both_filters"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a | integer | | not null | | plain | |
- b | integer | | | | plain | |
- c | integer | | not null | | plain | |
-Indexes:
- "testpub_tbl_both_filters_pkey" PRIMARY KEY, btree (a, c) REPLICA IDENTITY
-Publications:
- "testpub_both_filters" (a, c) WHERE (c <> 1)
-Not-null constraints:
- "testpub_tbl_both_filters_a_not_null" NOT NULL "a"
- "testpub_tbl_both_filters_c_not_null" NOT NULL "c"
-
-DROP TABLE testpub_tbl_both_filters;
-DROP PUBLICATION testpub_both_filters;
--- ======================================================
--- More column list tests for validating column references
-CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int);
-CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b));
-CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a);
-CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY);
-ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10);
--- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing)
--- 1a. REPLICA IDENTITY is DEFAULT and table has a PK.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b);
-RESET client_min_messages;
--- ok - (a,b) covers all PK cols
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c);
--- ok - (a,b,c) covers all PK cols
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
--- fail - "b" is missing from the column list
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk"
-DETAIL: Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (b);
--- fail - "a" is missing from the column list
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk"
-DETAIL: Column list used by the publication does not cover the replica identity.
--- 1b. REPLICA IDENTITY is DEFAULT and table has no PK
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a);
--- ok - there's no replica identity, so any column list works
--- note: it fails anyway, just a bit later because UPDATE requires RI
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_nopk" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- Case 2. REPLICA IDENTITY FULL
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL;
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (c);
--- fail - with REPLICA IDENTITY FULL no column list is allowed
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk"
-DETAIL: Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a, b, c, d);
--- fail - with REPLICA IDENTITY FULL no column list is allowed
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_nopk"
-DETAIL: Column list used by the publication does not cover the replica identity.
--- Case 3. REPLICA IDENTITY NOTHING
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING;
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
--- ok - REPLICA IDENTITY NOTHING means all column lists are valid
--- it still fails later because without RI we can't replicate updates
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c, d);
--- ok - REPLICA IDENTITY NOTHING means all column lists are valid
--- it still fails later because without RI we can't replicate updates
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (d);
--- ok - REPLICA IDENTITY NOTHING means all column lists are valid
--- it still fails later because without RI we can't replicate updates
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_nopk" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- Case 4. REPLICA IDENTITY INDEX
-ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL;
-CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c);
-ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c;
-ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL;
-CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c);
-ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
--- fail - column list "a" does not cover the REPLICA IDENTITY INDEX on "c"
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_pk"
-DETAIL: Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (c);
--- ok - column list "c" does cover the REPLICA IDENTITY INDEX on "c"
-UPDATE rf_tbl_abcd_pk SET a = 1;
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a);
--- fail - column list "a" does not cover the REPLICA IDENTITY INDEX on "c"
-UPDATE rf_tbl_abcd_nopk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_nopk"
-DETAIL: Column list used by the publication does not cover the replica identity.
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (c);
--- ok - column list "c" does cover the REPLICA IDENTITY INDEX on "c"
-UPDATE rf_tbl_abcd_nopk SET a = 1;
--- Tests for partitioned table
--- set PUBLISH_VIA_PARTITION_ROOT to false and test column list for partitioned
--- table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- fail - cannot use column list for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (a);
-ERROR: cannot use column list for relation "public.rf_tbl_abcd_part_pk" in publication "testpub6"
-DETAIL: Column lists cannot be specified for partitioned tables when publish_via_partition_root is false.
--- ok - can use column list for partition
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 (a);
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
--- set PUBLISH_VIA_PARTITION_ROOT to true and test column list for partitioned
--- table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1);
--- ok - can use column list for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (a);
--- ok - "a" is a PK col
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
--- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any column list is
--- used for partitioned table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
-ERROR: cannot set parameter "publish_via_partition_root" to false for publication "testpub6"
-DETAIL: The publication contains a column list for partitioned table "rf_tbl_abcd_part_pk", which is not allowed when "publish_via_partition_root" is false.
--- remove partitioned table's column list
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk;
--- ok - we don't have column list for partitioned table.
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- Now change the root column list to use a column "b"
--- (which is not in the replica identity)
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 (b);
--- ok - we don't have column list for partitioned table.
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0);
--- fail - "b" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_part_pk_1"
-DETAIL: Column list used by the publication does not cover the replica identity.
--- set PUBLISH_VIA_PARTITION_ROOT to true
--- can use column list for partitioned table
-ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1);
--- ok - can use column list for partitioned table
-ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (b);
--- fail - "b" is not in REPLICA IDENTITY INDEX
-UPDATE rf_tbl_abcd_part_pk SET a = 1;
-ERROR: cannot update table "rf_tbl_abcd_part_pk_1"
-DETAIL: Column list used by the publication does not cover the replica identity.
-DROP PUBLICATION testpub6;
-DROP TABLE rf_tbl_abcd_pk;
-DROP TABLE rf_tbl_abcd_nopk;
-DROP TABLE rf_tbl_abcd_part_pk;
--- ======================================================
--- Test cache invalidation FOR ALL TABLES publication
-SET client_min_messages = 'ERROR';
-CREATE TABLE testpub_tbl4(a int);
-INSERT INTO testpub_tbl4 values(1);
-UPDATE testpub_tbl4 set a = 2;
-CREATE PUBLICATION testpub_foralltables FOR ALL TABLES;
-RESET client_min_messages;
--- fail missing REPLICA IDENTITY
-UPDATE testpub_tbl4 set a = 3;
-ERROR: cannot update table "testpub_tbl4" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-DROP PUBLICATION testpub_foralltables;
--- should pass after dropping the publication
-UPDATE testpub_tbl4 set a = 3;
-DROP TABLE testpub_tbl4;
--- fail - view
-CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_view;
-ERROR: cannot add relation "testpub_view" to publication
-DETAIL: This operation is not supported for views.
-CREATE TEMPORARY TABLE testpub_temptbl(a int);
--- fail - temporary table
-CREATE PUBLICATION testpub_fortemptbl FOR TABLE testpub_temptbl;
-ERROR: cannot add relation "testpub_temptbl" to publication
-DETAIL: This operation is not supported for temporary tables.
-DROP TABLE testpub_temptbl;
-CREATE UNLOGGED TABLE testpub_unloggedtbl(a int);
--- fail - unlogged table
-CREATE PUBLICATION testpub_forunloggedtbl FOR TABLE testpub_unloggedtbl;
-ERROR: cannot add relation "testpub_unloggedtbl" to publication
-DETAIL: This operation is not supported for unlogged tables.
-DROP TABLE testpub_unloggedtbl;
--- fail - system table
-CREATE PUBLICATION testpub_forsystemtbl FOR TABLE pg_publication;
-ERROR: cannot add relation "pg_publication" to publication
-DETAIL: This operation is not supported for system tables.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1, pub_test.testpub_nopk;
-RESET client_min_messages;
--- fail - already added
-ALTER PUBLICATION testpub_fortbl ADD TABLE testpub_tbl1;
-ERROR: relation "testpub_tbl1" is already member of publication "testpub_fortbl"
--- fail - already added
-CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1;
-ERROR: publication "testpub_fortbl" already exists
-\dRp+ testpub_fortbl
- Publication testpub_fortbl
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "pub_test.testpub_nopk"
- "public.testpub_tbl1"
-
--- fail - view
-ALTER PUBLICATION testpub_default ADD TABLE testpub_view;
-ERROR: cannot add relation "testpub_view" to publication
-DETAIL: This operation is not supported for views.
-ALTER PUBLICATION testpub_default ADD TABLE testpub_tbl1;
-ALTER PUBLICATION testpub_default SET TABLE testpub_tbl1;
-ALTER PUBLICATION testpub_default ADD TABLE pub_test.testpub_nopk;
-ALTER PUBLICATION testpib_ins_trunct ADD TABLE pub_test.testpub_nopk, testpub_tbl1;
-\d+ pub_test.testpub_nopk
- Table "pub_test.testpub_nopk"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- foo | integer | | | | plain | |
- bar | integer | | | | plain | |
-Publications:
- "testpib_ins_trunct"
- "testpub_default"
- "testpub_fortbl"
-
-\d+ testpub_tbl1
- Table "public.testpub_tbl1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+------------------------------------------+----------+--------------+-------------
- id | integer | | not null | nextval('testpub_tbl1_id_seq'::regclass) | plain | |
- data | text | | | | extended | |
-Indexes:
- "testpub_tbl1_pkey" PRIMARY KEY, btree (id)
-Publications:
- "testpib_ins_trunct"
- "testpub_default"
- "testpub_fortbl"
-Not-null constraints:
- "testpub_tbl1_id_not_null" NOT NULL "id"
-
-\dRp+ testpub_default
- Publication testpub_default
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | f | f | f
-Tables:
- "pub_test.testpub_nopk"
- "public.testpub_tbl1"
-
-ALTER PUBLICATION testpub_default DROP TABLE testpub_tbl1, pub_test.testpub_nopk;
--- fail - nonexistent
-ALTER PUBLICATION testpub_default DROP TABLE pub_test.testpub_nopk;
-ERROR: relation "testpub_nopk" is not part of the publication
-\d+ testpub_tbl1
- Table "public.testpub_tbl1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+------------------------------------------+----------+--------------+-------------
- id | integer | | not null | nextval('testpub_tbl1_id_seq'::regclass) | plain | |
- data | text | | | | extended | |
-Indexes:
- "testpub_tbl1_pkey" PRIMARY KEY, btree (id)
-Publications:
- "testpib_ins_trunct"
- "testpub_fortbl"
-Not-null constraints:
- "testpub_tbl1_id_not_null" NOT NULL "id"
-
--- verify relation cache invalidation when a primary key is added using
--- an existing index
-CREATE TABLE pub_test.testpub_addpk (id int not null, data int);
-ALTER PUBLICATION testpub_default ADD TABLE pub_test.testpub_addpk;
-INSERT INTO pub_test.testpub_addpk VALUES(1, 11);
-CREATE UNIQUE INDEX testpub_addpk_id_idx ON pub_test.testpub_addpk(id);
--- fail:
-UPDATE pub_test.testpub_addpk SET id = 2;
-ERROR: cannot update table "testpub_addpk" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER TABLE pub_test.testpub_addpk ADD PRIMARY KEY USING INDEX testpub_addpk_id_idx;
--- now it should work:
-UPDATE pub_test.testpub_addpk SET id = 2;
-DROP TABLE pub_test.testpub_addpk;
--- permissions
-SET ROLE regress_publication_user2;
-CREATE PUBLICATION testpub2; -- fail
-ERROR: permission denied for database regression
-SET ROLE regress_publication_user;
-GRANT CREATE ON DATABASE regression TO regress_publication_user2;
-SET ROLE regress_publication_user2;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub2; -- ok
-CREATE PUBLICATION testpub3 FOR TABLES IN SCHEMA pub_test; -- fail
-ERROR: must be superuser to create FOR TABLES IN SCHEMA publication
-CREATE PUBLICATION testpub3; -- ok
-RESET client_min_messages;
-ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- fail
-ERROR: must be owner of table testpub_tbl1
-ALTER PUBLICATION testpub3 ADD TABLES IN SCHEMA pub_test; -- fail
-ERROR: must be superuser to add or set schemas
-SET ROLE regress_publication_user;
-GRANT regress_publication_user TO regress_publication_user2;
-SET ROLE regress_publication_user2;
-ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- ok
-DROP PUBLICATION testpub2;
-DROP PUBLICATION testpub3;
-SET ROLE regress_publication_user;
-CREATE ROLE regress_publication_user3;
-GRANT regress_publication_user2 TO regress_publication_user3;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub4 FOR TABLES IN SCHEMA pub_test;
-RESET client_min_messages;
-ALTER PUBLICATION testpub4 OWNER TO regress_publication_user3;
-SET ROLE regress_publication_user3;
--- fail - new owner must be superuser
-ALTER PUBLICATION testpub4 owner to regress_publication_user2; -- fail
-ERROR: permission denied to change owner of publication "testpub4"
-HINT: The owner of a FOR TABLES IN SCHEMA publication must be a superuser.
-ALTER PUBLICATION testpub4 owner to regress_publication_user; -- ok
-SET ROLE regress_publication_user;
-DROP PUBLICATION testpub4;
-DROP ROLE regress_publication_user3;
-REVOKE CREATE ON DATABASE regression FROM regress_publication_user2;
-DROP TABLE testpub_parted;
-DROP TABLE testpub_tbl1;
-\dRp+ testpub_default
- Publication testpub_default
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | f | f | f
-(1 row)
-
--- fail - must be owner of publication
-SET ROLE regress_publication_user_dummy;
-ALTER PUBLICATION testpub_default RENAME TO testpub_dummy;
-ERROR: must be owner of publication testpub_default
-RESET ROLE;
-ALTER PUBLICATION testpub_default RENAME TO testpub_foo;
-\dRp testpub_foo
- List of publications
- Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
--------------+--------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- testpub_foo | regress_publication_user | f | t | t | t | f | f | f
-(1 row)
-
--- rename back to keep the rest simple
-ALTER PUBLICATION testpub_foo RENAME TO testpub_default;
-ALTER PUBLICATION testpub_default OWNER TO regress_publication_user2;
-\dRp testpub_default
- List of publications
- Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
------------------+---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- testpub_default | regress_publication_user2 | f | t | t | t | f | f | f
-(1 row)
-
--- adding schemas and tables
-CREATE SCHEMA pub_test1;
-CREATE SCHEMA pub_test2;
-CREATE SCHEMA pub_test3;
-CREATE SCHEMA "CURRENT_SCHEMA";
-CREATE TABLE pub_test1.tbl (id int, data text);
-CREATE TABLE pub_test1.tbl1 (id serial primary key, data text);
-CREATE TABLE pub_test2.tbl1 (id serial primary key, data text);
-CREATE TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"(id int);
--- suppress warning that depends on wal_level
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub1_forschema FOR TABLES IN SCHEMA pub_test1;
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
-
-CREATE PUBLICATION testpub2_forschema FOR TABLES IN SCHEMA pub_test1, pub_test2, pub_test3;
-\dRp+ testpub2_forschema
- Publication testpub2_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
- "pub_test2"
- "pub_test3"
-
--- check create publication on CURRENT_SCHEMA
-CREATE PUBLICATION testpub3_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA;
-CREATE PUBLICATION testpub4_forschema FOR TABLES IN SCHEMA "CURRENT_SCHEMA";
-CREATE PUBLICATION testpub5_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA, "CURRENT_SCHEMA";
-CREATE PUBLICATION testpub6_forschema FOR TABLES IN SCHEMA "CURRENT_SCHEMA", CURRENT_SCHEMA;
-CREATE PUBLICATION testpub_fortable FOR TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA";
-RESET client_min_messages;
-\dRp+ testpub3_forschema
- Publication testpub3_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "public"
-
-\dRp+ testpub4_forschema
- Publication testpub4_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "CURRENT_SCHEMA"
-
-\dRp+ testpub5_forschema
- Publication testpub5_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "CURRENT_SCHEMA"
- "public"
-
-\dRp+ testpub6_forschema
- Publication testpub6_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "CURRENT_SCHEMA"
- "public"
-
-\dRp+ testpub_fortable
- Publication testpub_fortable
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "CURRENT_SCHEMA.CURRENT_SCHEMA"
-
--- check create publication on CURRENT_SCHEMA where search_path is not set
-SET SEARCH_PATH='';
-CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA;
-ERROR: no schema has been selected for CURRENT_SCHEMA
-RESET SEARCH_PATH;
--- check create publication on CURRENT_SCHEMA where TABLE/TABLES in SCHEMA
--- is not specified
-CREATE PUBLICATION testpub_forschema1 FOR CURRENT_SCHEMA;
-ERROR: invalid publication object list
-LINE 1: CREATE PUBLICATION testpub_forschema1 FOR CURRENT_SCHEMA;
- ^
-DETAIL: One of TABLE or TABLES IN SCHEMA must be specified before a standalone table or schema name.
--- check create publication on CURRENT_SCHEMA along with FOR TABLE
-CREATE PUBLICATION testpub_forschema1 FOR TABLE CURRENT_SCHEMA;
-ERROR: syntax error at or near "CURRENT_SCHEMA"
-LINE 1: CREATE PUBLICATION testpub_forschema1 FOR TABLE CURRENT_SCHE...
- ^
--- check create publication on a schema that does not exist
-CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA non_existent_schema;
-ERROR: schema "non_existent_schema" does not exist
--- check create publication on a system schema
-CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pg_catalog;
-ERROR: cannot add schema "pg_catalog" to publication
-DETAIL: This operation is not supported for system schemas.
--- check create publication on an object which is not schema
-CREATE PUBLICATION testpub1_forschema1 FOR TABLES IN SCHEMA testpub_view;
-ERROR: schema "testpub_view" does not exist
--- dropping the schema should reflect the change in publication
-DROP SCHEMA pub_test3;
-\dRp+ testpub2_forschema
- Publication testpub2_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
- "pub_test2"
-
--- renaming the schema should reflect the change in publication
-ALTER SCHEMA pub_test1 RENAME to pub_test1_renamed;
-\dRp+ testpub2_forschema
- Publication testpub2_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1_renamed"
- "pub_test2"
-
-ALTER SCHEMA pub_test1_renamed RENAME to pub_test1;
-\dRp+ testpub2_forschema
- Publication testpub2_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
- "pub_test2"
-
--- alter publication add schema
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test2;
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
- "pub_test2"
-
--- add non existent schema
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA non_existent_schema;
-ERROR: schema "non_existent_schema" does not exist
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
- "pub_test2"
-
--- add a schema which is already added to the publication
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test1;
-ERROR: schema "pub_test1" is already member of publication "testpub1_forschema"
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
- "pub_test2"
-
--- alter publication drop schema
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2;
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
-
--- drop schema that is not present in the publication
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2;
-ERROR: tables from schema "pub_test2" are not part of the publication
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
-
--- drop a schema that does not exist in the system
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA non_existent_schema;
-ERROR: schema "non_existent_schema" does not exist
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
-
--- drop all schemas
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1;
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-(1 row)
-
--- alter publication set multiple schema
-ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test2;
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
- "pub_test2"
-
--- alter publication set non-existent schema
-ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA non_existent_schema;
-ERROR: schema "non_existent_schema" does not exist
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
- "pub_test2"
-
--- alter publication set it duplicate schemas should set the schemas after
--- removing the duplicate schemas
-ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test1;
-\dRp+ testpub1_forschema
- Publication testpub1_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
-
--- Verify that it fails to add a schema with a column specification
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b);
-ERROR: syntax error at or near "("
-LINE 1: ...LICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b);
- ^
-ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b);
-ERROR: column specification not allowed for schema
-LINE 1: ...TION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b)...
- ^
--- cleanup pub_test1 schema for invalidation tests
-ALTER PUBLICATION testpub2_forschema DROP TABLES IN SCHEMA pub_test1;
-DROP PUBLICATION testpub3_forschema, testpub4_forschema, testpub5_forschema, testpub6_forschema, testpub_fortable;
-DROP SCHEMA "CURRENT_SCHEMA" CASCADE;
-NOTICE: drop cascades to table "CURRENT_SCHEMA"."CURRENT_SCHEMA"
--- verify relation cache invalidations through update statement for the
--- default REPLICA IDENTITY on the relation, if schema is part of the
--- publication then update will fail because relation's relreplident
--- option will be set, if schema is not part of the publication then update
--- will be successful.
-INSERT INTO pub_test1.tbl VALUES(1, 'test');
--- fail
-UPDATE pub_test1.tbl SET id = 2;
-ERROR: cannot update table "tbl" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1;
--- success
-UPDATE pub_test1.tbl SET id = 2;
-ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1;
--- fail
-UPDATE pub_test1.tbl SET id = 2;
-ERROR: cannot update table "tbl" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- verify invalidation of partition table having parent and child tables in
--- different schema
-CREATE SCHEMA pub_testpart1;
-CREATE SCHEMA pub_testpart2;
-CREATE TABLE pub_testpart1.parent1 (a int) partition by list (a);
-CREATE TABLE pub_testpart2.child_parent1 partition of pub_testpart1.parent1 for values in (1);
-INSERT INTO pub_testpart2.child_parent1 values(1);
-UPDATE pub_testpart2.child_parent1 set a = 1;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpubpart_forschema FOR TABLES IN SCHEMA pub_testpart1;
-RESET client_min_messages;
--- fail
-UPDATE pub_testpart1.parent1 set a = 1;
-ERROR: cannot update table "child_parent1" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-UPDATE pub_testpart2.child_parent1 set a = 1;
-ERROR: cannot update table "child_parent1" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-DROP PUBLICATION testpubpart_forschema;
--- verify invalidation of partition tables for schema publication that has
--- parent and child tables of different partition hierarchies
-CREATE TABLE pub_testpart2.parent2 (a int) partition by list (a);
-CREATE TABLE pub_testpart1.child_parent2 partition of pub_testpart2.parent2 for values in (1);
-INSERT INTO pub_testpart1.child_parent2 values(1);
-UPDATE pub_testpart1.child_parent2 set a = 1;
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpubpart_forschema FOR TABLES IN SCHEMA pub_testpart2;
-RESET client_min_messages;
--- fail
-UPDATE pub_testpart2.child_parent1 set a = 1;
-ERROR: cannot update table "child_parent1" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-UPDATE pub_testpart2.parent2 set a = 1;
-ERROR: cannot update table "child_parent2" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
-UPDATE pub_testpart1.child_parent2 set a = 1;
-ERROR: cannot update table "child_parent2" because it does not have a replica identity and publishes updates
-HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE.
--- alter publication set 'TABLES IN SCHEMA' on an empty publication.
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub3_forschema;
-RESET client_min_messages;
-\dRp+ testpub3_forschema
- Publication testpub3_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-(1 row)
-
-ALTER PUBLICATION testpub3_forschema SET TABLES IN SCHEMA pub_test1;
-\dRp+ testpub3_forschema
- Publication testpub3_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables from schemas:
- "pub_test1"
-
--- create publication including both 'FOR TABLE' and 'FOR TABLES IN SCHEMA'
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION testpub_forschema_fortable FOR TABLES IN SCHEMA pub_test1, TABLE pub_test2.tbl1;
-CREATE PUBLICATION testpub_fortable_forschema FOR TABLE pub_test2.tbl1, TABLES IN SCHEMA pub_test1;
-RESET client_min_messages;
-\dRp+ testpub_forschema_fortable
- Publication testpub_forschema_fortable
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "pub_test2.tbl1"
-Tables from schemas:
- "pub_test1"
-
-\dRp+ testpub_fortable_forschema
- Publication testpub_fortable_forschema
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "pub_test2.tbl1"
-Tables from schemas:
- "pub_test1"
-
--- fail specifying table without any of 'FOR TABLES IN SCHEMA' or
---'FOR TABLE' or 'FOR ALL TABLES'
-CREATE PUBLICATION testpub_error FOR pub_test2.tbl1;
-ERROR: invalid publication object list
-LINE 1: CREATE PUBLICATION testpub_error FOR pub_test2.tbl1;
- ^
-DETAIL: One of TABLE or TABLES IN SCHEMA must be specified before a standalone table or schema name.
-DROP VIEW testpub_view;
-DROP PUBLICATION testpub_default;
-DROP PUBLICATION testpib_ins_trunct;
-DROP PUBLICATION testpub_fortbl;
-DROP PUBLICATION testpub1_forschema;
-DROP PUBLICATION testpub2_forschema;
-DROP PUBLICATION testpub3_forschema;
-DROP PUBLICATION testpub_forschema_fortable;
-DROP PUBLICATION testpub_fortable_forschema;
-DROP PUBLICATION testpubpart_forschema;
-DROP SCHEMA pub_test CASCADE;
-NOTICE: drop cascades to table pub_test.testpub_nopk
-DROP SCHEMA pub_test1 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table pub_test1.tbl
-drop cascades to table pub_test1.tbl1
-DROP SCHEMA pub_test2 CASCADE;
-NOTICE: drop cascades to table pub_test2.tbl1
-DROP SCHEMA pub_testpart1 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table pub_testpart1.parent1
-drop cascades to table pub_testpart1.child_parent2
-DROP SCHEMA pub_testpart2 CASCADE;
-NOTICE: drop cascades to table pub_testpart2.parent2
--- Test the list of partitions published with or without
--- 'PUBLISH_VIA_PARTITION_ROOT' parameter
-SET client_min_messages = 'ERROR';
-CREATE SCHEMA sch1;
-CREATE SCHEMA sch2;
-CREATE TABLE sch1.tbl1 (a int) PARTITION BY RANGE(a);
-CREATE TABLE sch2.tbl1_part1 PARTITION OF sch1.tbl1 FOR VALUES FROM (1) to (10);
--- Schema publication that does not include the schema that has the parent table
-CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch2 WITH (PUBLISH_VIA_PARTITION_ROOT=1);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
--- Table publication that does not include the parent table
-CREATE PUBLICATION pub FOR TABLE sch2.tbl1_part1 WITH (PUBLISH_VIA_PARTITION_ROOT=1);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
--- Table publication that includes both the parent table and the child table
-ALTER PUBLICATION pub ADD TABLE sch1.tbl1;
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+-----------+----------+-----------
- pub | sch1 | tbl1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
--- Schema publication that does not include the schema that has the parent table
-CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch2 WITH (PUBLISH_VIA_PARTITION_ROOT=0);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
--- Table publication that does not include the parent table
-CREATE PUBLICATION pub FOR TABLE sch2.tbl1_part1 WITH (PUBLISH_VIA_PARTITION_ROOT=0);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
--- Table publication that includes both the parent table and the child table
-ALTER PUBLICATION pub ADD TABLE sch1.tbl1;
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+------------+----------+-----------
- pub | sch2 | tbl1_part1 | {a} |
-(1 row)
-
-DROP PUBLICATION pub;
-DROP TABLE sch2.tbl1_part1;
-DROP TABLE sch1.tbl1;
-CREATE TABLE sch1.tbl1 (a int) PARTITION BY RANGE(a);
-CREATE TABLE sch1.tbl1_part1 PARTITION OF sch1.tbl1 FOR VALUES FROM (1) to (10);
-CREATE TABLE sch1.tbl1_part2 PARTITION OF sch1.tbl1 FOR VALUES FROM (10) to (20);
-CREATE TABLE sch1.tbl1_part3 (a int) PARTITION BY RANGE(a);
-ALTER TABLE sch1.tbl1 ATTACH PARTITION sch1.tbl1_part3 FOR VALUES FROM (20) to (30);
-CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch1 WITH (PUBLISH_VIA_PARTITION_ROOT=1);
-SELECT * FROM pg_publication_tables;
- pubname | schemaname | tablename | attnames | rowfilter
----------+------------+-----------+----------+-----------
- pub | sch1 | tbl1 | {a} |
-(1 row)
-
-RESET client_min_messages;
-DROP PUBLICATION pub;
-DROP TABLE sch1.tbl1;
-DROP SCHEMA sch1 cascade;
-DROP SCHEMA sch2 cascade;
--- ======================================================
--- Test the publication 'publish_generated_columns' parameter enabled or disabled
-SET client_min_messages = 'ERROR';
-CREATE PUBLICATION pub1 FOR ALL TABLES WITH (publish_generated_columns=1);
-\dRp+ pub1
- Publication pub1
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | t | t | t | t | t | t | f
-(1 row)
-
-CREATE PUBLICATION pub2 FOR ALL TABLES WITH (publish_generated_columns=0);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | t | t | t | t | t | f | f
-(1 row)
-
-DROP PUBLICATION pub1;
-DROP PUBLICATION pub2;
--- Test the 'publish_generated_columns' parameter enabled or disabled for
--- different scenarios with/without generated columns in column lists.
-CREATE TABLE gencols (a int, gen1 int GENERATED ALWAYS AS (a * 2) STORED);
--- Generated columns in column list, when 'publish_generated_columns'=false
-CREATE PUBLICATION pub1 FOR table gencols(a, gen1) WITH (publish_generated_columns=false);
-\dRp+ pub1
- Publication pub1
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.gencols" (a, gen1)
-
--- Generated columns in column list, when 'publish_generated_columns'=true
-CREATE PUBLICATION pub2 FOR table gencols(a, gen1) WITH (publish_generated_columns=true);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | t | f
-Tables:
- "public.gencols" (a, gen1)
-
--- Generated columns in column list, then set 'publication_generate_columns'=false
-ALTER PUBLICATION pub2 SET (publish_generated_columns = false);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.gencols" (a, gen1)
-
--- Remove generated columns from column list, when 'publish_generated_columns'=false
-ALTER PUBLICATION pub2 SET TABLE gencols(a);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.gencols" (a)
-
--- Add generated columns in column list, when 'publish_generated_columns'=false
-ALTER PUBLICATION pub2 SET TABLE gencols(a, gen1);
-\dRp+ pub2
- Publication pub2
- Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
---------------------------+------------+---------+---------+---------+-----------+-------------------+----------
- regress_publication_user | f | t | t | t | t | f | f
-Tables:
- "public.gencols" (a, gen1)
-
-DROP PUBLICATION pub1;
-DROP PUBLICATION pub2;
-DROP TABLE gencols;
-RESET client_min_messages;
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_publication_user, regress_publication_user2;
-DROP ROLE regress_publication_user_dummy;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/subscription.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/subscription.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/subscription.out 2024-11-15 02:50:52.506022717 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/subscription.out 2024-11-15 02:59:17.117115552 +0000
@@ -1,482 +1,2 @@
---
--- SUBSCRIPTION
---
-CREATE ROLE regress_subscription_user LOGIN SUPERUSER;
-CREATE ROLE regress_subscription_user2;
-CREATE ROLE regress_subscription_user3 IN ROLE pg_create_subscription;
-CREATE ROLE regress_subscription_user_dummy LOGIN NOSUPERUSER;
-SET SESSION AUTHORIZATION 'regress_subscription_user';
--- fail - no publications
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo';
-ERROR: syntax error at or near ";"
-LINE 1: CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo';
- ^
--- fail - no connection
-CREATE SUBSCRIPTION regress_testsub PUBLICATION foo;
-ERROR: syntax error at or near "PUBLICATION"
-LINE 1: CREATE SUBSCRIPTION regress_testsub PUBLICATION foo;
- ^
--- fail - cannot do CREATE SUBSCRIPTION CREATE SLOT inside transaction block
-BEGIN;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'testconn' PUBLICATION testpub WITH (create_slot);
-ERROR: CREATE SUBSCRIPTION ... WITH (create_slot = true) cannot run inside a transaction block
-COMMIT;
--- fail - invalid connection string
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'testconn' PUBLICATION testpub;
-ERROR: invalid connection string syntax: missing "=" after "testconn" in connection info string
-
--- fail - duplicate publications
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION foo, testpub, foo WITH (connect = false);
-ERROR: publication name "foo" used more than once
--- ok
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-COMMENT ON SUBSCRIPTION regress_testsub IS 'test subscription';
-SELECT obj_description(s.oid, 'pg_subscription') FROM pg_subscription s;
- obj_description
--------------------
- test subscription
-(1 row)
-
--- Check if the subscription stats are created and stats_reset is updated
--- by pg_stat_reset_subscription_stats().
-SELECT subname, stats_reset IS NULL stats_reset_is_null FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub';
- subname | stats_reset_is_null
------------------+---------------------
- regress_testsub | t
-(1 row)
-
-SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname = 'regress_testsub';
- pg_stat_reset_subscription_stats
-----------------------------------
-
-(1 row)
-
-SELECT subname, stats_reset IS NULL stats_reset_is_null FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub';
- subname | stats_reset_is_null
------------------+---------------------
- regress_testsub | f
-(1 row)
-
--- Reset the stats again and check if the new reset_stats is updated.
-SELECT stats_reset as prev_stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub' \gset
-SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname = 'regress_testsub';
- pg_stat_reset_subscription_stats
-----------------------------------
-
-(1 row)
-
-SELECT :'prev_stats_reset' < stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub';
- ?column?
-----------
- t
-(1 row)
-
--- fail - name already exists
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-ERROR: subscription "regress_testsub" already exists
--- fail - must be superuser
-SET SESSION AUTHORIZATION 'regress_subscription_user2';
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION foo WITH (connect = false);
-ERROR: permission denied to create subscription
-DETAIL: Only roles with privileges of the "pg_create_subscription" role may create subscriptions.
-SET SESSION AUTHORIZATION 'regress_subscription_user';
--- fail - invalid option combinations
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, copy_data = true);
-ERROR: connect = false and copy_data = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, enabled = true);
-ERROR: connect = false and enabled = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, create_slot = true);
-ERROR: connect = false and create_slot = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = true);
-ERROR: slot_name = NONE and enabled = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = false, create_slot = true);
-ERROR: slot_name = NONE and create_slot = true are mutually exclusive options
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE);
-ERROR: subscription with slot_name = NONE must also set enabled = false
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = false);
-ERROR: subscription with slot_name = NONE must also set create_slot = false
-CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, create_slot = false);
-ERROR: subscription with slot_name = NONE must also set enabled = false
--- ok - with slot_name = NONE
-CREATE SUBSCRIPTION regress_testsub3 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
--- fail
-ALTER SUBSCRIPTION regress_testsub3 ENABLE;
-ERROR: cannot enable subscription that does not have a slot name
-ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;
-ERROR: ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions
--- fail - origin must be either none or any
-CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false, origin = foo);
-ERROR: unrecognized origin value: "foo"
--- now it works
-CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false, origin = none);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+ regress_testsub4
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
-------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | none | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub4 SET (origin = any);
-\dRs+ regress_testsub4
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
-------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-DROP SUBSCRIPTION regress_testsub3;
-DROP SUBSCRIPTION regress_testsub4;
--- fail, connection string does not parse
-CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'i_dont_exist=param' PUBLICATION testpub;
-ERROR: invalid connection string syntax: invalid connection option "i_dont_exist"
-
--- fail, connection string parses, but doesn't work (and does so without
--- connecting, so this is reliable and safe)
-CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'port=-1' PUBLICATION testpub;
-ERROR: subscription "regress_testsub5" could not connect to the publisher: invalid port number: "-1"
--- fail - invalid connection string during ALTER
-ALTER SUBSCRIPTION regress_testsub CONNECTION 'foobar';
-ERROR: invalid connection string syntax: missing "=" after "foobar" in connection info string
-
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET PUBLICATION testpub2, testpub3 WITH (refresh = false);
-ALTER SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist2';
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = 'newname');
-ALTER SUBSCRIPTION regress_testsub SET (password_required = false);
-ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = true);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | f | t | f | off | dbname=regress_doesnotexist2 | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (password_required = true);
-ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = false);
--- fail
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = '');
-ERROR: replication slot name "" is too short
--- fail
-ALTER SUBSCRIPTION regress_doesnotexist CONNECTION 'dbname=regress_doesnotexist2';
-ERROR: subscription "regress_doesnotexist" does not exist
-ALTER SUBSCRIPTION regress_testsub SET (create_slot = false);
-ERROR: unrecognized subscription parameter: "create_slot"
--- ok
-ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/12345');
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist2 | 0/12345
-(1 row)
-
--- ok - with lsn = NONE
-ALTER SUBSCRIPTION regress_testsub SKIP (lsn = NONE);
--- fail
-ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/0');
-ERROR: invalid WAL location (LSN): 0/0
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist2 | 0/0
-(1 row)
-
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub ENABLE;
-\dRs
- List of subscriptions
- Name | Owner | Enabled | Publication
------------------+---------------------------+---------+---------------------
- regress_testsub | regress_subscription_user | t | {testpub2,testpub3}
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub DISABLE;
-\dRs
- List of subscriptions
- Name | Owner | Enabled | Publication
------------------+---------------------------+---------+---------------------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3}
-(1 row)
-
-COMMIT;
--- fail - must be owner of subscription
-SET ROLE regress_subscription_user_dummy;
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub_dummy;
-ERROR: must be owner of subscription regress_testsub
-RESET ROLE;
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub_foo;
-ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = local);
-ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = foobar);
-ERROR: invalid value for parameter "synchronous_commit": "foobar"
-HINT: Available values: local, remote_write, remote_apply, on, off.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
----------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub_foo | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | local | dbname=regress_doesnotexist2 | 0/0
-(1 row)
-
--- rename back to keep the rest simple
-ALTER SUBSCRIPTION regress_testsub_foo RENAME TO regress_testsub;
--- ok, we're a superuser
-ALTER SUBSCRIPTION regress_testsub OWNER TO regress_subscription_user2;
--- fail - cannot do DROP SUBSCRIPTION inside transaction block with slot name
-BEGIN;
-DROP SUBSCRIPTION regress_testsub;
-ERROR: DROP SUBSCRIPTION cannot run inside a transaction block
-COMMIT;
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
--- now it works
-BEGIN;
-DROP SUBSCRIPTION regress_testsub;
-COMMIT;
-DROP SUBSCRIPTION IF EXISTS regress_testsub;
-NOTICE: subscription "regress_testsub" does not exist, skipping
-DROP SUBSCRIPTION regress_testsub; -- fail
-ERROR: subscription "regress_testsub" does not exist
--- fail - binary must be boolean
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, binary = foo);
-ERROR: binary requires a Boolean value
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, binary = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | t | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (binary = false);
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-DROP SUBSCRIPTION regress_testsub;
--- fail - streaming must be boolean or 'parallel'
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = foo);
-ERROR: streaming requires a Boolean value or "parallel"
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (streaming = parallel);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (streaming = false);
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
--- fail - publication already exists
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub WITH (refresh = false);
-ERROR: publication "testpub" is already in subscription "regress_testsub"
--- fail - publication used more than once
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub1 WITH (refresh = false);
-ERROR: publication name "testpub1" used more than once
--- ok - add two publications into subscription
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false);
--- fail - publications already exist
-ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false);
-ERROR: publication "testpub1" is already in subscription "regress_testsub"
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-----------------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub,testpub1,testpub2} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
--- fail - publication used more than once
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub1 WITH (refresh = false);
-ERROR: publication name "testpub1" used more than once
--- fail - all publications are deleted
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub, testpub1, testpub2 WITH (refresh = false);
-ERROR: cannot drop all the publications from a subscription
--- fail - publication does not exist in subscription
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub3 WITH (refresh = false);
-ERROR: publication "testpub3" is not in subscription "regress_testsub"
--- ok - delete publications
-ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub2 WITH (refresh = false);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-DROP SUBSCRIPTION regress_testsub;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION mypub
- WITH (connect = false, create_slot = false, copy_data = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-ALTER SUBSCRIPTION regress_testsub ENABLE;
--- fail - ALTER SUBSCRIPTION with refresh is not allowed in a transaction
--- block or function
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub SET PUBLICATION mypub WITH (refresh = true);
-ERROR: ALTER SUBSCRIPTION with refresh cannot run inside a transaction block
-END;
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub REFRESH PUBLICATION;
-ERROR: ALTER SUBSCRIPTION ... REFRESH cannot run inside a transaction block
-END;
-CREATE FUNCTION func() RETURNS VOID AS
-$$ ALTER SUBSCRIPTION regress_testsub SET PUBLICATION mypub WITH (refresh = true) $$ LANGUAGE SQL;
-SELECT func();
-ERROR: ALTER SUBSCRIPTION with refresh cannot be executed from a function
-CONTEXT: SQL function "func" statement 1
-ALTER SUBSCRIPTION regress_testsub DISABLE;
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
-DROP FUNCTION func;
--- fail - two_phase must be boolean
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, two_phase = foo);
-ERROR: two_phase requires a Boolean value
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, two_phase = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
--- we can alter streaming when two_phase enabled
-ALTER SUBSCRIPTION regress_testsub SET (streaming = true);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
--- two_phase and streaming are compatible.
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true, two_phase = true);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
--- fail - disable_on_error must be boolean
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, disable_on_error = foo);
-ERROR: disable_on_error requires a Boolean value
--- now it works
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, disable_on_error = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (disable_on_error = true);
-\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | t | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
-(1 row)
-
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
--- let's do some tests with pg_create_subscription rather than superuser
-SET SESSION AUTHORIZATION regress_subscription_user3;
--- fail, not enough privileges
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-ERROR: permission denied for database regression
--- fail, must specify password
-RESET SESSION AUTHORIZATION;
-GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false);
-ERROR: password is required
-DETAIL: Non-superusers must provide a password in the connection string.
--- fail, can't set password_required=false
-RESET SESSION AUTHORIZATION;
-GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, password_required = false);
-ERROR: password_required=false is superuser-only
-HINT: Subscriptions with the password_required option set to false may only be created or modified by the superuser.
--- ok
-RESET SESSION AUTHORIZATION;
-GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist password=regress_fakepassword' PUBLICATION testpub WITH (connect = false);
-WARNING: subscription was created, but is not connected
-HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
--- we cannot give the subscription away to some random user
-ALTER SUBSCRIPTION regress_testsub OWNER TO regress_subscription_user;
-ERROR: must be able to SET ROLE "regress_subscription_user"
--- but we can rename the subscription we just created
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub2;
--- ok, even after losing pg_create_subscription we can still rename it
-RESET SESSION AUTHORIZATION;
-REVOKE pg_create_subscription FROM regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-ALTER SUBSCRIPTION regress_testsub2 RENAME TO regress_testsub;
--- fail, after losing CREATE on the database we can't rename it any more
-RESET SESSION AUTHORIZATION;
-REVOKE CREATE ON DATABASE REGRESSION FROM regress_subscription_user3;
-SET SESSION AUTHORIZATION regress_subscription_user3;
-ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub2;
-ERROR: permission denied for database regression
--- fail - cannot do ALTER SUBSCRIPTION SET (failover) inside transaction block
-BEGIN;
-ALTER SUBSCRIPTION regress_testsub SET (failover);
-ERROR: ALTER SUBSCRIPTION ... SET (failover) cannot run inside a transaction block
-COMMIT;
--- ok, owning it is enough for this stuff
-ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
-DROP SUBSCRIPTION regress_testsub;
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_subscription_user;
-DROP ROLE regress_subscription_user2;
-DROP ROLE regress_subscription_user3;
-DROP ROLE regress_subscription_user_dummy;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/select_views.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/select_views.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/select_views.out 2024-11-15 02:50:52.502029300 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/select_views.out 2024-11-15 02:59:17.245115723 +0000
@@ -1,1552 +1,2 @@
---
--- SELECT_VIEWS
--- test the views defined in CREATE_VIEWS
---
-SELECT * FROM street;
- name | thepath | cname
-------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------
- Access Rd 25 | [(-121.9283,37.894),(-121.9283,37.9)] | Oakland
- Ada St | [(-122.2487,37.398),(-122.2496,37.401)] | Lafayette
- Agua Fria Creek | [(-121.9254,37.922),(-121.9281,37.889)] | Oakland
- Allen Ct | [(-122.0131,37.602),(-122.0117,37.597)] | Berkeley
- Alvarado Niles Road | [(-122.0325,37.903),(-122.0316,37.9)] | Berkeley
- Andrea Cir | [(-121.733218,37.88641),(-121.733286,37.90617)] | Oakland
- Apricot Lane | [(-121.9471,37.401),(-121.9456,37.392)] | Oakland
- Apricot Lane | [(-121.9471,37.401),(-121.9456,37.392)] | Oakland
- Arden Road | [(-122.0978,37.177),(-122.1,37.177)] | Oakland
- Arizona St | [(-122.0381,37.901),(-122.0367,37.898)] | Berkeley
- Arlington Dr | [(-121.8802,37.408),(-121.8807,37.394)] | Oakland
- Arlington Dr | [(-121.8802,37.408),(-121.8807,37.394)] | Oakland
- Arlington Road | [(-121.7957,37.898),(-121.7956,37.906)] | Oakland
- Arroyo Las Positas | [(-121.7973,37.997),(-121.7957,37.005)] | Oakland
- Arroyo Las Positas | [(-121.7973,37.997),(-121.7957,37.005)] | Oakland
- Arroyo Seco | [(-121.7073,37.766),(-121.6997,37.729)] | Oakland
- Ash St | [(-122.0408,37.31),(-122.04,37.292)] | Oakland
- Avenue 134th | [(-122.1823,37.002),(-122.1851,37.992)] | Oakland
- Avenue 134th | [(-122.1823,37.002),(-122.1851,37.992)] | Berkeley
- Avenue 140th | [(-122.1656,37.003),(-122.1691,37.988)] | Oakland
- Avenue 140th | [(-122.1656,37.003),(-122.1691,37.988)] | Berkeley
- Avenue D | [(-122.298,37.848),(-122.3024,37.849)] | Berkeley
- B St | [(-122.1749,37.451),(-122.1743,37.443)] | Oakland
- Bancroft Ave | [(-122.15714,37.4242),(-122.156,37.409)] | Oakland
- Bancroft Ave | [(-122.1643,37.523),(-122.1631,37.508),(-122.1621,37.493)] | Oakland
- Birch St | [(-122.1617,37.425),(-122.1614,37.417)] | Oakland
- Birch St | [(-122.1673,37.509),(-122.1661,37.492)] | Oakland
- Blacow Road | [(-122.0179,37.469),(-122.0167,37.465)] | Oakland
- Bridgepointe Dr | [(-122.0514,37.305),(-122.0509,37.299)] | Oakland
- Broadmore Ave | [(-122.095,37.522),(-122.0936,37.497)] | Oakland
- Broadway | [(-122.2409,37.586),(-122.2395,37.601)] | Berkeley
- Buckingham Blvd | [(-122.2231,37.59),(-122.2214,37.606)] | Berkeley
- Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Oakland
- Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Oakland
- Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Berkeley
- C St | [(-122.1768,37.46),(-122.1749,37.435)] | Oakland
- Calaveras Creek | [(-121.8203,37.035),(-121.8207,37.931)] | Oakland
- Calaveras Creek | [(-121.8203,37.035),(-121.8207,37.931)] | Oakland
- California St | [(-122.2032,37.005),(-122.2016,37.996)] | Berkeley
- California St | [(-122.2032,37.005),(-122.2016,37.996)] | Lafayette
- Cameron Ave | [(-122.1316,37.502),(-122.1327,37.481)] | Oakland
- Campus Dr | [(-122.1704,37.905),(-122.1678,37.868),(-122.1671,37.865)] | Berkeley
- Capricorn Ave | [(-122.2176,37.404),(-122.2164,37.384)] | Lafayette
- Carson St | [(-122.1846,37.9),(-122.1843,37.901)] | Berkeley
- Cedar Blvd | [(-122.0282,37.446),(-122.0265,37.43)] | Oakland
- Cedar St | [(-122.3011,37.737),(-122.2999,37.739)] | Berkeley
- Celia St | [(-122.0611,37.3),(-122.0616,37.299)] | Oakland
- Central Ave | [(-122.2343,37.602),(-122.2331,37.595)] | Berkeley
- Chambers Dr | [(-122.2004,37.352),(-122.1972,37.368)] | Lafayette
- Chambers Lane | [(-122.2001,37.359),(-122.1975,37.371)] | Lafayette
- Champion St | [(-122.214,37.991),(-122.2147,37.002)] | Berkeley
- Champion St | [(-122.214,37.991),(-122.2147,37.002)] | Lafayette
- Chapman Dr | [(-122.0421,37.504),(-122.0414,37.498)] | Oakland
- Charles St | [(-122.0255,37.505),(-122.0252,37.499)] | Oakland
- Cherry St | [(-122.0437,37.42),(-122.0434,37.413)] | Oakland
- Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Oakland
- Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Oakland
- Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Berkeley
- Coliseum Way | [(-122.2001,37.47),(-122.1978,37.516)] | Oakland
- Coliseum Way | [(-122.2113,37.626),(-122.2085,37.592),(-122.2063,37.568)] | Berkeley
- Coolidge Ave | [(-122.2007,37.058),(-122.1992,37.06)] | Lafayette
- Cornell Ave | [(-122.2956,37.925),(-122.2949,37.906),(-122.2939,37.875)] | Berkeley
- Corriea Way | [(-121.9501,37.402),(-121.9505,37.398)] | Oakland
- Corriea Way | [(-121.9501,37.402),(-121.9505,37.398)] | Oakland
- Cowing Road | [(-122.0002,37.934),(-121.9772,37.782)] | Oakland
- Creston Road | [(-122.2639,37.002),(-122.2613,37.986),(-122.2602,37.978),(-122.2598,37.973)] | Berkeley
- Creston Road | [(-122.2639,37.002),(-122.2613,37.986),(-122.2602,37.978),(-122.2598,37.973)] | Lafayette
- Crow Canyon Creek | [(-122.043,37.905),(-122.0368,37.71)] | Berkeley
- Crystaline Dr | [(-121.925856,37),(-121.925869,37.00527)] | Oakland
- Cull Canyon Road | [(-122.0536,37.435),(-122.0499,37.315)] | Oakland
- Cull Creek | [(-122.0624,37.875),(-122.0582,37.527)] | Berkeley
- D St | [(-122.1811,37.505),(-122.1805,37.497)] | Oakland
- Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Oakland
- Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Oakland
- Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Berkeley
- Deering St | [(-122.2146,37.904),(-122.2126,37.897)] | Berkeley
- Dimond Ave | [(-122.2167,37.994),(-122.2162,37.006)] | Berkeley
- Dimond Ave | [(-122.2167,37.994),(-122.2162,37.006)] | Lafayette
- Donna Way | [(-122.1333,37.606),(-122.1316,37.599)] | Berkeley
- Driftwood Dr | [(-122.0109,37.482),(-122.0113,37.477)] | Oakland
- Driscoll Road | [(-121.9482,37.403),(-121.948451,37.39995)] | Oakland
- Driscoll Road | [(-121.9482,37.403),(-121.948451,37.39995)] | Oakland
- E St | [(-122.1832,37.505),(-122.1826,37.498),(-122.182,37.49)] | Oakland
- Eden Ave | [(-122.1143,37.505),(-122.1142,37.491)] | Oakland
- Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Oakland
- Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Oakland
- Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Berkeley
- Edgewater Dr | [(-122.201,37.379),(-122.2042,37.41)] | Lafayette
- Enos Way | [(-121.7677,37.896),(-121.7673,37.91)] | Oakland
- Euclid Ave | [(-122.2671,37.009),(-122.2666,37.987)] | Berkeley
- Euclid Ave | [(-122.2671,37.009),(-122.2666,37.987)] | Lafayette
- Fairview Ave | [(-121.999,37.428),(-121.9863,37.351)] | Oakland
- Fairview Ave | [(-121.999,37.428),(-121.9863,37.351)] | Oakland
- Foothill Blvd | [(-122.2414,37.9),(-122.2403,37.893)] | Berkeley
- Fountain St | [(-122.2306,37.593),(-122.2293,37.605)] | Berkeley
- Gading Road | [(-122.0801,37.343),(-122.08,37.336)] | Oakland
- Grizzly Peak Blvd | [(-122.2213,37.638),(-122.2127,37.581)] | Berkeley
- Grove Way | [(-122.0643,37.884),(-122.062679,37.89162),(-122.061796,37.89578),(-122.0609,37.9)] | Berkeley
- Harris Road | [(-122.0659,37.372),(-122.0675,37.363)] | Oakland
- Heartwood Dr | [(-122.2006,37.341),(-122.1992,37.338)] | Lafayette
- Hegenberger Exwy | [(-122.1946,37.52),(-122.1947,37.497)] | Oakland
- Herrier St | [(-122.1943,37.006),(-122.1936,37.998)] | Oakland
- Herrier St | [(-122.1943,37.006),(-122.1936,37.998)] | Berkeley
- Hesperian Blvd | [(-122.097,37.333),(-122.0956,37.31),(-122.0946,37.293)] | Oakland
- Hesperian Blvd | [(-122.097,37.333),(-122.0956,37.31),(-122.0946,37.293)] | Oakland
- Hesperian Blvd | [(-122.1132,37.6),(-122.1123,37.586)] | Berkeley
- Hollis St | [(-122.2885,37.397),(-122.289,37.414)] | Lafayette
- I- 580 | [(-121.727,37.074),(-121.7229,37.093),(-121.722301,37.09522),(-121.721001,37.10005),(-121.7194,37.106),(-121.7188,37.109),(-121.7168,37.12),(-121.7163,37.123),(-121.7145,37.127),(-121.7096,37.148),(-121.707731,37.1568),(-121.7058,37.166),(-121.7055,37.168),(-121.7044,37.174),(-121.7038,37.172),(-121.7037,37.172),(-121.7027,37.175),(-121.7001,37.181),(-121.6957,37.191),(-121.6948,37.192),(-121.6897,37.204),(-121.6697,37.185)] | Oakland
- I- 580 | [(-121.9322,37.989),(-121.9243,37.006),(-121.9217,37.014)] | Oakland
- I- 580 | [(-121.9322,37.989),(-121.9243,37.006),(-121.9217,37.014)] | Oakland
- I- 580 | [(-122.018,37.019),(-122.0009,37.032),(-121.9787,37.983),(-121.958,37.984),(-121.9571,37.986)] | Oakland
- I- 580 | [(-122.018,37.019),(-122.0009,37.032),(-121.9787,37.983),(-121.958,37.984),(-121.9571,37.986)] | Oakland
- I- 580 | [(-122.1108,37.023),(-122.1101,37.02),(-122.108103,37.00764),(-122.108,37.007),(-122.1069,37.998),(-122.1064,37.994),(-122.1053,37.982),(-122.1048,37.977),(-122.1032,37.958),(-122.1026,37.953),(-122.1013,37.938),(-122.0989,37.911),(-122.0984,37.91),(-122.098,37.908)] | Oakland
- I- 580 | [(-122.1108,37.023),(-122.1101,37.02),(-122.108103,37.00764),(-122.108,37.007),(-122.1069,37.998),(-122.1064,37.994),(-122.1053,37.982),(-122.1048,37.977),(-122.1032,37.958),(-122.1026,37.953),(-122.1013,37.938),(-122.0989,37.911),(-122.0984,37.91),(-122.098,37.908)] | Berkeley
- I- 580 | [(-122.1543,37.703),(-122.1535,37.694),(-122.1512,37.655),(-122.1475,37.603),(-122.1468,37.583),(-122.1472,37.569),(-122.149044,37.54874),(-122.1493,37.546),(-122.1501,37.532),(-122.1506,37.509),(-122.1495,37.482),(-122.1487,37.467),(-122.1477,37.447),(-122.1414,37.383),(-122.1404,37.376),(-122.1398,37.372),(-122.139,37.356),(-122.1388,37.353),(-122.1385,37.34),(-122.1382,37.33),(-122.1378,37.316)] | Oakland
- I- 580 | [(-122.1543,37.703),(-122.1535,37.694),(-122.1512,37.655),(-122.1475,37.603),(-122.1468,37.583),(-122.1472,37.569),(-122.149044,37.54874),(-122.1493,37.546),(-122.1501,37.532),(-122.1506,37.509),(-122.1495,37.482),(-122.1487,37.467),(-122.1477,37.447),(-122.1414,37.383),(-122.1404,37.376),(-122.1398,37.372),(-122.139,37.356),(-122.1388,37.353),(-122.1385,37.34),(-122.1382,37.33),(-122.1378,37.316)] | Berkeley
- I- 580 | [(-122.2197,37.99),(-122.22,37.99),(-122.222092,37.99523),(-122.2232,37.998),(-122.224146,37.99963),(-122.2261,37.003),(-122.2278,37.007),(-122.2302,37.026),(-122.2323,37.043),(-122.2344,37.059),(-122.235405,37.06427),(-122.2365,37.07)] | Berkeley
- I- 580 | [(-122.2197,37.99),(-122.22,37.99),(-122.222092,37.99523),(-122.2232,37.998),(-122.224146,37.99963),(-122.2261,37.003),(-122.2278,37.007),(-122.2302,37.026),(-122.2323,37.043),(-122.2344,37.059),(-122.235405,37.06427),(-122.2365,37.07)] | Lafayette
- I- 580 Ramp | [(-121.8521,37.011),(-121.8479,37.999),(-121.8476,37.999),(-121.8456,37.01),(-121.8455,37.011)] | Oakland
- I- 580 Ramp | [(-121.8521,37.011),(-121.8479,37.999),(-121.8476,37.999),(-121.8456,37.01),(-121.8455,37.011)] | Oakland
- I- 580 Ramp | [(-121.8743,37.014),(-121.8722,37.999),(-121.8714,37.999)] | Oakland
- I- 580 Ramp | [(-121.8743,37.014),(-121.8722,37.999),(-121.8714,37.999)] | Oakland
- I- 580 Ramp | [(-121.9043,37.998),(-121.9036,37.013),(-121.902632,37.0174),(-121.9025,37.018)] | Oakland
- I- 580 Ramp | [(-121.9043,37.998),(-121.9036,37.013),(-121.902632,37.0174),(-121.9025,37.018)] | Oakland
- I- 580 Ramp | [(-121.9368,37.986),(-121.936483,37.98832),(-121.9353,37.997),(-121.93504,37.00035),(-121.9346,37.006),(-121.933764,37.00031),(-121.9333,37.997),(-121.9322,37.989)] | Oakland
- I- 580 Ramp | [(-121.9368,37.986),(-121.936483,37.98832),(-121.9353,37.997),(-121.93504,37.00035),(-121.9346,37.006),(-121.933764,37.00031),(-121.9333,37.997),(-121.9322,37.989)] | Oakland
- I- 580 Ramp | [(-122.093241,37.90351),(-122.09364,37.89634),(-122.093788,37.89212)] | Berkeley
- I- 580 Ramp | [(-122.0934,37.896),(-122.09257,37.89961),(-122.0911,37.906)] | Berkeley
- I- 580 Ramp | [(-122.0941,37.897),(-122.0943,37.902)] | Berkeley
- I- 580 Ramp | [(-122.096,37.888),(-122.0962,37.891),(-122.0964,37.9)] | Berkeley
- I- 580 Ramp | [(-122.101,37.898),(-122.1005,37.902),(-122.0989,37.911)] | Berkeley
- I- 580 Ramp | [(-122.1086,37.003),(-122.1068,37.993),(-122.1066,37.992),(-122.1053,37.982)] | Oakland
- I- 580 Ramp | [(-122.1086,37.003),(-122.1068,37.993),(-122.1066,37.992),(-122.1053,37.982)] | Berkeley
- I- 580 Ramp | [(-122.1414,37.383),(-122.1407,37.376),(-122.1403,37.372),(-122.139,37.356)] | Oakland
- I- 580/I-680 Ramp | ((-121.9207,37.988),(-121.9192,37.016)) | Oakland
- I- 580/I-680 Ramp | ((-121.9207,37.988),(-121.9192,37.016)) | Oakland
- I- 680 | ((-121.939,37.15),(-121.9387,37.145),(-121.9373,37.125),(-121.934242,37.07643),(-121.933886,37.0709),(-121.9337,37.068),(-121.933122,37.06139),(-121.932736,37.05698),(-121.93222,37.05108),(-121.931844,37.04678),(-121.930113,37.027),(-121.926829,37),(-121.9265,37.998),(-121.9217,37.96),(-121.9203,37.949),(-121.9184,37.934)) | Oakland
- I- 680 | ((-121.939,37.15),(-121.9387,37.145),(-121.9373,37.125),(-121.934242,37.07643),(-121.933886,37.0709),(-121.9337,37.068),(-121.933122,37.06139),(-121.932736,37.05698),(-121.93222,37.05108),(-121.931844,37.04678),(-121.930113,37.027),(-121.926829,37),(-121.9265,37.998),(-121.9217,37.96),(-121.9203,37.949),(-121.9184,37.934)) | Oakland
- I- 680 | [(-121.9101,37.715),(-121.911269,37.74682),(-121.9119,37.764),(-121.9124,37.776),(-121.9174,37.905),(-121.9194,37.957),(-121.9207,37.988)] | Oakland
- I- 680 | [(-121.9184,37.934),(-121.917,37.913),(-121.9122,37.83),(-121.9052,37.702)] | Oakland
- I- 680 Ramp | [(-121.8833,37.376),(-121.8833,37.392),(-121.883,37.4),(-121.8835,37.402),(-121.8852,37.422)] | Oakland
- I- 680 Ramp | [(-121.8833,37.376),(-121.8833,37.392),(-121.883,37.4),(-121.8835,37.402),(-121.8852,37.422)] | Oakland
- I- 680 Ramp | [(-121.92,37.438),(-121.9218,37.424),(-121.9238,37.408),(-121.9252,37.392)] | Oakland
- I- 680 Ramp | [(-121.92,37.438),(-121.9218,37.424),(-121.9238,37.408),(-121.9252,37.392)] | Oakland
- I- 680 Ramp | [(-121.9238,37.402),(-121.9234,37.395),(-121.923,37.399)] | Oakland
- I- 680 Ramp | [(-121.9238,37.402),(-121.9234,37.395),(-121.923,37.399)] | Oakland
- I- 80 | ((-122.2937,37.277),(-122.3016,37.262)) | Lafayette
- I- 80 | ((-122.2962,37.273),(-122.3004,37.264)) | Lafayette
- I- 80 Ramp | [(-122.2962,37.413),(-122.2959,37.382),(-122.2951,37.372)] | Lafayette
- I- 880 | ((-121.9669,37.075),(-121.9663,37.071),(-121.9656,37.065),(-121.9618,37.037),(-121.95689,37),(-121.948,37.933)) | Oakland
- I- 880 | ((-121.9669,37.075),(-121.9663,37.071),(-121.9656,37.065),(-121.9618,37.037),(-121.95689,37),(-121.948,37.933)) | Oakland
- I- 880 | [(-121.948,37.933),(-121.9471,37.925),(-121.9467,37.923),(-121.946,37.918),(-121.9452,37.912),(-121.937,37.852)] | Oakland
- I- 880 | [(-122.0219,37.466),(-122.0205,37.447),(-122.020331,37.44447),(-122.020008,37.43962),(-122.0195,37.432),(-122.0193,37.429),(-122.0164,37.393),(-122.010219,37.34771),(-122.0041,37.313)] | Oakland
- I- 880 | [(-122.0375,37.632),(-122.0359,37.619),(-122.0358,37.616),(-122.034514,37.60409),(-122.031876,37.57965),(-122.031193,37.57332),(-122.03016,37.56375),(-122.02943,37.55698),(-122.028689,37.54929),(-122.027833,37.53908),(-122.025979,37.51698),(-122.0238,37.491)] | Oakland
- I- 880 | [(-122.0375,37.632),(-122.0359,37.619),(-122.0358,37.616),(-122.034514,37.60409),(-122.031876,37.57965),(-122.031193,37.57332),(-122.03016,37.56375),(-122.02943,37.55698),(-122.028689,37.54929),(-122.027833,37.53908),(-122.025979,37.51698),(-122.0238,37.491)] | Berkeley
- I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Oakland
- I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Oakland
- I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Berkeley
- I- 880 | [(-122.0831,37.312),(-122.0819,37.296),(-122.081,37.285),(-122.0786,37.248),(-122.078,37.24),(-122.077642,37.23496),(-122.076983,37.22567),(-122.076599,37.22026),(-122.076229,37.21505),(-122.0758,37.209)] | Oakland
- I- 880 | [(-122.0978,37.528),(-122.096,37.496),(-122.0931,37.453),(-122.09277,37.4496),(-122.090189,37.41442),(-122.0896,37.405),(-122.085,37.34)] | Oakland
- I- 880 | [(-122.1365,37.902),(-122.1358,37.898),(-122.1333,37.881),(-122.1323,37.874),(-122.1311,37.866),(-122.1308,37.865),(-122.1307,37.864),(-122.1289,37.851),(-122.1277,37.843),(-122.1264,37.834),(-122.1231,37.812),(-122.1165,37.766),(-122.1104,37.72),(-122.109695,37.71094),(-122.109,37.702),(-122.108312,37.69168),(-122.1076,37.681)] | Berkeley
- I- 880 | [(-122.1755,37.185),(-122.1747,37.178),(-122.1742,37.173),(-122.1692,37.126),(-122.167792,37.11594),(-122.16757,37.11435),(-122.1671,37.111),(-122.1655,37.1),(-122.165169,37.09811),(-122.1641,37.092),(-122.1596,37.061),(-122.158381,37.05275),(-122.155991,37.03657),(-122.1531,37.017),(-122.1478,37.98),(-122.1407,37.932),(-122.1394,37.924),(-122.1389,37.92),(-122.1376,37.91)] | Oakland
- I- 880 | [(-122.1755,37.185),(-122.1747,37.178),(-122.1742,37.173),(-122.1692,37.126),(-122.167792,37.11594),(-122.16757,37.11435),(-122.1671,37.111),(-122.1655,37.1),(-122.165169,37.09811),(-122.1641,37.092),(-122.1596,37.061),(-122.158381,37.05275),(-122.155991,37.03657),(-122.1531,37.017),(-122.1478,37.98),(-122.1407,37.932),(-122.1394,37.924),(-122.1389,37.92),(-122.1376,37.91)] | Berkeley
- I- 880 | [(-122.2214,37.711),(-122.2202,37.699),(-122.2199,37.695),(-122.219,37.682),(-122.2184,37.672),(-122.2173,37.652),(-122.2159,37.638),(-122.2144,37.616),(-122.2138,37.612),(-122.2135,37.609),(-122.212,37.592),(-122.2116,37.586),(-122.2111,37.581)] | Berkeley
- I- 880 | [(-122.2707,37.975),(-122.2693,37.972),(-122.2681,37.966),(-122.267,37.962),(-122.2659,37.957),(-122.2648,37.952),(-122.2636,37.946),(-122.2625,37.935),(-122.2617,37.927),(-122.2607,37.921),(-122.2593,37.916),(-122.258,37.911),(-122.2536,37.898),(-122.2432,37.858),(-122.2408,37.845),(-122.2386,37.827),(-122.2374,37.811)] | Berkeley
- I- 880 Ramp | [(-122.0019,37.301),(-122.002,37.293)] | Oakland
- I- 880 Ramp | [(-122.0041,37.313),(-122.0018,37.315),(-122.0007,37.315),(-122.0005,37.313),(-122.0002,37.308),(-121.9995,37.289)] | Oakland
- I- 880 Ramp | [(-122.0041,37.313),(-122.0038,37.308),(-122.0039,37.284),(-122.0013,37.287),(-121.9995,37.289)] | Oakland
- I- 880 Ramp | [(-122.0236,37.488),(-122.0231,37.458),(-122.0227,37.458),(-122.0223,37.452),(-122.0205,37.447)] | Oakland
- I- 880 Ramp | [(-122.0238,37.491),(-122.0215,37.483),(-122.0211,37.477),(-122.0205,37.447)] | Oakland
- I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Oakland
- I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Oakland
- I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Berkeley
- I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Oakland
- I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Oakland
- I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Berkeley
- I- 880 Ramp | [(-122.085,37.34),(-122.0801,37.316),(-122.081,37.285)] | Oakland
- I- 880 Ramp | [(-122.085,37.34),(-122.0801,37.316),(-122.081,37.285)] | Oakland
- I- 880 Ramp | [(-122.085,37.34),(-122.0866,37.316),(-122.0819,37.296)] | Oakland
- I- 880 Ramp | [(-122.085,37.34),(-122.0866,37.316),(-122.0819,37.296)] | Oakland
- I- 880 Ramp | [(-122.1029,37.61),(-122.1013,37.587),(-122.0999,37.569)] | Berkeley
- I- 880 Ramp | [(-122.1379,37.891),(-122.1383,37.897),(-122.1377,37.902)] | Berkeley
- I- 880 Ramp | [(-122.1379,37.931),(-122.137597,37.92736),(-122.1374,37.925),(-122.1373,37.924),(-122.1369,37.914),(-122.1358,37.905),(-122.1365,37.908),(-122.1358,37.898)] | Berkeley
- I- 880 Ramp | [(-122.2536,37.898),(-122.254,37.902)] | Berkeley
- I- 880 Ramp | [(-122.2771,37.002),(-122.278,37)] | Lafayette
- Indian Way | [(-122.2066,37.398),(-122.2045,37.411)] | Lafayette
- Jackson St | [(-122.0845,37.6),(-122.0842,37.606)] | Berkeley
- Johnson Dr | [(-121.9145,37.901),(-121.915,37.877)] | Oakland
- Joyce St | [(-122.0792,37.604),(-122.0774,37.581)] | Berkeley
- Juniper St | [(-121.7823,37.897),(-121.7815,37.9)] | Oakland
- Kaiser Dr | [(-122.067163,37.47821),(-122.060402,37.51961)] | Oakland
- Keeler Ave | [(-122.2578,37.906),(-122.2579,37.899)] | Berkeley
- Kildare Road | [(-122.0968,37.016),(-122.0959,37)] | Oakland
- La Playa Dr | [(-122.1039,37.545),(-122.101,37.493)] | Oakland
- Laguna Ave | [(-122.2099,37.989),(-122.2089,37)] | Berkeley
- Laguna Ave | [(-122.2099,37.989),(-122.2089,37)] | Lafayette
- Lakehurst Cir | [(-122.284729,37.89025),(-122.286096,37.90364)] | Berkeley
- Lakeshore Ave | [(-122.2586,37.99),(-122.2556,37.006)] | Berkeley
- Lakeshore Ave | [(-122.2586,37.99),(-122.2556,37.006)] | Lafayette
- Las Positas Road | [(-121.764488,37.99199),(-121.75569,37.02022)] | Oakland
- Las Positas Road | [(-121.764488,37.99199),(-121.75569,37.02022)] | Oakland
- Linden St | [(-122.2867,37.998),(-122.2864,37.008)] | Berkeley
- Linden St | [(-122.2867,37.998),(-122.2864,37.008)] | Lafayette
- Livermore Ave | [(-121.7687,37.448),(-121.769,37.375)] | Oakland
- Livermore Ave | [(-121.7687,37.448),(-121.769,37.375)] | Oakland
- Livermore Ave | [(-121.772719,37.99085),(-121.7728,37.001)] | Oakland
- Livermore Ave | [(-121.772719,37.99085),(-121.7728,37.001)] | Oakland
- Locust St | [(-122.1606,37.007),(-122.1593,37.987)] | Oakland
- Locust St | [(-122.1606,37.007),(-122.1593,37.987)] | Berkeley
- Logan Ct | [(-122.0053,37.492),(-122.0061,37.484)] | Oakland
- Magnolia St | [(-122.0971,37.5),(-122.0962,37.484)] | Oakland
- Mandalay Road | [(-122.2322,37.397),(-122.2321,37.403)] | Lafayette
- Marin Ave | [(-122.2741,37.894),(-122.272,37.901)] | Berkeley
- Martin Luther King Jr Way | [(-122.2712,37.608),(-122.2711,37.599)] | Berkeley
- Mattos Dr | [(-122.0005,37.502),(-122.000898,37.49683)] | Oakland
- Maubert Ave | [(-122.1114,37.009),(-122.1096,37.995)] | Oakland
- Maubert Ave | [(-122.1114,37.009),(-122.1096,37.995)] | Berkeley
- McClure Ave | [(-122.1431,37.001),(-122.1436,37.998)] | Oakland
- McClure Ave | [(-122.1431,37.001),(-122.1436,37.998)] | Berkeley
- Medlar Dr | [(-122.0627,37.378),(-122.0625,37.375)] | Oakland
- Mildred Ct | [(-122.0002,37.388),(-121.9998,37.386)] | Oakland
- Miller Road | [(-122.0902,37.645),(-122.0865,37.545)] | Berkeley
- Miramar Ave | [(-122.1009,37.025),(-122.099089,37.03209)] | Oakland
- Mission Blvd | [(-121.918886,37),(-121.9194,37.976),(-121.9198,37.975)] | Oakland
- Mission Blvd | [(-121.918886,37),(-121.9194,37.976),(-121.9198,37.975)] | Oakland
- Mission Blvd | [(-122.0006,37.896),(-121.9989,37.88)] | Oakland
- Mission Blvd | [(-122.0006,37.896),(-121.9989,37.88)] | Berkeley
- Moores Ave | [(-122.0087,37.301),(-122.0094,37.292)] | Oakland
- National Ave | [(-122.1192,37.5),(-122.1281,37.489)] | Oakland
- Navajo Ct | [(-121.8779,37.901),(-121.8783,37.9)] | Oakland
- Newark Blvd | [(-122.0352,37.438),(-122.0341,37.423)] | Oakland
- Oakland Inner Harbor | [(-122.2625,37.913),(-122.260016,37.89484)] | Berkeley
- Oakridge Road | [(-121.8316,37.049),(-121.828382,37)] | Oakland
- Oneil Ave | [(-122.076754,37.62476),(-122.0745,37.595)] | Berkeley
- Parkridge Dr | [(-122.1438,37.884),(-122.1428,37.9)] | Berkeley
- Parkside Dr | [(-122.0475,37.603),(-122.0443,37.596)] | Berkeley
- Paseo Padre Pkwy | [(-121.9143,37.005),(-121.913522,37)] | Oakland
- Paseo Padre Pkwy | [(-122.0021,37.639),(-121.996,37.628)] | Oakland
- Paseo Padre Pkwy | [(-122.0021,37.639),(-121.996,37.628)] | Berkeley
- Pearl St | [(-122.2383,37.594),(-122.2366,37.615)] | Berkeley
- Periwinkle Road | [(-122.0451,37.301),(-122.044758,37.29844)] | Oakland
- Pimlico Dr | [(-121.8616,37.998),(-121.8618,37.008)] | Oakland
- Pimlico Dr | [(-121.8616,37.998),(-121.8618,37.008)] | Oakland
- Portsmouth Ave | [(-122.1064,37.315),(-122.1064,37.308)] | Oakland
- Proctor Ave | [(-122.2267,37.406),(-122.2251,37.386)] | Lafayette
- Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Oakland
- Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Oakland
- Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Berkeley
- Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Oakland
- Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Oakland
- Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Berkeley
- Redding St | [(-122.1978,37.901),(-122.1975,37.895)] | Berkeley
- Redwood Road | [(-122.1493,37.98),(-122.1437,37.001)] | Oakland
- Redwood Road | [(-122.1493,37.98),(-122.1437,37.001)] | Berkeley
- Roca Dr | [(-122.0335,37.609),(-122.0314,37.599)] | Berkeley
- Rosedale Ct | [(-121.9232,37.9),(-121.924,37.897)] | Oakland
- Sacramento St | [(-122.2799,37.606),(-122.2797,37.597)] | Berkeley
- Saddle Brook Dr | [(-122.1478,37.909),(-122.1454,37.904),(-122.1451,37.888)] | Berkeley
- Saginaw Ct | [(-121.8803,37.898),(-121.8806,37.901)] | Oakland
- San Andreas Dr | [(-122.0609,37.9),(-122.0614,37.895)] | Berkeley
- Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Oakland
- Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Oakland
- Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Berkeley
- Shattuck Ave | [(-122.2686,37.904),(-122.2686,37.897)] | Berkeley
- Sheridan Road | [(-122.2279,37.425),(-122.2253,37.411),(-122.2223,37.377)] | Lafayette
- Shoreline Dr | [(-122.2657,37.603),(-122.2648,37.6)] | Berkeley
- Skyline Blvd | [(-122.1738,37.01),(-122.1714,37.996)] | Oakland
- Skyline Blvd | [(-122.1738,37.01),(-122.1714,37.996)] | Berkeley
- Skyline Dr | [(-122.0277,37.5),(-122.0284,37.498)] | Oakland
- Skywest Dr | [(-122.1161,37.62),(-122.1123,37.586)] | Berkeley
- Southern Pacific Railroad | [(-122.3002,37.674),(-122.2999,37.661)] | Berkeley
- Sp Railroad | [(-121.893564,37.99009),(-121.897,37.016)] | Oakland
- Sp Railroad | [(-121.893564,37.99009),(-121.897,37.016)] | Oakland
- Sp Railroad | [(-121.9565,37.898),(-121.9562,37.9)] | Oakland
- Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Oakland
- Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Oakland
- Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Berkeley
- Sp Railroad | [(-122.0914,37.601),(-122.087,37.56),(-122.086408,37.5551)] | Berkeley
- Sp Railroad | [(-122.137792,37.003),(-122.1365,37.992),(-122.131257,37.94612)] | Oakland
- Sp Railroad | [(-122.137792,37.003),(-122.1365,37.992),(-122.131257,37.94612)] | Berkeley
- Sp Railroad | [(-122.1947,37.497),(-122.193328,37.4848)] | Oakland
- Stanton Ave | [(-122.100392,37.0697),(-122.099513,37.06052)] | Oakland
- State Hwy 123 | [(-122.3004,37.986),(-122.2998,37.969),(-122.2995,37.962),(-122.2992,37.952),(-122.299,37.942),(-122.2987,37.935),(-122.2984,37.924),(-122.2982,37.92),(-122.2976,37.904),(-122.297,37.88),(-122.2966,37.869),(-122.2959,37.848),(-122.2961,37.843)] | Berkeley
- State Hwy 13 | [(-122.1797,37.943),(-122.179871,37.91849),(-122.18,37.9),(-122.179023,37.86615),(-122.1787,37.862),(-122.1781,37.851),(-122.1777,37.845),(-122.1773,37.839),(-122.177,37.833)] | Berkeley
- State Hwy 13 | [(-122.2049,37.2),(-122.20328,37.17975),(-122.1989,37.125),(-122.198078,37.11641),(-122.1975,37.11)] | Lafayette
- State Hwy 13 Ramp | [(-122.2244,37.427),(-122.223,37.414),(-122.2214,37.396),(-122.2213,37.388)] | Lafayette
- State Hwy 238 | ((-122.098,37.908),(-122.0983,37.907),(-122.099,37.905),(-122.101,37.898),(-122.101535,37.89711),(-122.103173,37.89438),(-122.1046,37.892),(-122.106,37.89)) | Berkeley
- State Hwy 238 Ramp | [(-122.1288,37.9),(-122.1293,37.895),(-122.1296,37.906)] | Berkeley
- State Hwy 24 | [(-122.2674,37.246),(-122.2673,37.248),(-122.267,37.261),(-122.2668,37.271),(-122.2663,37.298),(-122.2659,37.315),(-122.2655,37.336),(-122.265007,37.35882),(-122.264443,37.37286),(-122.2641,37.381),(-122.2638,37.388),(-122.2631,37.396),(-122.2617,37.405),(-122.2615,37.407),(-122.2605,37.412)] | Lafayette
- State Hwy 84 | [(-121.9565,37.898),(-121.956589,37.89911),(-121.9569,37.903),(-121.956,37.91),(-121.9553,37.919)] | Oakland
- State Hwy 84 | [(-122.0671,37.426),(-122.07,37.402),(-122.074,37.37),(-122.0773,37.338)] | Oakland
- State Hwy 92 | [(-122.1085,37.326),(-122.1095,37.322),(-122.1111,37.316),(-122.1119,37.313),(-122.1125,37.311),(-122.1131,37.308),(-122.1167,37.292),(-122.1187,37.285),(-122.12,37.28)] | Oakland
- State Hwy 92 Ramp | [(-122.1086,37.321),(-122.1089,37.315),(-122.1111,37.316)] | Oakland
- Stuart St | [(-122.2518,37.6),(-122.2507,37.601),(-122.2491,37.606)] | Berkeley
- Sunol Ridge Trl | [(-121.9419,37.455),(-121.9345,37.38)] | Oakland
- Sunol Ridge Trl | [(-121.9419,37.455),(-121.9345,37.38)] | Oakland
- Tassajara Creek | [(-121.87866,37.98898),(-121.8782,37.015)] | Oakland
- Tassajara Creek | [(-121.87866,37.98898),(-121.8782,37.015)] | Oakland
- Taurus Ave | [(-122.2159,37.416),(-122.2128,37.389)] | Lafayette
- Tennyson Road | [(-122.0891,37.317),(-122.0927,37.317)] | Oakland
- Thackeray Ave | [(-122.072,37.305),(-122.0715,37.298)] | Oakland
- Theresa Way | [(-121.7289,37.906),(-121.728,37.899)] | Oakland
- Tissiack Way | [(-121.920364,37),(-121.9208,37.995)] | Oakland
- Tissiack Way | [(-121.920364,37),(-121.9208,37.995)] | Oakland
- Tupelo Ter | [(-122.059087,37.6113),(-122.057021,37.59942)] | Berkeley
- Vallecitos Road | [(-121.8699,37.916),(-121.8703,37.891)] | Oakland
- Warm Springs Blvd | [(-121.933956,37),(-121.9343,37.97)] | Oakland
- Warm Springs Blvd | [(-121.933956,37),(-121.9343,37.97)] | Oakland
- Welch Creek Road | [(-121.7695,37.386),(-121.7737,37.413)] | Oakland
- Welch Creek Road | [(-121.7695,37.386),(-121.7737,37.413)] | Oakland
- West Loop Road | [(-122.0576,37.604),(-122.0602,37.586)] | Berkeley
- Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Oakland
- Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Oakland
- Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Berkeley
- Whitlock Creek | [(-121.74683,37.91276),(-121.733107,37)] | Oakland
- Whitlock Creek | [(-121.74683,37.91276),(-121.733107,37)] | Oakland
- Willimet Way | [(-122.0964,37.517),(-122.0949,37.493)] | Oakland
- Wisconsin St | [(-122.1994,37.017),(-122.1975,37.998),(-122.1971,37.994)] | Oakland
- Wisconsin St | [(-122.1994,37.017),(-122.1975,37.998),(-122.1971,37.994)] | Berkeley
- Wp Railroad | [(-122.254,37.902),(-122.2506,37.891)] | Berkeley
- 100th Ave | [(-122.1657,37.429),(-122.1647,37.432)] | Oakland
- 107th Ave | [(-122.1555,37.403),(-122.1531,37.41)] | Oakland
- 14th St | [(-122.299,37.147),(-122.3,37.148)] | Lafayette
- 19th Ave | [(-122.2366,37.897),(-122.2359,37.905)] | Berkeley
- 1st St | [(-121.75508,37.89294),(-121.753581,37.90031)] | Oakland
- 5th St | [(-122.278,37),(-122.2792,37.005),(-122.2803,37.009)] | Lafayette
- 5th St | [(-122.296,37.615),(-122.2953,37.598)] | Berkeley
- 82nd Ave | [(-122.1695,37.596),(-122.1681,37.603)] | Berkeley
- 85th Ave | [(-122.1877,37.466),(-122.186,37.476)] | Oakland
- 89th Ave | [(-122.1822,37.459),(-122.1803,37.471)] | Oakland
- 98th Ave | [(-122.1568,37.498),(-122.1558,37.502)] | Oakland
- 98th Ave | [(-122.1693,37.438),(-122.1682,37.444)] | Oakland
- 98th Ave | [(-122.2001,37.258),(-122.1974,37.27)] | Lafayette
-(333 rows)
-
-SELECT name, #thepath FROM iexit ORDER BY name COLLATE "C", 2;
- name | ?column?
-------------------------------------+----------
- I- 580 | 2
- I- 580 | 2
- I- 580 | 2
- I- 580 | 2
- I- 580 | 2
- I- 580 | 2
- I- 580 | 2
- I- 580 | 2
- I- 580 | 2
- I- 580 | 2
- I- 580 | 2
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 3
- I- 580 | 4
- I- 580 | 4
- I- 580 | 4
- I- 580 | 4
- I- 580 | 5
- I- 580 | 5
- I- 580 | 5
- I- 580 | 5
- I- 580 | 5
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 6
- I- 580 | 7
- I- 580 | 7
- I- 580 | 7
- I- 580 | 7
- I- 580 | 7
- I- 580 | 7
- I- 580 | 7
- I- 580 | 8
- I- 580 | 8
- I- 580 | 8
- I- 580 | 8
- I- 580 | 8
- I- 580 | 8
- I- 580 | 8
- I- 580 | 8
- I- 580 | 8
- I- 580 | 9
- I- 580 | 9
- I- 580 | 9
- I- 580 | 9
- I- 580 | 9
- I- 580 | 12
- I- 580 | 12
- I- 580 | 12
- I- 580 | 12
- I- 580 | 12
- I- 580 | 12
- I- 580 | 12
- I- 580 | 12
- I- 580 | 12
- I- 580 | 12
- I- 580 | 13
- I- 580 | 13
- I- 580 | 13
- I- 580 | 13
- I- 580 | 13
- I- 580 | 13
- I- 580 | 14
- I- 580 | 14
- I- 580 | 14
- I- 580 | 14
- I- 580 | 14
- I- 580 | 14
- I- 580 | 14
- I- 580 | 14
- I- 580 | 18
- I- 580 | 18
- I- 580 | 18
- I- 580 | 18
- I- 580 | 18
- I- 580 | 18
- I- 580 | 21
- I- 580 | 21
- I- 580 | 21
- I- 580 | 21
- I- 580 | 21
- I- 580 | 21
- I- 580 | 21
- I- 580 | 21
- I- 580 | 21
- I- 580 | 21
- I- 580 | 22
- I- 580 | 22
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 2
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 3
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 4
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 5
- I- 580 Ramp | 6
- I- 580 Ramp | 6
- I- 580 Ramp | 6
- I- 580 Ramp | 7
- I- 580 Ramp | 8
- I- 580 Ramp | 8
- I- 580 Ramp | 8
- I- 580 Ramp | 8
- I- 580 Ramp | 8
- I- 580 Ramp | 8
- I- 580/I-680 Ramp | 2
- I- 580/I-680 Ramp | 2
- I- 580/I-680 Ramp | 2
- I- 580/I-680 Ramp | 2
- I- 580/I-680 Ramp | 2
- I- 580/I-680 Ramp | 2
- I- 580/I-680 Ramp | 4
- I- 580/I-680 Ramp | 4
- I- 580/I-680 Ramp | 4
- I- 580/I-680 Ramp | 4
- I- 580/I-680 Ramp | 5
- I- 580/I-680 Ramp | 6
- I- 580/I-680 Ramp | 6
- I- 580/I-680 Ramp | 6
- I- 680 | 2
- I- 680 | 2
- I- 680 | 2
- I- 680 | 2
- I- 680 | 2
- I- 680 | 2
- I- 680 | 2
- I- 680 | 3
- I- 680 | 3
- I- 680 | 3
- I- 680 | 4
- I- 680 | 4
- I- 680 | 4
- I- 680 | 5
- I- 680 | 5
- I- 680 | 5
- I- 680 | 7
- I- 680 | 7
- I- 680 | 7
- I- 680 | 7
- I- 680 | 8
- I- 680 | 8
- I- 680 | 8
- I- 680 | 8
- I- 680 | 10
- I- 680 | 10
- I- 680 | 10
- I- 680 | 10
- I- 680 | 10
- I- 680 | 10
- I- 680 | 10
- I- 680 | 16
- I- 680 | 16
- I- 680 | 16
- I- 680 | 16
- I- 680 | 16
- I- 680 | 16
- I- 680 | 16
- I- 680 | 16
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 2
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 3
- I- 680 Ramp | 4
- I- 680 Ramp | 4
- I- 680 Ramp | 4
- I- 680 Ramp | 5
- I- 680 Ramp | 5
- I- 680 Ramp | 5
- I- 680 Ramp | 5
- I- 680 Ramp | 5
- I- 680 Ramp | 5
- I- 680 Ramp | 6
- I- 680 Ramp | 6
- I- 680 Ramp | 6
- I- 680 Ramp | 6
- I- 680 Ramp | 7
- I- 680 Ramp | 7
- I- 680 Ramp | 7
- I- 680 Ramp | 7
- I- 680 Ramp | 8
- I- 680 Ramp | 8
- I- 680 Ramp | 8
- I- 680 Ramp | 8
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 2
- I- 80 | 3
- I- 80 | 3
- I- 80 | 3
- I- 80 | 4
- I- 80 | 4
- I- 80 | 4
- I- 80 | 4
- I- 80 | 4
- I- 80 | 5
- I- 80 | 5
- I- 80 | 5
- I- 80 | 5
- I- 80 | 5
- I- 80 | 5
- I- 80 | 5
- I- 80 | 5
- I- 80 | 5
- I- 80 | 11
- I- 80 | 11
- I- 80 | 11
- I- 80 | 11
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 2
- I- 80 Ramp | 3
- I- 80 Ramp | 3
- I- 80 Ramp | 3
- I- 80 Ramp | 3
- I- 80 Ramp | 3
- I- 80 Ramp | 3
- I- 80 Ramp | 3
- I- 80 Ramp | 3
- I- 80 Ramp | 3
- I- 80 Ramp | 4
- I- 80 Ramp | 4
- I- 80 Ramp | 4
- I- 80 Ramp | 4
- I- 80 Ramp | 5
- I- 80 Ramp | 5
- I- 80 Ramp | 5
- I- 80 Ramp | 5
- I- 80 Ramp | 5
- I- 80 Ramp | 5
- I- 80 Ramp | 5
- I- 80 Ramp | 7
- I- 80 Ramp | 7
- I- 80 Ramp | 7
- I- 80 Ramp | 7
- I- 880 | 2
- I- 880 | 2
- I- 880 | 2
- I- 880 | 2
- I- 880 | 2
- I- 880 | 5
- I- 880 | 5
- I- 880 | 5
- I- 880 | 5
- I- 880 | 5
- I- 880 | 5
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 6
- I- 880 | 7
- I- 880 | 7
- I- 880 | 7
- I- 880 | 7
- I- 880 | 7
- I- 880 | 7
- I- 880 | 7
- I- 880 | 9
- I- 880 | 9
- I- 880 | 9
- I- 880 | 9
- I- 880 | 9
- I- 880 | 9
- I- 880 | 9
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 10
- I- 880 | 12
- I- 880 | 12
- I- 880 | 12
- I- 880 | 12
- I- 880 | 12
- I- 880 | 12
- I- 880 | 12
- I- 880 | 12
- I- 880 | 12
- I- 880 | 12
- I- 880 | 12
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 13
- I- 880 | 14
- I- 880 | 14
- I- 880 | 14
- I- 880 | 14
- I- 880 | 14
- I- 880 | 14
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 17
- I- 880 | 19
- I- 880 | 19
- I- 880 | 19
- I- 880 | 19
- I- 880 | 19
- I- 880 | 19
- I- 880 | 19
- I- 880 | 19
- I- 880 | 19
- I- 880 | 19
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 2
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 3
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 4
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 5
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 6
- I- 880 Ramp | 8
- I- 880 Ramp | 8
- I- 880 Ramp | 8
- I- 980 | 2
- I- 980 | 2
- I- 980 | 2
- I- 980 | 2
- I- 980 | 2
- I- 980 | 2
- I- 980 | 2
- I- 980 | 2
- I- 980 | 3
- I- 980 | 3
- I- 980 | 3
- I- 980 | 3
- I- 980 | 3
- I- 980 | 3
- I- 980 | 3
- I- 980 | 3
- I- 980 | 3
- I- 980 | 4
- I- 980 | 4
- I- 980 | 5
- I- 980 | 5
- I- 980 | 7
- I- 980 | 7
- I- 980 | 7
- I- 980 | 7
- I- 980 | 12
- I- 980 Ramp | 3
- I- 980 Ramp | 3
- I- 980 Ramp | 3
- I- 980 Ramp | 7
-(896 rows)
-
-SELECT * FROM toyemp WHERE name = 'sharon';
- name | age | location | annualsal
---------+-----+----------+-----------
- sharon | 25 | (15,12) | 12000
-(1 row)
-
---
--- Test for Leaky view scenario
---
-CREATE ROLE regress_alice;
-CREATE FUNCTION f_leak (text)
- RETURNS bool LANGUAGE 'plpgsql' COST 0.0000001
- AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END';
-CREATE TABLE customer (
- cid int primary key,
- name text not null,
- tel text,
- passwd text
-);
-CREATE TABLE credit_card (
- cid int references customer(cid),
- cnum text,
- climit int
-);
-CREATE TABLE credit_usage (
- cid int references customer(cid),
- ymd date,
- usage int
-);
-INSERT INTO customer
- VALUES (101, 'regress_alice', '+81-12-3456-7890', 'passwd123'),
- (102, 'regress_bob', '+01-234-567-8901', 'beafsteak'),
- (103, 'regress_eve', '+49-8765-43210', 'hamburger');
-INSERT INTO credit_card
- VALUES (101, '1111-2222-3333-4444', 4000),
- (102, '5555-6666-7777-8888', 3000),
- (103, '9801-2345-6789-0123', 2000);
-INSERT INTO credit_usage
- VALUES (101, '2011-09-15', 120),
- (101, '2011-10-05', 90),
- (101, '2011-10-18', 110),
- (101, '2011-10-21', 200),
- (101, '2011-11-10', 80),
- (102, '2011-09-22', 300),
- (102, '2011-10-12', 120),
- (102, '2011-10-28', 200),
- (103, '2011-10-15', 480);
-CREATE VIEW my_property_normal AS
- SELECT * FROM customer WHERE name = current_user;
-CREATE VIEW my_property_secure WITH (security_barrier) AS
- SELECT * FROM customer WHERE name = current_user;
-CREATE VIEW my_credit_card_normal AS
- SELECT * FROM customer l NATURAL JOIN credit_card r
- WHERE l.name = current_user;
-CREATE VIEW my_credit_card_secure WITH (security_barrier) AS
- SELECT * FROM customer l NATURAL JOIN credit_card r
- WHERE l.name = current_user;
-CREATE VIEW my_credit_card_usage_normal AS
- SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
-CREATE VIEW my_credit_card_usage_secure WITH (security_barrier) AS
- SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r;
-GRANT SELECT ON my_property_normal TO public;
-GRANT SELECT ON my_property_secure TO public;
-GRANT SELECT ON my_credit_card_normal TO public;
-GRANT SELECT ON my_credit_card_secure TO public;
-GRANT SELECT ON my_credit_card_usage_normal TO public;
-GRANT SELECT ON my_credit_card_usage_secure TO public;
---
--- Run leaky view scenarios
---
-SET SESSION AUTHORIZATION regress_alice;
---
--- scenario: if a qualifier with tiny-cost is given, it shall be launched
--- prior to the security policy of the view.
---
-SELECT * FROM my_property_normal WHERE f_leak(passwd);
-NOTICE: f_leak => passwd123
-NOTICE: f_leak => beafsteak
-NOTICE: f_leak => hamburger
- cid | name | tel | passwd
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal WHERE f_leak(passwd);
- QUERY PLAN
-------------------------------------------------------
- Seq Scan on customer
- Filter: (f_leak(passwd) AND (name = CURRENT_USER))
-(2 rows)
-
-SELECT * FROM my_property_secure WHERE f_leak(passwd);
-NOTICE: f_leak => passwd123
- cid | name | tel | passwd
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure WHERE f_leak(passwd);
- QUERY PLAN
----------------------------------------------
- Subquery Scan on my_property_secure
- Filter: f_leak(my_property_secure.passwd)
- -> Seq Scan on customer
- Filter: (name = CURRENT_USER)
-(4 rows)
-
---
--- scenario: qualifiers can be pushed down if they contain leaky functions,
--- provided they aren't passed data from inside the view.
---
-SELECT * FROM my_property_normal v
- WHERE f_leak('passwd') AND f_leak(passwd);
-NOTICE: f_leak => passwd
-NOTICE: f_leak => passwd123
-NOTICE: f_leak => passwd
-NOTICE: f_leak => beafsteak
-NOTICE: f_leak => passwd
-NOTICE: f_leak => hamburger
- cid | name | tel | passwd
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal v
- WHERE f_leak('passwd') AND f_leak(passwd);
- QUERY PLAN
----------------------------------------------------------------------------------
- Seq Scan on customer
- Filter: (f_leak('passwd'::text) AND f_leak(passwd) AND (name = CURRENT_USER))
-(2 rows)
-
-SELECT * FROM my_property_secure v
- WHERE f_leak('passwd') AND f_leak(passwd);
-NOTICE: f_leak => passwd
-NOTICE: f_leak => passwd123
-NOTICE: f_leak => passwd
-NOTICE: f_leak => passwd
- cid | name | tel | passwd
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure v
- WHERE f_leak('passwd') AND f_leak(passwd);
- QUERY PLAN
---------------------------------------------------------------------
- Subquery Scan on v
- Filter: f_leak(v.passwd)
- -> Seq Scan on customer
- Filter: (f_leak('passwd'::text) AND (name = CURRENT_USER))
-(4 rows)
-
---
--- scenario: if a qualifier references only one-side of a particular join-
--- tree, it shall be distributed to the most deep scan plan as
--- possible as we can.
---
-SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
-NOTICE: f_leak => 1111-2222-3333-4444
-NOTICE: f_leak => 5555-6666-7777-8888
-NOTICE: f_leak => 9801-2345-6789-0123
- cid | name | tel | passwd | cnum | climit
------+---------------+------------------+-----------+---------------------+--------
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_normal WHERE f_leak(cnum);
- QUERY PLAN
----------------------------------------------
- Hash Join
- Hash Cond: (r.cid = l.cid)
- -> Seq Scan on credit_card r
- Filter: f_leak(cnum)
- -> Hash
- -> Seq Scan on customer l
- Filter: (name = CURRENT_USER)
-(7 rows)
-
-SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
-NOTICE: f_leak => 1111-2222-3333-4444
- cid | name | tel | passwd | cnum | climit
------+---------------+------------------+-----------+---------------------+--------
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum);
- QUERY PLAN
----------------------------------------------------
- Subquery Scan on my_credit_card_secure
- Filter: f_leak(my_credit_card_secure.cnum)
- -> Hash Join
- Hash Cond: (r.cid = l.cid)
- -> Seq Scan on credit_card r
- -> Hash
- -> Seq Scan on customer l
- Filter: (name = CURRENT_USER)
-(8 rows)
-
---
--- scenario: an external qualifier can be pushed-down by in-front-of the
--- views with "security_barrier" attribute, except for operators
--- implemented with leakproof functions.
---
-SELECT * FROM my_credit_card_usage_normal
- WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
-NOTICE: f_leak => 1111-2222-3333-4444
- cid | name | tel | passwd | cnum | climit | ymd | usage
------+---------------+------------------+-----------+---------------------+--------+------------+-------
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-05-2011 | 90
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-18-2011 | 110
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-21-2011 | 200
-(3 rows)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_normal
- WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
- QUERY PLAN
-------------------------------------------------------------------------------
- Nested Loop
- Join Filter: (l.cid = r.cid)
- -> Seq Scan on credit_usage r
- Filter: ((ymd >= '10-01-2011'::date) AND (ymd < '11-01-2011'::date))
- -> Materialize
- -> Subquery Scan on l
- Filter: f_leak(l.cnum)
- -> Hash Join
- Hash Cond: (r_1.cid = l_1.cid)
- -> Seq Scan on credit_card r_1
- -> Hash
- -> Seq Scan on customer l_1
- Filter: (name = CURRENT_USER)
-(13 rows)
-
-SELECT * FROM my_credit_card_usage_secure
- WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
-NOTICE: f_leak => 1111-2222-3333-4444
-NOTICE: f_leak => 1111-2222-3333-4444
-NOTICE: f_leak => 1111-2222-3333-4444
- cid | name | tel | passwd | cnum | climit | ymd | usage
------+---------------+------------------+-----------+---------------------+--------+------------+-------
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-05-2011 | 90
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-18-2011 | 110
- 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-21-2011 | 200
-(3 rows)
-
-EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_secure
- WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01';
- QUERY PLAN
-------------------------------------------------------------------------------------
- Subquery Scan on my_credit_card_usage_secure
- Filter: f_leak(my_credit_card_usage_secure.cnum)
- -> Nested Loop
- Join Filter: (l.cid = r.cid)
- -> Seq Scan on credit_usage r
- Filter: ((ymd >= '10-01-2011'::date) AND (ymd < '11-01-2011'::date))
- -> Materialize
- -> Hash Join
- Hash Cond: (r_1.cid = l.cid)
- -> Seq Scan on credit_card r_1
- -> Hash
- -> Seq Scan on customer l
- Filter: (name = CURRENT_USER)
-(13 rows)
-
---
--- Test for the case when security_barrier gets changed between rewriter
--- and planner stage.
---
-PREPARE p1 AS SELECT * FROM my_property_normal WHERE f_leak(passwd);
-PREPARE p2 AS SELECT * FROM my_property_secure WHERE f_leak(passwd);
-EXECUTE p1;
-NOTICE: f_leak => passwd123
-NOTICE: f_leak => beafsteak
-NOTICE: f_leak => hamburger
- cid | name | tel | passwd
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXECUTE p2;
-NOTICE: f_leak => passwd123
- cid | name | tel | passwd
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-RESET SESSION AUTHORIZATION;
-ALTER VIEW my_property_normal SET (security_barrier=true);
-ALTER VIEW my_property_secure SET (security_barrier=false);
-SET SESSION AUTHORIZATION regress_alice;
-EXECUTE p1; -- To be perform as a view with security-barrier
-NOTICE: f_leak => passwd123
- cid | name | tel | passwd
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
-EXECUTE p2; -- To be perform as a view without security-barrier
-NOTICE: f_leak => passwd123
-NOTICE: f_leak => beafsteak
-NOTICE: f_leak => hamburger
- cid | name | tel | passwd
------+---------------+------------------+-----------
- 101 | regress_alice | +81-12-3456-7890 | passwd123
-(1 row)
-
--- Cleanup.
-RESET SESSION AUTHORIZATION;
-DROP ROLE regress_alice;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/portals_p2.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/portals_p2.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/portals_p2.out 2024-11-15 02:50:52.486055632 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/portals_p2.out 2024-11-15 02:59:17.225115696 +0000
@@ -1,122 +1,2 @@
---
--- PORTALS_P2
---
-BEGIN;
-DECLARE foo13 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 50;
-DECLARE foo14 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 51;
-DECLARE foo15 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 52;
-DECLARE foo16 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 53;
-DECLARE foo17 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 54;
-DECLARE foo18 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 55;
-DECLARE foo19 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 56;
-DECLARE foo20 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 57;
-DECLARE foo21 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 58;
-DECLARE foo22 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 59;
-DECLARE foo23 CURSOR FOR
- SELECT * FROM onek WHERE unique1 = 60;
-DECLARE foo24 CURSOR FOR
- SELECT * FROM onek2 WHERE unique1 = 50;
-DECLARE foo25 CURSOR FOR
- SELECT * FROM onek2 WHERE unique1 = 60;
-FETCH all in foo13;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 50 | 253 | 0 | 2 | 0 | 10 | 0 | 50 | 50 | 50 | 50 | 0 | 1 | YBAAAA | TJAAAA | HHHHxx
-(1 row)
-
-FETCH all in foo14;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 51 | 76 | 1 | 3 | 1 | 11 | 1 | 51 | 51 | 51 | 51 | 2 | 3 | ZBAAAA | YCAAAA | AAAAxx
-(1 row)
-
-FETCH all in foo15;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 52 | 985 | 0 | 0 | 2 | 12 | 2 | 52 | 52 | 52 | 52 | 4 | 5 | ACAAAA | XLBAAA | HHHHxx
-(1 row)
-
-FETCH all in foo16;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 53 | 196 | 1 | 1 | 3 | 13 | 3 | 53 | 53 | 53 | 53 | 6 | 7 | BCAAAA | OHAAAA | AAAAxx
-(1 row)
-
-FETCH all in foo17;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 54 | 356 | 0 | 2 | 4 | 14 | 4 | 54 | 54 | 54 | 54 | 8 | 9 | CCAAAA | SNAAAA | AAAAxx
-(1 row)
-
-FETCH all in foo18;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 55 | 627 | 1 | 3 | 5 | 15 | 5 | 55 | 55 | 55 | 55 | 10 | 11 | DCAAAA | DYAAAA | VVVVxx
-(1 row)
-
-FETCH all in foo19;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 56 | 54 | 0 | 0 | 6 | 16 | 6 | 56 | 56 | 56 | 56 | 12 | 13 | ECAAAA | CCAAAA | OOOOxx
-(1 row)
-
-FETCH all in foo20;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 57 | 942 | 1 | 1 | 7 | 17 | 7 | 57 | 57 | 57 | 57 | 14 | 15 | FCAAAA | GKBAAA | OOOOxx
-(1 row)
-
-FETCH all in foo21;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 58 | 114 | 0 | 2 | 8 | 18 | 8 | 58 | 58 | 58 | 58 | 16 | 17 | GCAAAA | KEAAAA | OOOOxx
-(1 row)
-
-FETCH all in foo22;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 59 | 593 | 1 | 3 | 9 | 19 | 9 | 59 | 59 | 59 | 59 | 18 | 19 | HCAAAA | VWAAAA | HHHHxx
-(1 row)
-
-FETCH all in foo23;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 60 | 483 | 0 | 0 | 0 | 0 | 0 | 60 | 60 | 60 | 60 | 0 | 1 | ICAAAA | PSAAAA | VVVVxx
-(1 row)
-
-FETCH all in foo24;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 50 | 253 | 0 | 2 | 0 | 10 | 0 | 50 | 50 | 50 | 50 | 0 | 1 | YBAAAA | TJAAAA | HHHHxx
-(1 row)
-
-FETCH all in foo25;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 60 | 483 | 0 | 0 | 0 | 0 | 0 | 60 | 60 | 60 | 60 | 0 | 1 | ICAAAA | PSAAAA | VVVVxx
-(1 row)
-
-CLOSE foo13;
-CLOSE foo14;
-CLOSE foo15;
-CLOSE foo16;
-CLOSE foo17;
-CLOSE foo18;
-CLOSE foo19;
-CLOSE foo20;
-CLOSE foo21;
-CLOSE foo22;
-CLOSE foo23;
-CLOSE foo24;
-CLOSE foo25;
-END;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/foreign_key.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/foreign_key.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/foreign_key.out 2024-11-15 02:50:52.438134628 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/foreign_key.out 2024-11-15 02:59:17.221115691 +0000
@@ -1,3047 +1,2 @@
---
--- FOREIGN KEY
---
--- MATCH FULL
---
--- First test, check and cascade
---
-CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text );
-CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL ON DELETE CASCADE ON UPDATE CASCADE, ftest2 int );
--- Insert test data into PKTABLE
-INSERT INTO PKTABLE VALUES (1, 'Test1');
-INSERT INTO PKTABLE VALUES (2, 'Test2');
-INSERT INTO PKTABLE VALUES (3, 'Test3');
-INSERT INTO PKTABLE VALUES (4, 'Test4');
-INSERT INTO PKTABLE VALUES (5, 'Test5');
--- Insert successful rows into FK TABLE
-INSERT INTO FKTABLE VALUES (1, 2);
-INSERT INTO FKTABLE VALUES (2, 3);
-INSERT INTO FKTABLE VALUES (3, 4);
-INSERT INTO FKTABLE VALUES (NULL, 1);
--- Insert a failed row into FK TABLE
-INSERT INTO FKTABLE VALUES (100, 2);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(100) is not present in table "pktable".
--- Check FKTABLE
-SELECT * FROM FKTABLE;
- ftest1 | ftest2
---------+--------
- 1 | 2
- 2 | 3
- 3 | 4
- | 1
-(4 rows)
-
--- Delete a row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=1;
--- Check FKTABLE for removal of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2
---------+--------
- 2 | 3
- 3 | 4
- | 1
-(3 rows)
-
--- Update a row from PK TABLE
-UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2;
--- Check FKTABLE for update of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2
---------+--------
- 3 | 4
- | 1
- 1 | 3
-(3 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
---
--- check set NULL and table constraint on multiple columns
---
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2)
- REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL);
--- Test comments
-COMMENT ON CONSTRAINT constrname_wrong ON FKTABLE IS 'fk constraint comment';
-ERROR: constraint "constrname_wrong" for table "fktable" does not exist
-COMMENT ON CONSTRAINT constrname ON FKTABLE IS 'fk constraint comment';
-COMMENT ON CONSTRAINT constrname ON FKTABLE IS NULL;
--- Insert test data into PKTABLE
-INSERT INTO PKTABLE VALUES (1, 2, 'Test1');
-INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2');
-INSERT INTO PKTABLE VALUES (2, 4, 'Test2');
-INSERT INTO PKTABLE VALUES (3, 6, 'Test3');
-INSERT INTO PKTABLE VALUES (4, 8, 'Test4');
-INSERT INTO PKTABLE VALUES (5, 10, 'Test5');
--- Insert successful rows into FK TABLE
-INSERT INTO FKTABLE VALUES (1, 2, 4);
-INSERT INTO FKTABLE VALUES (1, 3, 5);
-INSERT INTO FKTABLE VALUES (2, 4, 8);
-INSERT INTO FKTABLE VALUES (3, 6, 12);
-INSERT INTO FKTABLE VALUES (NULL, NULL, 0);
--- Insert failed rows into FK TABLE
-INSERT INTO FKTABLE VALUES (100, 2, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname"
-DETAIL: Key (ftest1, ftest2)=(100, 2) is not present in table "pktable".
-INSERT INTO FKTABLE VALUES (2, 2, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname"
-DETAIL: Key (ftest1, ftest2)=(2, 2) is not present in table "pktable".
-INSERT INTO FKTABLE VALUES (NULL, 2, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-INSERT INTO FKTABLE VALUES (1, NULL, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
--- Check FKTABLE
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3
---------+--------+--------
- 1 | 2 | 4
- 1 | 3 | 5
- 2 | 4 | 8
- 3 | 6 | 12
- | | 0
-(5 rows)
-
--- Delete a row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=1 and ptest2=2;
--- Check FKTABLE for removal of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3
---------+--------+--------
- 1 | 3 | 5
- 2 | 4 | 8
- 3 | 6 | 12
- | | 0
- | | 4
-(5 rows)
-
--- Delete another row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=5 and ptest2=10;
--- Check FKTABLE (should be no change)
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3
---------+--------+--------
- 1 | 3 | 5
- 2 | 4 | 8
- 3 | 6 | 12
- | | 0
- | | 4
-(5 rows)
-
--- Update a row from PK TABLE
-UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2;
--- Check FKTABLE for update of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3
---------+--------+--------
- 1 | 3 | 5
- 3 | 6 | 12
- | | 0
- | | 4
- | | 8
-(5 rows)
-
--- Check update with part of key null
-UPDATE FKTABLE SET ftest1 = NULL WHERE ftest1 = 1;
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
--- Check update with old and new key values equal
-UPDATE FKTABLE SET ftest1 = 1 WHERE ftest1 = 1;
--- Try altering the column type where foreign keys are involved
-ALTER TABLE PKTABLE ALTER COLUMN ptest1 TYPE bigint;
-ALTER TABLE FKTABLE ALTER COLUMN ftest1 TYPE bigint;
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 | ptest3
---------+--------+---------
- 1 | 3 | Test1-2
- 3 | 6 | Test3
- 4 | 8 | Test4
- 1 | 4 | Test2
-(4 rows)
-
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3
---------+--------+--------
- 3 | 6 | 12
- | | 0
- | | 4
- | | 8
- 1 | 3 | 5
-(5 rows)
-
-DROP TABLE PKTABLE CASCADE;
-NOTICE: drop cascades to constraint constrname on table fktable
-DROP TABLE FKTABLE;
---
--- check set default and table constraint on multiple columns
---
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2)
- REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT);
--- Insert a value in PKTABLE for default
-INSERT INTO PKTABLE VALUES (-1, -2, 'The Default!');
--- Insert test data into PKTABLE
-INSERT INTO PKTABLE VALUES (1, 2, 'Test1');
-INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2');
-INSERT INTO PKTABLE VALUES (2, 4, 'Test2');
-INSERT INTO PKTABLE VALUES (3, 6, 'Test3');
-INSERT INTO PKTABLE VALUES (4, 8, 'Test4');
-INSERT INTO PKTABLE VALUES (5, 10, 'Test5');
--- Insert successful rows into FK TABLE
-INSERT INTO FKTABLE VALUES (1, 2, 4);
-INSERT INTO FKTABLE VALUES (1, 3, 5);
-INSERT INTO FKTABLE VALUES (2, 4, 8);
-INSERT INTO FKTABLE VALUES (3, 6, 12);
-INSERT INTO FKTABLE VALUES (NULL, NULL, 0);
--- Insert failed rows into FK TABLE
-INSERT INTO FKTABLE VALUES (100, 2, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL: Key (ftest1, ftest2)=(100, 2) is not present in table "pktable".
-INSERT INTO FKTABLE VALUES (2, 2, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL: Key (ftest1, ftest2)=(2, 2) is not present in table "pktable".
-INSERT INTO FKTABLE VALUES (NULL, 2, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-INSERT INTO FKTABLE VALUES (1, NULL, 4);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
--- Check FKTABLE
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3
---------+--------+--------
- 1 | 2 | 4
- 1 | 3 | 5
- 2 | 4 | 8
- 3 | 6 | 12
- | | 0
-(5 rows)
-
--- Delete a row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=1 and ptest2=2;
--- Check FKTABLE to check for removal
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3
---------+--------+--------
- 1 | 3 | 5
- 2 | 4 | 8
- 3 | 6 | 12
- | | 0
- -1 | -2 | 4
-(5 rows)
-
--- Delete another row from PK TABLE
-DELETE FROM PKTABLE WHERE ptest1=5 and ptest2=10;
--- Check FKTABLE (should be no change)
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3
---------+--------+--------
- 1 | 3 | 5
- 2 | 4 | 8
- 3 | 6 | 12
- | | 0
- -1 | -2 | 4
-(5 rows)
-
--- Update a row from PK TABLE
-UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2;
--- Check FKTABLE for update of matched row
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3
---------+--------+--------
- 1 | 3 | 5
- 3 | 6 | 12
- | | 0
- -1 | -2 | 4
- -1 | -2 | 8
-(5 rows)
-
--- this should fail for lack of CASCADE
-DROP TABLE PKTABLE;
-ERROR: cannot drop table pktable because other objects depend on it
-DETAIL: constraint constrname2 on table fktable depends on table pktable
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE PKTABLE CASCADE;
-NOTICE: drop cascades to constraint constrname2 on table fktable
-DROP TABLE FKTABLE;
---
--- First test, check with no on delete or on update
---
-CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text );
-CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL, ftest2 int );
--- Insert test data into PKTABLE
-INSERT INTO PKTABLE VALUES (1, 'Test1');
-INSERT INTO PKTABLE VALUES (2, 'Test2');
-INSERT INTO PKTABLE VALUES (3, 'Test3');
-INSERT INTO PKTABLE VALUES (4, 'Test4');
-INSERT INTO PKTABLE VALUES (5, 'Test5');
--- Insert successful rows into FK TABLE
-INSERT INTO FKTABLE VALUES (1, 2);
-INSERT INTO FKTABLE VALUES (2, 3);
-INSERT INTO FKTABLE VALUES (3, 4);
-INSERT INTO FKTABLE VALUES (NULL, 1);
--- Insert a failed row into FK TABLE
-INSERT INTO FKTABLE VALUES (100, 2);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(100) is not present in table "pktable".
--- Check FKTABLE
-SELECT * FROM FKTABLE;
- ftest1 | ftest2
---------+--------
- 1 | 2
- 2 | 3
- 3 | 4
- | 1
-(4 rows)
-
--- Check PKTABLE
-SELECT * FROM PKTABLE;
- ptest1 | ptest2
---------+--------
- 1 | Test1
- 2 | Test2
- 3 | Test3
- 4 | Test4
- 5 | Test5
-(5 rows)
-
--- Delete a row from PK TABLE (should fail)
-DELETE FROM PKTABLE WHERE ptest1=1;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL: Key (ptest1)=(1) is still referenced from table "fktable".
--- Delete a row from PK TABLE (should succeed)
-DELETE FROM PKTABLE WHERE ptest1=5;
--- Check PKTABLE for deletes
-SELECT * FROM PKTABLE;
- ptest1 | ptest2
---------+--------
- 1 | Test1
- 2 | Test2
- 3 | Test3
- 4 | Test4
-(4 rows)
-
--- Update a row from PK TABLE (should fail)
-UPDATE PKTABLE SET ptest1=0 WHERE ptest1=2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL: Key (ptest1)=(2) is still referenced from table "fktable".
--- Update a row from PK TABLE (should succeed)
-UPDATE PKTABLE SET ptest1=0 WHERE ptest1=4;
--- Check PKTABLE for updates
-SELECT * FROM PKTABLE;
- ptest1 | ptest2
---------+--------
- 1 | Test1
- 2 | Test2
- 3 | Test3
- 0 | Test4
-(4 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
---
--- Check initial check upon ALTER TABLE
---
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, PRIMARY KEY(ptest1, ptest2) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int );
-INSERT INTO PKTABLE VALUES (1, 2);
-INSERT INTO FKTABLE VALUES (1, NULL);
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) REFERENCES PKTABLE MATCH FULL;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- MATCH SIMPLE
--- Base test restricting update/delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
- FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert a failed values
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- 1 | 2 | 3 | 1
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
-(5 rows)
-
--- Try to update something that should fail
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "constrname3" on table "fktable"
-DETAIL: Key (ptest1, ptest2, ptest3)=(1, 2, 3) is still referenced from table "fktable".
--- Try to update something that should succeed
-UPDATE PKTABLE set ptest1=1 WHERE ptest2=3;
--- Try to delete something that should fail
-DELETE FROM PKTABLE where ptest1=1 and ptest2=2 and ptest3=3;
-ERROR: update or delete on table "pktable" violates foreign key constraint "constrname3" on table "fktable"
-DETAIL: Key (ptest1, ptest2, ptest3)=(1, 2, 3) is still referenced from table "fktable".
--- Try to delete something that should work
-DELETE FROM PKTABLE where ptest1=2;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 1 | 2 | 3 | test1
- 1 | 3 | 3 | test2
- 1 | 3 | 4 | test3
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- 1 | 2 | 3 | 1
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
-(5 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- restrict with null values
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, UNIQUE(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
- FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE (ptest1, ptest2, ptest3));
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, NULL, 'test2');
-INSERT INTO PKTABLE VALUES (2, NULL, 4, 'test3');
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-DELETE FROM PKTABLE WHERE ptest1 = 2;
-SELECT * FROM PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 1 | 2 | 3 | test1
- 1 | 3 | | test2
-(2 rows)
-
-SELECT * FROM FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- 1 | 2 | 3 | 1
-(1 row)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- cascade update/delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
- FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
- ON DELETE CASCADE ON UPDATE CASCADE);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert a failed values
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- 1 | 2 | 3 | 1
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
-(5 rows)
-
--- Try to update something that will cascade
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
--- Try to update something that should not cascade
-UPDATE PKTABLE set ptest1=1 WHERE ptest2=3;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 2 | 4 | 5 | test4
- 1 | 5 | 3 | test1
- 1 | 3 | 3 | test2
- 1 | 3 | 4 | test3
-(4 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
- 1 | 5 | 3 | 1
-(5 rows)
-
--- Try to delete something that should cascade
-DELETE FROM PKTABLE where ptest1=1 and ptest2=5 and ptest3=3;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 2 | 4 | 5 | test4
- 1 | 3 | 3 | test2
- 1 | 3 | 4 | test3
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
-(4 rows)
-
--- Try to delete something that should not have a cascade
-DELETE FROM PKTABLE where ptest1=2;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 1 | 3 | 3 | test2
- 1 | 3 | 4 | test3
-(2 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
-(4 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- set null update / set default delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
- FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
- ON DELETE SET DEFAULT ON UPDATE SET NULL);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert a failed values
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- 1 | 2 | 3 | 1
- 2 | 3 | 4 | 1
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
-(6 rows)
-
--- Try to update something that will set null
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
--- Try to update something that should not set null
-UPDATE PKTABLE set ptest2=2 WHERE ptest2=3 and ptest1=1;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 2 | 3 | 4 | test3
- 2 | 4 | 5 | test4
- 1 | 5 | 3 | test1
- 1 | 2 | 3 | test2
-(4 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- 2 | 3 | 4 | 1
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
- | | | 1
-(6 rows)
-
--- Try to delete something that should set default
-DELETE FROM PKTABLE where ptest1=2 and ptest2=3 and ptest3=4;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 2 | 4 | 5 | test4
- 1 | 5 | 3 | test1
- 1 | 2 | 3 | test2
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
- | | | 1
- 0 | | | 1
-(6 rows)
-
--- Try to delete something that should not set default
-DELETE FROM PKTABLE where ptest2=5;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 2 | 4 | 5 | test4
- 1 | 2 | 3 | test2
-(2 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
- | | | 1
- 0 | | | 1
-(6 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- set default update / set null delete
-CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int DEFAULT -1, ftest3 int DEFAULT -2, ftest4 int, CONSTRAINT constrname3
- FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
- ON DELETE SET NULL ON UPDATE SET DEFAULT);
--- Insert Primary Key values
-INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
-INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
-INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3');
-INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4');
-INSERT INTO PKTABLE VALUES (2, -1, 5, 'test5');
--- Insert Foreign Key values
-INSERT INTO FKTABLE VALUES (1, 2, 3, 1);
-INSERT INTO FKTABLE VALUES (2, 3, 4, 1);
-INSERT INTO FKTABLE VALUES (2, 4, 5, 1);
-INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2);
-INSERT INTO FKTABLE VALUES (2, NULL, 3, 3);
-INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4);
-INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5);
--- Insert a failed values
-INSERT INTO FKTABLE VALUES (1, 2, 7, 6);
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable".
--- Show FKTABLE
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- 1 | 2 | 3 | 1
- 2 | 3 | 4 | 1
- 2 | 4 | 5 | 1
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
-(7 rows)
-
--- Try to update something that will fail
-UPDATE PKTABLE set ptest2=5 where ptest2=2;
-ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3"
-DETAIL: Key (ftest1, ftest2, ftest3)=(0, -1, -2) is not present in table "pktable".
--- Try to update something that will set default
-UPDATE PKTABLE set ptest1=0, ptest2=-1, ptest3=-2 where ptest2=2;
-UPDATE PKTABLE set ptest2=10 where ptest2=4;
--- Try to update something that should not set default
-UPDATE PKTABLE set ptest2=2 WHERE ptest2=3 and ptest1=1;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 2 | 3 | 4 | test3
- 2 | -1 | 5 | test5
- 0 | -1 | -2 | test1
- 2 | 10 | 5 | test4
- 1 | 2 | 3 | test2
-(5 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- 2 | 3 | 4 | 1
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
- 0 | -1 | -2 | 1
- 0 | -1 | -2 | 1
-(7 rows)
-
--- Try to delete something that should set null
-DELETE FROM PKTABLE where ptest1=2 and ptest2=3 and ptest3=4;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 2 | -1 | 5 | test5
- 0 | -1 | -2 | test1
- 2 | 10 | 5 | test4
- 1 | 2 | 3 | test2
-(4 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
- 0 | -1 | -2 | 1
- 0 | -1 | -2 | 1
- | | | 1
-(7 rows)
-
--- Try to delete something that should not set null
-DELETE FROM PKTABLE where ptest2=-1 and ptest3=5;
--- Show PKTABLE and FKTABLE
-SELECT * from PKTABLE;
- ptest1 | ptest2 | ptest3 | ptest4
---------+--------+--------+--------
- 0 | -1 | -2 | test1
- 2 | 10 | 5 | test4
- 1 | 2 | 3 | test2
-(3 rows)
-
-SELECT * from FKTABLE;
- ftest1 | ftest2 | ftest3 | ftest4
---------+--------+--------+--------
- | 2 | 3 | 2
- 2 | | 3 | 3
- | 2 | 7 | 4
- | 3 | 4 | 5
- 0 | -1 | -2 | 1
- 0 | -1 | -2 | 1
- | | | 1
-(7 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Test for ON DELETE SET NULL/DEFAULT (column_list);
-CREATE TABLE PKTABLE (tid int, id int, PRIMARY KEY (tid, id));
-CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, id) REFERENCES PKTABLE ON DELETE SET NULL (bar));
-ERROR: column "bar" referenced in foreign key constraint does not exist
-CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, id) REFERENCES PKTABLE ON DELETE SET NULL (foo));
-ERROR: column "foo" referenced in ON DELETE SET action must be part of foreign key
-CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, foo) REFERENCES PKTABLE ON UPDATE SET NULL (foo));
-ERROR: a column list with SET NULL is only supported for ON DELETE actions
-LINE 1: ...oo int, FOREIGN KEY (tid, foo) REFERENCES PKTABLE ON UPDATE ...
- ^
-CREATE TABLE FKTABLE (
- tid int, id int,
- fk_id_del_set_null int,
- fk_id_del_set_default int DEFAULT 0,
- FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES PKTABLE ON DELETE SET NULL (fk_id_del_set_null),
- FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES PKTABLE ON DELETE SET DEFAULT (fk_id_del_set_default)
-);
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
- pg_get_constraintdef
---------------------------------------------------------------------------------------------------------------------
- FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
- FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
-(2 rows)
-
-INSERT INTO PKTABLE VALUES (1, 0), (1, 1), (1, 2);
-INSERT INTO FKTABLE VALUES
- (1, 1, 1, NULL),
- (1, 2, NULL, 2);
-DELETE FROM PKTABLE WHERE id = 1 OR id = 2;
-SELECT * FROM FKTABLE ORDER BY id;
- tid | id | fk_id_del_set_null | fk_id_del_set_default
------+----+--------------------+-----------------------
- 1 | 1 | |
- 1 | 2 | | 0
-(2 rows)
-
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Test some invalid FK definitions
-CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY, someoid oid);
-CREATE TABLE FKTABLE_FAIL1 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest2) REFERENCES PKTABLE);
-ERROR: column "ftest2" referenced in foreign key constraint does not exist
-CREATE TABLE FKTABLE_FAIL2 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(ptest2));
-ERROR: column "ptest2" referenced in foreign key constraint does not exist
-CREATE TABLE FKTABLE_FAIL3 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (tableoid) REFERENCES PKTABLE(someoid));
-ERROR: system columns cannot be used in foreign keys
-CREATE TABLE FKTABLE_FAIL4 ( ftest1 oid, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(tableoid));
-ERROR: system columns cannot be used in foreign keys
-DROP TABLE PKTABLE;
--- Test for referencing column number smaller than referenced constraint
-CREATE TABLE PKTABLE (ptest1 int, ptest2 int, UNIQUE(ptest1, ptest2));
-CREATE TABLE FKTABLE_FAIL1 (ftest1 int REFERENCES pktable(ptest1));
-ERROR: there is no unique constraint matching given keys for referenced table "pktable"
-DROP TABLE FKTABLE_FAIL1;
-ERROR: table "fktable_fail1" does not exist
-DROP TABLE PKTABLE;
---
--- Tests for mismatched types
---
--- Basic one column, two table setup
-CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY);
-INSERT INTO PKTABLE VALUES(42);
--- This next should fail, because int=inet does not exist
-CREATE TABLE FKTABLE (ftest1 inet REFERENCES pktable);
-ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- This should also fail for the same reason, but here we
--- give the column name
-CREATE TABLE FKTABLE (ftest1 inet REFERENCES pktable(ptest1));
-ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- This should succeed, even though they are different types,
--- because int=int8 exists and is a member of the integer opfamily
-CREATE TABLE FKTABLE (ftest1 int8 REFERENCES pktable);
--- Check it actually works
-INSERT INTO FKTABLE VALUES(42); -- should succeed
-INSERT INTO FKTABLE VALUES(43); -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed
-UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-DROP TABLE FKTABLE;
--- This should fail, because we'd have to cast numeric to int which is
--- not an implicit coercion (or use numeric=numeric, but that's not part
--- of the integer opfamily)
-CREATE TABLE FKTABLE (ftest1 numeric REFERENCES pktable);
-ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: numeric and integer.
-DROP TABLE PKTABLE;
--- On the other hand, this should work because int implicitly promotes to
--- numeric, and we allow promotion on the FK side
-CREATE TABLE PKTABLE (ptest1 numeric PRIMARY KEY);
-INSERT INTO PKTABLE VALUES(42);
-CREATE TABLE FKTABLE (ftest1 int REFERENCES pktable);
--- Check it actually works
-INSERT INTO FKTABLE VALUES(42); -- should succeed
-INSERT INTO FKTABLE VALUES(43); -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed
-UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Two columns, two tables
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, PRIMARY KEY(ptest1, ptest2));
--- This should fail, because we just chose really odd types
-CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable);
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: cidr and integer.
--- Again, so should this...
-CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: cidr and integer.
--- This fails because we mixed up the column ordering
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable);
-ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest2" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- As does this...
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest1, ptest2));
-ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest2" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- And again..
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest2, ptest1));
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest2" of the referenced table are of incompatible types: integer and inet.
--- This works...
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest2, ptest1));
-DROP TABLE FKTABLE;
--- As does this
-CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Two columns, same table
--- Make sure this still works...
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
-ptest4) REFERENCES pktable(ptest1, ptest2));
-DROP TABLE PKTABLE;
--- And this,
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
-ptest4) REFERENCES pktable);
-DROP TABLE PKTABLE;
--- This shouldn't (mixed up columns)
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
-ptest4) REFERENCES pktable(ptest2, ptest1));
-ERROR: foreign key constraint "pktable_ptest3_ptest4_fkey" cannot be implemented
-DETAIL: Key columns "ptest3" of the referencing table and "ptest2" of the referenced table are of incompatible types: integer and inet.
--- Nor should this... (same reason, we have 4,3 referencing 1,2 which mismatches types
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
-ptest3) REFERENCES pktable(ptest1, ptest2));
-ERROR: foreign key constraint "pktable_ptest4_ptest3_fkey" cannot be implemented
-DETAIL: Key columns "ptest4" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- Not this one either... Same as the last one except we didn't defined the columns being referenced.
-CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
-ptest3) REFERENCES pktable);
-ERROR: foreign key constraint "pktable_ptest4_ptest3_fkey" cannot be implemented
-DETAIL: Key columns "ptest4" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
---
--- Now some cases with inheritance
--- Basic 2 table case: 1 column of matching types.
-create table pktable_base (base1 int not null);
-create table pktable (ptest1 int, primary key(base1), unique(base1, ptest1)) inherits (pktable_base);
-create table fktable (ftest1 int references pktable(base1));
--- now some ins, upd, del
-insert into pktable(base1) values (1);
-insert into pktable(base1) values (2);
--- let's insert a non-existent fktable value
-insert into fktable(ftest1) values (3);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(3) is not present in table "pktable".
--- let's make a valid row for that
-insert into pktable(base1) values (3);
-insert into fktable(ftest1) values (3);
--- let's try removing a row that should fail from pktable
-delete from pktable where base1>2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL: Key (base1)=(3) is still referenced from table "fktable".
--- okay, let's try updating all of the base1 values to *4
--- which should fail.
-update pktable set base1=base1*4;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable"
-DETAIL: Key (base1)=(3) is still referenced from table "fktable".
--- okay, let's try an update that should work.
-update pktable set base1=base1*4 where base1<3;
--- and a delete that should work
-delete from pktable where base1>3;
--- cleanup
-drop table fktable;
-delete from pktable;
--- Now 2 columns 2 tables, matching types
-create table fktable (ftest1 int, ftest2 int, foreign key(ftest1, ftest2) references pktable(base1, ptest1));
--- now some ins, upd, del
-insert into pktable(base1, ptest1) values (1, 1);
-insert into pktable(base1, ptest1) values (2, 2);
--- let's insert a non-existent fktable value
-insert into fktable(ftest1, ftest2) values (3, 1);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey"
-DETAIL: Key (ftest1, ftest2)=(3, 1) is not present in table "pktable".
--- let's make a valid row for that
-insert into pktable(base1,ptest1) values (3, 1);
-insert into fktable(ftest1, ftest2) values (3, 1);
--- let's try removing a row that should fail from pktable
-delete from pktable where base1>2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" on table "fktable"
-DETAIL: Key (base1, ptest1)=(3, 1) is still referenced from table "fktable".
--- okay, let's try updating all of the base1 values to *4
--- which should fail.
-update pktable set base1=base1*4;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" on table "fktable"
-DETAIL: Key (base1, ptest1)=(3, 1) is still referenced from table "fktable".
--- okay, let's try an update that should work.
-update pktable set base1=base1*4 where base1<3;
--- and a delete that should work
-delete from pktable where base1>3;
--- cleanup
-drop table fktable;
-drop table pktable;
-drop table pktable_base;
--- Now we'll do one all in 1 table with 2 columns of matching types
-create table pktable_base(base1 int not null, base2 int);
-create table pktable(ptest1 int, ptest2 int, primary key(base1, ptest1), foreign key(base2, ptest2) references
- pktable(base1, ptest1)) inherits (pktable_base);
-insert into pktable (base1, ptest1, base2, ptest2) values (1, 1, 1, 1);
-insert into pktable (base1, ptest1, base2, ptest2) values (2, 1, 1, 1);
-insert into pktable (base1, ptest1, base2, ptest2) values (2, 2, 2, 1);
-insert into pktable (base1, ptest1, base2, ptest2) values (1, 3, 2, 2);
--- fails (3,2) isn't in base1, ptest1
-insert into pktable (base1, ptest1, base2, ptest2) values (2, 3, 3, 2);
-ERROR: insert or update on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey"
-DETAIL: Key (base2, ptest2)=(3, 2) is not present in table "pktable".
--- fails (2,2) is being referenced
-delete from pktable where base1=2;
-ERROR: update or delete on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey" on table "pktable"
-DETAIL: Key (base1, ptest1)=(2, 2) is still referenced from table "pktable".
--- fails (1,1) is being referenced (twice)
-update pktable set base1=3 where base1=1;
-ERROR: update or delete on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey" on table "pktable"
-DETAIL: Key (base1, ptest1)=(1, 1) is still referenced from table "pktable".
--- this sequence of two deletes will work, since after the first there will be no (2,*) references
-delete from pktable where base2=2;
-delete from pktable where base1=2;
-drop table pktable;
-drop table pktable_base;
--- 2 columns (2 tables), mismatched types
-create table pktable_base(base1 int not null);
-create table pktable(ptest1 inet, primary key(base1, ptest1)) inherits (pktable_base);
--- just generally bad types (with and without column references on the referenced table)
-create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable);
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "base1" of the referenced table are of incompatible types: cidr and integer.
-create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable(base1, ptest1));
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "base1" of the referenced table are of incompatible types: cidr and integer.
--- let's mix up which columns reference which
-create table fktable(ftest1 int, ftest2 inet, foreign key(ftest2, ftest1) references pktable);
-ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest2" of the referencing table and "base1" of the referenced table are of incompatible types: inet and integer.
-create table fktable(ftest1 int, ftest2 inet, foreign key(ftest2, ftest1) references pktable(base1, ptest1));
-ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest2" of the referencing table and "base1" of the referenced table are of incompatible types: inet and integer.
-create table fktable(ftest1 int, ftest2 inet, foreign key(ftest1, ftest2) references pktable(ptest1, base1));
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: integer and inet.
-drop table pktable;
-drop table pktable_base;
--- 2 columns (1 table), mismatched types
-create table pktable_base(base1 int not null, base2 int);
-create table pktable(ptest1 inet, ptest2 inet[], primary key(base1, ptest1), foreign key(base2, ptest2) references
- pktable(base1, ptest1)) inherits (pktable_base);
-ERROR: foreign key constraint "pktable_base2_ptest2_fkey" cannot be implemented
-DETAIL: Key columns "ptest2" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet[] and inet.
-create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(base2, ptest2) references
- pktable(ptest1, base1)) inherits (pktable_base);
-ERROR: foreign key constraint "pktable_base2_ptest2_fkey" cannot be implemented
-DETAIL: Key columns "base2" of the referencing table and "ptest1" of the referenced table are of incompatible types: integer and inet.
-create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references
- pktable(base1, ptest1)) inherits (pktable_base);
-ERROR: foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented
-DETAIL: Key columns "ptest2" of the referencing table and "base1" of the referenced table are of incompatible types: inet and integer.
-create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references
- pktable(base1, ptest1)) inherits (pktable_base);
-ERROR: foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented
-DETAIL: Key columns "ptest2" of the referencing table and "base1" of the referenced table are of incompatible types: inet and integer.
-drop table pktable;
-ERROR: table "pktable" does not exist
-drop table pktable_base;
---
--- Deferrable constraints
---
--- deferrable, explicitly deferred
-CREATE TABLE pktable (
- id INT4 PRIMARY KEY,
- other INT4
-);
-CREATE TABLE fktable (
- id INT4 PRIMARY KEY,
- fk INT4 REFERENCES pktable DEFERRABLE
-);
--- default to immediate: should fail
-INSERT INTO fktable VALUES (5, 10);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(10) is not present in table "pktable".
--- explicitly defer the constraint
-BEGIN;
-SET CONSTRAINTS ALL DEFERRED;
-INSERT INTO fktable VALUES (10, 15);
-INSERT INTO pktable VALUES (15, 0); -- make the FK insert valid
-COMMIT;
-DROP TABLE fktable, pktable;
--- deferrable, initially deferred
-CREATE TABLE pktable (
- id INT4 PRIMARY KEY,
- other INT4
-);
-CREATE TABLE fktable (
- id INT4 PRIMARY KEY,
- fk INT4 REFERENCES pktable DEFERRABLE INITIALLY DEFERRED
-);
--- default to deferred, should succeed
-BEGIN;
-INSERT INTO fktable VALUES (100, 200);
-INSERT INTO pktable VALUES (200, 500); -- make the FK insert valid
-COMMIT;
--- default to deferred, explicitly make immediate
-BEGIN;
-SET CONSTRAINTS ALL IMMEDIATE;
--- should fail
-INSERT INTO fktable VALUES (500, 1000);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(1000) is not present in table "pktable".
-COMMIT;
-DROP TABLE fktable, pktable;
--- tricky behavior: according to SQL99, if a deferred constraint is set
--- to 'immediate' mode, it should be checked for validity *immediately*,
--- not when the current transaction commits (i.e. the mode change applies
--- retroactively)
-CREATE TABLE pktable (
- id INT4 PRIMARY KEY,
- other INT4
-);
-CREATE TABLE fktable (
- id INT4 PRIMARY KEY,
- fk INT4 REFERENCES pktable DEFERRABLE
-);
-BEGIN;
-SET CONSTRAINTS ALL DEFERRED;
--- should succeed, for now
-INSERT INTO fktable VALUES (1000, 2000);
--- should cause transaction abort, due to preceding error
-SET CONSTRAINTS ALL IMMEDIATE;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(2000) is not present in table "pktable".
-INSERT INTO pktable VALUES (2000, 3); -- too late
-ERROR: current transaction is aborted, commands ignored until end of transaction block
-COMMIT;
-DROP TABLE fktable, pktable;
--- deferrable, initially deferred
-CREATE TABLE pktable (
- id INT4 PRIMARY KEY,
- other INT4
-);
-CREATE TABLE fktable (
- id INT4 PRIMARY KEY,
- fk INT4 REFERENCES pktable DEFERRABLE INITIALLY DEFERRED
-);
-BEGIN;
--- no error here
-INSERT INTO fktable VALUES (100, 200);
--- error here on commit
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(200) is not present in table "pktable".
-DROP TABLE pktable, fktable;
--- test notice about expensive referential integrity checks,
--- where the index cannot be used because of type incompatibilities.
-CREATE TEMP TABLE pktable (
- id1 INT4 PRIMARY KEY,
- id2 VARCHAR(4) UNIQUE,
- id3 REAL UNIQUE,
- UNIQUE(id1, id2, id3)
-);
-CREATE TEMP TABLE fktable (
- x1 INT4 REFERENCES pktable(id1),
- x2 VARCHAR(4) REFERENCES pktable(id2),
- x3 REAL REFERENCES pktable(id3),
- x4 TEXT,
- x5 INT2
-);
--- check individual constraints with alter table.
--- should fail
--- varchar does not promote to real
-ALTER TABLE fktable ADD CONSTRAINT fk_2_3
-FOREIGN KEY (x2) REFERENCES pktable(id3);
-ERROR: foreign key constraint "fk_2_3" cannot be implemented
-DETAIL: Key columns "x2" of the referencing table and "id3" of the referenced table are of incompatible types: character varying and real.
--- nor to int4
-ALTER TABLE fktable ADD CONSTRAINT fk_2_1
-FOREIGN KEY (x2) REFERENCES pktable(id1);
-ERROR: foreign key constraint "fk_2_1" cannot be implemented
-DETAIL: Key columns "x2" of the referencing table and "id1" of the referenced table are of incompatible types: character varying and integer.
--- real does not promote to int4
-ALTER TABLE fktable ADD CONSTRAINT fk_3_1
-FOREIGN KEY (x3) REFERENCES pktable(id1);
-ERROR: foreign key constraint "fk_3_1" cannot be implemented
-DETAIL: Key columns "x3" of the referencing table and "id1" of the referenced table are of incompatible types: real and integer.
--- int4 does not promote to text
-ALTER TABLE fktable ADD CONSTRAINT fk_1_2
-FOREIGN KEY (x1) REFERENCES pktable(id2);
-ERROR: foreign key constraint "fk_1_2" cannot be implemented
-DETAIL: Key columns "x1" of the referencing table and "id2" of the referenced table are of incompatible types: integer and character varying.
--- should succeed
--- int4 promotes to real
-ALTER TABLE fktable ADD CONSTRAINT fk_1_3
-FOREIGN KEY (x1) REFERENCES pktable(id3);
--- text is compatible with varchar
-ALTER TABLE fktable ADD CONSTRAINT fk_4_2
-FOREIGN KEY (x4) REFERENCES pktable(id2);
--- int2 is part of integer opfamily as of 8.0
-ALTER TABLE fktable ADD CONSTRAINT fk_5_1
-FOREIGN KEY (x5) REFERENCES pktable(id1);
--- check multikey cases, especially out-of-order column lists
--- these should work
-ALTER TABLE fktable ADD CONSTRAINT fk_123_123
-FOREIGN KEY (x1,x2,x3) REFERENCES pktable(id1,id2,id3);
-ALTER TABLE fktable ADD CONSTRAINT fk_213_213
-FOREIGN KEY (x2,x1,x3) REFERENCES pktable(id2,id1,id3);
-ALTER TABLE fktable ADD CONSTRAINT fk_253_213
-FOREIGN KEY (x2,x5,x3) REFERENCES pktable(id2,id1,id3);
--- these should fail
-ALTER TABLE fktable ADD CONSTRAINT fk_123_231
-FOREIGN KEY (x1,x2,x3) REFERENCES pktable(id2,id3,id1);
-ERROR: foreign key constraint "fk_123_231" cannot be implemented
-DETAIL: Key columns "x1" of the referencing table and "id2" of the referenced table are of incompatible types: integer and character varying.
-ALTER TABLE fktable ADD CONSTRAINT fk_241_132
-FOREIGN KEY (x2,x4,x1) REFERENCES pktable(id1,id3,id2);
-ERROR: foreign key constraint "fk_241_132" cannot be implemented
-DETAIL: Key columns "x2" of the referencing table and "id1" of the referenced table are of incompatible types: character varying and integer.
-DROP TABLE pktable, fktable;
--- test a tricky case: we can elide firing the FK check trigger during
--- an UPDATE if the UPDATE did not change the foreign key
--- field. However, we can't do this if our transaction was the one that
--- created the updated row and the trigger is deferred, since our UPDATE
--- will have invalidated the original newly-inserted tuple, and therefore
--- cause the on-INSERT RI trigger not to be fired.
-CREATE TEMP TABLE pktable (
- id int primary key,
- other int
-);
-CREATE TEMP TABLE fktable (
- id int primary key,
- fk int references pktable deferrable initially deferred
-);
-INSERT INTO pktable VALUES (5, 10);
-BEGIN;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
--- check same case when insert is in a different subtransaction than update
-BEGIN;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
--- UPDATE will be in a subxact
-SAVEPOINT savept1;
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
-BEGIN;
--- INSERT will be in a subxact
-SAVEPOINT savept1;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
-RELEASE SAVEPOINT savept1;
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
-BEGIN;
--- doesn't match PK, but no error yet
-INSERT INTO fktable VALUES (0, 20);
--- UPDATE will be in a subxact
-SAVEPOINT savept1;
--- don't change FK
-UPDATE fktable SET id = id + 1;
--- Roll back the UPDATE
-ROLLBACK TO savept1;
--- should catch error from initial INSERT
-COMMIT;
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
---
--- check ALTER CONSTRAINT
---
-INSERT INTO fktable VALUES (1, 5);
-ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey DEFERRABLE INITIALLY IMMEDIATE;
-BEGIN;
--- doesn't match FK, should throw error now
-UPDATE pktable SET id = 10 WHERE id = 5;
-ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_fk_fkey" on table "fktable"
-DETAIL: Key (id)=(5) is still referenced from table "fktable".
-COMMIT;
-BEGIN;
--- doesn't match PK, should throw error now
-INSERT INTO fktable VALUES (0, 20);
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey"
-DETAIL: Key (fk)=(20) is not present in table "pktable".
-COMMIT;
--- try additional syntax
-ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE;
--- illegal option
-ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY DEFERRED;
-ERROR: constraint declared INITIALLY DEFERRED must be DEFERRABLE
-LINE 1: ...e ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY ...
- ^
--- test order of firing of FK triggers when several RI-induced changes need to
--- be made to the same row. This was broken by subtransaction-related
--- changes in 8.0.
-CREATE TEMP TABLE users (
- id INT PRIMARY KEY,
- name VARCHAR NOT NULL
-);
-INSERT INTO users VALUES (1, 'Jozko');
-INSERT INTO users VALUES (2, 'Ferko');
-INSERT INTO users VALUES (3, 'Samko');
-CREATE TEMP TABLE tasks (
- id INT PRIMARY KEY,
- owner INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL,
- worker INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL,
- checked_by INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL
-);
-INSERT INTO tasks VALUES (1,1,NULL,NULL);
-INSERT INTO tasks VALUES (2,2,2,NULL);
-INSERT INTO tasks VALUES (3,3,3,3);
-SELECT * FROM tasks;
- id | owner | worker | checked_by
-----+-------+--------+------------
- 1 | 1 | |
- 2 | 2 | 2 |
- 3 | 3 | 3 | 3
-(3 rows)
-
-UPDATE users SET id = 4 WHERE id = 3;
-SELECT * FROM tasks;
- id | owner | worker | checked_by
-----+-------+--------+------------
- 1 | 1 | |
- 2 | 2 | 2 |
- 3 | 4 | 4 | 4
-(3 rows)
-
-DELETE FROM users WHERE id = 4;
-SELECT * FROM tasks;
- id | owner | worker | checked_by
-----+-------+--------+------------
- 1 | 1 | |
- 2 | 2 | 2 |
- 3 | | |
-(3 rows)
-
--- could fail with only 2 changes to make, if row was already updated
-BEGIN;
-UPDATE tasks set id=id WHERE id=2;
-SELECT * FROM tasks;
- id | owner | worker | checked_by
-----+-------+--------+------------
- 1 | 1 | |
- 3 | | |
- 2 | 2 | 2 |
-(3 rows)
-
-DELETE FROM users WHERE id = 2;
-SELECT * FROM tasks;
- id | owner | worker | checked_by
-----+-------+--------+------------
- 1 | 1 | |
- 3 | | |
- 2 | | |
-(3 rows)
-
-COMMIT;
---
--- Test self-referential FK with CASCADE (bug #6268)
---
-create temp table selfref (
- a int primary key,
- b int,
- foreign key (b) references selfref (a)
- on update cascade on delete cascade
-);
-insert into selfref (a, b)
-values
- (0, 0),
- (1, 1);
-begin;
- update selfref set a = 123 where a = 0;
- select a, b from selfref;
- a | b
------+-----
- 1 | 1
- 123 | 123
-(2 rows)
-
- update selfref set a = 456 where a = 123;
- select a, b from selfref;
- a | b
------+-----
- 1 | 1
- 456 | 456
-(2 rows)
-
-commit;
---
--- Test that SET DEFAULT actions recognize updates to default values
---
-create temp table defp (f1 int primary key);
-create temp table defc (f1 int default 0
- references defp on delete set default);
-insert into defp values (0), (1), (2);
-insert into defc values (2);
-select * from defc;
- f1
-----
- 2
-(1 row)
-
-delete from defp where f1 = 2;
-select * from defc;
- f1
-----
- 0
-(1 row)
-
-delete from defp where f1 = 0; -- fail
-ERROR: update or delete on table "defp" violates foreign key constraint "defc_f1_fkey" on table "defc"
-DETAIL: Key (f1)=(0) is still referenced from table "defc".
-alter table defc alter column f1 set default 1;
-delete from defp where f1 = 0;
-select * from defc;
- f1
-----
- 1
-(1 row)
-
-delete from defp where f1 = 1; -- fail
-ERROR: update or delete on table "defp" violates foreign key constraint "defc_f1_fkey" on table "defc"
-DETAIL: Key (f1)=(1) is still referenced from table "defc".
---
--- Test the difference between NO ACTION and RESTRICT
---
-create temp table pp (f1 int primary key);
-create temp table cc (f1 int references pp on update no action on delete no action);
-insert into pp values(12);
-insert into pp values(11);
-update pp set f1=f1+1;
-insert into cc values(13);
-update pp set f1=f1+1;
-update pp set f1=f1+1; -- fail
-ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
-DETAIL: Key (f1)=(13) is still referenced from table "cc".
-delete from pp where f1 = 13; -- fail
-ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
-DETAIL: Key (f1)=(13) is still referenced from table "cc".
-drop table pp, cc;
-create temp table pp (f1 int primary key);
-create temp table cc (f1 int references pp on update restrict on delete restrict);
-insert into pp values(12);
-insert into pp values(11);
-update pp set f1=f1+1;
-insert into cc values(13);
-update pp set f1=f1+1; -- fail
-ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
-DETAIL: Key (f1)=(13) is still referenced from table "cc".
-delete from pp where f1 = 13; -- fail
-ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc"
-DETAIL: Key (f1)=(13) is still referenced from table "cc".
-drop table pp, cc;
---
--- Test interaction of foreign-key optimization with rules (bug #14219)
---
-create temp table t1 (a integer primary key, b text);
-create temp table t2 (a integer primary key, b integer references t1);
-create rule r1 as on delete to t1 do delete from t2 where t2.b = old.a;
-explain (costs off) delete from t1 where a = 1;
- QUERY PLAN
---------------------------------------------
- Delete on t2
- -> Nested Loop
- -> Index Scan using t1_pkey on t1
- Index Cond: (a = 1)
- -> Seq Scan on t2
- Filter: (b = 1)
-
- Delete on t1
- -> Index Scan using t1_pkey on t1
- Index Cond: (a = 1)
-(10 rows)
-
-delete from t1 where a = 1;
--- Test a primary key with attributes located in later attnum positions
--- compared to the fk attributes.
-create table pktable2 (a int, b int, c int, d int, e int, primary key (d, e));
-create table fktable2 (d int, e int, foreign key (d, e) references pktable2);
-insert into pktable2 values (1, 2, 3, 4, 5);
-insert into fktable2 values (4, 5);
-delete from pktable2;
-ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_e_fkey" on table "fktable2"
-DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2".
-update pktable2 set d = 5;
-ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_e_fkey" on table "fktable2"
-DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2".
-drop table pktable2, fktable2;
--- Test truncation of long foreign key names
-create table pktable1 (a int primary key);
-create table pktable2 (a int, b int, primary key (a, b));
-create table fktable2 (
- a int,
- b int,
- very_very_long_column_name_to_exceed_63_characters int,
- foreign key (very_very_long_column_name_to_exceed_63_characters) references pktable1,
- foreign key (a, very_very_long_column_name_to_exceed_63_characters) references pktable2,
- foreign key (a, very_very_long_column_name_to_exceed_63_characters) references pktable2
-);
-select conname from pg_constraint where conrelid = 'fktable2'::regclass order by conname;
- conname
------------------------------------------------------------------
- fktable2_a_very_very_long_column_name_to_exceed_63_charac_fkey1
- fktable2_a_very_very_long_column_name_to_exceed_63_charact_fkey
- fktable2_very_very_long_column_name_to_exceed_63_character_fkey
-(3 rows)
-
-drop table pktable1, pktable2, fktable2;
---
--- Test deferred FK check on a tuple deleted by a rolled-back subtransaction
---
-create table pktable2(f1 int primary key);
-create table fktable2(f1 int references pktable2 deferrable initially deferred);
-insert into pktable2 values(1);
-begin;
-insert into fktable2 values(1);
-savepoint x;
-delete from fktable2;
-rollback to x;
-commit;
-begin;
-insert into fktable2 values(2);
-savepoint x;
-delete from fktable2;
-rollback to x;
-commit; -- fail
-ERROR: insert or update on table "fktable2" violates foreign key constraint "fktable2_f1_fkey"
-DETAIL: Key (f1)=(2) is not present in table "pktable2".
---
--- Test that we prevent dropping FK constraint with pending trigger events
---
-begin;
-insert into fktable2 values(2);
-alter table fktable2 drop constraint fktable2_f1_fkey;
-ERROR: cannot ALTER TABLE "fktable2" because it has pending trigger events
-commit;
-begin;
-delete from pktable2 where f1 = 1;
-alter table fktable2 drop constraint fktable2_f1_fkey;
-ERROR: cannot ALTER TABLE "pktable2" because it has pending trigger events
-commit;
-drop table pktable2, fktable2;
---
--- Test keys that "look" different but compare as equal
---
-create table pktable2 (a float8, b float8, primary key (a, b));
-create table fktable2 (x float8, y float8, foreign key (x, y) references pktable2 (a, b) on update cascade);
-insert into pktable2 values ('-0', '-0');
-insert into fktable2 values ('-0', '-0');
-select * from pktable2;
- a | b
-----+----
- -0 | -0
-(1 row)
-
-select * from fktable2;
- x | y
-----+----
- -0 | -0
-(1 row)
-
-update pktable2 set a = '0' where a = '-0';
-select * from pktable2;
- a | b
----+----
- 0 | -0
-(1 row)
-
--- should have updated fktable2.x
-select * from fktable2;
- x | y
----+----
- 0 | -0
-(1 row)
-
-drop table pktable2, fktable2;
---
--- Foreign keys and partitioned tables
---
--- Creation of a partitioned hierarchy with irregular definitions
-CREATE TABLE fk_notpartitioned_pk (fdrop1 int, a int, fdrop2 int, b int,
- PRIMARY KEY (a, b));
-ALTER TABLE fk_notpartitioned_pk DROP COLUMN fdrop1, DROP COLUMN fdrop2;
-CREATE TABLE fk_partitioned_fk (b int, fdrop1 int, a int) PARTITION BY RANGE (a, b);
-ALTER TABLE fk_partitioned_fk DROP COLUMN fdrop1;
-CREATE TABLE fk_partitioned_fk_1 (fdrop1 int, fdrop2 int, a int, fdrop3 int, b int);
-ALTER TABLE fk_partitioned_fk_1 DROP COLUMN fdrop1, DROP COLUMN fdrop2, DROP COLUMN fdrop3;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_1 FOR VALUES FROM (0,0) TO (1000,1000);
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk;
-CREATE TABLE fk_partitioned_fk_2 (b int, fdrop1 int, fdrop2 int, a int);
-ALTER TABLE fk_partitioned_fk_2 DROP COLUMN fdrop1, DROP COLUMN fdrop2;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES FROM (1000,1000) TO (2000,2000);
-CREATE TABLE fk_partitioned_fk_3 (fdrop1 int, fdrop2 int, fdrop3 int, fdrop4 int, b int, a int)
- PARTITION BY HASH (a);
-ALTER TABLE fk_partitioned_fk_3 DROP COLUMN fdrop1, DROP COLUMN fdrop2,
- DROP COLUMN fdrop3, DROP COLUMN fdrop4;
-CREATE TABLE fk_partitioned_fk_3_0 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 0);
-CREATE TABLE fk_partitioned_fk_3_1 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 1);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3
- FOR VALUES FROM (2000,2000) TO (3000,3000);
--- Creating a foreign key with ONLY on a partitioned table referencing
--- a non-partitioned table fails.
-ALTER TABLE ONLY fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk;
-ERROR: cannot use ONLY for foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk"
--- Adding a NOT VALID foreign key on a partitioned table referencing
--- a non-partitioned table fails.
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk NOT VALID;
-ERROR: cannot add NOT VALID foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk"
-DETAIL: This feature is not yet supported on partitioned tables.
--- these inserts, targeting both the partition directly as well as the
--- partitioned table, should all fail
-INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501);
-ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_1 (a,b) VALUES (500, 501);
-ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501);
-ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_2 (a,b) VALUES (1500, 1501);
-ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502);
-ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2500, 2502);
-ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503);
-ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2501, 2503);
-ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk".
--- but if we insert the values that make them valid, then they work
-INSERT INTO fk_notpartitioned_pk VALUES (500, 501), (1500, 1501),
- (2500, 2502), (2501, 2503);
-INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501);
-INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501);
-INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502);
-INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503);
--- this update fails because there is no referenced row
-UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501;
-ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk".
--- but we can fix it thusly:
-INSERT INTO fk_notpartitioned_pk (a,b) VALUES (2502, 2503);
-UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501;
--- these updates would leave lingering rows in the referencing table; disallow
-UPDATE fk_notpartitioned_pk SET b = 502 WHERE a = 500;
-ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk"
-DETAIL: Key (a, b)=(500, 501) is still referenced from table "fk_partitioned_fk".
-UPDATE fk_notpartitioned_pk SET b = 1502 WHERE a = 1500;
-ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk"
-DETAIL: Key (a, b)=(1500, 1501) is still referenced from table "fk_partitioned_fk".
-UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500;
-ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk"
-DETAIL: Key (a, b)=(2500, 2502) is still referenced from table "fk_partitioned_fk".
--- check psql behavior
-\d fk_notpartitioned_pk
- Table "public.fk_notpartitioned_pk"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | not null |
- b | integer | | not null |
-Indexes:
- "fk_notpartitioned_pk_pkey" PRIMARY KEY, btree (a, b)
-Referenced by:
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b)
-
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
--- done.
-DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk;
--- Altering a type referenced by a foreign key needs to drop/recreate the FK.
--- Ensure that works.
-CREATE TABLE fk_notpartitioned_pk (a INT, PRIMARY KEY(a), CHECK (a > 0));
-CREATE TABLE fk_partitioned_fk (a INT REFERENCES fk_notpartitioned_pk(a) PRIMARY KEY) PARTITION BY RANGE(a);
-CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES FROM (MINVALUE) TO (MAXVALUE);
-INSERT INTO fk_notpartitioned_pk VALUES (1);
-INSERT INTO fk_partitioned_fk VALUES (1);
-ALTER TABLE fk_notpartitioned_pk ALTER COLUMN a TYPE bigint;
-DELETE FROM fk_notpartitioned_pk WHERE a = 1;
-ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_fkey" on table "fk_partitioned_fk"
-DETAIL: Key (a)=(1) is still referenced from table "fk_partitioned_fk".
-DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk;
--- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE
--- actions
-CREATE TABLE fk_notpartitioned_pk (a int, b int, primary key (a, b));
-CREATE TABLE fk_partitioned_fk (a int default 2501, b int default 142857) PARTITION BY LIST (a);
-CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES IN (NULL,500,501,502);
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk MATCH SIMPLE
- ON DELETE SET NULL ON UPDATE SET NULL;
-CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502);
-CREATE TABLE fk_partitioned_fk_3 (a int, b int);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 FOR VALUES IN (2500,2501,2502,2503);
--- this insert fails
-INSERT INTO fk_partitioned_fk (a, b) VALUES (2502, 2503);
-ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
-ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk".
--- but since the FK is MATCH SIMPLE, this one doesn't
-INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, NULL);
--- now create the referenced row ...
-INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503);
---- and now the same insert work
-INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
--- this always works
-INSERT INTO fk_partitioned_fk (a,b) VALUES (NULL, NULL);
--- MATCH FULL
-INSERT INTO fk_notpartitioned_pk VALUES (1, 2);
-CREATE TABLE fk_partitioned_fk_full (x int, y int) PARTITION BY RANGE (x);
-CREATE TABLE fk_partitioned_fk_full_1 PARTITION OF fk_partitioned_fk_full DEFAULT;
-INSERT INTO fk_partitioned_fk_full VALUES (1, NULL);
-ALTER TABLE fk_partitioned_fk_full ADD FOREIGN KEY (x, y) REFERENCES fk_notpartitioned_pk MATCH FULL; -- fails
-ERROR: insert or update on table "fk_partitioned_fk_full_1" violates foreign key constraint "fk_partitioned_fk_full_x_y_fkey"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-TRUNCATE fk_partitioned_fk_full;
-ALTER TABLE fk_partitioned_fk_full ADD FOREIGN KEY (x, y) REFERENCES fk_notpartitioned_pk MATCH FULL;
-INSERT INTO fk_partitioned_fk_full VALUES (1, NULL); -- fails
-ERROR: insert or update on table "fk_partitioned_fk_full_1" violates foreign key constraint "fk_partitioned_fk_full_x_y_fkey"
-DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
-DROP TABLE fk_partitioned_fk_full;
--- ON UPDATE SET NULL
-SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a;
- tableoid | a | b
----------------------+------+---
- fk_partitioned_fk_3 | 2502 |
- fk_partitioned_fk_1 | |
-(2 rows)
-
-UPDATE fk_notpartitioned_pk SET a = a + 1 WHERE a = 2502;
-SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a;
- tableoid | a | b
----------------------+------+---
- fk_partitioned_fk_3 | 2502 |
- fk_partitioned_fk_1 | |
- fk_partitioned_fk_1 | |
-(3 rows)
-
--- ON DELETE SET NULL
-INSERT INTO fk_partitioned_fk VALUES (2503, 2503);
-SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL;
- count
--------
- 2
-(1 row)
-
-DELETE FROM fk_notpartitioned_pk;
-SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL;
- count
--------
- 3
-(1 row)
-
--- ON UPDATE/DELETE SET DEFAULT
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk
- ON DELETE SET DEFAULT ON UPDATE SET DEFAULT;
-INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503);
-INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
--- this fails, because the defaults for the referencing table are not present
--- in the referenced table:
-UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502;
-ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2501, 142857) is not present in table "fk_notpartitioned_pk".
--- but inserting the row we can make it work:
-INSERT INTO fk_notpartitioned_pk VALUES (2501, 142857);
-UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502;
-SELECT * FROM fk_partitioned_fk WHERE b = 142857;
- a | b
-------+--------
- 2501 | 142857
-(1 row)
-
--- ON DELETE SET NULL column_list
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk
- ON DELETE SET NULL (a);
-BEGIN;
-DELETE FROM fk_notpartitioned_pk WHERE b = 142857;
-SELECT * FROM fk_partitioned_fk WHERE a IS NOT NULL OR b IS NOT NULL ORDER BY a NULLS LAST;
- a | b
-------+--------
- 2502 |
- | 142857
-(2 rows)
-
-ROLLBACK;
--- ON DELETE SET DEFAULT column_list
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk
- ON DELETE SET DEFAULT (a);
-BEGIN;
-DELETE FROM fk_partitioned_fk;
-DELETE FROM fk_notpartitioned_pk;
-INSERT INTO fk_notpartitioned_pk VALUES (500, 100000), (2501, 100000);
-INSERT INTO fk_partitioned_fk VALUES (500, 100000);
-DELETE FROM fk_notpartitioned_pk WHERE a = 500;
-SELECT * FROM fk_partitioned_fk ORDER BY a;
- a | b
-------+--------
- 2501 | 100000
-(1 row)
-
-ROLLBACK;
--- ON UPDATE/DELETE CASCADE
-ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey;
-ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
- REFERENCES fk_notpartitioned_pk
- ON DELETE CASCADE ON UPDATE CASCADE;
-UPDATE fk_notpartitioned_pk SET a = 2502 WHERE a = 2501;
-SELECT * FROM fk_partitioned_fk WHERE b = 142857;
- a | b
-------+--------
- 2502 | 142857
-(1 row)
-
--- Now you see it ...
-SELECT * FROM fk_partitioned_fk WHERE b = 142857;
- a | b
-------+--------
- 2502 | 142857
-(1 row)
-
-DELETE FROM fk_notpartitioned_pk WHERE b = 142857;
--- now you don't.
-SELECT * FROM fk_partitioned_fk WHERE a = 142857;
- a | b
----+---
-(0 rows)
-
--- verify that DROP works
-DROP TABLE fk_partitioned_fk_2;
--- Test behavior of the constraint together with attaching and detaching
--- partitions.
-CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502);
-ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_2;
-BEGIN;
-DROP TABLE fk_partitioned_fk;
--- constraint should still be there
-\d fk_partitioned_fk_2;
- Table "public.fk_partitioned_fk_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | | 2501
- b | integer | | | 142857
-Foreign-key constraints:
- "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
-ROLLBACK;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502);
-DROP TABLE fk_partitioned_fk_2;
-CREATE TABLE fk_partitioned_fk_2 (b int, c text, a int,
- FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk ON UPDATE CASCADE ON DELETE CASCADE);
-ALTER TABLE fk_partitioned_fk_2 DROP COLUMN c;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502);
--- should have only one constraint
-\d fk_partitioned_fk_2
- Table "public.fk_partitioned_fk_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- b | integer | | |
- a | integer | | |
-Partition of: fk_partitioned_fk FOR VALUES IN (1500, 1502)
-Foreign-key constraints:
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
-DROP TABLE fk_partitioned_fk_2;
-CREATE TABLE fk_partitioned_fk_4 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE) PARTITION BY RANGE (b, a);
-CREATE TABLE fk_partitioned_fk_4_1 PARTITION OF fk_partitioned_fk_4 FOR VALUES FROM (1,1) TO (100,100);
-CREATE TABLE fk_partitioned_fk_4_2 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL);
-ALTER TABLE fk_partitioned_fk_4 ATTACH PARTITION fk_partitioned_fk_4_2 FOR VALUES FROM (100,100) TO (1000,1000);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502);
-ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_4;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502);
--- should only have one constraint
-\d fk_partitioned_fk_4
- Partitioned table "public.fk_partitioned_fk_4"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk FOR VALUES IN (3500, 3502)
-Partition key: RANGE (b, a)
-Foreign-key constraints:
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-Number of partitions: 2 (Use \d+ to list them.)
-
-\d fk_partitioned_fk_4_1
- Table "public.fk_partitioned_fk_4_1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk_4 FOR VALUES FROM (1, 1) TO (100, 100)
-Foreign-key constraints:
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
--- this one has an FK with mismatched properties
-\d fk_partitioned_fk_4_2
- Table "public.fk_partitioned_fk_4_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk_4 FOR VALUES FROM (100, 100) TO (1000, 1000)
-Foreign-key constraints:
- "fk_partitioned_fk_4_2_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
-CREATE TABLE fk_partitioned_fk_5 (a int, b int,
- FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE,
- FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE)
- PARTITION BY RANGE (a);
-CREATE TABLE fk_partitioned_fk_5_1 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500);
-ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10);
-ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_5;
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500);
--- this one has two constraints, similar but not quite the one in the parent,
--- so it gets a new one
-\d fk_partitioned_fk_5
- Partitioned table "public.fk_partitioned_fk_5"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk FOR VALUES IN (4500)
-Partition key: RANGE (a)
-Foreign-key constraints:
- "fk_partitioned_fk_5_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE
- "fk_partitioned_fk_5_a_b_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-Number of partitions: 1 (Use \d+ to list them.)
-
--- verify that it works to reattaching a child with multiple candidate
--- constraints
-ALTER TABLE fk_partitioned_fk_5 DETACH PARTITION fk_partitioned_fk_5_1;
-ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10);
-\d fk_partitioned_fk_5_1
- Table "public.fk_partitioned_fk_5_1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-Partition of: fk_partitioned_fk_5 FOR VALUES FROM (0) TO (10)
-Foreign-key constraints:
- "fk_partitioned_fk_5_1_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b)
- TABLE "fk_partitioned_fk_5" CONSTRAINT "fk_partitioned_fk_5_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE
- TABLE "fk_partitioned_fk_5" CONSTRAINT "fk_partitioned_fk_5_a_b_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE
- TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
-
--- verify that attaching a table checks that the existing data satisfies the
--- constraint
-CREATE TABLE fk_partitioned_fk_2 (a int, b int) PARTITION BY RANGE (b);
-CREATE TABLE fk_partitioned_fk_2_1 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (0) TO (1000);
-CREATE TABLE fk_partitioned_fk_2_2 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (1000) TO (2000);
-INSERT INTO fk_partitioned_fk_2 VALUES (1600, 601), (1600, 1601);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2
- FOR VALUES IN (1600);
-ERROR: insert or update on table "fk_partitioned_fk_2_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(1600, 601) is not present in table "fk_notpartitioned_pk".
-INSERT INTO fk_notpartitioned_pk VALUES (1600, 601), (1600, 1601);
-ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2
- FOR VALUES IN (1600);
--- leave these tables around intentionally
--- Verify that attaching a table that's referenced by an existing FK
--- in the parent throws an error
-CREATE TABLE fk_partitioned_pk_6 (a int PRIMARY KEY);
-CREATE TABLE fk_partitioned_fk_6 (a int REFERENCES fk_partitioned_pk_6) PARTITION BY LIST (a);
-ALTER TABLE fk_partitioned_fk_6 ATTACH PARTITION fk_partitioned_pk_6 FOR VALUES IN (1);
-ERROR: cannot attach table "fk_partitioned_pk_6" as a partition because it is referenced by foreign key "fk_partitioned_fk_6_a_fkey"
-DROP TABLE fk_partitioned_pk_6, fk_partitioned_fk_6;
--- This case is similar to above, but the referenced relation is one level
--- lower in the hierarchy. This one fails in a different way as the above,
--- because we don't bother to protect against this case explicitly. If the
--- current error stops happening, we'll need to add a better protection.
-CREATE TABLE fk_partitioned_pk_6 (a int PRIMARY KEY) PARTITION BY list (a);
-CREATE TABLE fk_partitioned_pk_61 PARTITION OF fk_partitioned_pk_6 FOR VALUES IN (1);
-CREATE TABLE fk_partitioned_fk_6 (a int REFERENCES fk_partitioned_pk_61) PARTITION BY LIST (a);
-ALTER TABLE fk_partitioned_fk_6 ATTACH PARTITION fk_partitioned_pk_6 FOR VALUES IN (1);
-ERROR: cannot ALTER TABLE "fk_partitioned_pk_61" because it is being used by active queries in this session
-DROP TABLE fk_partitioned_pk_6, fk_partitioned_fk_6;
--- test the case when the referenced table is owned by a different user
-create role regress_other_partitioned_fk_owner;
-grant references on fk_notpartitioned_pk to regress_other_partitioned_fk_owner;
-set role regress_other_partitioned_fk_owner;
-create table other_partitioned_fk(a int, b int) partition by list (a);
-create table other_partitioned_fk_1 partition of other_partitioned_fk
- for values in (2048);
-insert into other_partitioned_fk
- select 2048, x from generate_series(1,10) x;
--- this should fail
-alter table other_partitioned_fk add foreign key (a, b)
- references fk_notpartitioned_pk(a, b);
-ERROR: insert or update on table "other_partitioned_fk_1" violates foreign key constraint "other_partitioned_fk_a_b_fkey"
-DETAIL: Key (a, b)=(2048, 1) is not present in table "fk_notpartitioned_pk".
--- add the missing keys and retry
-reset role;
-insert into fk_notpartitioned_pk (a, b)
- select 2048, x from generate_series(1,10) x;
-set role regress_other_partitioned_fk_owner;
-alter table other_partitioned_fk add foreign key (a, b)
- references fk_notpartitioned_pk(a, b);
--- clean up
-drop table other_partitioned_fk;
-reset role;
-revoke all on fk_notpartitioned_pk from regress_other_partitioned_fk_owner;
-drop role regress_other_partitioned_fk_owner;
---
--- Test self-referencing foreign key with partition.
--- This should create only one fk constraint per partition
---
-CREATE TABLE parted_self_fk (
- id bigint NOT NULL PRIMARY KEY,
- id_abc bigint,
- FOREIGN KEY (id_abc) REFERENCES parted_self_fk(id)
-)
-PARTITION BY RANGE (id);
-CREATE TABLE part1_self_fk (
- id bigint NOT NULL PRIMARY KEY,
- id_abc bigint
-);
-ALTER TABLE parted_self_fk ATTACH PARTITION part1_self_fk FOR VALUES FROM (0) TO (10);
-CREATE TABLE part2_self_fk PARTITION OF parted_self_fk FOR VALUES FROM (10) TO (20);
-CREATE TABLE part3_self_fk ( -- a partitioned partition
- id bigint NOT NULL PRIMARY KEY,
- id_abc bigint
-) PARTITION BY RANGE (id);
-CREATE TABLE part32_self_fk PARTITION OF part3_self_fk FOR VALUES FROM (20) TO (30);
-ALTER TABLE parted_self_fk ATTACH PARTITION part3_self_fk FOR VALUES FROM (20) TO (40);
-CREATE TABLE part33_self_fk (
- id bigint NOT NULL PRIMARY KEY,
- id_abc bigint
-);
-ALTER TABLE part3_self_fk ATTACH PARTITION part33_self_fk FOR VALUES FROM (30) TO (40);
-SELECT cr.relname, co.conname, co.contype, co.convalidated,
- p.conname AS conparent, p.convalidated, cf.relname AS foreignrel
-FROM pg_constraint co
-JOIN pg_class cr ON cr.oid = co.conrelid
-LEFT JOIN pg_class cf ON cf.oid = co.confrelid
-LEFT JOIN pg_constraint p ON p.oid = co.conparentid
-WHERE cr.oid IN (SELECT relid FROM pg_partition_tree('parted_self_fk'))
-ORDER BY co.contype, cr.relname, co.conname, p.conname;
- relname | conname | contype | convalidated | conparent | convalidated | foreignrel
-----------------+----------------------------+---------+--------------+----------------------------+--------------+----------------
- part1_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part2_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part32_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part33_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part3_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- parted_self_fk | parted_self_fk_id_abc_fkey | f | t | | | parted_self_fk
- part1_self_fk | part1_self_fk_id_not_null | n | t | | |
- part2_self_fk | parted_self_fk_id_not_null | n | t | | |
- part32_self_fk | part3_self_fk_id_not_null | n | t | | |
- part33_self_fk | part33_self_fk_id_not_null | n | t | | |
- part3_self_fk | part3_self_fk_id_not_null | n | t | | |
- parted_self_fk | parted_self_fk_id_not_null | n | t | | |
- part1_self_fk | part1_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- part2_self_fk | part2_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- part32_self_fk | part32_self_fk_pkey | p | t | part3_self_fk_pkey | t |
- part33_self_fk | part33_self_fk_pkey | p | t | part3_self_fk_pkey | t |
- part3_self_fk | part3_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- parted_self_fk | parted_self_fk_pkey | p | t | | |
-(18 rows)
-
--- detach and re-attach multiple times just to ensure everything is kosher
-ALTER TABLE parted_self_fk DETACH PARTITION part2_self_fk;
-ALTER TABLE parted_self_fk ATTACH PARTITION part2_self_fk FOR VALUES FROM (10) TO (20);
-ALTER TABLE parted_self_fk DETACH PARTITION part2_self_fk;
-ALTER TABLE parted_self_fk ATTACH PARTITION part2_self_fk FOR VALUES FROM (10) TO (20);
-SELECT cr.relname, co.conname, co.contype, co.convalidated,
- p.conname AS conparent, p.convalidated, cf.relname AS foreignrel
-FROM pg_constraint co
-JOIN pg_class cr ON cr.oid = co.conrelid
-LEFT JOIN pg_class cf ON cf.oid = co.confrelid
-LEFT JOIN pg_constraint p ON p.oid = co.conparentid
-WHERE cr.oid IN (SELECT relid FROM pg_partition_tree('parted_self_fk'))
-ORDER BY co.contype, cr.relname, co.conname, p.conname;
- relname | conname | contype | convalidated | conparent | convalidated | foreignrel
-----------------+----------------------------+---------+--------------+----------------------------+--------------+----------------
- part1_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part2_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part32_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part33_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- part3_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk
- parted_self_fk | parted_self_fk_id_abc_fkey | f | t | | | parted_self_fk
- part1_self_fk | part1_self_fk_id_not_null | n | t | | |
- part2_self_fk | parted_self_fk_id_not_null | n | t | | |
- part32_self_fk | part3_self_fk_id_not_null | n | t | | |
- part33_self_fk | part33_self_fk_id_not_null | n | t | | |
- part3_self_fk | part3_self_fk_id_not_null | n | t | | |
- parted_self_fk | parted_self_fk_id_not_null | n | t | | |
- part1_self_fk | part1_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- part2_self_fk | part2_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- part32_self_fk | part32_self_fk_pkey | p | t | part3_self_fk_pkey | t |
- part33_self_fk | part33_self_fk_pkey | p | t | part3_self_fk_pkey | t |
- part3_self_fk | part3_self_fk_pkey | p | t | parted_self_fk_pkey | t |
- parted_self_fk | parted_self_fk_pkey | p | t | | |
-(18 rows)
-
--- Leave this table around, for pg_upgrade/pg_dump tests
--- Test creating a constraint at the parent that already exists in partitions.
--- There should be no duplicated constraints, and attempts to drop the
--- constraint in partitions should raise appropriate errors.
-create schema fkpart0
- create table pkey (a int primary key)
- create table fk_part (a int) partition by list (a)
- create table fk_part_1 partition of fk_part
- (foreign key (a) references fkpart0.pkey) for values in (1)
- create table fk_part_23 partition of fk_part
- (foreign key (a) references fkpart0.pkey) for values in (2, 3)
- partition by list (a)
- create table fk_part_23_2 partition of fk_part_23 for values in (2);
-alter table fkpart0.fk_part add foreign key (a) references fkpart0.pkey;
-\d fkpart0.fk_part_1 \\ -- should have only one FK
- Table "fkpart0.fk_part_1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part FOR VALUES IN (1)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-
-alter table fkpart0.fk_part_1 drop constraint fk_part_1_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_1_a_fkey" of relation "fk_part_1"
-\d fkpart0.fk_part_23 \\ -- should have only one FK
- Partitioned table "fkpart0.fk_part_23"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part FOR VALUES IN (2, 3)
-Partition key: LIST (a)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-Number of partitions: 1 (Use \d+ to list them.)
-
-\d fkpart0.fk_part_23_2 \\ -- should have only one FK
- Table "fkpart0.fk_part_23_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part_23 FOR VALUES IN (2)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-
-alter table fkpart0.fk_part_23 drop constraint fk_part_23_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_23_a_fkey" of relation "fk_part_23"
-alter table fkpart0.fk_part_23_2 drop constraint fk_part_23_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_23_a_fkey" of relation "fk_part_23_2"
-create table fkpart0.fk_part_4 partition of fkpart0.fk_part for values in (4);
-\d fkpart0.fk_part_4
- Table "fkpart0.fk_part_4"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part FOR VALUES IN (4)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-
-alter table fkpart0.fk_part_4 drop constraint fk_part_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_4"
-create table fkpart0.fk_part_56 partition of fkpart0.fk_part
- for values in (5,6) partition by list (a);
-create table fkpart0.fk_part_56_5 partition of fkpart0.fk_part_56
- for values in (5);
-\d fkpart0.fk_part_56
- Partitioned table "fkpart0.fk_part_56"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition of: fkpart0.fk_part FOR VALUES IN (5, 6)
-Partition key: LIST (a)
-Foreign-key constraints:
- TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a)
-Number of partitions: 1 (Use \d+ to list them.)
-
-alter table fkpart0.fk_part_56 drop constraint fk_part_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_56"
-alter table fkpart0.fk_part_56_5 drop constraint fk_part_a_fkey;
-ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_56_5"
--- verify that attaching and detaching partitions maintains the right set of
--- triggers
-create schema fkpart1
- create table pkey (a int primary key)
- create table fk_part (a int) partition by list (a)
- create table fk_part_1 partition of fk_part for values in (1) partition by list (a)
- create table fk_part_1_1 partition of fk_part_1 for values in (1);
-alter table fkpart1.fk_part add foreign key (a) references fkpart1.pkey;
-insert into fkpart1.fk_part values (1); -- should fail
-ERROR: insert or update on table "fk_part_1_1" violates foreign key constraint "fk_part_a_fkey"
-DETAIL: Key (a)=(1) is not present in table "pkey".
-insert into fkpart1.pkey values (1);
-insert into fkpart1.fk_part values (1);
-delete from fkpart1.pkey where a = 1; -- should fail
-ERROR: update or delete on table "pkey" violates foreign key constraint "fk_part_a_fkey" on table "fk_part"
-DETAIL: Key (a)=(1) is still referenced from table "fk_part".
-alter table fkpart1.fk_part detach partition fkpart1.fk_part_1;
-create table fkpart1.fk_part_1_2 partition of fkpart1.fk_part_1 for values in (2);
-insert into fkpart1.fk_part_1 values (2); -- should fail
-ERROR: insert or update on table "fk_part_1_2" violates foreign key constraint "fk_part_a_fkey"
-DETAIL: Key (a)=(2) is not present in table "pkey".
-delete from fkpart1.pkey where a = 1;
-ERROR: update or delete on table "pkey" violates foreign key constraint "fk_part_a_fkey" on table "fk_part_1"
-DETAIL: Key (a)=(1) is still referenced from table "fk_part_1".
--- verify that attaching and detaching partitions manipulates the inheritance
--- properties of their FK constraints correctly
-create schema fkpart2
- create table pkey (a int primary key)
- create table fk_part (a int, constraint fkey foreign key (a) references fkpart2.pkey) partition by list (a)
- create table fk_part_1 partition of fkpart2.fk_part for values in (1) partition by list (a)
- create table fk_part_1_1 (a int, constraint my_fkey foreign key (a) references fkpart2.pkey);
-alter table fkpart2.fk_part_1 attach partition fkpart2.fk_part_1_1 for values in (1);
-alter table fkpart2.fk_part_1 drop constraint fkey; -- should fail
-ERROR: cannot drop inherited constraint "fkey" of relation "fk_part_1"
-alter table fkpart2.fk_part_1_1 drop constraint my_fkey; -- should fail
-ERROR: cannot drop inherited constraint "my_fkey" of relation "fk_part_1_1"
-alter table fkpart2.fk_part detach partition fkpart2.fk_part_1;
-alter table fkpart2.fk_part_1 drop constraint fkey; -- ok
-alter table fkpart2.fk_part_1_1 drop constraint my_fkey; -- doesn't exist
-ERROR: constraint "my_fkey" of relation "fk_part_1_1" does not exist
--- verify constraint deferrability
-create schema fkpart3
- create table pkey (a int primary key)
- create table fk_part (a int, constraint fkey foreign key (a) references fkpart3.pkey deferrable initially immediate) partition by list (a)
- create table fk_part_1 partition of fkpart3.fk_part for values in (1) partition by list (a)
- create table fk_part_1_1 partition of fkpart3.fk_part_1 for values in (1)
- create table fk_part_2 partition of fkpart3.fk_part for values in (2);
-begin;
-set constraints fkpart3.fkey deferred;
-insert into fkpart3.fk_part values (1);
-insert into fkpart3.pkey values (1);
-commit;
-begin;
-set constraints fkpart3.fkey deferred;
-delete from fkpart3.pkey;
-delete from fkpart3.fk_part;
-commit;
-drop schema fkpart0, fkpart1, fkpart2, fkpart3 cascade;
-NOTICE: drop cascades to 10 other objects
-DETAIL: drop cascades to table fkpart3.pkey
-drop cascades to table fkpart3.fk_part
-drop cascades to table fkpart2.pkey
-drop cascades to table fkpart2.fk_part
-drop cascades to table fkpart2.fk_part_1
-drop cascades to table fkpart1.pkey
-drop cascades to table fkpart1.fk_part
-drop cascades to table fkpart1.fk_part_1
-drop cascades to table fkpart0.pkey
-drop cascades to table fkpart0.fk_part
--- Test a partitioned table as referenced table.
--- Verify basic functionality with a regular partition creation and a partition
--- with a different column layout, as well as partitions added (created and
--- attached) after creating the foreign key.
-CREATE SCHEMA fkpart3;
-SET search_path TO fkpart3;
-CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a);
-CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (0) TO (1000);
-CREATE TABLE pk2 (b int, a int);
-ALTER TABLE pk2 DROP COLUMN b;
-ALTER TABLE pk2 ALTER a SET NOT NULL;
-ALTER TABLE pk ATTACH PARTITION pk2 FOR VALUES FROM (1000) TO (2000);
-CREATE TABLE fk (a int) PARTITION BY RANGE (a);
-CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (0) TO (750);
-ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk;
-CREATE TABLE fk2 (b int, a int) ;
-ALTER TABLE fk2 DROP COLUMN b;
-ALTER TABLE fk ATTACH PARTITION fk2 FOR VALUES FROM (750) TO (3500);
-CREATE TABLE pk3 PARTITION OF pk FOR VALUES FROM (2000) TO (3000);
-CREATE TABLE pk4 (LIKE pk);
-ALTER TABLE pk ATTACH PARTITION pk4 FOR VALUES FROM (3000) TO (4000);
-CREATE TABLE pk5 (c int, b int, a int NOT NULL) PARTITION BY RANGE (a);
-ALTER TABLE pk5 DROP COLUMN b, DROP COLUMN c;
-CREATE TABLE pk51 PARTITION OF pk5 FOR VALUES FROM (4000) TO (4500);
-CREATE TABLE pk52 PARTITION OF pk5 FOR VALUES FROM (4500) TO (5000);
-ALTER TABLE pk ATTACH PARTITION pk5 FOR VALUES FROM (4000) TO (5000);
-CREATE TABLE fk3 PARTITION OF fk FOR VALUES FROM (3500) TO (5000);
--- these should fail: referenced value not present
-INSERT into fk VALUES (1);
-ERROR: insert or update on table "fk1" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(1) is not present in table "pk".
-INSERT into fk VALUES (1000);
-ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(1000) is not present in table "pk".
-INSERT into fk VALUES (2000);
-ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(2000) is not present in table "pk".
-INSERT into fk VALUES (3000);
-ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(3000) is not present in table "pk".
-INSERT into fk VALUES (4000);
-ERROR: insert or update on table "fk3" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(4000) is not present in table "pk".
-INSERT into fk VALUES (4500);
-ERROR: insert or update on table "fk3" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(4500) is not present in table "pk".
--- insert into the referenced table, now they should work
-INSERT into pk VALUES (1), (1000), (2000), (3000), (4000), (4500);
-INSERT into fk VALUES (1), (1000), (2000), (3000), (4000), (4500);
--- should fail: referencing value present
-DELETE FROM pk WHERE a = 1;
-ERROR: update or delete on table "pk1" violates foreign key constraint "fk_a_fkey1" on table "fk"
-DETAIL: Key (a)=(1) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 1000;
-ERROR: update or delete on table "pk2" violates foreign key constraint "fk_a_fkey2" on table "fk"
-DETAIL: Key (a)=(1000) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 2000;
-ERROR: update or delete on table "pk3" violates foreign key constraint "fk_a_fkey3" on table "fk"
-DETAIL: Key (a)=(2000) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 3000;
-ERROR: update or delete on table "pk4" violates foreign key constraint "fk_a_fkey4" on table "fk"
-DETAIL: Key (a)=(3000) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 4000;
-ERROR: update or delete on table "pk51" violates foreign key constraint "fk_a_fkey6" on table "fk"
-DETAIL: Key (a)=(4000) is still referenced from table "fk".
-DELETE FROM pk WHERE a = 4500;
-ERROR: update or delete on table "pk52" violates foreign key constraint "fk_a_fkey7" on table "fk"
-DETAIL: Key (a)=(4500) is still referenced from table "fk".
-UPDATE pk SET a = 2 WHERE a = 1;
-ERROR: update or delete on table "pk1" violates foreign key constraint "fk_a_fkey1" on table "fk"
-DETAIL: Key (a)=(1) is still referenced from table "fk".
-UPDATE pk SET a = 1002 WHERE a = 1000;
-ERROR: update or delete on table "pk2" violates foreign key constraint "fk_a_fkey2" on table "fk"
-DETAIL: Key (a)=(1000) is still referenced from table "fk".
-UPDATE pk SET a = 2002 WHERE a = 2000;
-ERROR: update or delete on table "pk3" violates foreign key constraint "fk_a_fkey3" on table "fk"
-DETAIL: Key (a)=(2000) is still referenced from table "fk".
-UPDATE pk SET a = 3002 WHERE a = 3000;
-ERROR: update or delete on table "pk4" violates foreign key constraint "fk_a_fkey4" on table "fk"
-DETAIL: Key (a)=(3000) is still referenced from table "fk".
-UPDATE pk SET a = 4002 WHERE a = 4000;
-ERROR: update or delete on table "pk51" violates foreign key constraint "fk_a_fkey6" on table "fk"
-DETAIL: Key (a)=(4000) is still referenced from table "fk".
-UPDATE pk SET a = 4502 WHERE a = 4500;
-ERROR: update or delete on table "pk52" violates foreign key constraint "fk_a_fkey7" on table "fk"
-DETAIL: Key (a)=(4500) is still referenced from table "fk".
--- now they should work
-DELETE FROM fk;
-UPDATE pk SET a = 2 WHERE a = 1;
-DELETE FROM pk WHERE a = 2;
-UPDATE pk SET a = 1002 WHERE a = 1000;
-DELETE FROM pk WHERE a = 1002;
-UPDATE pk SET a = 2002 WHERE a = 2000;
-DELETE FROM pk WHERE a = 2002;
-UPDATE pk SET a = 3002 WHERE a = 3000;
-DELETE FROM pk WHERE a = 3002;
-UPDATE pk SET a = 4002 WHERE a = 4000;
-DELETE FROM pk WHERE a = 4002;
-UPDATE pk SET a = 4502 WHERE a = 4500;
-DELETE FROM pk WHERE a = 4502;
-CREATE SCHEMA fkpart4;
-SET search_path TO fkpart4;
--- dropping/detaching PARTITIONs is prevented if that would break
--- a foreign key's existing data
-CREATE TABLE droppk (a int PRIMARY KEY) PARTITION BY RANGE (a);
-CREATE TABLE droppk1 PARTITION OF droppk FOR VALUES FROM (0) TO (1000);
-CREATE TABLE droppk_d PARTITION OF droppk DEFAULT;
-CREATE TABLE droppk2 PARTITION OF droppk FOR VALUES FROM (1000) TO (2000)
- PARTITION BY RANGE (a);
-CREATE TABLE droppk21 PARTITION OF droppk2 FOR VALUES FROM (1000) TO (1400);
-CREATE TABLE droppk2_d PARTITION OF droppk2 DEFAULT;
-INSERT into droppk VALUES (1), (1000), (1500), (2000);
-CREATE TABLE dropfk (a int REFERENCES droppk);
-INSERT into dropfk VALUES (1), (1000), (1500), (2000);
--- these should all fail
-ALTER TABLE droppk DETACH PARTITION droppk_d;
-ERROR: removing partition "droppk_d" violates foreign key constraint "dropfk_a_fkey5"
-DETAIL: Key (a)=(2000) is still referenced from table "dropfk".
-ALTER TABLE droppk2 DETACH PARTITION droppk2_d;
-ERROR: removing partition "droppk2_d" violates foreign key constraint "dropfk_a_fkey4"
-DETAIL: Key (a)=(1500) is still referenced from table "dropfk".
-ALTER TABLE droppk DETACH PARTITION droppk1;
-ERROR: removing partition "droppk1" violates foreign key constraint "dropfk_a_fkey1"
-DETAIL: Key (a)=(1) is still referenced from table "dropfk".
-ALTER TABLE droppk DETACH PARTITION droppk2;
-ERROR: removing partition "droppk2" violates foreign key constraint "dropfk_a_fkey2"
-DETAIL: Key (a)=(1000) is still referenced from table "dropfk".
-ALTER TABLE droppk2 DETACH PARTITION droppk21;
-ERROR: removing partition "droppk21" violates foreign key constraint "dropfk_a_fkey3"
-DETAIL: Key (a)=(1000) is still referenced from table "dropfk".
--- dropping partitions is disallowed
-DROP TABLE droppk_d;
-ERROR: cannot drop table droppk_d because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk_d
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk2_d;
-ERROR: cannot drop table droppk2_d because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2_d
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk1;
-ERROR: cannot drop table droppk1 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk1
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk2;
-ERROR: cannot drop table droppk2 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk21;
-ERROR: cannot drop table droppk21 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk21
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DELETE FROM dropfk;
--- dropping partitions is disallowed, even when no referencing values
-DROP TABLE droppk_d;
-ERROR: cannot drop table droppk_d because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk_d
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk2_d;
-ERROR: cannot drop table droppk2_d because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2_d
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE droppk1;
-ERROR: cannot drop table droppk1 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk1
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
--- but DETACH is allowed, and DROP afterwards works
-ALTER TABLE droppk2 DETACH PARTITION droppk21;
-DROP TABLE droppk2;
-ERROR: cannot drop table droppk2 because other objects depend on it
-DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
--- Verify that initial constraint creation and cloning behave correctly
-CREATE SCHEMA fkpart5;
-SET search_path TO fkpart5;
-CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY LIST (a);
-CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1) PARTITION BY LIST (a);
-CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES IN (1);
-CREATE TABLE fk (a int) PARTITION BY LIST (a);
-CREATE TABLE fk1 PARTITION OF fk FOR VALUES IN (1) PARTITION BY LIST (a);
-CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES IN (1);
-ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk;
-CREATE TABLE pk2 PARTITION OF pk FOR VALUES IN (2);
-CREATE TABLE pk3 (a int NOT NULL) PARTITION BY LIST (a);
-CREATE TABLE pk31 PARTITION OF pk3 FOR VALUES IN (31);
-CREATE TABLE pk32 (b int, a int NOT NULL);
-ALTER TABLE pk32 DROP COLUMN b;
-ALTER TABLE pk3 ATTACH PARTITION pk32 FOR VALUES IN (32);
-ALTER TABLE pk ATTACH PARTITION pk3 FOR VALUES IN (31, 32);
-CREATE TABLE fk2 PARTITION OF fk FOR VALUES IN (2);
-CREATE TABLE fk3 (b int, a int);
-ALTER TABLE fk3 DROP COLUMN b;
-ALTER TABLE fk ATTACH PARTITION fk3 FOR VALUES IN (3);
-SELECT pg_describe_object('pg_constraint'::regclass, oid, 0), confrelid::regclass,
- CASE WHEN conparentid <> 0 THEN pg_describe_object('pg_constraint'::regclass, conparentid, 0) ELSE 'TOP' END
-FROM pg_catalog.pg_constraint
-WHERE conrelid IN (SELECT relid FROM pg_partition_tree('fk'))
-ORDER BY conrelid::regclass::text, conname;
- pg_describe_object | confrelid | case
-------------------------------------+-----------+-----------------------------------
- constraint fk_a_fkey on table fk | pk | TOP
- constraint fk_a_fkey1 on table fk | pk1 | constraint fk_a_fkey on table fk
- constraint fk_a_fkey2 on table fk | pk11 | constraint fk_a_fkey1 on table fk
- constraint fk_a_fkey3 on table fk | pk2 | constraint fk_a_fkey on table fk
- constraint fk_a_fkey4 on table fk | pk3 | constraint fk_a_fkey on table fk
- constraint fk_a_fkey5 on table fk | pk31 | constraint fk_a_fkey4 on table fk
- constraint fk_a_fkey6 on table fk | pk32 | constraint fk_a_fkey4 on table fk
- constraint fk_a_fkey on table fk1 | pk | constraint fk_a_fkey on table fk
- constraint fk_a_fkey on table fk11 | pk | constraint fk_a_fkey on table fk1
- constraint fk_a_fkey on table fk2 | pk | constraint fk_a_fkey on table fk
- constraint fk_a_fkey on table fk3 | pk | constraint fk_a_fkey on table fk
-(11 rows)
-
-CREATE TABLE fk4 (LIKE fk);
-INSERT INTO fk4 VALUES (50);
-ALTER TABLE fk ATTACH PARTITION fk4 FOR VALUES IN (50);
-ERROR: insert or update on table "fk4" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(50) is not present in table "pk".
--- Verify constraint deferrability
-CREATE SCHEMA fkpart9;
-SET search_path TO fkpart9;
-CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY LIST (a);
-CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1, 2) PARTITION BY LIST (a);
-CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES IN (1);
-CREATE TABLE pk3 PARTITION OF pk FOR VALUES IN (3);
-CREATE TABLE fk (a int REFERENCES pk DEFERRABLE INITIALLY IMMEDIATE);
-INSERT INTO fk VALUES (1); -- should fail
-ERROR: insert or update on table "fk" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(1) is not present in table "pk".
-BEGIN;
-SET CONSTRAINTS fk_a_fkey DEFERRED;
-INSERT INTO fk VALUES (1);
-COMMIT; -- should fail
-ERROR: insert or update on table "fk" violates foreign key constraint "fk_a_fkey"
-DETAIL: Key (a)=(1) is not present in table "pk".
-BEGIN;
-SET CONSTRAINTS fk_a_fkey DEFERRED;
-INSERT INTO fk VALUES (1);
-INSERT INTO pk VALUES (1);
-COMMIT; -- OK
-BEGIN;
-SET CONSTRAINTS fk_a_fkey DEFERRED;
-DELETE FROM pk WHERE a = 1;
-DELETE FROM fk WHERE a = 1;
-COMMIT; -- OK
--- Verify constraint deferrability when changed by ALTER
--- Partitioned table at referencing end
-CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2));
-CREATE TABLE ref(f1 int, f2 int, f3 int)
- PARTITION BY list(f1);
-CREATE TABLE ref1 PARTITION OF ref FOR VALUES IN (1);
-CREATE TABLE ref2 PARTITION OF ref FOR VALUES in (2);
-ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt;
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey
- DEFERRABLE INITIALLY DEFERRED;
-INSERT INTO pt VALUES(1,2,3);
-INSERT INTO ref VALUES(1,2,3);
-BEGIN;
-DELETE FROM pt;
-DELETE FROM ref;
-ABORT;
-DROP TABLE pt, ref;
--- Multi-level partitioning at referencing end
-CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2));
-CREATE TABLE ref(f1 int, f2 int, f3 int)
- PARTITION BY list(f1);
-CREATE TABLE ref1_2 PARTITION OF ref FOR VALUES IN (1, 2) PARTITION BY list (f2);
-CREATE TABLE ref1 PARTITION OF ref1_2 FOR VALUES IN (1);
-CREATE TABLE ref2 PARTITION OF ref1_2 FOR VALUES IN (2) PARTITION BY list (f2);
-CREATE TABLE ref22 PARTITION OF ref2 FOR VALUES IN (2);
-ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt;
-INSERT INTO pt VALUES(1,2,3);
-INSERT INTO ref VALUES(1,2,3);
-ALTER TABLE ref22 ALTER CONSTRAINT ref_f1_f2_fkey
- DEFERRABLE INITIALLY IMMEDIATE; -- fails
-ERROR: cannot alter constraint "ref_f1_f2_fkey" on relation "ref22"
-DETAIL: Constraint "ref_f1_f2_fkey" is derived from constraint "ref_f1_f2_fkey" of relation "ref".
-HINT: You may alter the constraint it derives from instead.
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey
- DEFERRABLE INITIALLY DEFERRED;
-BEGIN;
-DELETE FROM pt;
-DELETE FROM ref;
-ABORT;
-DROP TABLE pt, ref;
--- Partitioned table at referenced end
-CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2))
- PARTITION BY LIST(f1);
-CREATE TABLE pt1 PARTITION OF pt FOR VALUES IN (1);
-CREATE TABLE pt2 PARTITION OF pt FOR VALUES IN (2);
-CREATE TABLE ref(f1 int, f2 int, f3 int);
-ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt;
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey
- DEFERRABLE INITIALLY DEFERRED;
-INSERT INTO pt VALUES(1,2,3);
-INSERT INTO ref VALUES(1,2,3);
-BEGIN;
-DELETE FROM pt;
-DELETE FROM ref;
-ABORT;
-DROP TABLE pt, ref;
--- Multi-level partitioning at referenced end
-CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2))
- PARTITION BY LIST(f1);
-CREATE TABLE pt1_2 PARTITION OF pt FOR VALUES IN (1, 2) PARTITION BY LIST (f1);
-CREATE TABLE pt1 PARTITION OF pt1_2 FOR VALUES IN (1);
-CREATE TABLE pt2 PARTITION OF pt1_2 FOR VALUES IN (2);
-CREATE TABLE ref(f1 int, f2 int, f3 int);
-ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt;
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey1
- DEFERRABLE INITIALLY DEFERRED; -- fails
-ERROR: cannot alter constraint "ref_f1_f2_fkey1" on relation "ref"
-DETAIL: Constraint "ref_f1_f2_fkey1" is derived from constraint "ref_f1_f2_fkey" of relation "ref".
-HINT: You may alter the constraint it derives from instead.
-ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey
- DEFERRABLE INITIALLY DEFERRED;
-INSERT INTO pt VALUES(1,2,3);
-INSERT INTO ref VALUES(1,2,3);
-BEGIN;
-DELETE FROM pt;
-DELETE FROM ref;
-ABORT;
-DROP TABLE pt, ref;
-DROP SCHEMA fkpart9 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table pk
-drop cascades to table fk
--- Verify ON UPDATE/DELETE behavior
-CREATE SCHEMA fkpart6;
-SET search_path TO fkpart6;
-CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a);
-CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a);
-CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES FROM (1) TO (50);
-CREATE TABLE pk12 PARTITION OF pk1 FOR VALUES FROM (50) TO (100);
-CREATE TABLE fk (a int) PARTITION BY RANGE (a);
-CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a);
-CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10);
-CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100);
-ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE CASCADE ON DELETE CASCADE;
-CREATE TABLE fk_d PARTITION OF fk DEFAULT;
-INSERT INTO pk VALUES (1);
-INSERT INTO fk VALUES (1);
-UPDATE pk SET a = 20;
-SELECT tableoid::regclass, * FROM fk;
- tableoid | a
-----------+----
- fk12 | 20
-(1 row)
-
-DELETE FROM pk WHERE a = 20;
-SELECT tableoid::regclass, * FROM fk;
- tableoid | a
-----------+---
-(0 rows)
-
-DROP TABLE fk;
-TRUNCATE TABLE pk;
-INSERT INTO pk VALUES (20), (50);
-CREATE TABLE fk (a int) PARTITION BY RANGE (a);
-CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a);
-CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10);
-CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100);
-ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE SET NULL ON DELETE SET NULL;
-CREATE TABLE fk_d PARTITION OF fk DEFAULT;
-INSERT INTO fk VALUES (20), (50);
-UPDATE pk SET a = 21 WHERE a = 20;
-DELETE FROM pk WHERE a = 50;
-SELECT tableoid::regclass, * FROM fk;
- tableoid | a
-----------+---
- fk_d |
- fk_d |
-(2 rows)
-
-DROP TABLE fk;
-TRUNCATE TABLE pk;
-INSERT INTO pk VALUES (20), (30), (50);
-CREATE TABLE fk (id int, a int DEFAULT 50) PARTITION BY RANGE (a);
-CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a);
-CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10);
-CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100);
-ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE SET DEFAULT ON DELETE SET DEFAULT;
-CREATE TABLE fk_d PARTITION OF fk DEFAULT;
-INSERT INTO fk VALUES (1, 20), (2, 30);
-DELETE FROM pk WHERE a = 20 RETURNING *;
- a
-----
- 20
-(1 row)
-
-UPDATE pk SET a = 90 WHERE a = 30 RETURNING *;
- a
-----
- 90
-(1 row)
-
-SELECT tableoid::regclass, * FROM fk;
- tableoid | id | a
-----------+----+----
- fk12 | 1 | 50
- fk12 | 2 | 50
-(2 rows)
-
-DROP TABLE fk;
-TRUNCATE TABLE pk;
-INSERT INTO pk VALUES (20), (30);
-CREATE TABLE fk (a int DEFAULT 50) PARTITION BY RANGE (a);
-CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a);
-CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10);
-CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100);
-ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE RESTRICT ON DELETE RESTRICT;
-CREATE TABLE fk_d PARTITION OF fk DEFAULT;
-INSERT INTO fk VALUES (20), (30);
-DELETE FROM pk WHERE a = 20;
-ERROR: update or delete on table "pk11" violates foreign key constraint "fk_a_fkey2" on table "fk"
-DETAIL: Key (a)=(20) is still referenced from table "fk".
-UPDATE pk SET a = 90 WHERE a = 30;
-ERROR: update or delete on table "pk" violates foreign key constraint "fk_a_fkey" on table "fk"
-DETAIL: Key (a)=(30) is still referenced from table "fk".
-SELECT tableoid::regclass, * FROM fk;
- tableoid | a
-----------+----
- fk12 | 20
- fk12 | 30
-(2 rows)
-
-DROP TABLE fk;
--- test for reported bug: relispartition not set
--- https://postgr.es/m/CA+HiwqHMsRtRYRWYTWavKJ8x14AFsv7bmAV46mYwnfD3vy8goQ@mail.gmail.com
-CREATE SCHEMA fkpart7
- CREATE TABLE pkpart (a int) PARTITION BY LIST (a)
- CREATE TABLE pkpart1 PARTITION OF pkpart FOR VALUES IN (1);
-ALTER TABLE fkpart7.pkpart1 ADD PRIMARY KEY (a);
-ALTER TABLE fkpart7.pkpart ADD PRIMARY KEY (a);
-CREATE TABLE fkpart7.fk (a int REFERENCES fkpart7.pkpart);
-DROP SCHEMA fkpart7 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table fkpart7.pkpart
-drop cascades to table fkpart7.fk
--- ensure we check partitions are "not used" when dropping constraints
-CREATE SCHEMA fkpart8
- CREATE TABLE tbl1(f1 int PRIMARY KEY)
- CREATE TABLE tbl2(f1 int REFERENCES tbl1 DEFERRABLE INITIALLY DEFERRED) PARTITION BY RANGE(f1)
- CREATE TABLE tbl2_p1 PARTITION OF tbl2 FOR VALUES FROM (minvalue) TO (maxvalue);
-INSERT INTO fkpart8.tbl1 VALUES(1);
-BEGIN;
-INSERT INTO fkpart8.tbl2 VALUES(1);
-ALTER TABLE fkpart8.tbl2 DROP CONSTRAINT tbl2_f1_fkey;
-ERROR: cannot ALTER TABLE "tbl2_p1" because it has pending trigger events
-COMMIT;
-DROP SCHEMA fkpart8 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table fkpart8.tbl1
-drop cascades to table fkpart8.tbl2
--- ensure FK referencing a multi-level partitioned table are
--- enforce reference to sub-children.
-CREATE SCHEMA fkpart9
- CREATE TABLE pk (a INT PRIMARY KEY) PARTITION BY RANGE (a)
- CREATE TABLE fk (
- fk_a INT REFERENCES pk(a) ON DELETE CASCADE
- )
- CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (30) TO (50) PARTITION BY RANGE (a)
- CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES FROM (30) TO (40);
-INSERT INTO fkpart9.pk VALUES (35);
-INSERT INTO fkpart9.fk VALUES (35);
-DELETE FROM fkpart9.pk WHERE a=35;
-SELECT * FROM fkpart9.pk;
- a
----
-(0 rows)
-
-SELECT * FROM fkpart9.fk;
- fk_a
-------
-(0 rows)
-
-DROP SCHEMA fkpart9 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table fkpart9.pk
-drop cascades to table fkpart9.fk
--- test that ri_Check_Pk_Match() scans the correct partition for a deferred
--- ON DELETE/UPDATE NO ACTION constraint
-CREATE SCHEMA fkpart10
- CREATE TABLE tbl1(f1 int PRIMARY KEY) PARTITION BY RANGE(f1)
- CREATE TABLE tbl1_p1 PARTITION OF tbl1 FOR VALUES FROM (minvalue) TO (1)
- CREATE TABLE tbl1_p2 PARTITION OF tbl1 FOR VALUES FROM (1) TO (maxvalue)
- CREATE TABLE tbl2(f1 int REFERENCES tbl1 DEFERRABLE INITIALLY DEFERRED)
- CREATE TABLE tbl3(f1 int PRIMARY KEY) PARTITION BY RANGE(f1)
- CREATE TABLE tbl3_p1 PARTITION OF tbl3 FOR VALUES FROM (minvalue) TO (1)
- CREATE TABLE tbl3_p2 PARTITION OF tbl3 FOR VALUES FROM (1) TO (maxvalue)
- CREATE TABLE tbl4(f1 int REFERENCES tbl3 DEFERRABLE INITIALLY DEFERRED);
-INSERT INTO fkpart10.tbl1 VALUES (0), (1);
-INSERT INTO fkpart10.tbl2 VALUES (0), (1);
-INSERT INTO fkpart10.tbl3 VALUES (-2), (-1), (0);
-INSERT INTO fkpart10.tbl4 VALUES (-2), (-1);
-BEGIN;
-DELETE FROM fkpart10.tbl1 WHERE f1 = 0;
-UPDATE fkpart10.tbl1 SET f1 = 2 WHERE f1 = 1;
-INSERT INTO fkpart10.tbl1 VALUES (0), (1);
-COMMIT;
--- test that cross-partition updates correctly enforces the foreign key
--- restriction (specifically testing INITIALLY DEFERRED)
-BEGIN;
-UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0;
-UPDATE fkpart10.tbl3 SET f1 = f1 * -1;
-INSERT INTO fkpart10.tbl1 VALUES (4);
-COMMIT;
-ERROR: update or delete on table "tbl1" violates foreign key constraint "tbl2_f1_fkey" on table "tbl2"
-DETAIL: Key (f1)=(0) is still referenced from table "tbl2".
-BEGIN;
-UPDATE fkpart10.tbl3 SET f1 = f1 * -1;
-UPDATE fkpart10.tbl3 SET f1 = f1 + 3;
-UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0;
-INSERT INTO fkpart10.tbl1 VALUES (0);
-COMMIT;
-ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl4_f1_fkey" on table "tbl4"
-DETAIL: Key (f1)=(-2) is still referenced from table "tbl4".
-BEGIN;
-UPDATE fkpart10.tbl3 SET f1 = f1 * -1;
-UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0;
-INSERT INTO fkpart10.tbl1 VALUES (0);
-INSERT INTO fkpart10.tbl3 VALUES (-2), (-1);
-COMMIT;
--- test where the updated table now has both an IMMEDIATE and a DEFERRED
--- constraint pointing into it
-CREATE TABLE fkpart10.tbl5(f1 int REFERENCES fkpart10.tbl3);
-INSERT INTO fkpart10.tbl5 VALUES (-2), (-1);
-BEGIN;
-UPDATE fkpart10.tbl3 SET f1 = f1 * -3;
-ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl5_f1_fkey" on table "tbl5"
-DETAIL: Key (f1)=(-2) is still referenced from table "tbl5".
-COMMIT;
--- Now test where the row referenced from the table with an IMMEDIATE
--- constraint stays in place, while those referenced from the table with a
--- DEFERRED constraint don't.
-DELETE FROM fkpart10.tbl5;
-INSERT INTO fkpart10.tbl5 VALUES (0);
-BEGIN;
-UPDATE fkpart10.tbl3 SET f1 = f1 * -3;
-COMMIT;
-ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl4_f1_fkey" on table "tbl4"
-DETAIL: Key (f1)=(-2) is still referenced from table "tbl4".
-DROP SCHEMA fkpart10 CASCADE;
-NOTICE: drop cascades to 5 other objects
-DETAIL: drop cascades to table fkpart10.tbl1
-drop cascades to table fkpart10.tbl2
-drop cascades to table fkpart10.tbl3
-drop cascades to table fkpart10.tbl4
-drop cascades to table fkpart10.tbl5
--- verify foreign keys are enforced during cross-partition updates,
--- especially on the PK side
-CREATE SCHEMA fkpart11
- CREATE TABLE pk (a INT PRIMARY KEY, b text) PARTITION BY LIST (a)
- CREATE TABLE fk (
- a INT,
- CONSTRAINT fkey FOREIGN KEY (a) REFERENCES pk(a) ON UPDATE CASCADE ON DELETE CASCADE
- )
- CREATE TABLE fk_parted (
- a INT PRIMARY KEY,
- CONSTRAINT fkey FOREIGN KEY (a) REFERENCES pk(a) ON UPDATE CASCADE ON DELETE CASCADE
- ) PARTITION BY LIST (a)
- CREATE TABLE fk_another (
- a INT,
- CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fk_parted (a) ON UPDATE CASCADE ON DELETE CASCADE
- )
- CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1, 2) PARTITION BY LIST (a)
- CREATE TABLE pk2 PARTITION OF pk FOR VALUES IN (3)
- CREATE TABLE pk3 PARTITION OF pk FOR VALUES IN (4)
- CREATE TABLE fk1 PARTITION OF fk_parted FOR VALUES IN (1, 2)
- CREATE TABLE fk2 PARTITION OF fk_parted FOR VALUES IN (3)
- CREATE TABLE fk3 PARTITION OF fk_parted FOR VALUES IN (4);
-CREATE TABLE fkpart11.pk11 (b text, a int NOT NULL);
-ALTER TABLE fkpart11.pk1 ATTACH PARTITION fkpart11.pk11 FOR VALUES IN (1);
-CREATE TABLE fkpart11.pk12 (b text, c int, a int NOT NULL);
-ALTER TABLE fkpart11.pk12 DROP c;
-ALTER TABLE fkpart11.pk1 ATTACH PARTITION fkpart11.pk12 FOR VALUES IN (2);
-INSERT INTO fkpart11.pk VALUES (1, 'xxx'), (3, 'yyy');
-INSERT INTO fkpart11.fk VALUES (1), (3);
-INSERT INTO fkpart11.fk_parted VALUES (1), (3);
-INSERT INTO fkpart11.fk_another VALUES (1), (3);
--- moves 2 rows from one leaf partition to another, with both updates being
--- cascaded to fk and fk_parted. Updates of fk_parted, of which one is
--- cross-partition (3 -> 4), are further cascaded to fk_another.
-UPDATE fkpart11.pk SET a = a + 1 RETURNING tableoid::pg_catalog.regclass, *;
- tableoid | a | b
----------------+---+-----
- fkpart11.pk12 | 2 | xxx
- fkpart11.pk3 | 4 | yyy
-(2 rows)
-
-SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk;
- tableoid | a
--------------+---
- fkpart11.fk | 2
- fkpart11.fk | 4
-(2 rows)
-
-SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_parted;
- tableoid | a
---------------+---
- fkpart11.fk1 | 2
- fkpart11.fk3 | 4
-(2 rows)
-
-SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_another;
- tableoid | a
----------------------+---
- fkpart11.fk_another | 2
- fkpart11.fk_another | 4
-(2 rows)
-
--- let's try with the foreign key pointing at tables in the partition tree
--- that are not the same as the query's target table
--- 1. foreign key pointing into a non-root ancestor
---
--- A cross-partition update on the root table will fail, because we currently
--- can't enforce the foreign keys pointing into a non-leaf partition
-ALTER TABLE fkpart11.fk DROP CONSTRAINT fkey;
-DELETE FROM fkpart11.fk WHERE a = 4;
-ALTER TABLE fkpart11.fk ADD CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fkpart11.pk1 (a) ON UPDATE CASCADE ON DELETE CASCADE;
-UPDATE fkpart11.pk SET a = a - 1;
-ERROR: cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key
-DETAIL: A foreign key points to ancestor "pk1" but not the root ancestor "pk".
-HINT: Consider defining the foreign key on table "pk".
--- it's okay though if the non-leaf partition is updated directly
-UPDATE fkpart11.pk1 SET a = a - 1;
-SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.pk;
- tableoid | a | b
----------------+---+-----
- fkpart11.pk11 | 1 | xxx
- fkpart11.pk3 | 4 | yyy
-(2 rows)
-
-SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk;
- tableoid | a
--------------+---
- fkpart11.fk | 1
-(1 row)
-
-SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_parted;
- tableoid | a
---------------+---
- fkpart11.fk1 | 1
- fkpart11.fk3 | 4
-(2 rows)
-
-SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_another;
- tableoid | a
----------------------+---
- fkpart11.fk_another | 4
- fkpart11.fk_another | 1
-(2 rows)
-
--- 2. foreign key pointing into a single leaf partition
---
--- A cross-partition update that deletes from the pointed-to leaf partition
--- is allowed to succeed
-ALTER TABLE fkpart11.fk DROP CONSTRAINT fkey;
-ALTER TABLE fkpart11.fk ADD CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fkpart11.pk11 (a) ON UPDATE CASCADE ON DELETE CASCADE;
--- will delete (1) from p11 which is cascaded to fk
-UPDATE fkpart11.pk SET a = a + 1 WHERE a = 1;
-SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk;
- tableoid | a
-----------+---
-(0 rows)
-
-DROP TABLE fkpart11.fk;
--- check that regular and deferrable AR triggers on the PK tables
--- still work as expected
-CREATE FUNCTION fkpart11.print_row () RETURNS TRIGGER LANGUAGE plpgsql AS $$
- BEGIN
- RAISE NOTICE 'TABLE: %, OP: %, OLD: %, NEW: %', TG_RELNAME, TG_OP, OLD, NEW;
- RETURN NULL;
- END;
-$$;
-CREATE TRIGGER trig_upd_pk AFTER UPDATE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row();
-CREATE TRIGGER trig_del_pk AFTER DELETE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row();
-CREATE TRIGGER trig_ins_pk AFTER INSERT ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row();
-CREATE CONSTRAINT TRIGGER trig_upd_fk_parted AFTER UPDATE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row();
-CREATE CONSTRAINT TRIGGER trig_del_fk_parted AFTER DELETE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row();
-CREATE CONSTRAINT TRIGGER trig_ins_fk_parted AFTER INSERT ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row();
-UPDATE fkpart11.pk SET a = 3 WHERE a = 4;
-NOTICE: TABLE: pk3, OP: DELETE, OLD: (4,yyy), NEW:
-NOTICE: TABLE: pk2, OP: INSERT, OLD: , NEW: (3,yyy)
-NOTICE: TABLE: fk3, OP: DELETE, OLD: (4), NEW:
-NOTICE: TABLE: fk2, OP: INSERT, OLD: , NEW: (3)
-UPDATE fkpart11.pk SET a = 1 WHERE a = 2;
-NOTICE: TABLE: pk12, OP: DELETE, OLD: (xxx,2), NEW:
-NOTICE: TABLE: pk11, OP: INSERT, OLD: , NEW: (xxx,1)
-NOTICE: TABLE: fk1, OP: UPDATE, OLD: (2), NEW: (1)
-DROP SCHEMA fkpart11 CASCADE;
-NOTICE: drop cascades to 4 other objects
-DETAIL: drop cascades to table fkpart11.pk
-drop cascades to table fkpart11.fk_parted
-drop cascades to table fkpart11.fk_another
-drop cascades to function fkpart11.print_row()
--- When a table is attached as partition to a partitioned table that has
--- a foreign key to another partitioned table, it acquires a clone of the
--- FK. Upon detach, this clone is not removed, but instead becomes an
--- independent FK. If it then attaches to the partitioned table again,
--- the FK from the parent "takes over" ownership of the independent FK rather
--- than creating a separate one.
-CREATE SCHEMA fkpart12
- CREATE TABLE fk_p ( id int, jd int, PRIMARY KEY(id, jd)) PARTITION BY list (id)
- CREATE TABLE fk_p_1 PARTITION OF fk_p FOR VALUES IN (1) PARTITION BY list (jd)
- CREATE TABLE fk_p_1_1 PARTITION OF fk_p_1 FOR VALUES IN (1)
- CREATE TABLE fk_p_1_2 (x int, y int, jd int NOT NULL, id int NOT NULL)
- CREATE TABLE fk_p_2 PARTITION OF fk_p FOR VALUES IN (2) PARTITION BY list (jd)
- CREATE TABLE fk_p_2_1 PARTITION OF fk_p_2 FOR VALUES IN (1)
- CREATE TABLE fk_p_2_2 PARTITION OF fk_p_2 FOR VALUES IN (2)
- CREATE TABLE fk_r_1 ( p_jd int NOT NULL, x int, id int PRIMARY KEY, p_id int NOT NULL)
- CREATE TABLE fk_r_2 ( id int PRIMARY KEY, p_id int NOT NULL, p_jd int NOT NULL) PARTITION BY list (id)
- CREATE TABLE fk_r_2_1 PARTITION OF fk_r_2 FOR VALUES IN (2, 1)
- CREATE TABLE fk_r ( id int PRIMARY KEY, p_id int NOT NULL, p_jd int NOT NULL,
- FOREIGN KEY (p_id, p_jd) REFERENCES fk_p (id, jd)
- ) PARTITION BY list (id);
-SET search_path TO fkpart12;
-ALTER TABLE fk_p_1_2 DROP COLUMN x, DROP COLUMN y;
-ALTER TABLE fk_p_1 ATTACH PARTITION fk_p_1_2 FOR VALUES IN (2);
-ALTER TABLE fk_r_1 DROP COLUMN x;
-INSERT INTO fk_p VALUES (1, 1);
-ALTER TABLE fk_r ATTACH PARTITION fk_r_1 FOR VALUES IN (1);
-ALTER TABLE fk_r ATTACH PARTITION fk_r_2 FOR VALUES IN (2);
-\d fk_r_2
- Partitioned table "fkpart12.fk_r_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- id | integer | | not null |
- p_id | integer | | not null |
- p_jd | integer | | not null |
-Partition of: fk_r FOR VALUES IN (2)
-Partition key: LIST (id)
-Indexes:
- "fk_r_2_pkey" PRIMARY KEY, btree (id)
-Foreign-key constraints:
- TABLE "fk_r" CONSTRAINT "fk_r_p_id_p_jd_fkey" FOREIGN KEY (p_id, p_jd) REFERENCES fk_p(id, jd)
-Number of partitions: 1 (Use \d+ to list them.)
-
-INSERT INTO fk_r VALUES (1, 1, 1);
-INSERT INTO fk_r VALUES (2, 2, 1);
-ERROR: insert or update on table "fk_r_2_1" violates foreign key constraint "fk_r_p_id_p_jd_fkey"
-DETAIL: Key (p_id, p_jd)=(2, 1) is not present in table "fk_p".
-ALTER TABLE fk_r DETACH PARTITION fk_r_1;
-ALTER TABLE fk_r DETACH PARTITION fk_r_2;
-\d fk_r_2
- Partitioned table "fkpart12.fk_r_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- id | integer | | not null |
- p_id | integer | | not null |
- p_jd | integer | | not null |
-Partition key: LIST (id)
-Indexes:
- "fk_r_2_pkey" PRIMARY KEY, btree (id)
-Foreign-key constraints:
- "fk_r_p_id_p_jd_fkey" FOREIGN KEY (p_id, p_jd) REFERENCES fk_p(id, jd)
-Number of partitions: 1 (Use \d+ to list them.)
-
-INSERT INTO fk_r_1 (id, p_id, p_jd) VALUES (2, 1, 2); -- should fail
-ERROR: insert or update on table "fk_r_1" violates foreign key constraint "fk_r_p_id_p_jd_fkey"
-DETAIL: Key (p_id, p_jd)=(1, 2) is not present in table "fk_p".
-DELETE FROM fk_p; -- should fail
-ERROR: update or delete on table "fk_p_1_1" violates foreign key constraint "fk_r_1_p_id_p_jd_fkey1" on table "fk_r_1"
-DETAIL: Key (id, jd)=(1, 1) is still referenced from table "fk_r_1".
-ALTER TABLE fk_r ATTACH PARTITION fk_r_1 FOR VALUES IN (1);
-ALTER TABLE fk_r ATTACH PARTITION fk_r_2 FOR VALUES IN (2);
-\d fk_r_2
- Partitioned table "fkpart12.fk_r_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- id | integer | | not null |
- p_id | integer | | not null |
- p_jd | integer | | not null |
-Partition of: fk_r FOR VALUES IN (2)
-Partition key: LIST (id)
-Indexes:
- "fk_r_2_pkey" PRIMARY KEY, btree (id)
-Foreign-key constraints:
- TABLE "fk_r" CONSTRAINT "fk_r_p_id_p_jd_fkey" FOREIGN KEY (p_id, p_jd) REFERENCES fk_p(id, jd)
-Number of partitions: 1 (Use \d+ to list them.)
-
-DELETE FROM fk_p; -- should fail
-ERROR: update or delete on table "fk_p_1_1" violates foreign key constraint "fk_r_p_id_p_jd_fkey2" on table "fk_r"
-DETAIL: Key (id, jd)=(1, 1) is still referenced from table "fk_r".
--- these should all fail
-ALTER TABLE fk_r_1 DROP CONSTRAINT fk_r_p_id_p_jd_fkey;
-ERROR: cannot drop inherited constraint "fk_r_p_id_p_jd_fkey" of relation "fk_r_1"
-ALTER TABLE fk_r DROP CONSTRAINT fk_r_p_id_p_jd_fkey1;
-ERROR: cannot drop inherited constraint "fk_r_p_id_p_jd_fkey1" of relation "fk_r"
-ALTER TABLE fk_r_2 DROP CONSTRAINT fk_r_p_id_p_jd_fkey;
-ERROR: cannot drop inherited constraint "fk_r_p_id_p_jd_fkey" of relation "fk_r_2"
-SET client_min_messages TO warning;
-DROP SCHEMA fkpart12 CASCADE;
-RESET client_min_messages;
-RESET search_path;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/cluster.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/cluster.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/cluster.out 2024-11-15 02:50:52.422160960 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/cluster.out 2024-11-15 02:59:17.229115702 +0000
@@ -1,675 +1,2 @@
---
--- CLUSTER
---
-CREATE TABLE clstr_tst_s (rf_a SERIAL PRIMARY KEY,
- b INT);
-CREATE TABLE clstr_tst (a SERIAL PRIMARY KEY,
- b INT,
- c TEXT,
- d TEXT,
- CONSTRAINT clstr_tst_con FOREIGN KEY (b) REFERENCES clstr_tst_s);
-CREATE INDEX clstr_tst_b ON clstr_tst (b);
-CREATE INDEX clstr_tst_c ON clstr_tst (c);
-CREATE INDEX clstr_tst_c_b ON clstr_tst (c,b);
-CREATE INDEX clstr_tst_b_c ON clstr_tst (b,c);
-INSERT INTO clstr_tst_s (b) VALUES (0);
-INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s;
-INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s;
-INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s;
-INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s;
-INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s;
-CREATE TABLE clstr_tst_inh () INHERITS (clstr_tst);
-INSERT INTO clstr_tst (b, c) VALUES (11, 'once');
-INSERT INTO clstr_tst (b, c) VALUES (10, 'diez');
-INSERT INTO clstr_tst (b, c) VALUES (31, 'treinta y uno');
-INSERT INTO clstr_tst (b, c) VALUES (22, 'veintidos');
-INSERT INTO clstr_tst (b, c) VALUES (3, 'tres');
-INSERT INTO clstr_tst (b, c) VALUES (20, 'veinte');
-INSERT INTO clstr_tst (b, c) VALUES (23, 'veintitres');
-INSERT INTO clstr_tst (b, c) VALUES (21, 'veintiuno');
-INSERT INTO clstr_tst (b, c) VALUES (4, 'cuatro');
-INSERT INTO clstr_tst (b, c) VALUES (14, 'catorce');
-INSERT INTO clstr_tst (b, c) VALUES (2, 'dos');
-INSERT INTO clstr_tst (b, c) VALUES (18, 'dieciocho');
-INSERT INTO clstr_tst (b, c) VALUES (27, 'veintisiete');
-INSERT INTO clstr_tst (b, c) VALUES (25, 'veinticinco');
-INSERT INTO clstr_tst (b, c) VALUES (13, 'trece');
-INSERT INTO clstr_tst (b, c) VALUES (28, 'veintiocho');
-INSERT INTO clstr_tst (b, c) VALUES (32, 'treinta y dos');
-INSERT INTO clstr_tst (b, c) VALUES (5, 'cinco');
-INSERT INTO clstr_tst (b, c) VALUES (29, 'veintinueve');
-INSERT INTO clstr_tst (b, c) VALUES (1, 'uno');
-INSERT INTO clstr_tst (b, c) VALUES (24, 'veinticuatro');
-INSERT INTO clstr_tst (b, c) VALUES (30, 'treinta');
-INSERT INTO clstr_tst (b, c) VALUES (12, 'doce');
-INSERT INTO clstr_tst (b, c) VALUES (17, 'diecisiete');
-INSERT INTO clstr_tst (b, c) VALUES (9, 'nueve');
-INSERT INTO clstr_tst (b, c) VALUES (19, 'diecinueve');
-INSERT INTO clstr_tst (b, c) VALUES (26, 'veintiseis');
-INSERT INTO clstr_tst (b, c) VALUES (15, 'quince');
-INSERT INTO clstr_tst (b, c) VALUES (7, 'siete');
-INSERT INTO clstr_tst (b, c) VALUES (16, 'dieciseis');
-INSERT INTO clstr_tst (b, c) VALUES (8, 'ocho');
--- This entry is needed to test that TOASTED values are copied correctly.
-INSERT INTO clstr_tst (b, c, d) VALUES (6, 'seis', repeat('xyzzy', 100000));
-CLUSTER clstr_tst_c ON clstr_tst;
-SELECT a,b,c,substring(d for 30), length(d) from clstr_tst;
- a | b | c | substring | length
-----+----+---------------+--------------------------------+--------
- 10 | 14 | catorce | |
- 18 | 5 | cinco | |
- 9 | 4 | cuatro | |
- 26 | 19 | diecinueve | |
- 12 | 18 | dieciocho | |
- 30 | 16 | dieciseis | |
- 24 | 17 | diecisiete | |
- 2 | 10 | diez | |
- 23 | 12 | doce | |
- 11 | 2 | dos | |
- 25 | 9 | nueve | |
- 31 | 8 | ocho | |
- 1 | 11 | once | |
- 28 | 15 | quince | |
- 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000
- 29 | 7 | siete | |
- 15 | 13 | trece | |
- 22 | 30 | treinta | |
- 17 | 32 | treinta y dos | |
- 3 | 31 | treinta y uno | |
- 5 | 3 | tres | |
- 20 | 1 | uno | |
- 6 | 20 | veinte | |
- 14 | 25 | veinticinco | |
- 21 | 24 | veinticuatro | |
- 4 | 22 | veintidos | |
- 19 | 29 | veintinueve | |
- 16 | 28 | veintiocho | |
- 27 | 26 | veintiseis | |
- 13 | 27 | veintisiete | |
- 7 | 23 | veintitres | |
- 8 | 21 | veintiuno | |
-(32 rows)
-
-SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY a;
- a | b | c | substring | length
-----+----+---------------+--------------------------------+--------
- 1 | 11 | once | |
- 2 | 10 | diez | |
- 3 | 31 | treinta y uno | |
- 4 | 22 | veintidos | |
- 5 | 3 | tres | |
- 6 | 20 | veinte | |
- 7 | 23 | veintitres | |
- 8 | 21 | veintiuno | |
- 9 | 4 | cuatro | |
- 10 | 14 | catorce | |
- 11 | 2 | dos | |
- 12 | 18 | dieciocho | |
- 13 | 27 | veintisiete | |
- 14 | 25 | veinticinco | |
- 15 | 13 | trece | |
- 16 | 28 | veintiocho | |
- 17 | 32 | treinta y dos | |
- 18 | 5 | cinco | |
- 19 | 29 | veintinueve | |
- 20 | 1 | uno | |
- 21 | 24 | veinticuatro | |
- 22 | 30 | treinta | |
- 23 | 12 | doce | |
- 24 | 17 | diecisiete | |
- 25 | 9 | nueve | |
- 26 | 19 | diecinueve | |
- 27 | 26 | veintiseis | |
- 28 | 15 | quince | |
- 29 | 7 | siete | |
- 30 | 16 | dieciseis | |
- 31 | 8 | ocho | |
- 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000
-(32 rows)
-
-SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY b;
- a | b | c | substring | length
-----+----+---------------+--------------------------------+--------
- 20 | 1 | uno | |
- 11 | 2 | dos | |
- 5 | 3 | tres | |
- 9 | 4 | cuatro | |
- 18 | 5 | cinco | |
- 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000
- 29 | 7 | siete | |
- 31 | 8 | ocho | |
- 25 | 9 | nueve | |
- 2 | 10 | diez | |
- 1 | 11 | once | |
- 23 | 12 | doce | |
- 15 | 13 | trece | |
- 10 | 14 | catorce | |
- 28 | 15 | quince | |
- 30 | 16 | dieciseis | |
- 24 | 17 | diecisiete | |
- 12 | 18 | dieciocho | |
- 26 | 19 | diecinueve | |
- 6 | 20 | veinte | |
- 8 | 21 | veintiuno | |
- 4 | 22 | veintidos | |
- 7 | 23 | veintitres | |
- 21 | 24 | veinticuatro | |
- 14 | 25 | veinticinco | |
- 27 | 26 | veintiseis | |
- 13 | 27 | veintisiete | |
- 16 | 28 | veintiocho | |
- 19 | 29 | veintinueve | |
- 22 | 30 | treinta | |
- 3 | 31 | treinta y uno | |
- 17 | 32 | treinta y dos | |
-(32 rows)
-
-SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY c;
- a | b | c | substring | length
-----+----+---------------+--------------------------------+--------
- 10 | 14 | catorce | |
- 18 | 5 | cinco | |
- 9 | 4 | cuatro | |
- 26 | 19 | diecinueve | |
- 12 | 18 | dieciocho | |
- 30 | 16 | dieciseis | |
- 24 | 17 | diecisiete | |
- 2 | 10 | diez | |
- 23 | 12 | doce | |
- 11 | 2 | dos | |
- 25 | 9 | nueve | |
- 31 | 8 | ocho | |
- 1 | 11 | once | |
- 28 | 15 | quince | |
- 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000
- 29 | 7 | siete | |
- 15 | 13 | trece | |
- 22 | 30 | treinta | |
- 17 | 32 | treinta y dos | |
- 3 | 31 | treinta y uno | |
- 5 | 3 | tres | |
- 20 | 1 | uno | |
- 6 | 20 | veinte | |
- 14 | 25 | veinticinco | |
- 21 | 24 | veinticuatro | |
- 4 | 22 | veintidos | |
- 19 | 29 | veintinueve | |
- 16 | 28 | veintiocho | |
- 27 | 26 | veintiseis | |
- 13 | 27 | veintisiete | |
- 7 | 23 | veintitres | |
- 8 | 21 | veintiuno | |
-(32 rows)
-
--- Verify that inheritance link still works
-INSERT INTO clstr_tst_inh VALUES (0, 100, 'in child table');
-SELECT a,b,c,substring(d for 30), length(d) from clstr_tst;
- a | b | c | substring | length
-----+-----+----------------+--------------------------------+--------
- 10 | 14 | catorce | |
- 18 | 5 | cinco | |
- 9 | 4 | cuatro | |
- 26 | 19 | diecinueve | |
- 12 | 18 | dieciocho | |
- 30 | 16 | dieciseis | |
- 24 | 17 | diecisiete | |
- 2 | 10 | diez | |
- 23 | 12 | doce | |
- 11 | 2 | dos | |
- 25 | 9 | nueve | |
- 31 | 8 | ocho | |
- 1 | 11 | once | |
- 28 | 15 | quince | |
- 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000
- 29 | 7 | siete | |
- 15 | 13 | trece | |
- 22 | 30 | treinta | |
- 17 | 32 | treinta y dos | |
- 3 | 31 | treinta y uno | |
- 5 | 3 | tres | |
- 20 | 1 | uno | |
- 6 | 20 | veinte | |
- 14 | 25 | veinticinco | |
- 21 | 24 | veinticuatro | |
- 4 | 22 | veintidos | |
- 19 | 29 | veintinueve | |
- 16 | 28 | veintiocho | |
- 27 | 26 | veintiseis | |
- 13 | 27 | veintisiete | |
- 7 | 23 | veintitres | |
- 8 | 21 | veintiuno | |
- 0 | 100 | in child table | |
-(33 rows)
-
--- Verify that foreign key link still works
-INSERT INTO clstr_tst (b, c) VALUES (1111, 'this should fail');
-ERROR: insert or update on table "clstr_tst" violates foreign key constraint "clstr_tst_con"
-DETAIL: Key (b)=(1111) is not present in table "clstr_tst_s".
-SELECT conname FROM pg_constraint WHERE conrelid = 'clstr_tst'::regclass
-ORDER BY 1;
- conname
-----------------------
- clstr_tst_a_not_null
- clstr_tst_con
- clstr_tst_pkey
-(3 rows)
-
-SELECT relname, relkind,
- EXISTS(SELECT 1 FROM pg_class WHERE oid = c.reltoastrelid) AS hastoast
-FROM pg_class c WHERE relname LIKE 'clstr_tst%' ORDER BY relname;
- relname | relkind | hastoast
-----------------------+---------+----------
- clstr_tst | r | t
- clstr_tst_a_seq | S | f
- clstr_tst_b | i | f
- clstr_tst_b_c | i | f
- clstr_tst_c | i | f
- clstr_tst_c_b | i | f
- clstr_tst_inh | r | t
- clstr_tst_pkey | i | f
- clstr_tst_s | r | f
- clstr_tst_s_pkey | i | f
- clstr_tst_s_rf_a_seq | S | f
-(11 rows)
-
--- Verify that indisclustered is correctly set
-SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2
-WHERE pg_class.oid=indexrelid
- AND indrelid=pg_class_2.oid
- AND pg_class_2.relname = 'clstr_tst'
- AND indisclustered;
- relname
--------------
- clstr_tst_c
-(1 row)
-
--- Try changing indisclustered
-ALTER TABLE clstr_tst CLUSTER ON clstr_tst_b_c;
-SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2
-WHERE pg_class.oid=indexrelid
- AND indrelid=pg_class_2.oid
- AND pg_class_2.relname = 'clstr_tst'
- AND indisclustered;
- relname
----------------
- clstr_tst_b_c
-(1 row)
-
--- Try turning off all clustering
-ALTER TABLE clstr_tst SET WITHOUT CLUSTER;
-SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2
-WHERE pg_class.oid=indexrelid
- AND indrelid=pg_class_2.oid
- AND pg_class_2.relname = 'clstr_tst'
- AND indisclustered;
- relname
----------
-(0 rows)
-
--- Verify that toast tables are clusterable
-CLUSTER pg_toast.pg_toast_826 USING pg_toast_826_index;
--- Verify that clustering all tables does in fact cluster the right ones
-CREATE USER regress_clstr_user;
-CREATE TABLE clstr_1 (a INT PRIMARY KEY);
-CREATE TABLE clstr_2 (a INT PRIMARY KEY);
-CREATE TABLE clstr_3 (a INT PRIMARY KEY);
-ALTER TABLE clstr_1 OWNER TO regress_clstr_user;
-ALTER TABLE clstr_3 OWNER TO regress_clstr_user;
-GRANT SELECT ON clstr_2 TO regress_clstr_user;
-INSERT INTO clstr_1 VALUES (2);
-INSERT INTO clstr_1 VALUES (1);
-INSERT INTO clstr_2 VALUES (2);
-INSERT INTO clstr_2 VALUES (1);
-INSERT INTO clstr_3 VALUES (2);
-INSERT INTO clstr_3 VALUES (1);
--- "CLUSTER " on a table that hasn't been clustered
-CLUSTER clstr_2;
-ERROR: there is no previously clustered index for table "clstr_2"
-CLUSTER clstr_1_pkey ON clstr_1;
-CLUSTER clstr_2 USING clstr_2_pkey;
-SELECT * FROM clstr_1 UNION ALL
- SELECT * FROM clstr_2 UNION ALL
- SELECT * FROM clstr_3;
- a
----
- 1
- 2
- 1
- 2
- 2
- 1
-(6 rows)
-
--- revert to the original state
-DELETE FROM clstr_1;
-DELETE FROM clstr_2;
-DELETE FROM clstr_3;
-INSERT INTO clstr_1 VALUES (2);
-INSERT INTO clstr_1 VALUES (1);
-INSERT INTO clstr_2 VALUES (2);
-INSERT INTO clstr_2 VALUES (1);
-INSERT INTO clstr_3 VALUES (2);
-INSERT INTO clstr_3 VALUES (1);
--- this user can only cluster clstr_1 and clstr_3, but the latter
--- has not been clustered
-SET SESSION AUTHORIZATION regress_clstr_user;
-SET client_min_messages = ERROR; -- order of "skipping" warnings may vary
-CLUSTER;
-RESET client_min_messages;
-SELECT * FROM clstr_1 UNION ALL
- SELECT * FROM clstr_2 UNION ALL
- SELECT * FROM clstr_3;
- a
----
- 1
- 2
- 2
- 1
- 2
- 1
-(6 rows)
-
--- cluster a single table using the indisclustered bit previously set
-DELETE FROM clstr_1;
-INSERT INTO clstr_1 VALUES (2);
-INSERT INTO clstr_1 VALUES (1);
-CLUSTER clstr_1;
-SELECT * FROM clstr_1;
- a
----
- 1
- 2
-(2 rows)
-
--- Test MVCC-safety of cluster. There isn't much we can do to verify the
--- results with a single backend...
-CREATE TABLE clustertest (key int PRIMARY KEY);
-INSERT INTO clustertest VALUES (10);
-INSERT INTO clustertest VALUES (20);
-INSERT INTO clustertest VALUES (30);
-INSERT INTO clustertest VALUES (40);
-INSERT INTO clustertest VALUES (50);
--- Use a transaction so that updates are not committed when CLUSTER sees 'em
-BEGIN;
--- Test update where the old row version is found first in the scan
-UPDATE clustertest SET key = 100 WHERE key = 10;
--- Test update where the new row version is found first in the scan
-UPDATE clustertest SET key = 35 WHERE key = 40;
--- Test longer update chain
-UPDATE clustertest SET key = 60 WHERE key = 50;
-UPDATE clustertest SET key = 70 WHERE key = 60;
-UPDATE clustertest SET key = 80 WHERE key = 70;
-SELECT * FROM clustertest;
- key
------
- 20
- 30
- 100
- 35
- 80
-(5 rows)
-
-CLUSTER clustertest_pkey ON clustertest;
-SELECT * FROM clustertest;
- key
------
- 20
- 30
- 35
- 80
- 100
-(5 rows)
-
-COMMIT;
-SELECT * FROM clustertest;
- key
------
- 20
- 30
- 35
- 80
- 100
-(5 rows)
-
--- check that temp tables can be clustered
-create temp table clstr_temp (col1 int primary key, col2 text);
-insert into clstr_temp values (2, 'two'), (1, 'one');
-cluster clstr_temp using clstr_temp_pkey;
-select * from clstr_temp;
- col1 | col2
-------+------
- 1 | one
- 2 | two
-(2 rows)
-
-drop table clstr_temp;
-RESET SESSION AUTHORIZATION;
--- check clustering an empty table
-DROP TABLE clustertest;
-CREATE TABLE clustertest (f1 int PRIMARY KEY);
-CLUSTER clustertest USING clustertest_pkey;
-CLUSTER clustertest;
--- Check that partitioned tables can be clustered
-CREATE TABLE clstrpart (a int) PARTITION BY RANGE (a);
-CREATE TABLE clstrpart1 PARTITION OF clstrpart FOR VALUES FROM (1) TO (10) PARTITION BY RANGE (a);
-CREATE TABLE clstrpart11 PARTITION OF clstrpart1 FOR VALUES FROM (1) TO (5);
-CREATE TABLE clstrpart12 PARTITION OF clstrpart1 FOR VALUES FROM (5) TO (10) PARTITION BY RANGE (a);
-CREATE TABLE clstrpart2 PARTITION OF clstrpart FOR VALUES FROM (10) TO (20);
-CREATE TABLE clstrpart3 PARTITION OF clstrpart DEFAULT PARTITION BY RANGE (a);
-CREATE TABLE clstrpart33 PARTITION OF clstrpart3 DEFAULT;
-CREATE INDEX clstrpart_only_idx ON ONLY clstrpart (a);
-CLUSTER clstrpart USING clstrpart_only_idx; -- fails
-ERROR: cannot cluster on invalid index "clstrpart_only_idx"
-DROP INDEX clstrpart_only_idx;
-CREATE INDEX clstrpart_idx ON clstrpart (a);
--- Check that clustering sets new relfilenodes:
-CREATE TEMP TABLE old_cluster_info AS SELECT relname, level, relfilenode, relkind FROM pg_partition_tree('clstrpart'::regclass) AS tree JOIN pg_class c ON c.oid=tree.relid ;
-CLUSTER clstrpart USING clstrpart_idx;
-CREATE TEMP TABLE new_cluster_info AS SELECT relname, level, relfilenode, relkind FROM pg_partition_tree('clstrpart'::regclass) AS tree JOIN pg_class c ON c.oid=tree.relid ;
-SELECT relname, old.level, old.relkind, old.relfilenode = new.relfilenode FROM old_cluster_info AS old JOIN new_cluster_info AS new USING (relname) ORDER BY relname COLLATE "C";
- relname | level | relkind | ?column?
--------------+-------+---------+----------
- clstrpart | 0 | p | t
- clstrpart1 | 1 | p | t
- clstrpart11 | 2 | r | f
- clstrpart12 | 2 | p | t
- clstrpart2 | 1 | r | f
- clstrpart3 | 1 | p | t
- clstrpart33 | 2 | r | f
-(7 rows)
-
--- Partitioned indexes aren't and can't be marked un/clustered:
-\d clstrpart
- Partitioned table "public.clstrpart"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Partition key: RANGE (a)
-Indexes:
- "clstrpart_idx" btree (a)
-Number of partitions: 3 (Use \d+ to list them.)
-
-CLUSTER clstrpart;
-ERROR: there is no previously clustered index for table "clstrpart"
-ALTER TABLE clstrpart SET WITHOUT CLUSTER;
-ERROR: cannot mark index clustered in partitioned table
-ALTER TABLE clstrpart CLUSTER ON clstrpart_idx;
-ERROR: cannot mark index clustered in partitioned table
-DROP TABLE clstrpart;
--- Ownership of partitions is checked
-CREATE TABLE ptnowner(i int unique) PARTITION BY LIST (i);
-CREATE INDEX ptnowner_i_idx ON ptnowner(i);
-CREATE TABLE ptnowner1 PARTITION OF ptnowner FOR VALUES IN (1);
-CREATE ROLE regress_ptnowner;
-CREATE TABLE ptnowner2 PARTITION OF ptnowner FOR VALUES IN (2);
-ALTER TABLE ptnowner1 OWNER TO regress_ptnowner;
-SET SESSION AUTHORIZATION regress_ptnowner;
-CLUSTER ptnowner USING ptnowner_i_idx;
-ERROR: permission denied for table ptnowner
-RESET SESSION AUTHORIZATION;
-ALTER TABLE ptnowner OWNER TO regress_ptnowner;
-CREATE TEMP TABLE ptnowner_oldnodes AS
- SELECT oid, relname, relfilenode FROM pg_partition_tree('ptnowner') AS tree
- JOIN pg_class AS c ON c.oid=tree.relid;
-SET SESSION AUTHORIZATION regress_ptnowner;
-CLUSTER ptnowner USING ptnowner_i_idx;
-WARNING: permission denied to cluster "ptnowner2", skipping it
-RESET SESSION AUTHORIZATION;
-SELECT a.relname, a.relfilenode=b.relfilenode FROM pg_class a
- JOIN ptnowner_oldnodes b USING (oid) ORDER BY a.relname COLLATE "C";
- relname | ?column?
------------+----------
- ptnowner | t
- ptnowner1 | f
- ptnowner2 | t
-(3 rows)
-
-DROP TABLE ptnowner;
-DROP ROLE regress_ptnowner;
--- Test CLUSTER with external tuplesorting
-create table clstr_4 as select * from tenk1;
-create index cluster_sort on clstr_4 (hundred, thousand, tenthous);
--- ensure we don't use the index in CLUSTER nor the checking SELECTs
-set enable_indexscan = off;
--- Use external sort:
-set maintenance_work_mem = '1MB';
-cluster clstr_4 using cluster_sort;
-select * from
-(select hundred, lag(hundred) over () as lhundred,
- thousand, lag(thousand) over () as lthousand,
- tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss
-where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous);
- hundred | lhundred | thousand | lthousand | tenthous | ltenthous
----------+----------+----------+-----------+----------+-----------
-(0 rows)
-
-reset enable_indexscan;
-reset maintenance_work_mem;
--- test CLUSTER on expression index
-CREATE TABLE clstr_expression(id serial primary key, a int, b text COLLATE "C");
-INSERT INTO clstr_expression(a, b) SELECT g.i % 42, 'prefix'||g.i FROM generate_series(1, 133) g(i);
-CREATE INDEX clstr_expression_minus_a ON clstr_expression ((-a), b);
-CREATE INDEX clstr_expression_upper_b ON clstr_expression ((upper(b)));
--- verify indexes work before cluster
-BEGIN;
-SET LOCAL enable_seqscan = false;
-EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3';
- QUERY PLAN
----------------------------------------------------------------
- Index Scan using clstr_expression_upper_b on clstr_expression
- Index Cond: (upper(b) = 'PREFIX3'::text)
-(2 rows)
-
-SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3';
- id | a | b
-----+---+---------
- 3 | 3 | prefix3
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b;
- QUERY PLAN
----------------------------------------------------------------
- Index Scan using clstr_expression_minus_a on clstr_expression
- Index Cond: ((- a) = '-3'::integer)
-(2 rows)
-
-SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b;
- id | a | b
------+---+-----------
- 129 | 3 | prefix129
- 3 | 3 | prefix3
- 45 | 3 | prefix45
- 87 | 3 | prefix87
-(4 rows)
-
-COMMIT;
--- and after clustering on clstr_expression_minus_a
-CLUSTER clstr_expression USING clstr_expression_minus_a;
-WITH rows AS
- (SELECT ctid, lag(a) OVER (ORDER BY ctid) AS la, a FROM clstr_expression)
-SELECT * FROM rows WHERE la < a;
- ctid | la | a
-------+----+---
-(0 rows)
-
-BEGIN;
-SET LOCAL enable_seqscan = false;
-EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3';
- QUERY PLAN
----------------------------------------------------------------
- Index Scan using clstr_expression_upper_b on clstr_expression
- Index Cond: (upper(b) = 'PREFIX3'::text)
-(2 rows)
-
-SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3';
- id | a | b
-----+---+---------
- 3 | 3 | prefix3
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b;
- QUERY PLAN
----------------------------------------------------------------
- Index Scan using clstr_expression_minus_a on clstr_expression
- Index Cond: ((- a) = '-3'::integer)
-(2 rows)
-
-SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b;
- id | a | b
------+---+-----------
- 129 | 3 | prefix129
- 3 | 3 | prefix3
- 45 | 3 | prefix45
- 87 | 3 | prefix87
-(4 rows)
-
-COMMIT;
--- and after clustering on clstr_expression_upper_b
-CLUSTER clstr_expression USING clstr_expression_upper_b;
-WITH rows AS
- (SELECT ctid, lag(b) OVER (ORDER BY ctid) AS lb, b FROM clstr_expression)
-SELECT * FROM rows WHERE upper(lb) > upper(b);
- ctid | lb | b
-------+----+---
-(0 rows)
-
-BEGIN;
-SET LOCAL enable_seqscan = false;
-EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3';
- QUERY PLAN
----------------------------------------------------------------
- Index Scan using clstr_expression_upper_b on clstr_expression
- Index Cond: (upper(b) = 'PREFIX3'::text)
-(2 rows)
-
-SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3';
- id | a | b
-----+---+---------
- 3 | 3 | prefix3
-(1 row)
-
-EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b;
- QUERY PLAN
----------------------------------------------------------------
- Index Scan using clstr_expression_minus_a on clstr_expression
- Index Cond: ((- a) = '-3'::integer)
-(2 rows)
-
-SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b;
- id | a | b
------+---+-----------
- 129 | 3 | prefix129
- 3 | 3 | prefix3
- 45 | 3 | prefix45
- 87 | 3 | prefix87
-(4 rows)
-
-COMMIT;
--- clean up
-DROP TABLE clustertest;
-DROP TABLE clstr_1;
-DROP TABLE clstr_2;
-DROP TABLE clstr_3;
-DROP TABLE clstr_4;
-DROP TABLE clstr_expression;
-DROP USER regress_clstr_user;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/dependency.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/dependency.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/dependency.out 2024-11-15 02:50:52.434141211 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/dependency.out 2024-11-15 02:59:17.221115691 +0000
@@ -1,153 +1,2 @@
---
--- DEPENDENCIES
---
-CREATE USER regress_dep_user;
-CREATE USER regress_dep_user2;
-CREATE USER regress_dep_user3;
-CREATE GROUP regress_dep_group;
-CREATE TABLE deptest (f1 serial primary key, f2 text);
-GRANT SELECT ON TABLE deptest TO GROUP regress_dep_group;
-GRANT ALL ON TABLE deptest TO regress_dep_user, regress_dep_user2;
--- can't drop neither because they have privileges somewhere
-DROP USER regress_dep_user;
-ERROR: role "regress_dep_user" cannot be dropped because some objects depend on it
-DETAIL: privileges for table deptest
-DROP GROUP regress_dep_group;
-ERROR: role "regress_dep_group" cannot be dropped because some objects depend on it
-DETAIL: privileges for table deptest
--- if we revoke the privileges we can drop the group
-REVOKE SELECT ON deptest FROM GROUP regress_dep_group;
-DROP GROUP regress_dep_group;
--- can't drop the user if we revoke the privileges partially
-REVOKE SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, MAINTAIN ON deptest FROM regress_dep_user;
-DROP USER regress_dep_user;
-ERROR: role "regress_dep_user" cannot be dropped because some objects depend on it
-DETAIL: privileges for table deptest
--- now we are OK to drop him
-REVOKE TRIGGER ON deptest FROM regress_dep_user;
-DROP USER regress_dep_user;
--- we are OK too if we drop the privileges all at once
-REVOKE ALL ON deptest FROM regress_dep_user2;
-DROP USER regress_dep_user2;
--- can't drop the owner of an object
--- the error message detail here would include a pg_toast_nnn name that
--- is not constant, so suppress it
-\set VERBOSITY terse
-ALTER TABLE deptest OWNER TO regress_dep_user3;
-DROP USER regress_dep_user3;
-ERROR: role "regress_dep_user3" cannot be dropped because some objects depend on it
-\set VERBOSITY default
--- if we drop the object, we can drop the user too
-DROP TABLE deptest;
-DROP USER regress_dep_user3;
--- Test DROP OWNED
-CREATE USER regress_dep_user0;
-CREATE USER regress_dep_user1;
-CREATE USER regress_dep_user2;
-SET SESSION AUTHORIZATION regress_dep_user0;
--- permission denied
-DROP OWNED BY regress_dep_user1;
-ERROR: permission denied to drop objects
-DETAIL: Only roles with privileges of role "regress_dep_user1" may drop objects owned by it.
-DROP OWNED BY regress_dep_user0, regress_dep_user2;
-ERROR: permission denied to drop objects
-DETAIL: Only roles with privileges of role "regress_dep_user2" may drop objects owned by it.
-REASSIGN OWNED BY regress_dep_user0 TO regress_dep_user1;
-ERROR: permission denied to reassign objects
-DETAIL: Only roles with privileges of role "regress_dep_user1" may reassign objects to it.
-REASSIGN OWNED BY regress_dep_user1 TO regress_dep_user0;
-ERROR: permission denied to reassign objects
-DETAIL: Only roles with privileges of role "regress_dep_user1" may reassign objects owned by it.
--- this one is allowed
-DROP OWNED BY regress_dep_user0;
-CREATE TABLE deptest1 (f1 int unique);
-GRANT ALL ON deptest1 TO regress_dep_user1 WITH GRANT OPTION;
-SET SESSION AUTHORIZATION regress_dep_user1;
-CREATE TABLE deptest (a serial primary key, b text);
-GRANT ALL ON deptest1 TO regress_dep_user2;
-RESET SESSION AUTHORIZATION;
-\z deptest1
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+----------+-------+------------------------------------------------------+-------------------+----------
- public | deptest1 | table | regress_dep_user0=arwdDxtm/regress_dep_user0 +| |
- | | | regress_dep_user1=a*r*w*d*D*x*t*m*/regress_dep_user0+| |
- | | | regress_dep_user2=arwdDxtm/regress_dep_user1 | |
-(1 row)
-
-DROP OWNED BY regress_dep_user1;
--- all grants revoked
-\z deptest1
- Access privileges
- Schema | Name | Type | Access privileges | Column privileges | Policies
---------+----------+-------+----------------------------------------------+-------------------+----------
- public | deptest1 | table | regress_dep_user0=arwdDxtm/regress_dep_user0 | |
-(1 row)
-
--- table was dropped
-\d deptest
--- Test REASSIGN OWNED
-GRANT ALL ON deptest1 TO regress_dep_user1;
-GRANT CREATE ON DATABASE regression TO regress_dep_user1;
-SET SESSION AUTHORIZATION regress_dep_user1;
-CREATE SCHEMA deptest;
-CREATE TABLE deptest (a serial primary key, b text);
-ALTER DEFAULT PRIVILEGES FOR ROLE regress_dep_user1 IN SCHEMA deptest
- GRANT ALL ON TABLES TO regress_dep_user2;
-CREATE FUNCTION deptest_func() RETURNS void LANGUAGE plpgsql
- AS $$ BEGIN END; $$;
-CREATE TYPE deptest_enum AS ENUM ('red');
-CREATE TYPE deptest_range AS RANGE (SUBTYPE = int4);
-CREATE TABLE deptest2 (f1 int);
--- make a serial column the hard way
-CREATE SEQUENCE ss1;
-ALTER TABLE deptest2 ALTER f1 SET DEFAULT nextval('ss1');
-ALTER SEQUENCE ss1 OWNED BY deptest2.f1;
--- When reassigning ownership of a composite type, its pg_class entry
--- should match
-CREATE TYPE deptest_t AS (a int);
-SELECT typowner = relowner
-FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t';
- ?column?
-----------
- t
-(1 row)
-
-RESET SESSION AUTHORIZATION;
-REASSIGN OWNED BY regress_dep_user1 TO regress_dep_user2;
-\dt deptest
- List of relations
- Schema | Name | Type | Owner
---------+---------+-------+-------------------
- public | deptest | table | regress_dep_user2
-(1 row)
-
-SELECT typowner = relowner
-FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t';
- ?column?
-----------
- t
-(1 row)
-
--- doesn't work: grant still exists
-DROP USER regress_dep_user1;
-ERROR: role "regress_dep_user1" cannot be dropped because some objects depend on it
-DETAIL: privileges for database regression
-privileges for table deptest1
-owner of default privileges on new relations belonging to role regress_dep_user1 in schema deptest
-DROP OWNED BY regress_dep_user1;
-DROP USER regress_dep_user1;
-DROP USER regress_dep_user2;
-ERROR: role "regress_dep_user2" cannot be dropped because some objects depend on it
-DETAIL: owner of schema deptest
-owner of sequence deptest_a_seq
-owner of table deptest
-owner of function deptest_func()
-owner of type deptest_enum
-owner of type deptest_range
-owner of table deptest2
-owner of sequence ss1
-owner of type deptest_t
-DROP OWNED BY regress_dep_user2, regress_dep_user0;
-DROP USER regress_dep_user2;
-DROP USER regress_dep_user0;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/guc.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/guc.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/guc.out 2024-11-15 02:50:52.446121462 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/guc.out 2024-11-15 02:59:17.229115702 +0000
@@ -1,915 +1,2 @@
--- pg_regress should ensure that this default value applies; however
--- we can't rely on any specific default value of vacuum_cost_delay
-SHOW datestyle;
- DateStyle
----------------
- Postgres, MDY
-(1 row)
-
--- Check output style of CamelCase enum options
-SET intervalstyle to 'asd';
-ERROR: invalid value for parameter "IntervalStyle": "asd"
-HINT: Available values: postgres, postgres_verbose, sql_standard, iso_8601.
--- SET to some nondefault value
-SET vacuum_cost_delay TO 40;
-SET datestyle = 'ISO, YMD';
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
--- SET LOCAL has no effect outside of a transaction
-SET LOCAL vacuum_cost_delay TO 50;
-WARNING: SET LOCAL can only be used in transaction blocks
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SET LOCAL datestyle = 'SQL';
-WARNING: SET LOCAL can only be used in transaction blocks
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
--- SET LOCAL within a transaction that commits
-BEGIN;
-SET LOCAL vacuum_cost_delay TO 50;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 50ms
-(1 row)
-
-SET LOCAL datestyle = 'SQL';
-SHOW datestyle;
- DateStyle
------------
- SQL, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
--------------------------
- 08/13/2006 12:34:56 PDT
-(1 row)
-
-COMMIT;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
--- SET should be reverted after ROLLBACK
-BEGIN;
-SET vacuum_cost_delay TO 60;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 60ms
-(1 row)
-
-SET datestyle = 'German';
-SHOW datestyle;
- DateStyle
--------------
- German, DMY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
--------------------------
- 13.08.2006 12:34:56 PDT
-(1 row)
-
-ROLLBACK;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
--- Some tests with subtransactions
-BEGIN;
-SET vacuum_cost_delay TO 70;
-SET datestyle = 'MDY';
-SHOW datestyle;
- DateStyle
------------
- ISO, MDY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
-SAVEPOINT first_sp;
-SET vacuum_cost_delay TO 80.1;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 80100us
-(1 row)
-
-SET datestyle = 'German, DMY';
-SHOW datestyle;
- DateStyle
--------------
- German, DMY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
--------------------------
- 13.08.2006 12:34:56 PDT
-(1 row)
-
-ROLLBACK TO first_sp;
-SHOW datestyle;
- DateStyle
------------
- ISO, MDY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
-SAVEPOINT second_sp;
-SET vacuum_cost_delay TO '900us';
-SET datestyle = 'SQL, YMD';
-SHOW datestyle;
- DateStyle
------------
- SQL, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
--------------------------
- 08/13/2006 12:34:56 PDT
-(1 row)
-
-SAVEPOINT third_sp;
-SET vacuum_cost_delay TO 100;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 100ms
-(1 row)
-
-SET datestyle = 'Postgres, MDY';
-SHOW datestyle;
- DateStyle
----------------
- Postgres, MDY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------------
- Sun Aug 13 12:34:56 2006 PDT
-(1 row)
-
-ROLLBACK TO third_sp;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 900us
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- SQL, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
--------------------------
- 08/13/2006 12:34:56 PDT
-(1 row)
-
-ROLLBACK TO second_sp;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 70ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, MDY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
-ROLLBACK;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
--- SET LOCAL with Savepoints
-BEGIN;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
-SAVEPOINT sp;
-SET LOCAL vacuum_cost_delay TO 30;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 30ms
-(1 row)
-
-SET LOCAL datestyle = 'Postgres, MDY';
-SHOW datestyle;
- DateStyle
----------------
- Postgres, MDY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------------
- Sun Aug 13 12:34:56 2006 PDT
-(1 row)
-
-ROLLBACK TO sp;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
-ROLLBACK;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
--- SET LOCAL persists through RELEASE (which was not true in 8.0-8.2)
-BEGIN;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
-SAVEPOINT sp;
-SET LOCAL vacuum_cost_delay TO 30;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 30ms
-(1 row)
-
-SET LOCAL datestyle = 'Postgres, MDY';
-SHOW datestyle;
- DateStyle
----------------
- Postgres, MDY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------------
- Sun Aug 13 12:34:56 2006 PDT
-(1 row)
-
-RELEASE SAVEPOINT sp;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 30ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
----------------
- Postgres, MDY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------------
- Sun Aug 13 12:34:56 2006 PDT
-(1 row)
-
-ROLLBACK;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
--- SET followed by SET LOCAL
-BEGIN;
-SET vacuum_cost_delay TO 40;
-SET LOCAL vacuum_cost_delay TO 50;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 50ms
-(1 row)
-
-SET datestyle = 'ISO, DMY';
-SET LOCAL datestyle = 'Postgres, MDY';
-SHOW datestyle;
- DateStyle
----------------
- Postgres, MDY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------------
- Sun Aug 13 12:34:56 2006 PDT
-(1 row)
-
-COMMIT;
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 40ms
-(1 row)
-
-SHOW datestyle;
- DateStyle
------------
- ISO, DMY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
---
--- Test RESET. We use datestyle because the reset value is forced by
--- pg_regress, so it doesn't depend on the installation's configuration.
---
-SET datestyle = iso, ymd;
-SHOW datestyle;
- DateStyle
------------
- ISO, YMD
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------
- 2006-08-13 12:34:56-07
-(1 row)
-
-RESET datestyle;
-SHOW datestyle;
- DateStyle
----------------
- Postgres, MDY
-(1 row)
-
-SELECT '2006-08-13 12:34:56'::timestamptz;
- timestamptz
-------------------------------
- Sun Aug 13 12:34:56 2006 PDT
-(1 row)
-
--- Test some simple error cases
-SET seq_page_cost TO 'NaN';
-ERROR: invalid value for parameter "seq_page_cost": "NaN"
-SET vacuum_cost_delay TO '10s';
-ERROR: 10000 ms is outside the valid range for parameter "vacuum_cost_delay" (0 ms .. 100 ms)
-SET no_such_variable TO 42;
-ERROR: unrecognized configuration parameter "no_such_variable"
--- Test "custom" GUCs created on the fly (which aren't really an
--- intended feature, but many people use them).
-SHOW custom.my_guc; -- error, not known yet
-ERROR: unrecognized configuration parameter "custom.my_guc"
-SET custom.my_guc = 42;
-SHOW custom.my_guc;
- custom.my_guc
----------------
- 42
-(1 row)
-
-RESET custom.my_guc; -- this makes it go to empty, not become unknown again
-SHOW custom.my_guc;
- custom.my_guc
----------------
-
-(1 row)
-
-SET custom.my.qualified.guc = 'foo';
-SHOW custom.my.qualified.guc;
- custom.my.qualified.guc
--------------------------
- foo
-(1 row)
-
-SET custom."bad-guc" = 42; -- disallowed because -c cannot set this name
-ERROR: invalid configuration parameter name "custom.bad-guc"
-DETAIL: Custom parameter names must be two or more simple identifiers separated by dots.
-SHOW custom."bad-guc";
-ERROR: unrecognized configuration parameter "custom.bad-guc"
-SET special."weird name" = 'foo'; -- could be allowed, but we choose not to
-ERROR: invalid configuration parameter name "special.weird name"
-DETAIL: Custom parameter names must be two or more simple identifiers separated by dots.
-SHOW special."weird name";
-ERROR: unrecognized configuration parameter "special.weird name"
--- Check what happens when you try to set a "custom" GUC within the
--- namespace of an extension.
-SET plpgsql.extra_foo_warnings = true; -- allowed if plpgsql is not loaded yet
-LOAD 'plpgsql'; -- this will throw a warning and delete the variable
-WARNING: invalid configuration parameter name "plpgsql.extra_foo_warnings", removing it
-DETAIL: "plpgsql" is now a reserved prefix.
-SET plpgsql.extra_foo_warnings = true; -- now, it's an error
-ERROR: invalid configuration parameter name "plpgsql.extra_foo_warnings"
-DETAIL: "plpgsql" is a reserved prefix.
-SHOW plpgsql.extra_foo_warnings;
-ERROR: unrecognized configuration parameter "plpgsql.extra_foo_warnings"
---
--- Test DISCARD TEMP
---
-CREATE TEMP TABLE reset_test ( data text ) ON COMMIT DELETE ROWS;
-SELECT relname FROM pg_class WHERE relname = 'reset_test';
- relname
-------------
- reset_test
-(1 row)
-
-DISCARD TEMP;
-SELECT relname FROM pg_class WHERE relname = 'reset_test';
- relname
----------
-(0 rows)
-
---
--- Test DISCARD ALL
---
--- do changes
-DECLARE foo CURSOR WITH HOLD FOR SELECT 1;
-PREPARE foo AS SELECT 1;
-LISTEN foo_event;
-SET vacuum_cost_delay = 13;
-CREATE TEMP TABLE tmp_foo (data text) ON COMMIT DELETE ROWS;
-CREATE ROLE regress_guc_user;
-SET SESSION AUTHORIZATION regress_guc_user;
--- look changes
-SELECT pg_listening_channels();
- pg_listening_channels
------------------------
- foo_event
-(1 row)
-
-SELECT name FROM pg_prepared_statements;
- name
-------
- foo
-(1 row)
-
-SELECT name FROM pg_cursors;
- name
-------
- foo
-(1 row)
-
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 13ms
-(1 row)
-
-SELECT relname from pg_class where relname = 'tmp_foo';
- relname
----------
- tmp_foo
-(1 row)
-
-SELECT current_user = 'regress_guc_user';
- ?column?
-----------
- t
-(1 row)
-
--- discard everything
-DISCARD ALL;
--- look again
-SELECT pg_listening_channels();
- pg_listening_channels
------------------------
-(0 rows)
-
-SELECT name FROM pg_prepared_statements;
- name
-------
-(0 rows)
-
-SELECT name FROM pg_cursors;
- name
-------
-(0 rows)
-
-SHOW vacuum_cost_delay;
- vacuum_cost_delay
--------------------
- 0
-(1 row)
-
-SELECT relname from pg_class where relname = 'tmp_foo';
- relname
----------
-(0 rows)
-
-SELECT current_user = 'regress_guc_user';
- ?column?
-----------
- f
-(1 row)
-
-DROP ROLE regress_guc_user;
---
--- search_path should react to changes in pg_namespace
---
-set search_path = foo, public, not_there_initially;
-select current_schemas(false);
- current_schemas
------------------
- {public}
-(1 row)
-
-create schema not_there_initially;
-select current_schemas(false);
- current_schemas
-------------------------------
- {public,not_there_initially}
-(1 row)
-
-drop schema not_there_initially;
-select current_schemas(false);
- current_schemas
------------------
- {public}
-(1 row)
-
-reset search_path;
---
--- Tests for function-local GUC settings
---
-set work_mem = '3MB';
-create function report_guc(text) returns text as
-$$ select current_setting($1) $$ language sql
-set work_mem = '1MB';
-select report_guc('work_mem'), current_setting('work_mem');
- report_guc | current_setting
-------------+-----------------
- 1MB | 3MB
-(1 row)
-
-alter function report_guc(text) set work_mem = '2MB';
-select report_guc('work_mem'), current_setting('work_mem');
- report_guc | current_setting
-------------+-----------------
- 2MB | 3MB
-(1 row)
-
-alter function report_guc(text) reset all;
-select report_guc('work_mem'), current_setting('work_mem');
- report_guc | current_setting
-------------+-----------------
- 3MB | 3MB
-(1 row)
-
--- SET LOCAL is restricted by a function SET option
-create or replace function myfunc(int) returns text as $$
-begin
- set local work_mem = '2MB';
- return current_setting('work_mem');
-end $$
-language plpgsql
-set work_mem = '1MB';
-select myfunc(0), current_setting('work_mem');
- myfunc | current_setting
---------+-----------------
- 2MB | 3MB
-(1 row)
-
-alter function myfunc(int) reset all;
-select myfunc(0), current_setting('work_mem');
- myfunc | current_setting
---------+-----------------
- 2MB | 2MB
-(1 row)
-
-set work_mem = '3MB';
--- but SET isn't
-create or replace function myfunc(int) returns text as $$
-begin
- set work_mem = '2MB';
- return current_setting('work_mem');
-end $$
-language plpgsql
-set work_mem = '1MB';
-select myfunc(0), current_setting('work_mem');
- myfunc | current_setting
---------+-----------------
- 2MB | 2MB
-(1 row)
-
-set work_mem = '3MB';
--- it should roll back on error, though
-create or replace function myfunc(int) returns text as $$
-begin
- set work_mem = '2MB';
- perform 1/$1;
- return current_setting('work_mem');
-end $$
-language plpgsql
-set work_mem = '1MB';
-select myfunc(0);
-ERROR: division by zero
-CONTEXT: SQL statement "SELECT 1/$1"
-PL/pgSQL function myfunc(integer) line 4 at PERFORM
-select current_setting('work_mem');
- current_setting
------------------
- 3MB
-(1 row)
-
-select myfunc(1), current_setting('work_mem');
- myfunc | current_setting
---------+-----------------
- 2MB | 2MB
-(1 row)
-
--- check current_setting()'s behavior with invalid setting name
-select current_setting('nosuch.setting'); -- FAIL
-ERROR: unrecognized configuration parameter "nosuch.setting"
-select current_setting('nosuch.setting', false); -- FAIL
-ERROR: unrecognized configuration parameter "nosuch.setting"
-select current_setting('nosuch.setting', true) is null;
- ?column?
-----------
- t
-(1 row)
-
--- after this, all three cases should yield 'nada'
-set nosuch.setting = 'nada';
-select current_setting('nosuch.setting');
- current_setting
------------------
- nada
-(1 row)
-
-select current_setting('nosuch.setting', false);
- current_setting
------------------
- nada
-(1 row)
-
-select current_setting('nosuch.setting', true);
- current_setting
------------------
- nada
-(1 row)
-
--- Normally, CREATE FUNCTION should complain about invalid values in
--- function SET options; but not if check_function_bodies is off,
--- because that creates ordering hazards for pg_dump
-create function func_with_bad_set() returns int as $$ select 1 $$
-language sql
-set default_text_search_config = no_such_config;
-NOTICE: text search configuration "no_such_config" does not exist
-ERROR: invalid value for parameter "default_text_search_config": "no_such_config"
-set check_function_bodies = off;
-create function func_with_bad_set() returns int as $$ select 1 $$
-language sql
-set default_text_search_config = no_such_config;
-NOTICE: text search configuration "no_such_config" does not exist
-select func_with_bad_set();
-ERROR: invalid value for parameter "default_text_search_config": "no_such_config"
-reset check_function_bodies;
-set default_with_oids to f;
--- Should not allow to set it to true.
-set default_with_oids to t;
-ERROR: tables declared WITH OIDS are not supported
--- Test that disabling track_activities disables query ID reporting in
--- pg_stat_activity.
-SET compute_query_id = on;
-SET track_activities = on;
-SELECT query_id IS NOT NULL AS qid_set FROM pg_stat_activity
- WHERE pid = pg_backend_pid();
- qid_set
----------
- t
-(1 row)
-
-SET track_activities = off;
-SELECT query_id IS NOT NULL AS qid_set FROM pg_stat_activity
- WHERE pid = pg_backend_pid();
- qid_set
----------
- f
-(1 row)
-
-RESET track_activities;
-RESET compute_query_id;
--- Test GUC categories and flag patterns
-SELECT pg_settings_get_flags(NULL);
- pg_settings_get_flags
------------------------
-
-(1 row)
-
-SELECT pg_settings_get_flags('does_not_exist');
- pg_settings_get_flags
------------------------
-
-(1 row)
-
-CREATE TABLE tab_settings_flags AS SELECT name, category,
- 'EXPLAIN' = ANY(flags) AS explain,
- 'NO_RESET' = ANY(flags) AS no_reset,
- 'NO_RESET_ALL' = ANY(flags) AS no_reset_all,
- 'NOT_IN_SAMPLE' = ANY(flags) AS not_in_sample,
- 'RUNTIME_COMPUTED' = ANY(flags) AS runtime_computed
- FROM pg_show_all_settings() AS psas,
- pg_settings_get_flags(psas.name) AS flags;
--- Developer GUCs should be flagged with GUC_NOT_IN_SAMPLE:
-SELECT name FROM tab_settings_flags
- WHERE category = 'Developer Options' AND NOT not_in_sample
- ORDER BY 1;
- name
-------
-(0 rows)
-
--- Most query-tuning GUCs are flagged as valid for EXPLAIN.
--- default_statistics_target is an exception.
-SELECT name FROM tab_settings_flags
- WHERE category ~ '^Query Tuning' AND NOT explain
- ORDER BY 1;
- name
----------------------------
- default_statistics_target
-(1 row)
-
--- Runtime-computed GUCs should be part of the preset category.
-SELECT name FROM tab_settings_flags
- WHERE NOT category = 'Preset Options' AND runtime_computed
- ORDER BY 1;
- name
-------
-(0 rows)
-
--- Preset GUCs are flagged as NOT_IN_SAMPLE.
-SELECT name FROM tab_settings_flags
- WHERE category = 'Preset Options' AND NOT not_in_sample
- ORDER BY 1;
- name
-------
-(0 rows)
-
--- NO_RESET implies NO_RESET_ALL.
-SELECT name FROM tab_settings_flags
- WHERE no_reset AND NOT no_reset_all
- ORDER BY 1;
- name
-------
-(0 rows)
-
-DROP TABLE tab_settings_flags;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/bitmapops.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/bitmapops.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/bitmapops.out 2024-11-15 02:50:52.418167543 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/bitmapops.out 2024-11-15 02:59:17.221115691 +0000
@@ -1,38 +1,2 @@
--- Test bitmap AND and OR
--- Generate enough data that we can test the lossy bitmaps.
--- There's 55 tuples per page in the table. 53 is just
--- below 55, so that an index scan with qual a = constant
--- will return at least one hit per page. 59 is just above
--- 55, so that an index scan with qual b = constant will return
--- hits on most but not all pages. 53 and 59 are prime, so that
--- there's a maximum number of a,b combinations in the table.
--- That allows us to test all the different combinations of
--- lossy and non-lossy pages with the minimum amount of data
-CREATE TABLE bmscantest (a int, b int, t text);
-INSERT INTO bmscantest
- SELECT (r%53), (r%59), 'foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo'
- FROM generate_series(1,70000) r;
-CREATE INDEX i_bmtest_a ON bmscantest(a);
-CREATE INDEX i_bmtest_b ON bmscantest(b);
--- We want to use bitmapscans. With default settings, the planner currently
--- chooses a bitmap scan for the queries below anyway, but let's make sure.
-set enable_indexscan=false;
-set enable_seqscan=false;
--- Lower work_mem to trigger use of lossy bitmaps
-set work_mem = 64;
--- Test bitmap-and.
-SELECT count(*) FROM bmscantest WHERE a = 1 AND b = 1;
- count
--------
- 23
-(1 row)
-
--- Test bitmap-or.
-SELECT count(*) FROM bmscantest WHERE a = 1 OR b = 1;
- count
--------
- 2485
-(1 row)
-
--- clean up
-DROP TABLE bmscantest;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/combocid.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/combocid.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/combocid.out 2024-11-15 02:50:52.422160960 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/combocid.out 2024-11-15 02:59:17.213115680 +0000
@@ -1,169 +1,2 @@
---
--- Tests for some likely failure cases with combo cmin/cmax mechanism
---
-CREATE TEMP TABLE combocidtest (foobar int);
-BEGIN;
--- a few dummy ops to push up the CommandId counter
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest VALUES (1);
-INSERT INTO combocidtest VALUES (2);
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 10 | 1
- (0,2) | 11 | 2
-(2 rows)
-
-SAVEPOINT s1;
-UPDATE combocidtest SET foobar = foobar + 10;
--- here we should see only updated tuples
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,3) | 12 | 11
- (0,4) | 12 | 12
-(2 rows)
-
-ROLLBACK TO s1;
--- now we should see old tuples, but with combo CIDs starting at 0
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 0 | 1
- (0,2) | 1 | 2
-(2 rows)
-
-COMMIT;
--- combo data is not there anymore, but should still see tuples
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 0 | 1
- (0,2) | 1 | 2
-(2 rows)
-
--- Test combo CIDs with portals
-BEGIN;
-INSERT INTO combocidtest VALUES (333);
-DECLARE c CURSOR FOR SELECT ctid,cmin,* FROM combocidtest;
-DELETE FROM combocidtest;
-FETCH ALL FROM c;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 1 | 1
- (0,2) | 1 | 2
- (0,5) | 0 | 333
-(3 rows)
-
-ROLLBACK;
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 1 | 1
- (0,2) | 1 | 2
-(2 rows)
-
--- check behavior with locked tuples
-BEGIN;
--- a few dummy ops to push up the CommandId counter
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest SELECT 1 LIMIT 0;
-INSERT INTO combocidtest VALUES (444);
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 1 | 1
- (0,2) | 1 | 2
- (0,6) | 10 | 444
-(3 rows)
-
-SAVEPOINT s1;
--- this doesn't affect cmin
-SELECT ctid,cmin,* FROM combocidtest FOR UPDATE;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 1 | 1
- (0,2) | 1 | 2
- (0,6) | 10 | 444
-(3 rows)
-
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 1 | 1
- (0,2) | 1 | 2
- (0,6) | 10 | 444
-(3 rows)
-
--- but this does
-UPDATE combocidtest SET foobar = foobar + 10;
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,7) | 12 | 11
- (0,8) | 12 | 12
- (0,9) | 12 | 454
-(3 rows)
-
-ROLLBACK TO s1;
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 12 | 1
- (0,2) | 12 | 2
- (0,6) | 0 | 444
-(3 rows)
-
-COMMIT;
-SELECT ctid,cmin,* FROM combocidtest;
- ctid | cmin | foobar
--------+------+--------
- (0,1) | 12 | 1
- (0,2) | 12 | 2
- (0,6) | 0 | 444
-(3 rows)
-
--- test for bug reported in
--- CABRT9RC81YUf1=jsmWopcKJEro=VoeG2ou6sPwyOUTx_qteRsg@mail.gmail.com
-CREATE TABLE IF NOT EXISTS testcase(
- id int PRIMARY KEY,
- balance numeric
-);
-INSERT INTO testcase VALUES (1, 0);
-BEGIN;
-SELECT * FROM testcase WHERE testcase.id = 1 FOR UPDATE;
- id | balance
-----+---------
- 1 | 0
-(1 row)
-
-UPDATE testcase SET balance = balance + 400 WHERE id=1;
-SAVEPOINT subxact;
-UPDATE testcase SET balance = balance - 100 WHERE id=1;
-ROLLBACK TO SAVEPOINT subxact;
--- should return one tuple
-SELECT * FROM testcase WHERE id = 1 FOR UPDATE;
- id | balance
-----+---------
- 1 | 400
-(1 row)
-
-ROLLBACK;
-DROP TABLE testcase;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tsearch.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/tsearch.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/tsearch.out 2024-11-15 02:50:52.514009551 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/tsearch.out 2024-11-15 02:59:17.233115707 +0000
@@ -1,3014 +1,2 @@
--- directory paths are passed to us in environment variables
-\getenv abs_srcdir PG_ABS_SRCDIR
---
--- Sanity checks for text search catalogs
---
--- NB: we assume the oidjoins test will have caught any dangling links,
--- that is OID or REGPROC fields that are not zero and do not match some
--- row in the linked-to table. However, if we want to enforce that a link
--- field can't be 0, we have to check it here.
--- Find unexpected zero link entries
-SELECT oid, prsname
-FROM pg_ts_parser
-WHERE prsnamespace = 0 OR prsstart = 0 OR prstoken = 0 OR prsend = 0 OR
- -- prsheadline is optional
- prslextype = 0;
- oid | prsname
------+---------
-(0 rows)
-
-SELECT oid, dictname
-FROM pg_ts_dict
-WHERE dictnamespace = 0 OR dictowner = 0 OR dicttemplate = 0;
- oid | dictname
------+----------
-(0 rows)
-
-SELECT oid, tmplname
-FROM pg_ts_template
-WHERE tmplnamespace = 0 OR tmpllexize = 0; -- tmplinit is optional
- oid | tmplname
------+----------
-(0 rows)
-
-SELECT oid, cfgname
-FROM pg_ts_config
-WHERE cfgnamespace = 0 OR cfgowner = 0 OR cfgparser = 0;
- oid | cfgname
------+---------
-(0 rows)
-
-SELECT mapcfg, maptokentype, mapseqno
-FROM pg_ts_config_map
-WHERE mapcfg = 0 OR mapdict = 0;
- mapcfg | maptokentype | mapseqno
---------+--------------+----------
-(0 rows)
-
--- Look for pg_ts_config_map entries that aren't one of parser's token types
-SELECT * FROM
- ( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid
- FROM pg_ts_config ) AS tt
-RIGHT JOIN pg_ts_config_map AS m
- ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype)
-WHERE
- tt.cfgid IS NULL OR tt.tokid IS NULL;
- cfgid | tokid | mapcfg | maptokentype | mapseqno | mapdict
--------+-------+--------+--------------+----------+---------
-(0 rows)
-
--- Load some test data
-CREATE TABLE test_tsvector(
- t text,
- a tsvector
-);
-\set filename :abs_srcdir '/data/tsearch.data'
-COPY test_tsvector FROM :'filename';
-ANALYZE test_tsvector;
--- test basic text search behavior without indexes, then with
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh';
- count
--------
- 17
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt';
- count
--------
- 98
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)';
- count
--------
- 23
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)';
- count
--------
- 39
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*';
- count
--------
- 494
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}');
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh';
- count
--------
- 432
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A';
- count
--------
- 56
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D';
- count
--------
- 58
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A';
- count
--------
- 452
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
- count
--------
- 450
-(1 row)
-
-create index wowidx on test_tsvector using gist (a);
-SET enable_seqscan=OFF;
-SET enable_indexscan=ON;
-SET enable_bitmapscan=OFF;
-explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- QUERY PLAN
--------------------------------------------------------
- Aggregate
- -> Index Scan using wowidx on test_tsvector
- Index Cond: (a @@ '''wr'' | ''qh'''::tsquery)
-(3 rows)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh';
- count
--------
- 17
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt';
- count
--------
- 98
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)';
- count
--------
- 23
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)';
- count
--------
- 39
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*';
- count
--------
- 494
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}');
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh';
- count
--------
- 432
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A';
- count
--------
- 56
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D';
- count
--------
- 58
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A';
- count
--------
- 452
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
- count
--------
- 450
-(1 row)
-
-SET enable_indexscan=OFF;
-SET enable_bitmapscan=ON;
-explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- QUERY PLAN
--------------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on test_tsvector
- Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery)
- -> Bitmap Index Scan on wowidx
- Index Cond: (a @@ '''wr'' | ''qh'''::tsquery)
-(5 rows)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh';
- count
--------
- 17
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt';
- count
--------
- 98
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)';
- count
--------
- 23
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)';
- count
--------
- 39
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*';
- count
--------
- 494
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}');
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh';
- count
--------
- 432
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A';
- count
--------
- 56
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D';
- count
--------
- 58
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A';
- count
--------
- 452
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
- count
--------
- 450
-(1 row)
-
--- Test siglen parameter of GiST tsvector_ops
-CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(foo=1));
-ERROR: unrecognized parameter "foo"
-CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=0));
-ERROR: value 0 out of bounds for option "siglen"
-DETAIL: Valid values are between "1" and "2024".
-CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=2048));
-ERROR: value 2048 out of bounds for option "siglen"
-DETAIL: Valid values are between "1" and "2024".
-CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100,foo='bar'));
-ERROR: unrecognized parameter "foo"
-CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100, siglen = 200));
-ERROR: parameter "siglen" specified more than once
-CREATE INDEX wowidx2 ON test_tsvector USING gist (a tsvector_ops(siglen=1));
-\d test_tsvector
- Table "public.test_tsvector"
- Column | Type | Collation | Nullable | Default
---------+----------+-----------+----------+---------
- t | text | | |
- a | tsvector | | |
-Indexes:
- "wowidx" gist (a)
- "wowidx2" gist (a tsvector_ops (siglen='1'))
-
-DROP INDEX wowidx;
-EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- QUERY PLAN
--------------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on test_tsvector
- Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery)
- -> Bitmap Index Scan on wowidx2
- Index Cond: (a @@ '''wr'' | ''qh'''::tsquery)
-(5 rows)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh';
- count
--------
- 17
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt';
- count
--------
- 98
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)';
- count
--------
- 23
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)';
- count
--------
- 39
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*';
- count
--------
- 494
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}');
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh';
- count
--------
- 432
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A';
- count
--------
- 56
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D';
- count
--------
- 58
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A';
- count
--------
- 452
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
- count
--------
- 450
-(1 row)
-
-DROP INDEX wowidx2;
-CREATE INDEX wowidx ON test_tsvector USING gist (a tsvector_ops(siglen=484));
-\d test_tsvector
- Table "public.test_tsvector"
- Column | Type | Collation | Nullable | Default
---------+----------+-----------+----------+---------
- t | text | | |
- a | tsvector | | |
-Indexes:
- "wowidx" gist (a tsvector_ops (siglen='484'))
-
-EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- QUERY PLAN
--------------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on test_tsvector
- Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery)
- -> Bitmap Index Scan on wowidx
- Index Cond: (a @@ '''wr'' | ''qh'''::tsquery)
-(5 rows)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh';
- count
--------
- 17
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt';
- count
--------
- 98
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)';
- count
--------
- 23
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)';
- count
--------
- 39
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*';
- count
--------
- 494
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}');
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh';
- count
--------
- 432
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A';
- count
--------
- 56
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D';
- count
--------
- 58
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A';
- count
--------
- 452
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
- count
--------
- 450
-(1 row)
-
-RESET enable_seqscan;
-RESET enable_indexscan;
-RESET enable_bitmapscan;
-DROP INDEX wowidx;
-CREATE INDEX wowidx ON test_tsvector USING gin (a);
-SET enable_seqscan=OFF;
--- GIN only supports bitmapscan, so no need to test plain indexscan
-explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- QUERY PLAN
--------------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on test_tsvector
- Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery)
- -> Bitmap Index Scan on wowidx
- Index Cond: (a @@ '''wr'' | ''qh'''::tsquery)
-(5 rows)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh';
- count
--------
- 17
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt';
- count
--------
- 98
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)';
- count
--------
- 23
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)';
- count
--------
- 39
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*';
- count
--------
- 494
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}');
- count
--------
- 158
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh';
- count
--------
- 432
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt';
- count
--------
- 6
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)';
- count
--------
- 508
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)';
- count
--------
- 507
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A';
- count
--------
- 56
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D';
- count
--------
- 58
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A';
- count
--------
- 452
-(1 row)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D';
- count
--------
- 450
-(1 row)
-
--- Test optimization of non-empty GIN_SEARCH_MODE_ALL queries
-EXPLAIN (COSTS OFF)
-SELECT count(*) FROM test_tsvector WHERE a @@ '!qh';
- QUERY PLAN
------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on test_tsvector
- Recheck Cond: (a @@ '!''qh'''::tsquery)
- -> Bitmap Index Scan on wowidx
- Index Cond: (a @@ '!''qh'''::tsquery)
-(5 rows)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ '!qh';
- count
--------
- 410
-(1 row)
-
-EXPLAIN (COSTS OFF)
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr' AND a @@ '!qh';
- QUERY PLAN
-------------------------------------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on test_tsvector
- Recheck Cond: ((a @@ '''wr'''::tsquery) AND (a @@ '!''qh'''::tsquery))
- -> Bitmap Index Scan on wowidx
- Index Cond: ((a @@ '''wr'''::tsquery) AND (a @@ '!''qh'''::tsquery))
-(5 rows)
-
-SELECT count(*) FROM test_tsvector WHERE a @@ 'wr' AND a @@ '!qh';
- count
--------
- 60
-(1 row)
-
-RESET enable_seqscan;
-INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH');
-SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10;
- word | ndoc | nentry
-------+------+--------
- qq | 108 | 108
- qt | 102 | 102
- qe | 100 | 101
- qh | 98 | 99
- qw | 98 | 98
- qa | 97 | 97
- ql | 94 | 94
- qs | 94 | 94
- qr | 92 | 93
- qi | 92 | 92
-(10 rows)
-
-SELECT * FROM ts_stat('SELECT a FROM test_tsvector', 'AB') ORDER BY ndoc DESC, nentry DESC, word;
- word | ndoc | nentry
-------+------+--------
- DFG | 1 | 2
-(1 row)
-
---dictionaries and to_tsvector
-SELECT ts_lexize('english_stem', 'skies');
- ts_lexize
------------
- {sky}
-(1 row)
-
-SELECT ts_lexize('english_stem', 'identity');
- ts_lexize
------------
- {ident}
-(1 row)
-
-SELECT * FROM ts_token_type('default');
- tokid | alias | description
--------+-----------------+------------------------------------------
- 1 | asciiword | Word, all ASCII
- 2 | word | Word, all letters
- 3 | numword | Word, letters and digits
- 4 | email | Email address
- 5 | url | URL
- 6 | host | Host
- 7 | sfloat | Scientific notation
- 8 | version | Version number
- 9 | hword_numpart | Hyphenated word part, letters and digits
- 10 | hword_part | Hyphenated word part, all letters
- 11 | hword_asciipart | Hyphenated word part, all ASCII
- 12 | blank | Space symbols
- 13 | tag | XML tag
- 14 | protocol | Protocol head
- 15 | numhword | Hyphenated word, letters and digits
- 16 | asciihword | Hyphenated word, all ASCII
- 17 | hword | Hyphenated word, all letters
- 18 | url_path | URL path
- 19 | file | File or path name
- 20 | float | Decimal notation
- 21 | int | Signed integer
- 22 | uint | Unsigned integer
- 23 | entity | XML entity
-(23 rows)
-
-SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2
-/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234
- wow < jqw <> qwerty');
- tokid | token
--------+--------------------------------------
- 22 | 345
- 12 |
- 1 | qwe
- 12 | @
- 19 | efd.r
- 12 | '
- 14 | http://
- 6 | www.com
- 12 | /
- 14 | http://
- 5 | aew.werc.ewr/?ad=qwe&dw
- 6 | aew.werc.ewr
- 18 | /?ad=qwe&dw
- 12 |
- 5 | 1aew.werc.ewr/?ad=qwe&dw
- 6 | 1aew.werc.ewr
- 18 | /?ad=qwe&dw
- 12 |
- 6 | 2aew.werc.ewr
- 12 |
- 14 | http://
- 5 | 3aew.werc.ewr/?ad=qwe&dw
- 6 | 3aew.werc.ewr
- 18 | /?ad=qwe&dw
- 12 |
- 14 | http://
- 6 | 4aew.werc.ewr
- 12 |
- 14 | http://
- 5 | 5aew.werc.ewr:8100/?
- 6 | 5aew.werc.ewr:8100
- 18 | /?
- 12 |
- 1 | ad
- 12 | =
- 1 | qwe
- 12 | &
- 1 | dw
- 12 |
- 5 | 6aew.werc.ewr:8100/?ad=qwe&dw
- 6 | 6aew.werc.ewr:8100
- 18 | /?ad=qwe&dw
- 12 |
- 5 | 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32
- 6 | 7aew.werc.ewr:8100
- 18 | /?ad=qwe&dw=%20%32
- 12 |
- 7 | +4.0e-10
- 12 |
- 1 | qwe
- 12 |
- 1 | qwe
- 12 |
- 1 | qwqwe
- 12 |
- 20 | 234.435
- 12 |
- 22 | 455
- 12 |
- 20 | 5.005
- 12 |
- 4 | teodor@stack.net
- 12 |
- 4 | teodor@123-stack.net
- 12 |
- 4 | 123_teodor@stack.net
- 12 |
- 4 | 123-teodor@stack.net
- 12 |
- 16 | qwe-wer
- 11 | qwe
- 12 | -
- 11 | wer
- 12 |
- 1 | asdf
- 12 |
- 13 |
- 1 | qwer
- 12 |
- 1 | jf
- 12 |
- 1 | sdjk
- 12 | <
- 1 | we
- 12 |
- 1 | hjwer
- 12 |
- 13 |
- 12 |
- 3 | ewr1
- 12 | >
- 3 | ewri2
- 12 |
- 13 |
- 12 | +
- |
- 19 | /usr/local/fff
- 12 |
- 19 | /awdf/dwqe/4325
- 12 |
- 19 | rewt/ewr
- 12 |
- 1 | wefjn
- 12 |
- 19 | /wqe-324/ewr
- 12 |
- 19 | gist.h
- 12 |
- 19 | gist.h.c
- 12 |
- 19 | gist.c
- 12 | .
- 1 | readline
- 12 |
- 20 | 4.2
- 12 |
- 20 | 4.2
- 12 | .
- 20 | 4.2
- 12 | ,
- 1 | readline
- 20 | -4.2
- 12 |
- 1 | readline
- 20 | -4.2
- 12 | .
- 22 | 234
- 12 | +
- |
- 12 | <
- 1 | i
- 12 |
- 13 |
- 12 |
- 1 | wow
- 12 |
- 12 | <
- 1 | jqw
- 12 |
- 12 | <>
- 1 | qwerty
-(139 rows)
-
-SELECT to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2
-/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234
- wow < jqw <> qwerty');
- to_tsvector
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- '+4.0e-10':28 '-4.2':63,65 '/?':18 '/?ad=qwe&dw':7,10,14,24 '/?ad=qwe&dw=%20%32':27 '/awdf/dwqe/4325':51 '/usr/local/fff':50 '/wqe-324/ewr':54 '123-teodor@stack.net':38 '123_teodor@stack.net':37 '1aew.werc.ewr':9 '1aew.werc.ewr/?ad=qwe&dw':8 '234':66 '234.435':32 '2aew.werc.ewr':11 '345':1 '3aew.werc.ewr':13 '3aew.werc.ewr/?ad=qwe&dw':12 '4.2':59,60,61 '455':33 '4aew.werc.ewr':15 '5.005':34 '5aew.werc.ewr:8100':17 '5aew.werc.ewr:8100/?':16 '6aew.werc.ewr:8100':23 '6aew.werc.ewr:8100/?ad=qwe&dw':22 '7aew.werc.ewr:8100':26 '7aew.werc.ewr:8100/?ad=qwe&dw=%20%32':25 'ad':19 'aew.werc.ewr':6 'aew.werc.ewr/?ad=qwe&dw':5 'asdf':42 'dw':21 'efd.r':3 'ewr1':48 'ewri2':49 'gist.c':57 'gist.h':55 'gist.h.c':56 'hjwer':47 'jf':44 'jqw':69 'qwe':2,20,29,30,40 'qwe-wer':39 'qwer':43 'qwerti':70 'qwqwe':31 'readlin':58,62,64 'rewt/ewr':52 'sdjk':45 'teodor@123-stack.net':36 'teodor@stack.net':35 'wefjn':53 'wer':41 'wow':68 'www.com':4
-(1 row)
-
-SELECT length(to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2
-/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234
- wow < jqw <> qwerty'));
- length
---------
- 56
-(1 row)
-
--- ts_debug
-SELECT * from ts_debug('english', 'abc&nm1;def©ghiõjkl');
- alias | description | token | dictionaries | dictionary | lexemes
------------+-----------------+----------------------------+----------------+--------------+---------
- tag | XML tag | | {} | |
- asciiword | Word, all ASCII | abc | {english_stem} | english_stem | {abc}
- entity | XML entity | &nm1; | {} | |
- asciiword | Word, all ASCII | def | {english_stem} | english_stem | {def}
- entity | XML entity | © | {} | |
- asciiword | Word, all ASCII | ghi | {english_stem} | english_stem | {ghi}
- entity | XML entity | õ | {} | |
- asciiword | Word, all ASCII | jkl | {english_stem} | english_stem | {jkl}
- tag | XML tag | | {} | |
-(9 rows)
-
--- check parsing of URLs
-SELECT * from ts_debug('english', 'http://www.harewoodsolutions.co.uk/press.aspx');
- alias | description | token | dictionaries | dictionary | lexemes
-----------+---------------+----------------------------------------+--------------+------------+------------------------------------------
- protocol | Protocol head | http:// | {} | |
- url | URL | www.harewoodsolutions.co.uk/press.aspx | {simple} | simple | {www.harewoodsolutions.co.uk/press.aspx}
- host | Host | www.harewoodsolutions.co.uk | {simple} | simple | {www.harewoodsolutions.co.uk}
- url_path | URL path | /press.aspx | {simple} | simple | {/press.aspx}
- tag | XML tag | | {} | |
-(5 rows)
-
-SELECT * from ts_debug('english', 'http://aew.wer0c.ewr/id?ad=qwe&dw');
- alias | description | token | dictionaries | dictionary | lexemes
-----------+---------------+----------------------------+--------------+------------+------------------------------
- protocol | Protocol head | http:// | {} | |
- url | URL | aew.wer0c.ewr/id?ad=qwe&dw | {simple} | simple | {aew.wer0c.ewr/id?ad=qwe&dw}
- host | Host | aew.wer0c.ewr | {simple} | simple | {aew.wer0c.ewr}
- url_path | URL path | /id?ad=qwe&dw | {simple} | simple | {/id?ad=qwe&dw}
- tag | XML tag | | {} | |
-(5 rows)
-
-SELECT * from ts_debug('english', 'http://5aew.werc.ewr:8100/?');
- alias | description | token | dictionaries | dictionary | lexemes
-----------+---------------+----------------------+--------------+------------+------------------------
- protocol | Protocol head | http:// | {} | |
- url | URL | 5aew.werc.ewr:8100/? | {simple} | simple | {5aew.werc.ewr:8100/?}
- host | Host | 5aew.werc.ewr:8100 | {simple} | simple | {5aew.werc.ewr:8100}
- url_path | URL path | /? | {simple} | simple | {/?}
-(4 rows)
-
-SELECT * from ts_debug('english', '5aew.werc.ewr:8100/?xx');
- alias | description | token | dictionaries | dictionary | lexemes
-----------+-------------+------------------------+--------------+------------+--------------------------
- url | URL | 5aew.werc.ewr:8100/?xx | {simple} | simple | {5aew.werc.ewr:8100/?xx}
- host | Host | 5aew.werc.ewr:8100 | {simple} | simple | {5aew.werc.ewr:8100}
- url_path | URL path | /?xx | {simple} | simple | {/?xx}
-(3 rows)
-
-SELECT token, alias,
- dictionaries, dictionaries is null as dnull, array_dims(dictionaries) as ddims,
- lexemes, lexemes is null as lnull, array_dims(lexemes) as ldims
-from ts_debug('english', 'a title');
- token | alias | dictionaries | dnull | ddims | lexemes | lnull | ldims
--------+-----------+----------------+-------+-------+---------+-------+-------
- a | asciiword | {english_stem} | f | [1:1] | {} | f |
- | blank | {} | f | | | t |
- title | asciiword | {english_stem} | f | [1:1] | {titl} | f | [1:1]
-(3 rows)
-
--- to_tsquery
-SELECT to_tsquery('english', 'qwe & sKies ');
- to_tsquery
----------------
- 'qwe' & 'sky'
-(1 row)
-
-SELECT to_tsquery('simple', 'qwe & sKies ');
- to_tsquery
------------------
- 'qwe' & 'skies'
-(1 row)
-
-SELECT to_tsquery('english', '''the wether'':dc & '' sKies '':BC ');
- to_tsquery
-------------------------
- 'wether':CD & 'sky':BC
-(1 row)
-
-SELECT to_tsquery('english', 'asd&(and|fghj)');
- to_tsquery
-----------------
- 'asd' & 'fghj'
-(1 row)
-
-SELECT to_tsquery('english', '(asd&and)|fghj');
- to_tsquery
-----------------
- 'asd' | 'fghj'
-(1 row)
-
-SELECT to_tsquery('english', '(asd&!and)|fghj');
- to_tsquery
-----------------
- 'asd' | 'fghj'
-(1 row)
-
-SELECT to_tsquery('english', '(the|and&(i&1))&fghj');
- to_tsquery
---------------
- '1' & 'fghj'
-(1 row)
-
-SELECT plainto_tsquery('english', 'the and z 1))& fghj');
- plainto_tsquery
---------------------
- 'z' & '1' & 'fghj'
-(1 row)
-
-SELECT plainto_tsquery('english', 'foo bar') && plainto_tsquery('english', 'asd');
- ?column?
------------------------
- 'foo' & 'bar' & 'asd'
-(1 row)
-
-SELECT plainto_tsquery('english', 'foo bar') || plainto_tsquery('english', 'asd fg');
- ?column?
-------------------------------
- 'foo' & 'bar' | 'asd' & 'fg'
-(1 row)
-
-SELECT plainto_tsquery('english', 'foo bar') || !!plainto_tsquery('english', 'asd fg');
- ?column?
------------------------------------
- 'foo' & 'bar' | !( 'asd' & 'fg' )
-(1 row)
-
-SELECT plainto_tsquery('english', 'foo bar') && 'asd | fg';
- ?column?
-----------------------------------
- 'foo' & 'bar' & ( 'asd' | 'fg' )
-(1 row)
-
--- Check stop word deletion, a and s are stop-words
-SELECT to_tsquery('english', '!(a & !b) & c');
- to_tsquery
--------------
- !!'b' & 'c'
-(1 row)
-
-SELECT to_tsquery('english', '!(a & !b)');
- to_tsquery
-------------
- !!'b'
-(1 row)
-
-SELECT to_tsquery('english', '(1 <-> 2) <-> a');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(1 <-> a) <-> 2');
- to_tsquery
--------------
- '1' <2> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(a <-> 1) <-> 2');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', 'a <-> (1 <-> 2)');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', '1 <-> (a <-> 2)');
- to_tsquery
--------------
- '1' <2> '2'
-(1 row)
-
-SELECT to_tsquery('english', '1 <-> (2 <-> a)');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(1 <-> 2) <3> a');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(1 <-> a) <3> 2');
- to_tsquery
--------------
- '1' <4> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(a <-> 1) <3> 2');
- to_tsquery
--------------
- '1' <3> '2'
-(1 row)
-
-SELECT to_tsquery('english', 'a <3> (1 <-> 2)');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', '1 <3> (a <-> 2)');
- to_tsquery
--------------
- '1' <4> '2'
-(1 row)
-
-SELECT to_tsquery('english', '1 <3> (2 <-> a)');
- to_tsquery
--------------
- '1' <3> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(1 <3> 2) <-> a');
- to_tsquery
--------------
- '1' <3> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(1 <3> a) <-> 2');
- to_tsquery
--------------
- '1' <4> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(a <3> 1) <-> 2');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', 'a <-> (1 <3> 2)');
- to_tsquery
--------------
- '1' <3> '2'
-(1 row)
-
-SELECT to_tsquery('english', '1 <-> (a <3> 2)');
- to_tsquery
--------------
- '1' <4> '2'
-(1 row)
-
-SELECT to_tsquery('english', '1 <-> (2 <3> a)');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', '((a <-> 1) <-> 2) <-> s');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(2 <-> (a <-> 1)) <-> s');
- to_tsquery
--------------
- '2' <2> '1'
-(1 row)
-
-SELECT to_tsquery('english', '((1 <-> a) <-> 2) <-> s');
- to_tsquery
--------------
- '1' <2> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(2 <-> (1 <-> a)) <-> s');
- to_tsquery
--------------
- '2' <-> '1'
-(1 row)
-
-SELECT to_tsquery('english', 's <-> ((a <-> 1) <-> 2)');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', 's <-> (2 <-> (a <-> 1))');
- to_tsquery
--------------
- '2' <2> '1'
-(1 row)
-
-SELECT to_tsquery('english', 's <-> ((1 <-> a) <-> 2)');
- to_tsquery
--------------
- '1' <2> '2'
-(1 row)
-
-SELECT to_tsquery('english', 's <-> (2 <-> (1 <-> a))');
- to_tsquery
--------------
- '2' <-> '1'
-(1 row)
-
-SELECT to_tsquery('english', '((a <-> 1) <-> s) <-> 2');
- to_tsquery
--------------
- '1' <2> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(s <-> (a <-> 1)) <-> 2');
- to_tsquery
--------------
- '1' <-> '2'
-(1 row)
-
-SELECT to_tsquery('english', '((1 <-> a) <-> s) <-> 2');
- to_tsquery
--------------
- '1' <3> '2'
-(1 row)
-
-SELECT to_tsquery('english', '(s <-> (1 <-> a)) <-> 2');
- to_tsquery
--------------
- '1' <2> '2'
-(1 row)
-
-SELECT to_tsquery('english', '2 <-> ((a <-> 1) <-> s)');
- to_tsquery
--------------
- '2' <2> '1'
-(1 row)
-
-SELECT to_tsquery('english', '2 <-> (s <-> (a <-> 1))');
- to_tsquery
--------------
- '2' <3> '1'
-(1 row)
-
-SELECT to_tsquery('english', '2 <-> ((1 <-> a) <-> s)');
- to_tsquery
--------------
- '2' <-> '1'
-(1 row)
-
-SELECT to_tsquery('english', '2 <-> (s <-> (1 <-> a))');
- to_tsquery
--------------
- '2' <2> '1'
-(1 row)
-
-SELECT to_tsquery('english', 'foo <-> (a <-> (the <-> bar))');
- to_tsquery
------------------
- 'foo' <3> 'bar'
-(1 row)
-
-SELECT to_tsquery('english', '((foo <-> a) <-> the) <-> bar');
- to_tsquery
------------------
- 'foo' <3> 'bar'
-(1 row)
-
-SELECT to_tsquery('english', 'foo <-> a <-> the <-> bar');
- to_tsquery
------------------
- 'foo' <3> 'bar'
-(1 row)
-
-SELECT phraseto_tsquery('english', 'PostgreSQL can be extended by the user in many ways');
- phraseto_tsquery
------------------------------------------------------------
- 'postgresql' <3> 'extend' <3> 'user' <2> 'mani' <-> 'way'
-(1 row)
-
-SELECT ts_rank_cd(to_tsvector('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-'), to_tsquery('english', 'paint&water'));
- ts_rank_cd
-------------
- 0.05
-(1 row)
-
-SELECT ts_rank_cd(to_tsvector('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-'), to_tsquery('english', 'breath&motion&water'));
- ts_rank_cd
--------------
- 0.008333334
-(1 row)
-
-SELECT ts_rank_cd(to_tsvector('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-'), to_tsquery('english', 'ocean'));
- ts_rank_cd
-------------
- 0.1
-(1 row)
-
-SELECT ts_rank_cd(to_tsvector('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-'), to_tsquery('english', 'painted <-> Ship'));
- ts_rank_cd
-------------
- 0.1
-(1 row)
-
-SELECT ts_rank_cd(strip(to_tsvector('both stripped')),
- to_tsquery('both & stripped'));
- ts_rank_cd
-------------
- 0
-(1 row)
-
-SELECT ts_rank_cd(to_tsvector('unstripped') || strip(to_tsvector('stripped')),
- to_tsquery('unstripped & stripped'));
- ts_rank_cd
-------------
- 0
-(1 row)
-
---headline tests
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'paint&water'));
- ts_headline
------------------------------------------
- painted Ocean. +
- Water, water, every where+
- And all the boards did shrink; +
- Water, water, every
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'breath&motion&water'));
- ts_headline
-----------------------------------
- breath nor motion,+
- As idle as a painted Ship +
- Upon a painted Ocean. +
- Water, water
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'ocean'));
- ts_headline
-----------------------------------
- Ocean. +
- Water, water, every where +
- And all the boards did shrink;+
- Water, water, every where
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'day & drink'));
- ts_headline
-------------------------------------
- day, +
- We stuck, nor breath nor motion,+
- As idle as a painted Ship +
- Upon a painted Ocean. +
- Water, water, every where +
- And all the boards did shrink; +
- Water, water, every where, +
- Nor any drop
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'day | drink'));
- ts_headline
------------------------------------------------------------
- Day after day, day after day,+
- We stuck, nor breath nor motion, +
- As idle as a painted
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'day | !drink'));
- ts_headline
------------------------------------------------------------
- Day after day, day after day,+
- We stuck, nor breath nor motion, +
- As idle as a painted
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'painted <-> Ship & drink'));
- ts_headline
-----------------------------------
- painted Ship +
- Upon a painted Ocean. +
- Water, water, every where +
- And all the boards did shrink;+
- Water, water, every where, +
- Nor any drop to drink
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'painted <-> Ship | drink'));
- ts_headline
----------------------------------
- painted Ship +
- Upon a painted Ocean. +
- Water, water, every where +
- And all the boards did shrink
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'painted <-> Ship | !drink'));
- ts_headline
----------------------------------
- painted Ship +
- Upon a painted Ocean. +
- Water, water, every where +
- And all the boards did shrink
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', phraseto_tsquery('english', 'painted Ocean'));
- ts_headline
-----------------------------------
- painted Ocean. +
- Water, water, every where +
- And all the boards did shrink;+
- Water, water, every
-(1 row)
-
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', phraseto_tsquery('english', 'idle as a painted Ship'));
- ts_headline
----------------------------------------------
- idle as a painted Ship+
- Upon a painted Ocean. +
- Water, water, every where +
- And all the boards
-(1 row)
-
-SELECT ts_headline('english',
-'Lorem ipsum urna. Nullam nullam ullamcorper urna.',
-to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'),
-'MaxWords=100, MinWords=1');
- ts_headline
--------------------------------------------------------------------------------
- Lorem ipsum urna. Nullam nullam ullamcorper urna
-(1 row)
-
-SELECT ts_headline('english',
-'Lorem ipsum urna. Nullam nullam ullamcorper urna.',
-phraseto_tsquery('english','ullamcorper urna'),
-'MaxWords=100, MinWords=5');
- ts_headline
--------------------------------------------------------------
- urna. Nullam nullam ullamcorper urna.
-(1 row)
-
-SELECT ts_headline('english', '
-
-
-
-Sea view wow foo bar qq
-YES
-ff-bg
-
-
-',
-to_tsquery('english', 'sea&foo'), 'HighlightAll=true');
- ts_headline
------------------------------------------------------------------------------
- +
- +
- +
- +
- Sea view wow foo bar qq +
- YES +
- ff-bg +
- +
- +
-
-(1 row)
-
-SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=2, MinWords=1');
- ts_headline
--------------------
- 1 3
-(1 row)
-
-SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 & 3', 'MaxWords=4, MinWords=1');
- ts_headline
----------------------
- 1 2 3
-(1 row)
-
-SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=4, MinWords=1');
- ts_headline
--------------------
- 1 3
-(1 row)
-
---Check if headline fragments work
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'ocean'), 'MaxFragments=1');
- ts_headline
-------------------------------------
- after day, +
- We stuck, nor breath nor motion,+
- As idle as a painted Ship +
- Upon a painted Ocean. +
- Water, water, every where +
- And all the boards did shrink; +
- Water, water, every where, +
- Nor any drop
-(1 row)
-
---Check if more than one fragments are displayed
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'Coleridge & stuck'), 'MaxFragments=2');
- ts_headline
-----------------------------------------------
- after day, day after day, +
- We stuck, nor breath nor motion, +
- As idle as a painted Ship +
- Upon a painted Ocean. +
- Water, water, every where +
- And all the boards did shrink; +
- Water, water, every where ... drop to drink.+
- S. T. Coleridge
-(1 row)
-
---Fragments when there all query words are not in the document
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'ocean & seahorse'), 'MaxFragments=1');
- ts_headline
-------------------------------------
- +
- Day after day, day after day, +
- We stuck, nor breath nor motion,+
- As idle as
-(1 row)
-
---FragmentDelimiter option
-SELECT ts_headline('english', '
-Day after day, day after day,
- We stuck, nor breath nor motion,
-As idle as a painted Ship
- Upon a painted Ocean.
-Water, water, every where
- And all the boards did shrink;
-Water, water, every where,
- Nor any drop to drink.
-S. T. Coleridge (1772-1834)
-', to_tsquery('english', 'Coleridge & stuck'), 'MaxFragments=2,FragmentDelimiter=***');
- ts_headline
---------------------------------------------
- after day, day after day, +
- We stuck, nor breath nor motion, +
- As idle as a painted Ship +
- Upon a painted Ocean. +
- Water, water, every where +
- And all the boards did shrink; +
- Water, water, every where***drop to drink.+
- S. T. Coleridge
-(1 row)
-
---Fragments with phrase search
-SELECT ts_headline('english',
-'Lorem ipsum urna. Nullam nullam ullamcorper urna.',
-to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'),
-'MaxFragments=100, MaxWords=100, MinWords=1');
- ts_headline
--------------------------------------------------------------------------------
- Lorem ipsum urna. Nullam nullam ullamcorper urna
-(1 row)
-
--- Edge cases with empty query
-SELECT ts_headline('english',
-'', to_tsquery('english', ''));
-NOTICE: text-search query doesn't contain lexemes: ""
- ts_headline
--------------
-
-(1 row)
-
-SELECT ts_headline('english',
-'foo bar', to_tsquery('english', ''));
-NOTICE: text-search query doesn't contain lexemes: ""
- ts_headline
--------------
- foo bar
-(1 row)
-
---Rewrite sub system
-CREATE TABLE test_tsquery (txtkeyword TEXT, txtsample TEXT);
-\set ECHO none
-ALTER TABLE test_tsquery ADD COLUMN keyword tsquery;
-UPDATE test_tsquery SET keyword = to_tsquery('english', txtkeyword);
-ALTER TABLE test_tsquery ADD COLUMN sample tsquery;
-UPDATE test_tsquery SET sample = to_tsquery('english', txtsample::text);
-SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new <-> york';
- count
--------
- 2
-(1 row)
-
-SELECT COUNT(*) FROM test_tsquery WHERE keyword <= 'new <-> york';
- count
--------
- 3
-(1 row)
-
-SELECT COUNT(*) FROM test_tsquery WHERE keyword = 'new <-> york';
- count
--------
- 1
-(1 row)
-
-SELECT COUNT(*) FROM test_tsquery WHERE keyword >= 'new <-> york';
- count
--------
- 4
-(1 row)
-
-SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new <-> york';
- count
--------
- 3
-(1 row)
-
-CREATE UNIQUE INDEX bt_tsq ON test_tsquery (keyword);
-SET enable_seqscan=OFF;
-SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new <-> york';
- count
--------
- 2
-(1 row)
-
-SELECT COUNT(*) FROM test_tsquery WHERE keyword <= 'new <-> york';
- count
--------
- 3
-(1 row)
-
-SELECT COUNT(*) FROM test_tsquery WHERE keyword = 'new <-> york';
- count
--------
- 1
-(1 row)
-
-SELECT COUNT(*) FROM test_tsquery WHERE keyword >= 'new <-> york';
- count
--------
- 4
-(1 row)
-
-SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new <-> york';
- count
--------
- 3
-(1 row)
-
-RESET enable_seqscan;
-SELECT ts_rewrite('foo & bar & qq & new & york', 'new & york'::tsquery, 'big & apple | nyc | new & york & city');
- ts_rewrite
-------------------------------------------------------------------------------
- 'foo' & 'bar' & 'qq' & ( 'city' & 'new' & 'york' | 'nyc' | 'big' & 'apple' )
-(1 row)
-
-SELECT ts_rewrite(ts_rewrite('new & !york ', 'york', '!jersey'),
- 'jersey', 'mexico');
- ts_rewrite
---------------------
- 'new' & !!'mexico'
-(1 row)
-
-SELECT ts_rewrite('moscow', 'SELECT keyword, sample FROM test_tsquery'::text );
- ts_rewrite
----------------------
- 'moskva' | 'moscow'
-(1 row)
-
-SELECT ts_rewrite('moscow & hotel', 'SELECT keyword, sample FROM test_tsquery'::text );
- ts_rewrite
------------------------------------
- 'hotel' & ( 'moskva' | 'moscow' )
-(1 row)
-
-SELECT ts_rewrite('bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery'::text );
- ts_rewrite
--------------------------------------------------------------------------------------
- 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' )
-(1 row)
-
-SELECT ts_rewrite( 'moscow', 'SELECT keyword, sample FROM test_tsquery');
- ts_rewrite
----------------------
- 'moskva' | 'moscow'
-(1 row)
-
-SELECT ts_rewrite( 'moscow & hotel', 'SELECT keyword, sample FROM test_tsquery');
- ts_rewrite
------------------------------------
- 'hotel' & ( 'moskva' | 'moscow' )
-(1 row)
-
-SELECT ts_rewrite( 'bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery');
- ts_rewrite
--------------------------------------------------------------------------------------
- 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' )
-(1 row)
-
-SELECT ts_rewrite('1 & (2 <-> 3)', 'SELECT keyword, sample FROM test_tsquery'::text );
- ts_rewrite
--------------
- '2' <-> '4'
-(1 row)
-
-SELECT ts_rewrite('1 & (2 <2> 3)', 'SELECT keyword, sample FROM test_tsquery'::text );
- ts_rewrite
--------------------
- '1' & '2' <2> '3'
-(1 row)
-
-SELECT ts_rewrite('5 <-> (1 & (2 <-> 3))', 'SELECT keyword, sample FROM test_tsquery'::text );
- ts_rewrite
--------------------------
- '5' <-> ( '2' <-> '4' )
-(1 row)
-
-SELECT ts_rewrite('5 <-> (6 | 8)', 'SELECT keyword, sample FROM test_tsquery'::text );
- ts_rewrite
------------------------
- '5' <-> ( '6' | '8' )
-(1 row)
-
--- Check empty substitution
-SELECT ts_rewrite(to_tsquery('5 & (6 | 5)'), to_tsquery('5'), to_tsquery(''));
-NOTICE: text-search query doesn't contain lexemes: ""
- ts_rewrite
-------------
- '6'
-(1 row)
-
-SELECT ts_rewrite(to_tsquery('!5'), to_tsquery('5'), to_tsquery(''));
-NOTICE: text-search query doesn't contain lexemes: ""
- ts_rewrite
-------------
-
-(1 row)
-
-SELECT keyword FROM test_tsquery WHERE keyword @> 'new';
- keyword
-------------------
- 'new' <-> 'york'
-(1 row)
-
-SELECT keyword FROM test_tsquery WHERE keyword @> 'moscow';
- keyword
-----------
- 'moscow'
-(1 row)
-
-SELECT keyword FROM test_tsquery WHERE keyword <@ 'new';
- keyword
----------
-(0 rows)
-
-SELECT keyword FROM test_tsquery WHERE keyword <@ 'moscow';
- keyword
-----------
- 'moscow'
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query;
- ts_rewrite
----------------------
- 'moskva' | 'moscow'
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query;
- ts_rewrite
------------------------------------
- 'hotel' & ( 'moskva' | 'moscow' )
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query;
- ts_rewrite
--------------------------------------------------------------------------------------
- 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' )
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query;
- ts_rewrite
----------------------
- 'moskva' | 'moscow'
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query;
- ts_rewrite
------------------------------------
- 'hotel' & ( 'moskva' | 'moscow' )
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query;
- ts_rewrite
--------------------------------------------------------------------------------------
- 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' )
-(1 row)
-
-CREATE INDEX qq ON test_tsquery USING gist (keyword tsquery_ops);
-SET enable_seqscan=OFF;
-SELECT keyword FROM test_tsquery WHERE keyword @> 'new';
- keyword
-------------------
- 'new' <-> 'york'
-(1 row)
-
-SELECT keyword FROM test_tsquery WHERE keyword @> 'moscow';
- keyword
-----------
- 'moscow'
-(1 row)
-
-SELECT keyword FROM test_tsquery WHERE keyword <@ 'new';
- keyword
----------
-(0 rows)
-
-SELECT keyword FROM test_tsquery WHERE keyword <@ 'moscow';
- keyword
-----------
- 'moscow'
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query;
- ts_rewrite
----------------------
- 'moskva' | 'moscow'
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query;
- ts_rewrite
------------------------------------
- 'hotel' & ( 'moskva' | 'moscow' )
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query;
- ts_rewrite
--------------------------------------------------------------------------------------
- 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' )
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query;
- ts_rewrite
----------------------
- 'moskva' | 'moscow'
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query;
- ts_rewrite
------------------------------------
- 'hotel' & ( 'moskva' | 'moscow' )
-(1 row)
-
-SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query;
- ts_rewrite
--------------------------------------------------------------------------------------
- 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' )
-(1 row)
-
-SELECT ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz');
- ts_rewrite
------------------------------------------
- ( 'bar' | 'baz' ) <-> ( 'bar' | 'baz' )
-(1 row)
-
-SELECT to_tsvector('foo bar') @@
- ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz');
- ?column?
-----------
- f
-(1 row)
-
-SELECT to_tsvector('bar baz') @@
- ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz');
- ?column?
-----------
- t
-(1 row)
-
-RESET enable_seqscan;
---test GUC
-SET default_text_search_config=simple;
-SELECT to_tsvector('SKIES My booKs');
- to_tsvector
-----------------------------
- 'books':3 'my':2 'skies':1
-(1 row)
-
-SELECT plainto_tsquery('SKIES My booKs');
- plainto_tsquery
---------------------------
- 'skies' & 'my' & 'books'
-(1 row)
-
-SELECT to_tsquery('SKIES & My | booKs');
- to_tsquery
---------------------------
- 'skies' & 'my' | 'books'
-(1 row)
-
-SET default_text_search_config=english;
-SELECT to_tsvector('SKIES My booKs');
- to_tsvector
-------------------
- 'book':3 'sky':1
-(1 row)
-
-SELECT plainto_tsquery('SKIES My booKs');
- plainto_tsquery
------------------
- 'sky' & 'book'
-(1 row)
-
-SELECT to_tsquery('SKIES & My | booKs');
- to_tsquery
-----------------
- 'sky' | 'book'
-(1 row)
-
---trigger
-CREATE TRIGGER tsvectorupdate
-BEFORE UPDATE OR INSERT ON test_tsvector
-FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(a, 'pg_catalog.english', t);
-SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty');
- count
--------
- 0
-(1 row)
-
-INSERT INTO test_tsvector (t) VALUES ('345 qwerty');
-SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty');
- count
--------
- 1
-(1 row)
-
-UPDATE test_tsvector SET t = null WHERE t = '345 qwerty';
-SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty');
- count
--------
- 0
-(1 row)
-
-INSERT INTO test_tsvector (t) VALUES ('345 qwerty');
-SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty');
- count
--------
- 1
-(1 row)
-
--- Test inlining of immutable constant functions
--- to_tsquery(text) is not immutable, so it won't be inlined
-explain (costs off)
-select * from test_tsquery, to_tsquery('new') q where txtsample @@ q;
- QUERY PLAN
-------------------------------------------------
- Nested Loop
- Join Filter: (test_tsquery.txtsample @@ q.q)
- -> Function Scan on to_tsquery q
- -> Seq Scan on test_tsquery
-(4 rows)
-
--- to_tsquery(regconfig, text) is an immutable function.
--- That allows us to get rid of using function scan and join at all.
-explain (costs off)
-select * from test_tsquery, to_tsquery('english', 'new') q where txtsample @@ q;
- QUERY PLAN
----------------------------------------------
- Seq Scan on test_tsquery
- Filter: (txtsample @@ '''new'''::tsquery)
-(2 rows)
-
--- test finding items in GIN's pending list
-create temp table pendtest (ts tsvector);
-create index pendtest_idx on pendtest using gin(ts);
-insert into pendtest values (to_tsvector('Lore ipsam'));
-insert into pendtest values (to_tsvector('Lore ipsum'));
-select * from pendtest where 'ipsu:*'::tsquery @@ ts;
- ts
---------------------
- 'ipsum':2 'lore':1
-(1 row)
-
-select * from pendtest where 'ipsa:*'::tsquery @@ ts;
- ts
---------------------
- 'ipsam':2 'lore':1
-(1 row)
-
-select * from pendtest where 'ips:*'::tsquery @@ ts;
- ts
---------------------
- 'ipsam':2 'lore':1
- 'ipsum':2 'lore':1
-(2 rows)
-
-select * from pendtest where 'ipt:*'::tsquery @@ ts;
- ts
-----
-(0 rows)
-
-select * from pendtest where 'ipi:*'::tsquery @@ ts;
- ts
-----
-(0 rows)
-
---check OP_PHRASE on index
-create temp table phrase_index_test(fts tsvector);
-insert into phrase_index_test values ('A fat cat has just eaten a rat.');
-insert into phrase_index_test values (to_tsvector('english', 'A fat cat has just eaten a rat.'));
-create index phrase_index_test_idx on phrase_index_test using gin(fts);
-set enable_seqscan = off;
-select * from phrase_index_test where fts @@ phraseto_tsquery('english', 'fat cat');
- fts
------------------------------------
- 'cat':3 'eaten':6 'fat':2 'rat':8
-(1 row)
-
-set enable_seqscan = on;
--- test websearch_to_tsquery function
-select websearch_to_tsquery('simple', 'I have a fat:*ABCD cat');
- websearch_to_tsquery
----------------------------------------------
- 'i' & 'have' & 'a' & 'fat' & 'abcd' & 'cat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'orange:**AABBCCDD');
- websearch_to_tsquery
------------------------
- 'orange' & 'aabbccdd'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat:A!cat:B|rat:C<');
- websearch_to_tsquery
------------------------------------------
- 'fat' & 'a' & 'cat' & 'b' & 'rat' & 'c'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat:A : cat:B');
- websearch_to_tsquery
----------------------------
- 'fat' & 'a' & 'cat' & 'b'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat*rat');
- websearch_to_tsquery
-----------------------
- 'fat' <-> 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat-rat');
- websearch_to_tsquery
--------------------------------
- 'fat-rat' <-> 'fat' <-> 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat_rat');
- websearch_to_tsquery
-----------------------
- 'fat' <-> 'rat'
-(1 row)
-
--- weights are completely ignored
-select websearch_to_tsquery('simple', 'abc : def');
- websearch_to_tsquery
-----------------------
- 'abc' & 'def'
-(1 row)
-
-select websearch_to_tsquery('simple', 'abc:def');
- websearch_to_tsquery
-----------------------
- 'abc' & 'def'
-(1 row)
-
-select websearch_to_tsquery('simple', 'a:::b');
- websearch_to_tsquery
-----------------------
- 'a' & 'b'
-(1 row)
-
-select websearch_to_tsquery('simple', 'abc:d');
- websearch_to_tsquery
-----------------------
- 'abc' & 'd'
-(1 row)
-
-select websearch_to_tsquery('simple', ':');
-NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored
- websearch_to_tsquery
-----------------------
-
-(1 row)
-
--- these operators are ignored
-select websearch_to_tsquery('simple', 'abc & def');
- websearch_to_tsquery
-----------------------
- 'abc' & 'def'
-(1 row)
-
-select websearch_to_tsquery('simple', 'abc | def');
- websearch_to_tsquery
-----------------------
- 'abc' & 'def'
-(1 row)
-
-select websearch_to_tsquery('simple', 'abc <-> def');
- websearch_to_tsquery
-----------------------
- 'abc' & 'def'
-(1 row)
-
--- parens are ignored, too
-select websearch_to_tsquery('simple', 'abc (pg or class)');
- websearch_to_tsquery
-------------------------
- 'abc' & 'pg' | 'class'
-(1 row)
-
-select websearch_to_tsquery('simple', '(foo bar) or (ding dong)');
- websearch_to_tsquery
----------------------------------
- 'foo' & 'bar' | 'ding' & 'dong'
-(1 row)
-
--- NOT is ignored in quotes
-select websearch_to_tsquery('english', 'My brand new smartphone');
- websearch_to_tsquery
--------------------------------
- 'brand' & 'new' & 'smartphon'
-(1 row)
-
-select websearch_to_tsquery('english', 'My brand "new smartphone"');
- websearch_to_tsquery
----------------------------------
- 'brand' & 'new' <-> 'smartphon'
-(1 row)
-
-select websearch_to_tsquery('english', 'My brand "new -smartphone"');
- websearch_to_tsquery
----------------------------------
- 'brand' & 'new' <-> 'smartphon'
-(1 row)
-
--- test OR operator
-select websearch_to_tsquery('simple', 'cat or rat');
- websearch_to_tsquery
-----------------------
- 'cat' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'cat OR rat');
- websearch_to_tsquery
-----------------------
- 'cat' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'cat "OR" rat');
- websearch_to_tsquery
-----------------------
- 'cat' & 'or' & 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'cat OR');
- websearch_to_tsquery
-----------------------
- 'cat' & 'or'
-(1 row)
-
-select websearch_to_tsquery('simple', 'OR rat');
- websearch_to_tsquery
-----------------------
- 'or' & 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', '"fat cat OR rat"');
- websearch_to_tsquery
-------------------------------------
- 'fat' <-> 'cat' <-> 'or' <-> 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat (cat OR rat');
- websearch_to_tsquery
------------------------
- 'fat' & 'cat' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'or OR or');
- websearch_to_tsquery
-----------------------
- 'or' | 'or'
-(1 row)
-
--- OR is an operator here ...
-select websearch_to_tsquery('simple', '"fat cat"or"fat rat"');
- websearch_to_tsquery
------------------------------------
- 'fat' <-> 'cat' | 'fat' <-> 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat or(rat');
- websearch_to_tsquery
-----------------------
- 'fat' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat or)rat');
- websearch_to_tsquery
-----------------------
- 'fat' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat or&rat');
- websearch_to_tsquery
-----------------------
- 'fat' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat or|rat');
- websearch_to_tsquery
-----------------------
- 'fat' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat or!rat');
- websearch_to_tsquery
-----------------------
- 'fat' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat orrat');
- websearch_to_tsquery
-----------------------
- 'fat' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('simple', 'fat or ');
- websearch_to_tsquery
-----------------------
- 'fat' & 'or'
-(1 row)
-
--- ... but not here
-select websearch_to_tsquery('simple', 'abc orange');
- websearch_to_tsquery
-----------------------
- 'abc' & 'orange'
-(1 row)
-
-select websearch_to_tsquery('simple', 'abc OR1234');
- websearch_to_tsquery
-----------------------
- 'abc' & 'or1234'
-(1 row)
-
-select websearch_to_tsquery('simple', 'abc or-abc');
- websearch_to_tsquery
--------------------------------------
- 'abc' & 'or-abc' <-> 'or' <-> 'abc'
-(1 row)
-
-select websearch_to_tsquery('simple', 'abc OR_abc');
- websearch_to_tsquery
-------------------------
- 'abc' & 'or' <-> 'abc'
-(1 row)
-
--- test quotes
-select websearch_to_tsquery('english', '"pg_class pg');
- websearch_to_tsquery
----------------------------
- 'pg' <-> 'class' <-> 'pg'
-(1 row)
-
-select websearch_to_tsquery('english', 'pg_class pg"');
- websearch_to_tsquery
--------------------------
- 'pg' <-> 'class' & 'pg'
-(1 row)
-
-select websearch_to_tsquery('english', '"pg_class pg"');
- websearch_to_tsquery
----------------------------
- 'pg' <-> 'class' <-> 'pg'
-(1 row)
-
-select websearch_to_tsquery('english', '"pg_class : pg"');
- websearch_to_tsquery
----------------------------
- 'pg' <-> 'class' <-> 'pg'
-(1 row)
-
-select websearch_to_tsquery('english', 'abc "pg_class pg"');
- websearch_to_tsquery
------------------------------------
- 'abc' & 'pg' <-> 'class' <-> 'pg'
-(1 row)
-
-select websearch_to_tsquery('english', '"pg_class pg" def');
- websearch_to_tsquery
------------------------------------
- 'pg' <-> 'class' <-> 'pg' & 'def'
-(1 row)
-
-select websearch_to_tsquery('english', 'abc "pg pg_class pg" def');
- websearch_to_tsquery
-----------------------------------------------------
- 'abc' & 'pg' <-> 'pg' <-> 'class' <-> 'pg' & 'def'
-(1 row)
-
-select websearch_to_tsquery('english', ' or "pg pg_class pg" or ');
- websearch_to_tsquery
-------------------------------------
- 'pg' <-> 'pg' <-> 'class' <-> 'pg'
-(1 row)
-
-select websearch_to_tsquery('english', '""pg pg_class pg""');
- websearch_to_tsquery
---------------------------------
- 'pg' & 'pg' <-> 'class' & 'pg'
-(1 row)
-
-select websearch_to_tsquery('english', 'abc """"" def');
- websearch_to_tsquery
-----------------------
- 'abc' & 'def'
-(1 row)
-
-select websearch_to_tsquery('english', 'cat -"fat rat"');
- websearch_to_tsquery
-------------------------------
- 'cat' & !( 'fat' <-> 'rat' )
-(1 row)
-
-select websearch_to_tsquery('english', 'cat -"fat rat" cheese');
- websearch_to_tsquery
-----------------------------------------
- 'cat' & !( 'fat' <-> 'rat' ) & 'chees'
-(1 row)
-
-select websearch_to_tsquery('english', 'abc "def -"');
- websearch_to_tsquery
-----------------------
- 'abc' & 'def'
-(1 row)
-
-select websearch_to_tsquery('english', 'abc "def :"');
- websearch_to_tsquery
-----------------------
- 'abc' & 'def'
-(1 row)
-
-select websearch_to_tsquery('english', '"A fat cat" has just eaten a -rat.');
- websearch_to_tsquery
-------------------------------------
- 'fat' <-> 'cat' & 'eaten' & !'rat'
-(1 row)
-
-select websearch_to_tsquery('english', '"A fat cat" has just eaten OR !rat.');
- websearch_to_tsquery
------------------------------------
- 'fat' <-> 'cat' & 'eaten' | 'rat'
-(1 row)
-
-select websearch_to_tsquery('english', '"A fat cat" has just (+eaten OR -rat)');
- websearch_to_tsquery
-------------------------------------
- 'fat' <-> 'cat' & 'eaten' | !'rat'
-(1 row)
-
-select websearch_to_tsquery('english', 'this is ----fine');
- websearch_to_tsquery
-----------------------
- !!!!'fine'
-(1 row)
-
-select websearch_to_tsquery('english', '(()) )))) this ||| is && -fine, "dear friend" OR good');
- websearch_to_tsquery
-----------------------------------------
- !'fine' & 'dear' <-> 'friend' | 'good'
-(1 row)
-
-select websearch_to_tsquery('english', 'an old <-> cat " is fine &&& too');
- websearch_to_tsquery
-------------------------
- 'old' & 'cat' & 'fine'
-(1 row)
-
-select websearch_to_tsquery('english', '"A the" OR just on');
-NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored
- websearch_to_tsquery
-----------------------
-
-(1 row)
-
-select websearch_to_tsquery('english', '"a fat cat" ate a rat');
- websearch_to_tsquery
----------------------------------
- 'fat' <-> 'cat' & 'ate' & 'rat'
-(1 row)
-
-select to_tsvector('english', 'A fat cat ate a rat') @@
- websearch_to_tsquery('english', '"a fat cat" ate a rat');
- ?column?
-----------
- t
-(1 row)
-
-select to_tsvector('english', 'A fat grey cat ate a rat') @@
- websearch_to_tsquery('english', '"a fat cat" ate a rat');
- ?column?
-----------
- f
-(1 row)
-
--- cases handled by gettoken_tsvector()
-select websearch_to_tsquery('''');
-NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored
- websearch_to_tsquery
-----------------------
-
-(1 row)
-
-select websearch_to_tsquery('''abc''''def''');
- websearch_to_tsquery
-----------------------
- 'abc' <-> 'def'
-(1 row)
-
-select websearch_to_tsquery('\abc');
- websearch_to_tsquery
-----------------------
- 'abc'
-(1 row)
-
-select websearch_to_tsquery('\');
-NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored
- websearch_to_tsquery
-----------------------
-
-(1 row)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/tsdicts.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/tsdicts.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/tsdicts.out 2024-11-15 02:50:52.514009551 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/tsdicts.out 2024-11-15 02:59:17.225115696 +0000
@@ -1,723 +1,2 @@
---Test text search dictionaries and configurations
--- Test ISpell dictionary with ispell affix file
-CREATE TEXT SEARCH DICTIONARY ispell (
- Template=ispell,
- DictFile=ispell_sample,
- AffFile=ispell_sample
-);
-SELECT ts_lexize('ispell', 'skies');
- ts_lexize
------------
- {sky}
-(1 row)
-
-SELECT ts_lexize('ispell', 'bookings');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('ispell', 'booking');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('ispell', 'foot');
- ts_lexize
------------
- {foot}
-(1 row)
-
-SELECT ts_lexize('ispell', 'foots');
- ts_lexize
------------
- {foot}
-(1 row)
-
-SELECT ts_lexize('ispell', 'rebookings');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('ispell', 'rebooking');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('ispell', 'rebook');
- ts_lexize
------------
-
-(1 row)
-
-SELECT ts_lexize('ispell', 'unbookings');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('ispell', 'unbooking');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('ispell', 'unbook');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('ispell', 'footklubber');
- ts_lexize
-----------------
- {foot,klubber}
-(1 row)
-
-SELECT ts_lexize('ispell', 'footballklubber');
- ts_lexize
-------------------------------------------------------
- {footballklubber,foot,ball,klubber,football,klubber}
-(1 row)
-
-SELECT ts_lexize('ispell', 'ballyklubber');
- ts_lexize
-----------------
- {ball,klubber}
-(1 row)
-
-SELECT ts_lexize('ispell', 'footballyklubber');
- ts_lexize
----------------------
- {foot,ball,klubber}
-(1 row)
-
--- Test ISpell dictionary with hunspell affix file
-CREATE TEXT SEARCH DICTIONARY hunspell (
- Template=ispell,
- DictFile=ispell_sample,
- AffFile=hunspell_sample
-);
-SELECT ts_lexize('hunspell', 'skies');
- ts_lexize
------------
- {sky}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'bookings');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'booking');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'foot');
- ts_lexize
------------
- {foot}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'foots');
- ts_lexize
------------
- {foot}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'rebookings');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'rebooking');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'rebook');
- ts_lexize
------------
-
-(1 row)
-
-SELECT ts_lexize('hunspell', 'unbookings');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'unbooking');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'unbook');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'footklubber');
- ts_lexize
-----------------
- {foot,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'footballklubber');
- ts_lexize
-------------------------------------------------------
- {footballklubber,foot,ball,klubber,football,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'ballyklubber');
- ts_lexize
-----------------
- {ball,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell', 'footballyklubber');
- ts_lexize
----------------------
- {foot,ball,klubber}
-(1 row)
-
--- Test ISpell dictionary with hunspell affix file with FLAG long parameter
-CREATE TEXT SEARCH DICTIONARY hunspell_long (
- Template=ispell,
- DictFile=hunspell_sample_long,
- AffFile=hunspell_sample_long
-);
-SELECT ts_lexize('hunspell_long', 'skies');
- ts_lexize
------------
- {sky}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'bookings');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'booking');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'foot');
- ts_lexize
------------
- {foot}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'foots');
- ts_lexize
------------
- {foot}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'rebookings');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'rebooking');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'rebook');
- ts_lexize
------------
-
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'unbookings');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'unbooking');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'unbook');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'booked');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'footklubber');
- ts_lexize
-----------------
- {foot,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'footballklubber');
- ts_lexize
-------------------------------------------------------
- {footballklubber,foot,ball,klubber,football,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'ballyklubber');
- ts_lexize
-----------------
- {ball,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'ballsklubber');
- ts_lexize
-----------------
- {ball,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'footballyklubber');
- ts_lexize
----------------------
- {foot,ball,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell_long', 'ex-machina');
- ts_lexize
----------------
- {ex-,machina}
-(1 row)
-
--- Test ISpell dictionary with hunspell affix file with FLAG num parameter
-CREATE TEXT SEARCH DICTIONARY hunspell_num (
- Template=ispell,
- DictFile=hunspell_sample_num,
- AffFile=hunspell_sample_num
-);
-SELECT ts_lexize('hunspell_num', 'skies');
- ts_lexize
------------
- {sky}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'sk');
- ts_lexize
------------
- {sky}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'bookings');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'booking');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'foot');
- ts_lexize
------------
- {foot}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'foots');
- ts_lexize
------------
- {foot}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'rebookings');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'rebooking');
- ts_lexize
-----------------
- {booking,book}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'rebook');
- ts_lexize
------------
-
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'unbookings');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'unbooking');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'unbook');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'booked');
- ts_lexize
------------
- {book}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'footklubber');
- ts_lexize
-----------------
- {foot,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'footballklubber');
- ts_lexize
-------------------------------------------------------
- {footballklubber,foot,ball,klubber,football,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'ballyklubber');
- ts_lexize
-----------------
- {ball,klubber}
-(1 row)
-
-SELECT ts_lexize('hunspell_num', 'footballyklubber');
- ts_lexize
----------------------
- {foot,ball,klubber}
-(1 row)
-
--- Test suitability of affix and dict files
-CREATE TEXT SEARCH DICTIONARY hunspell_err (
- Template=ispell,
- DictFile=ispell_sample,
- AffFile=hunspell_sample_long
-);
-ERROR: invalid affix alias "GJUS"
-CREATE TEXT SEARCH DICTIONARY hunspell_err (
- Template=ispell,
- DictFile=ispell_sample,
- AffFile=hunspell_sample_num
-);
-ERROR: invalid affix flag "SZ\"
-CREATE TEXT SEARCH DICTIONARY hunspell_invalid_1 (
- Template=ispell,
- DictFile=hunspell_sample_long,
- AffFile=ispell_sample
-);
-CREATE TEXT SEARCH DICTIONARY hunspell_invalid_2 (
- Template=ispell,
- DictFile=hunspell_sample_long,
- AffFile=hunspell_sample_num
-);
-CREATE TEXT SEARCH DICTIONARY hunspell_invalid_3 (
- Template=ispell,
- DictFile=hunspell_sample_num,
- AffFile=ispell_sample
-);
-CREATE TEXT SEARCH DICTIONARY hunspell_err (
- Template=ispell,
- DictFile=hunspell_sample_num,
- AffFile=hunspell_sample_long
-);
-ERROR: invalid affix alias "302,301,202,303"
--- Synonym dictionary
-CREATE TEXT SEARCH DICTIONARY synonym (
- Template=synonym,
- Synonyms=synonym_sample
-);
-SELECT ts_lexize('synonym', 'PoStGrEs');
- ts_lexize
------------
- {pgsql}
-(1 row)
-
-SELECT ts_lexize('synonym', 'Gogle');
- ts_lexize
------------
- {googl}
-(1 row)
-
-SELECT ts_lexize('synonym', 'indices');
- ts_lexize
------------
- {index}
-(1 row)
-
--- test altering boolean parameters
-SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym';
- dictinitoption
------------------------------
- synonyms = 'synonym_sample'
-(1 row)
-
-ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = 1);
-SELECT ts_lexize('synonym', 'PoStGrEs');
- ts_lexize
------------
-
-(1 row)
-
-SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym';
- dictinitoption
-------------------------------------------------
- synonyms = 'synonym_sample', casesensitive = 1
-(1 row)
-
-ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = 2); -- fail
-ERROR: casesensitive requires a Boolean value
-ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = off);
-SELECT ts_lexize('synonym', 'PoStGrEs');
- ts_lexize
------------
- {pgsql}
-(1 row)
-
-SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym';
- dictinitoption
-----------------------------------------------------
- synonyms = 'synonym_sample', casesensitive = 'off'
-(1 row)
-
--- Create and simple test thesaurus dictionary
--- More tests in configuration checks because ts_lexize()
--- cannot pass more than one word to thesaurus.
-CREATE TEXT SEARCH DICTIONARY thesaurus (
- Template=thesaurus,
- DictFile=thesaurus_sample,
- Dictionary=english_stem
-);
-SELECT ts_lexize('thesaurus', 'one');
- ts_lexize
------------
- {1}
-(1 row)
-
--- Test ispell dictionary in configuration
-CREATE TEXT SEARCH CONFIGURATION ispell_tst (
- COPY=english
-);
-ALTER TEXT SEARCH CONFIGURATION ispell_tst ALTER MAPPING FOR
- word, numword, asciiword, hword, numhword, asciihword, hword_part, hword_numpart, hword_asciipart
- WITH ispell, english_stem;
-SELECT to_tsvector('ispell_tst', 'Booking the skies after rebookings for footballklubber from a foot');
- to_tsvector
-----------------------------------------------------------------------------------------------------
- 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3
-(1 row)
-
-SELECT to_tsquery('ispell_tst', 'footballklubber');
- to_tsquery
---------------------------------------------------------------------------
- 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber'
-(1 row)
-
-SELECT to_tsquery('ispell_tst', 'footballyklubber:b & rebookings:A & sky');
- to_tsquery
-------------------------------------------------------------------------
- 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky'
-(1 row)
-
--- Test ispell dictionary with hunspell affix in configuration
-CREATE TEXT SEARCH CONFIGURATION hunspell_tst (
- COPY=ispell_tst
-);
-ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING
- REPLACE ispell WITH hunspell;
-SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot');
- to_tsvector
-----------------------------------------------------------------------------------------------------
- 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3
-(1 row)
-
-SELECT to_tsquery('hunspell_tst', 'footballklubber');
- to_tsquery
---------------------------------------------------------------------------
- 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber'
-(1 row)
-
-SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky');
- to_tsquery
-------------------------------------------------------------------------
- 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky'
-(1 row)
-
-SELECT to_tsquery('hunspell_tst', 'footballyklubber:b <-> sky');
- to_tsquery
--------------------------------------------------
- ( 'foot':B & 'ball':B & 'klubber':B ) <-> 'sky'
-(1 row)
-
-SELECT phraseto_tsquery('hunspell_tst', 'footballyklubber sky');
- phraseto_tsquery
--------------------------------------------
- ( 'foot' & 'ball' & 'klubber' ) <-> 'sky'
-(1 row)
-
--- Test ispell dictionary with hunspell affix with FLAG long in configuration
-ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING
- REPLACE hunspell WITH hunspell_long;
-SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot');
- to_tsvector
-----------------------------------------------------------------------------------------------------
- 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3
-(1 row)
-
-SELECT to_tsquery('hunspell_tst', 'footballklubber');
- to_tsquery
---------------------------------------------------------------------------
- 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber'
-(1 row)
-
-SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky');
- to_tsquery
-------------------------------------------------------------------------
- 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky'
-(1 row)
-
--- Test ispell dictionary with hunspell affix with FLAG num in configuration
-ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING
- REPLACE hunspell_long WITH hunspell_num;
-SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot');
- to_tsvector
-----------------------------------------------------------------------------------------------------
- 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3
-(1 row)
-
-SELECT to_tsquery('hunspell_tst', 'footballklubber');
- to_tsquery
---------------------------------------------------------------------------
- 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber'
-(1 row)
-
-SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky');
- to_tsquery
-------------------------------------------------------------------------
- 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky'
-(1 row)
-
--- Test synonym dictionary in configuration
-CREATE TEXT SEARCH CONFIGURATION synonym_tst (
- COPY=english
-);
-ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR
- asciiword, hword_asciipart, asciihword
- WITH synonym, english_stem;
-SELECT to_tsvector('synonym_tst', 'Postgresql is often called as postgres or pgsql and pronounced as postgre');
- to_tsvector
----------------------------------------------------
- 'call':4 'often':3 'pgsql':1,6,8,12 'pronounc':10
-(1 row)
-
-SELECT to_tsvector('synonym_tst', 'Most common mistake is to write Gogle instead of Google');
- to_tsvector
-----------------------------------------------------------
- 'common':2 'googl':7,10 'instead':8 'mistak':3 'write':6
-(1 row)
-
-SELECT to_tsvector('synonym_tst', 'Indexes or indices - Which is right plural form of index?');
- to_tsvector
-----------------------------------------------
- 'form':8 'index':1,3,10 'plural':7 'right':6
-(1 row)
-
-SELECT to_tsquery('synonym_tst', 'Index & indices');
- to_tsquery
----------------------
- 'index' & 'index':*
-(1 row)
-
--- test thesaurus in configuration
--- see thesaurus_sample.ths to understand 'odd' resulting tsvector
-CREATE TEXT SEARCH CONFIGURATION thesaurus_tst (
- COPY=synonym_tst
-);
-ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR
- asciiword, hword_asciipart, asciihword
- WITH synonym, thesaurus, english_stem;
-SELECT to_tsvector('thesaurus_tst', 'one postgres one two one two three one');
- to_tsvector
-----------------------------------
- '1':1,5 '12':3 '123':4 'pgsql':2
-(1 row)
-
-SELECT to_tsvector('thesaurus_tst', 'Supernovae star is very new star and usually called supernovae (abbreviation SN)');
- to_tsvector
---------------------------------------------------------------
- 'abbrevi':10 'call':8 'new':4 'sn':1,9,11 'star':5 'usual':7
-(1 row)
-
-SELECT to_tsvector('thesaurus_tst', 'Booking tickets is looking like a booking a tickets');
- to_tsvector
--------------------------------------------------------
- 'card':3,10 'invit':2,9 'like':6 'look':5 'order':1,8
-(1 row)
-
--- invalid: non-lowercase quoted identifiers
-CREATE TEXT SEARCH DICTIONARY tsdict_case
-(
- Template = ispell,
- "DictFile" = ispell_sample,
- "AffFile" = ispell_sample
-);
-ERROR: unrecognized Ispell parameter: "DictFile"
--- Test grammar for configurations
-CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english);
--- Overridden mapping change with duplicated tokens.
-ALTER TEXT SEARCH CONFIGURATION dummy_tst
- ALTER MAPPING FOR word, word WITH ispell;
--- Not a token supported by the configuration's parser, fails.
-ALTER TEXT SEARCH CONFIGURATION dummy_tst
- DROP MAPPING FOR not_a_token, not_a_token;
-ERROR: token type "not_a_token" does not exist
--- Not a token supported by the configuration's parser, fails even
--- with IF EXISTS.
-ALTER TEXT SEARCH CONFIGURATION dummy_tst
- DROP MAPPING IF EXISTS FOR not_a_token, not_a_token;
-ERROR: token type "not_a_token" does not exist
--- Token supported by the configuration's parser, succeeds.
-ALTER TEXT SEARCH CONFIGURATION dummy_tst
- DROP MAPPING FOR word, word;
--- No mapping for token supported by the configuration's parser, fails.
-ALTER TEXT SEARCH CONFIGURATION dummy_tst
- DROP MAPPING FOR word;
-ERROR: mapping for token type "word" does not exist
--- Token supported by the configuration's parser, cannot be found,
--- succeeds with IF EXISTS.
-ALTER TEXT SEARCH CONFIGURATION dummy_tst
- DROP MAPPING IF EXISTS FOR word, word;
-NOTICE: mapping for token type "word" does not exist, skipping
--- Re-add mapping, with duplicated tokens supported by the parser.
-ALTER TEXT SEARCH CONFIGURATION dummy_tst
- ADD MAPPING FOR word, word WITH ispell;
--- Not a token supported by the configuration's parser, fails.
-ALTER TEXT SEARCH CONFIGURATION dummy_tst
- ADD MAPPING FOR not_a_token WITH ispell;
-ERROR: token type "not_a_token" does not exist
-DROP TEXT SEARCH CONFIGURATION dummy_tst;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/foreign_data.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/foreign_data.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/foreign_data.out 2024-11-15 02:50:52.438134628 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/foreign_data.out 2024-11-15 02:59:17.233115707 +0000
@@ -1,2208 +1,2 @@
---
--- Test foreign-data wrapper and server management.
---
--- directory paths and dlsuffix are passed to us in environment variables
-\getenv libdir PG_LIBDIR
-\getenv dlsuffix PG_DLSUFFIX
-\set regresslib :libdir '/regress' :dlsuffix
-CREATE FUNCTION test_fdw_handler()
- RETURNS fdw_handler
- AS :'regresslib', 'test_fdw_handler'
- LANGUAGE C;
--- Clean up in case a prior regression run failed
--- Suppress NOTICE messages when roles don't exist
-SET client_min_messages TO 'warning';
-DROP ROLE IF EXISTS regress_foreign_data_user, regress_test_role, regress_test_role2, regress_test_role_super, regress_test_indirect, regress_unprivileged_role;
-RESET client_min_messages;
-CREATE ROLE regress_foreign_data_user LOGIN SUPERUSER;
-SET SESSION AUTHORIZATION 'regress_foreign_data_user';
-CREATE ROLE regress_test_role;
-CREATE ROLE regress_test_role2;
-CREATE ROLE regress_test_role_super SUPERUSER;
-CREATE ROLE regress_test_indirect;
-CREATE ROLE regress_unprivileged_role;
-CREATE FOREIGN DATA WRAPPER dummy;
-COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless';
-CREATE FOREIGN DATA WRAPPER postgresql VALIDATOR postgresql_fdw_validator;
--- At this point we should have 2 built-in wrappers and no servers.
-SELECT fdwname, fdwhandler::regproc, fdwvalidator::regproc, fdwoptions FROM pg_foreign_data_wrapper ORDER BY 1, 2, 3;
- fdwname | fdwhandler | fdwvalidator | fdwoptions
-------------+------------+--------------------------+------------
- dummy | - | - |
- postgresql | - | postgresql_fdw_validator |
-(2 rows)
-
-SELECT srvname, srvoptions FROM pg_foreign_server;
- srvname | srvoptions
----------+------------
-(0 rows)
-
-SELECT * FROM pg_user_mapping;
- oid | umuser | umserver | umoptions
------+--------+----------+-----------
-(0 rows)
-
--- CREATE FOREIGN DATA WRAPPER
-CREATE FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR
-ERROR: function bar(text[], oid) does not exist
-CREATE FOREIGN DATA WRAPPER foo;
-\dew
- List of foreign-data wrappers
- Name | Owner | Handler | Validator
-------------+---------------------------+---------+--------------------------
- dummy | regress_foreign_data_user | - | -
- foo | regress_foreign_data_user | - | -
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator
-(3 rows)
-
-CREATE FOREIGN DATA WRAPPER foo; -- duplicate
-ERROR: foreign-data wrapper "foo" already exists
-DROP FOREIGN DATA WRAPPER foo;
-CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1');
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+---------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (testing '1') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-DROP FOREIGN DATA WRAPPER foo;
-CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', testing '2'); -- ERROR
-ERROR: option "testing" provided more than once
-CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', another '2');
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+----------------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (testing '1', another '2') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-DROP FOREIGN DATA WRAPPER foo;
-SET ROLE regress_test_role;
-CREATE FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: permission denied to create foreign-data wrapper "foo"
-HINT: Must be superuser to create a foreign-data wrapper.
-RESET ROLE;
-CREATE FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator;
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | postgresql_fdw_validator | | |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
--- HANDLER related checks
-CREATE FUNCTION invalid_fdw_handler() RETURNS int LANGUAGE SQL AS 'SELECT 1;';
-CREATE FOREIGN DATA WRAPPER test_fdw HANDLER invalid_fdw_handler; -- ERROR
-ERROR: function invalid_fdw_handler must return type fdw_handler
-CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER invalid_fdw_handler; -- ERROR
-ERROR: conflicting or redundant options
-LINE 1: ...GN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER in...
- ^
-CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler;
-DROP FOREIGN DATA WRAPPER test_fdw;
--- ALTER FOREIGN DATA WRAPPER
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (nonexistent 'fdw'); -- ERROR
-ERROR: invalid option "nonexistent"
-HINT: There are no valid options in this context.
-ALTER FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: syntax error at or near ";"
-LINE 1: ALTER FOREIGN DATA WRAPPER foo;
- ^
-ALTER FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR
-ERROR: function bar(text[], oid) does not exist
-ALTER FOREIGN DATA WRAPPER foo NO VALIDATOR;
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '1', b '2');
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (SET c '4'); -- ERROR
-ERROR: option "c" not found
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP c); -- ERROR
-ERROR: option "c" not found
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD x '1', DROP x);
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+----------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (a '1', b '2') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP a, SET b '3', ADD c '4');
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+----------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (b '3', c '4') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '2');
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (b '4'); -- ERROR
-ERROR: option "b" provided more than once
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-----------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-SET ROLE regress_test_role;
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); -- ERROR
-ERROR: permission denied to alter foreign-data wrapper "foo"
-HINT: Must be superuser to alter a foreign-data wrapper.
-SET ROLE regress_test_role_super;
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5');
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2', d '5') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role; -- ERROR
-ERROR: permission denied to change owner of foreign-data wrapper "foo"
-HINT: The owner of a foreign-data wrapper must be a superuser.
-ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role_super;
-ALTER ROLE regress_test_role_super NOSUPERUSER;
-SET ROLE regress_test_role_super;
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD e '6'); -- ERROR
-ERROR: permission denied to alter foreign-data wrapper "foo"
-HINT: Must be superuser to alter a foreign-data wrapper.
-RESET ROLE;
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-ALTER FOREIGN DATA WRAPPER foo RENAME TO foo1;
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+------------------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo1 | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-ALTER FOREIGN DATA WRAPPER foo1 RENAME TO foo;
--- HANDLER related checks
-ALTER FOREIGN DATA WRAPPER foo HANDLER invalid_fdw_handler; -- ERROR
-ERROR: function invalid_fdw_handler must return type fdw_handler
-ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER anything; -- ERROR
-ERROR: conflicting or redundant options
-LINE 1: ...FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER an...
- ^
-ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler;
-WARNING: changing the foreign-data wrapper handler can change behavior of existing foreign tables
-DROP FUNCTION invalid_fdw_handler();
--- DROP FOREIGN DATA WRAPPER
-DROP FOREIGN DATA WRAPPER nonexistent; -- ERROR
-ERROR: foreign-data wrapper "nonexistent" does not exist
-DROP FOREIGN DATA WRAPPER IF EXISTS nonexistent;
-NOTICE: foreign-data wrapper "nonexistent" does not exist, skipping
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+------------------+--------------------------+-------------------+------------------------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_test_role_super | test_fdw_handler | - | | (b '3', c '4', a '2', d '5') |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-DROP ROLE regress_test_role_super; -- ERROR
-ERROR: role "regress_test_role_super" cannot be dropped because some objects depend on it
-DETAIL: owner of foreign-data wrapper foo
-SET ROLE regress_test_role_super;
-DROP FOREIGN DATA WRAPPER foo;
-RESET ROLE;
-DROP ROLE regress_test_role_super;
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(2 rows)
-
-CREATE FOREIGN DATA WRAPPER foo;
-CREATE SERVER s1 FOREIGN DATA WRAPPER foo;
-COMMENT ON SERVER s1 IS 'foreign server';
-CREATE USER MAPPING FOR current_user SERVER s1;
-CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR
-ERROR: user mapping for "regress_foreign_data_user" already exists for server "s1"
-CREATE USER MAPPING IF NOT EXISTS FOR current_user SERVER s1; -- NOTICE
-NOTICE: user mapping for "regress_foreign_data_user" already exists for server "s1", skipping
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- foo | regress_foreign_data_user | - | - | | |
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(3 rows)
-
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------+------+---------+-------------+----------------
- s1 | regress_foreign_data_user | foo | | | | | foreign server
-(1 row)
-
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+---------------------------+-------------
- s1 | regress_foreign_data_user |
-(1 row)
-
-DROP FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: cannot drop foreign-data wrapper foo because other objects depend on it
-DETAIL: server s1 depends on foreign-data wrapper foo
-user mapping for regress_foreign_data_user on server s1 depends on server s1
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-SET ROLE regress_test_role;
-DROP FOREIGN DATA WRAPPER foo CASCADE; -- ERROR
-ERROR: must be owner of foreign-data wrapper foo
-RESET ROLE;
-DROP FOREIGN DATA WRAPPER foo CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to server s1
-drop cascades to user mapping for regress_foreign_data_user on server s1
-\dew+
- List of foreign-data wrappers
- Name | Owner | Handler | Validator | Access privileges | FDW options | Description
-------------+---------------------------+---------+--------------------------+-------------------+-------------+-------------
- dummy | regress_foreign_data_user | - | - | | | useless
- postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | |
-(2 rows)
-
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+-------+----------------------+-------------------+------+---------+-------------+-------------
-(0 rows)
-
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+-----------+-------------
-(0 rows)
-
--- exercise CREATE SERVER
-CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: foreign-data wrapper "foo" does not exist
-CREATE FOREIGN DATA WRAPPER foo OPTIONS ("test wrapper" 'true');
-CREATE SERVER s1 FOREIGN DATA WRAPPER foo;
-CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: server "s1" already exists
-CREATE SERVER IF NOT EXISTS s1 FOREIGN DATA WRAPPER foo; -- No ERROR, just NOTICE
-NOTICE: server "s1" already exists, skipping
-CREATE SERVER s2 FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
-CREATE SERVER s3 TYPE 'oracle' FOREIGN DATA WRAPPER foo;
-CREATE SERVER s4 TYPE 'oracle' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
-CREATE SERVER s5 VERSION '15.0' FOREIGN DATA WRAPPER foo;
-CREATE SERVER s6 VERSION '16.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
-CREATE SERVER s7 TYPE 'oracle' VERSION '17.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b');
-CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (foo '1'); -- ERROR
-ERROR: invalid option "foo"
-CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (host 'localhost', dbname 's8db');
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | | | | |
- s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
-(8 rows)
-
-SET ROLE regress_test_role;
-CREATE SERVER t1 FOREIGN DATA WRAPPER foo; -- ERROR: no usage on FDW
-ERROR: permission denied for foreign-data wrapper foo
-RESET ROLE;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role;
-SET ROLE regress_test_role;
-CREATE SERVER t1 FOREIGN DATA WRAPPER foo;
-RESET ROLE;
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | | | | |
- s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
- t1 | regress_test_role | foo | | | | |
-(9 rows)
-
-REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_test_role;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect;
-SET ROLE regress_test_role;
-CREATE SERVER t2 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-RESET ROLE;
-GRANT regress_test_indirect TO regress_test_role;
-SET ROLE regress_test_role;
-CREATE SERVER t2 FOREIGN DATA WRAPPER foo;
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | | | | |
- s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
-
-RESET ROLE;
-REVOKE regress_test_indirect FROM regress_test_role;
--- ALTER SERVER
-ALTER SERVER s0; -- ERROR
-ERROR: syntax error at or near ";"
-LINE 1: ALTER SERVER s0;
- ^
-ALTER SERVER s0 OPTIONS (a '1'); -- ERROR
-ERROR: server "s0" does not exist
-ALTER SERVER s1 VERSION '1.0' OPTIONS (servername 's1');
-ALTER SERVER s2 VERSION '1.1';
-ALTER SERVER s3 OPTIONS ("tns name" 'orcl', port '1521');
-GRANT USAGE ON FOREIGN SERVER s1 TO regress_test_role;
-GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role2 WITH GRANT OPTION;
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+-----------------------------------+-------------
- s1 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 1.0 | (servername 's1') |
- | | | regress_test_role=U/regress_foreign_data_user | | | |
- s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') |
- | | | regress_test_role2=U*/regress_foreign_data_user | | | |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
-
-SET ROLE regress_test_role;
-ALTER SERVER s1 VERSION '1.1'; -- ERROR
-ERROR: must be owner of foreign server s1
-ALTER SERVER s1 OWNER TO regress_test_role; -- ERROR
-ERROR: must be owner of foreign server s1
-RESET ROLE;
-ALTER SERVER s1 OWNER TO regress_test_role;
-GRANT regress_test_role2 TO regress_test_role;
-SET ROLE regress_test_role;
-ALTER SERVER s1 VERSION '1.1';
-ALTER SERVER s1 OWNER TO regress_test_role2; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-RESET ROLE;
-ALTER SERVER s8 OPTIONS (foo '1'); -- ERROR option validation
-ERROR: invalid option "foo"
-ALTER SERVER s8 OPTIONS (connect_timeout '30', SET dbname 'db1', DROP host);
-SET ROLE regress_test_role;
-ALTER SERVER s1 OWNER TO regress_test_indirect; -- ERROR
-ERROR: must be able to SET ROLE "regress_test_indirect"
-RESET ROLE;
-GRANT regress_test_indirect TO regress_test_role;
-SET ROLE regress_test_role;
-ALTER SERVER s1 OWNER TO regress_test_indirect;
-RESET ROLE;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect;
-SET ROLE regress_test_role;
-ALTER SERVER s1 OWNER TO regress_test_indirect;
-RESET ROLE;
-DROP ROLE regress_test_indirect; -- ERROR
-ERROR: role "regress_test_indirect" cannot be dropped because some objects depend on it
-DETAIL: privileges for foreign-data wrapper foo
-owner of server s1
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
-------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+-------------
- s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') |
- s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') |
- | | | regress_test_role2=U*/regress_foreign_data_user | | | |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8 | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
-
-ALTER SERVER s8 RENAME to s8new;
-\des+
- List of foreign servers
- Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description
--------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+-------------
- s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') |
- s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') |
- s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') |
- s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') |
- s5 | regress_foreign_data_user | foo | | | 15.0 | |
- s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') |
- | | | regress_test_role2=U*/regress_foreign_data_user | | | |
- s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') |
- s8new | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') |
- t1 | regress_test_role | foo | | | | |
- t2 | regress_test_role | foo | | | | |
-(10 rows)
-
-ALTER SERVER s8new RENAME to s8;
--- DROP SERVER
-DROP SERVER nonexistent; -- ERROR
-ERROR: server "nonexistent" does not exist
-DROP SERVER IF EXISTS nonexistent;
-NOTICE: server "nonexistent" does not exist, skipping
-\des
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+---------------------------+----------------------
- s1 | regress_test_indirect | foo
- s2 | regress_foreign_data_user | foo
- s3 | regress_foreign_data_user | foo
- s4 | regress_foreign_data_user | foo
- s5 | regress_foreign_data_user | foo
- s6 | regress_foreign_data_user | foo
- s7 | regress_foreign_data_user | foo
- s8 | regress_foreign_data_user | postgresql
- t1 | regress_test_role | foo
- t2 | regress_test_role | foo
-(10 rows)
-
-SET ROLE regress_test_role;
-DROP SERVER s2; -- ERROR
-ERROR: must be owner of foreign server s2
-DROP SERVER s1;
-RESET ROLE;
-\des
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+---------------------------+----------------------
- s2 | regress_foreign_data_user | foo
- s3 | regress_foreign_data_user | foo
- s4 | regress_foreign_data_user | foo
- s5 | regress_foreign_data_user | foo
- s6 | regress_foreign_data_user | foo
- s7 | regress_foreign_data_user | foo
- s8 | regress_foreign_data_user | postgresql
- t1 | regress_test_role | foo
- t2 | regress_test_role | foo
-(9 rows)
-
-ALTER SERVER s2 OWNER TO regress_test_role;
-SET ROLE regress_test_role;
-DROP SERVER s2;
-RESET ROLE;
-\des
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+---------------------------+----------------------
- s3 | regress_foreign_data_user | foo
- s4 | regress_foreign_data_user | foo
- s5 | regress_foreign_data_user | foo
- s6 | regress_foreign_data_user | foo
- s7 | regress_foreign_data_user | foo
- s8 | regress_foreign_data_user | postgresql
- t1 | regress_test_role | foo
- t2 | regress_test_role | foo
-(8 rows)
-
-CREATE USER MAPPING FOR current_user SERVER s3;
-\deu
- List of user mappings
- Server | User name
---------+---------------------------
- s3 | regress_foreign_data_user
-(1 row)
-
-DROP SERVER s3; -- ERROR
-ERROR: cannot drop server s3 because other objects depend on it
-DETAIL: user mapping for regress_foreign_data_user on server s3 depends on server s3
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP SERVER s3 CASCADE;
-NOTICE: drop cascades to user mapping for regress_foreign_data_user on server s3
-\des
- List of foreign servers
- Name | Owner | Foreign-data wrapper
-------+---------------------------+----------------------
- s4 | regress_foreign_data_user | foo
- s5 | regress_foreign_data_user | foo
- s6 | regress_foreign_data_user | foo
- s7 | regress_foreign_data_user | foo
- s8 | regress_foreign_data_user | postgresql
- t1 | regress_test_role | foo
- t2 | regress_test_role | foo
-(7 rows)
-
-\deu
-List of user mappings
- Server | User name
---------+-----------
-(0 rows)
-
--- CREATE USER MAPPING
-CREATE USER MAPPING FOR regress_test_missing_role SERVER s1; -- ERROR
-ERROR: role "regress_test_missing_role" does not exist
-CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR
-ERROR: server "s1" does not exist
-CREATE USER MAPPING FOR current_user SERVER s4;
-CREATE USER MAPPING FOR user SERVER s4; -- ERROR duplicate
-ERROR: user mapping for "regress_foreign_data_user" already exists for server "s4"
-CREATE USER MAPPING FOR public SERVER s4 OPTIONS ("this mapping" 'is public');
-CREATE USER MAPPING FOR user SERVER s8 OPTIONS (username 'test', password 'secret'); -- ERROR
-ERROR: invalid option "username"
-HINT: Perhaps you meant the option "user".
-CREATE USER MAPPING FOR user SERVER s8 OPTIONS (user 'test', password 'secret');
-ALTER SERVER s5 OWNER TO regress_test_role;
-ALTER SERVER s6 OWNER TO regress_test_indirect;
-SET ROLE regress_test_role;
-CREATE USER MAPPING FOR current_user SERVER s5;
-CREATE USER MAPPING FOR current_user SERVER s6 OPTIONS (username 'test');
-CREATE USER MAPPING FOR current_user SERVER s7; -- ERROR
-ERROR: permission denied for foreign server s7
-CREATE USER MAPPING FOR public SERVER s8; -- ERROR
-ERROR: must be owner of foreign server s8
-RESET ROLE;
-ALTER SERVER t1 OWNER TO regress_test_indirect;
-SET ROLE regress_test_role;
-CREATE USER MAPPING FOR current_user SERVER t1 OPTIONS (username 'bob', password 'boo');
-CREATE USER MAPPING FOR public SERVER t1;
-RESET ROLE;
-\deu
- List of user mappings
- Server | User name
---------+---------------------------
- s4 | public
- s4 | regress_foreign_data_user
- s5 | regress_test_role
- s6 | regress_test_role
- s8 | regress_foreign_data_user
- t1 | public
- t1 | regress_test_role
-(7 rows)
-
--- ALTER USER MAPPING
-ALTER USER MAPPING FOR regress_test_missing_role SERVER s4 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: role "regress_test_missing_role" does not exist
-ALTER USER MAPPING FOR user SERVER ss4 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: server "ss4" does not exist
-ALTER USER MAPPING FOR public SERVER s5 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: user mapping for "public" does not exist for server "s5"
-ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (username 'test'); -- ERROR
-ERROR: invalid option "username"
-HINT: Perhaps you meant the option "user".
-ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (DROP user, SET password 'public');
-SET ROLE regress_test_role;
-ALTER USER MAPPING FOR current_user SERVER s5 OPTIONS (ADD modified '1');
-ALTER USER MAPPING FOR public SERVER s4 OPTIONS (ADD modified '1'); -- ERROR
-ERROR: must be owner of foreign server s4
-ALTER USER MAPPING FOR public SERVER t1 OPTIONS (ADD modified '1');
-RESET ROLE;
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+---------------------------+----------------------------------
- s4 | public | ("this mapping" 'is public')
- s4 | regress_foreign_data_user |
- s5 | regress_test_role | (modified '1')
- s6 | regress_test_role | (username 'test')
- s8 | regress_foreign_data_user | (password 'public')
- t1 | public | (modified '1')
- t1 | regress_test_role | (username 'bob', password 'boo')
-(7 rows)
-
--- DROP USER MAPPING
-DROP USER MAPPING FOR regress_test_missing_role SERVER s4; -- ERROR
-ERROR: role "regress_test_missing_role" does not exist
-DROP USER MAPPING FOR user SERVER ss4;
-ERROR: server "ss4" does not exist
-DROP USER MAPPING FOR public SERVER s7; -- ERROR
-ERROR: user mapping for "public" does not exist for server "s7"
-DROP USER MAPPING IF EXISTS FOR regress_test_missing_role SERVER s4;
-NOTICE: role "regress_test_missing_role" does not exist, skipping
-DROP USER MAPPING IF EXISTS FOR user SERVER ss4;
-NOTICE: server "ss4" does not exist, skipping
-DROP USER MAPPING IF EXISTS FOR public SERVER s7;
-NOTICE: user mapping for "public" does not exist for server "s7", skipping
-CREATE USER MAPPING FOR public SERVER s8;
-SET ROLE regress_test_role;
-DROP USER MAPPING FOR public SERVER s8; -- ERROR
-ERROR: must be owner of foreign server s8
-RESET ROLE;
-DROP SERVER s7;
-\deu
- List of user mappings
- Server | User name
---------+---------------------------
- s4 | public
- s4 | regress_foreign_data_user
- s5 | regress_test_role
- s6 | regress_test_role
- s8 | public
- s8 | regress_foreign_data_user
- t1 | public
- t1 | regress_test_role
-(8 rows)
-
--- CREATE FOREIGN TABLE
-CREATE SCHEMA foreign_schema;
-CREATE SERVER s0 FOREIGN DATA WRAPPER dummy;
-CREATE FOREIGN TABLE ft1 (); -- ERROR
-ERROR: syntax error at or near ";"
-LINE 1: CREATE FOREIGN TABLE ft1 ();
- ^
-CREATE FOREIGN TABLE ft1 () SERVER no_server; -- ERROR
-ERROR: server "no_server" does not exist
-CREATE FOREIGN TABLE ft1 (
- c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY,
- c2 text OPTIONS (param2 'val2', param3 'val3'),
- c3 date
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR
-ERROR: primary key constraints are not supported on foreign tables
-LINE 2: c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY,
- ^
-CREATE TABLE ref_table (id integer PRIMARY KEY);
-CREATE FOREIGN TABLE ft1 (
- c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table (id),
- c2 text OPTIONS (param2 'val2', param3 'val3'),
- c3 date
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR
-ERROR: foreign key constraints are not supported on foreign tables
-LINE 2: c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table ...
- ^
-DROP TABLE ref_table;
-CREATE FOREIGN TABLE ft1 (
- c1 integer OPTIONS ("param 1" 'val1') NOT NULL,
- c2 text OPTIONS (param2 'val2', param3 'val3'),
- c3 date,
- UNIQUE (c3)
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR
-ERROR: unique constraints are not supported on foreign tables
-LINE 5: UNIQUE (c3)
- ^
-CREATE FOREIGN TABLE ft1 (
- c1 integer OPTIONS ("param 1" 'val1') NOT NULL,
- c2 text OPTIONS (param2 'val2', param3 'val3') CHECK (c2 <> ''),
- c3 date,
- CHECK (c3 BETWEEN '1994-01-01'::date AND '1994-01-31'::date)
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
-COMMENT ON FOREIGN TABLE ft1 IS 'ft1';
-COMMENT ON COLUMN ft1.c1 IS 'ft1.c1';
-\d+ ft1
- Foreign table "public.ft1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+--------------------------------+----------+--------------+-------------
- c1 | integer | | not null | | ("param 1" 'val1') | plain | | ft1.c1
- c2 | text | | | | (param2 'val2', param3 'val3') | extended | |
- c3 | date | | | | | plain | |
-Check constraints:
- "ft1_c2_check" CHECK (c2 <> ''::text)
- "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date)
-Not-null constraints:
- "ft1_c1_not_null" NOT NULL "c1"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
-\det+
- List of foreign tables
- Schema | Table | Server | FDW options | Description
---------+-------+--------+-------------------------------------------------+-------------
- public | ft1 | s0 | (delimiter ',', quote '"', "be quoted" 'value') | ft1
-(1 row)
-
-CREATE INDEX id_ft1_c2 ON ft1 (c2); -- ERROR
-ERROR: cannot create index on relation "ft1"
-DETAIL: This operation is not supported for foreign tables.
-SELECT * FROM ft1; -- ERROR
-ERROR: foreign-data wrapper "dummy" has no handler
-EXPLAIN SELECT * FROM ft1; -- ERROR
-ERROR: foreign-data wrapper "dummy" has no handler
-CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a);
-CREATE FOREIGN TABLE ft_part1
- PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0;
-CREATE INDEX ON lt1 (a); -- skips partition
-CREATE UNIQUE INDEX ON lt1 (a); -- ERROR
-ERROR: cannot create unique index on partitioned table "lt1"
-DETAIL: Table "lt1" contains partitions that are foreign tables.
-ALTER TABLE lt1 ADD PRIMARY KEY (a); -- ERROR
-ERROR: cannot create unique index on partitioned table "lt1"
-DETAIL: Table "lt1" contains partitions that are foreign tables.
-DROP TABLE lt1;
-CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a);
-CREATE INDEX ON lt1 (a);
-CREATE FOREIGN TABLE ft_part1
- PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0;
-CREATE FOREIGN TABLE ft_part2 (a INT) SERVER s0;
-ALTER TABLE lt1 ATTACH PARTITION ft_part2 FOR VALUES FROM (1000) TO (2000);
-DROP FOREIGN TABLE ft_part1, ft_part2;
-CREATE UNIQUE INDEX ON lt1 (a);
-ALTER TABLE lt1 ADD PRIMARY KEY (a);
-CREATE FOREIGN TABLE ft_part1
- PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; -- ERROR
-ERROR: cannot create foreign partition of partitioned table "lt1"
-DETAIL: Table "lt1" contains indexes that are unique.
-CREATE FOREIGN TABLE ft_part2 (a INT NOT NULL) SERVER s0;
-ALTER TABLE lt1 ATTACH PARTITION ft_part2
- FOR VALUES FROM (1000) TO (2000); -- ERROR
-ERROR: cannot attach foreign table "ft_part2" as partition of partitioned table "lt1"
-DETAIL: Partitioned table "lt1" contains unique indexes.
-DROP TABLE lt1;
-DROP FOREIGN TABLE ft_part2;
-CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a);
-CREATE INDEX ON lt1 (a);
-CREATE TABLE lt1_part1
- PARTITION OF lt1 FOR VALUES FROM (0) TO (1000)
- PARTITION BY RANGE (a);
-CREATE FOREIGN TABLE ft_part_1_1
- PARTITION OF lt1_part1 FOR VALUES FROM (0) TO (100) SERVER s0;
-CREATE FOREIGN TABLE ft_part_1_2 (a INT) SERVER s0;
-ALTER TABLE lt1_part1 ATTACH PARTITION ft_part_1_2 FOR VALUES FROM (100) TO (200);
-CREATE UNIQUE INDEX ON lt1 (a);
-ERROR: cannot create unique index on partitioned table "lt1"
-DETAIL: Table "lt1" contains partitions that are foreign tables.
-ALTER TABLE lt1 ADD PRIMARY KEY (a);
-ERROR: cannot create unique index on partitioned table "lt1_part1"
-DETAIL: Table "lt1_part1" contains partitions that are foreign tables.
-DROP FOREIGN TABLE ft_part_1_1, ft_part_1_2;
-CREATE UNIQUE INDEX ON lt1 (a);
-ALTER TABLE lt1 ADD PRIMARY KEY (a);
-CREATE FOREIGN TABLE ft_part_1_1
- PARTITION OF lt1_part1 FOR VALUES FROM (0) TO (100) SERVER s0;
-ERROR: cannot create foreign partition of partitioned table "lt1_part1"
-DETAIL: Table "lt1_part1" contains indexes that are unique.
-CREATE FOREIGN TABLE ft_part_1_2 (a INT NOT NULL) SERVER s0;
-ALTER TABLE lt1_part1 ATTACH PARTITION ft_part_1_2 FOR VALUES FROM (100) TO (200);
-ERROR: cannot attach foreign table "ft_part_1_2" as partition of partitioned table "lt1_part1"
-DETAIL: Partitioned table "lt1_part1" contains unique indexes.
-DROP TABLE lt1;
-DROP FOREIGN TABLE ft_part_1_2;
--- ALTER FOREIGN TABLE
-COMMENT ON FOREIGN TABLE ft1 IS 'foreign table';
-COMMENT ON FOREIGN TABLE ft1 IS NULL;
-COMMENT ON COLUMN ft1.c1 IS 'foreign column';
-COMMENT ON COLUMN ft1.c1 IS NULL;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c4 integer;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c5 integer DEFAULT 0;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c6 integer;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c7 integer NOT NULL;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c8 integer;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c9 integer;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1');
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c4 SET DEFAULT 0;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c5 DROP DEFAULT;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c6 SET NOT NULL;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 DROP NOT NULL;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR
-ERROR: "ft1" is not a table
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10);
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE text;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN xmin OPTIONS (ADD p1 'v1'); -- ERROR
-ERROR: cannot alter system column "xmin"
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'),
- ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2');
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET STATISTICS 10000;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct = 100);
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STATISTICS -1;
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STORAGE PLAIN;
-\d+ ft1
- Foreign table "public.ft1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+--------------------------------+----------+--------------+-------------
- c1 | integer | | not null | | ("param 1" 'val1') | plain | 10000 |
- c2 | text | | | | (param2 'val2', param3 'val3') | extended | |
- c3 | date | | | | | plain | |
- c4 | integer | | | 0 | | plain | |
- c5 | integer | | | | | plain | |
- c6 | integer | | not null | | | plain | |
- c7 | integer | | | | (p1 'v1', p2 'v2') | plain | |
- c8 | text | | | | (p2 'V2') | plain | |
- c9 | integer | | | | | plain | |
- c10 | integer | | | | (p1 'v1') | plain | |
-Check constraints:
- "ft1_c2_check" CHECK (c2 <> ''::text)
- "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date)
-Not-null constraints:
- "ft1_c1_not_null" NOT NULL "c1"
- "ft1_c6_not_null" NOT NULL "c6"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
--- can't change the column type if it's used elsewhere
-CREATE TABLE use_ft1_column_type (x ft1);
-ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE integer; -- ERROR
-ERROR: cannot alter foreign table "ft1" because column "use_ft1_column_type.x" uses its row type
-DROP TABLE use_ft1_column_type;
-ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7); -- ERROR
-ERROR: primary key constraints are not supported on foreign tables
-LINE 1: ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7);
- ^
-ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c9_check CHECK (c9 < 0) NOT VALID;
-ALTER FOREIGN TABLE ft1 ALTER CONSTRAINT ft1_c9_check DEFERRABLE; -- ERROR
-ERROR: ALTER action ALTER CONSTRAINT cannot be performed on relation "ft1"
-DETAIL: This operation is not supported for foreign tables.
-ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c9_check;
-ALTER FOREIGN TABLE ft1 DROP CONSTRAINT no_const; -- ERROR
-ERROR: constraint "no_const" of relation "ft1" does not exist
-ALTER FOREIGN TABLE ft1 DROP CONSTRAINT IF EXISTS no_const;
-NOTICE: constraint "no_const" of relation "ft1" does not exist, skipping
-ALTER FOREIGN TABLE ft1 OWNER TO regress_test_role;
-ALTER FOREIGN TABLE ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@');
-ALTER FOREIGN TABLE ft1 DROP COLUMN no_column; -- ERROR
-ERROR: column "no_column" of relation "ft1" does not exist
-ALTER FOREIGN TABLE ft1 DROP COLUMN IF EXISTS no_column;
-NOTICE: column "no_column" of relation "ft1" does not exist, skipping
-ALTER FOREIGN TABLE ft1 DROP COLUMN c9;
-ALTER FOREIGN TABLE ft1 ADD COLUMN c11 serial;
-ALTER FOREIGN TABLE ft1 SET SCHEMA foreign_schema;
-ALTER FOREIGN TABLE ft1 SET TABLESPACE ts; -- ERROR
-ERROR: relation "ft1" does not exist
-ALTER SEQUENCE foreign_schema.ft1_c11_seq SET SCHEMA public; -- ERROR
-ERROR: cannot move an owned sequence into another schema
-DETAIL: Sequence "ft1_c11_seq" is linked to table "ft1".
-ALTER FOREIGN TABLE foreign_schema.ft1 RENAME c1 TO foreign_column_1;
-ALTER FOREIGN TABLE foreign_schema.ft1 RENAME TO foreign_table_1;
-\d foreign_schema.foreign_table_1
- Foreign table "foreign_schema.foreign_table_1"
- Column | Type | Collation | Nullable | Default | FDW options
-------------------+---------+-----------+----------+-------------------------------------------------+--------------------------------
- foreign_column_1 | integer | | not null | | ("param 1" 'val1')
- c2 | text | | | | (param2 'val2', param3 'val3')
- c3 | date | | | |
- c4 | integer | | | 0 |
- c5 | integer | | | |
- c6 | integer | | not null | |
- c7 | integer | | | | (p1 'v1', p2 'v2')
- c8 | text | | | | (p2 'V2')
- c10 | integer | | | | (p1 'v1')
- c11 | integer | | not null | nextval('foreign_schema.ft1_c11_seq'::regclass) |
-Check constraints:
- "ft1_c2_check" CHECK (c2 <> ''::text)
- "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date)
-Server: s0
-FDW options: (quote '~', "be quoted" 'value', escape '@')
-
--- alter noexisting table
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c4 integer;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c6 integer;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c7 integer NOT NULL;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c8 integer;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c9 integer;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1');
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c6 SET NOT NULL;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 DROP NOT NULL;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 TYPE char(10);
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 SET DATA TYPE text;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'),
- ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2');
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT IF EXISTS no_const;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT ft1_c1_check;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OWNER TO regress_test_role;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@');
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN IF EXISTS no_column;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN c9;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 SET SCHEMA foreign_schema;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME c1 TO foreign_column_1;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
-ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME TO foreign_table_1;
-NOTICE: relation "doesnt_exist_ft1" does not exist, skipping
--- Information schema
-SELECT * FROM information_schema.foreign_data_wrappers ORDER BY 1, 2;
- foreign_data_wrapper_catalog | foreign_data_wrapper_name | authorization_identifier | library_name | foreign_data_wrapper_language
-------------------------------+---------------------------+---------------------------+--------------+-------------------------------
- regression | dummy | regress_foreign_data_user | | c
- regression | foo | regress_foreign_data_user | | c
- regression | postgresql | regress_foreign_data_user | | c
-(3 rows)
-
-SELECT * FROM information_schema.foreign_data_wrapper_options ORDER BY 1, 2, 3;
- foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value
-------------------------------+---------------------------+--------------+--------------
- regression | foo | test wrapper | true
-(1 row)
-
-SELECT * FROM information_schema.foreign_servers ORDER BY 1, 2;
- foreign_server_catalog | foreign_server_name | foreign_data_wrapper_catalog | foreign_data_wrapper_name | foreign_server_type | foreign_server_version | authorization_identifier
-------------------------+---------------------+------------------------------+---------------------------+---------------------+------------------------+---------------------------
- regression | s0 | regression | dummy | | | regress_foreign_data_user
- regression | s4 | regression | foo | oracle | | regress_foreign_data_user
- regression | s5 | regression | foo | | 15.0 | regress_test_role
- regression | s6 | regression | foo | | 16.0 | regress_test_indirect
- regression | s8 | regression | postgresql | | | regress_foreign_data_user
- regression | t1 | regression | foo | | | regress_test_indirect
- regression | t2 | regression | foo | | | regress_test_role
-(7 rows)
-
-SELECT * FROM information_schema.foreign_server_options ORDER BY 1, 2, 3;
- foreign_server_catalog | foreign_server_name | option_name | option_value
-------------------------+---------------------+-----------------+--------------
- regression | s4 | dbname | b
- regression | s4 | host | a
- regression | s6 | dbname | b
- regression | s6 | host | a
- regression | s8 | connect_timeout | 30
- regression | s8 | dbname | db1
-(6 rows)
-
-SELECT * FROM information_schema.user_mappings ORDER BY lower(authorization_identifier), 2, 3;
- authorization_identifier | foreign_server_catalog | foreign_server_name
----------------------------+------------------------+---------------------
- PUBLIC | regression | s4
- PUBLIC | regression | s8
- PUBLIC | regression | t1
- regress_foreign_data_user | regression | s4
- regress_foreign_data_user | regression | s8
- regress_test_role | regression | s5
- regress_test_role | regression | s6
- regress_test_role | regression | t1
-(8 rows)
-
-SELECT * FROM information_schema.user_mapping_options ORDER BY lower(authorization_identifier), 2, 3, 4;
- authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value
----------------------------+------------------------+---------------------+--------------+--------------
- PUBLIC | regression | s4 | this mapping | is public
- PUBLIC | regression | t1 | modified | 1
- regress_foreign_data_user | regression | s8 | password | public
- regress_test_role | regression | s5 | modified | 1
- regress_test_role | regression | s6 | username | test
- regress_test_role | regression | t1 | password | boo
- regress_test_role | regression | t1 | username | bob
-(7 rows)
-
-SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
----------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- regress_foreign_data_user | regress_foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES
- regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
-(4 rows)
-
-SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
----------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- regress_foreign_data_user | regress_foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES
- regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
-(4 rows)
-
-SELECT * FROM information_schema.foreign_tables ORDER BY 1, 2, 3;
- foreign_table_catalog | foreign_table_schema | foreign_table_name | foreign_server_catalog | foreign_server_name
------------------------+----------------------+--------------------+------------------------+---------------------
- regression | foreign_schema | foreign_table_1 | regression | s0
-(1 row)
-
-SELECT * FROM information_schema.foreign_table_options ORDER BY 1, 2, 3, 4;
- foreign_table_catalog | foreign_table_schema | foreign_table_name | option_name | option_value
------------------------+----------------------+--------------------+-------------+--------------
- regression | foreign_schema | foreign_table_1 | be quoted | value
- regression | foreign_schema | foreign_table_1 | escape | @
- regression | foreign_schema | foreign_table_1 | quote | ~
-(3 rows)
-
-SET ROLE regress_test_role;
-SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4;
- authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value
---------------------------+------------------------+---------------------+-------------+--------------
- PUBLIC | regression | t1 | modified | 1
- regress_test_role | regression | s5 | modified | 1
- regress_test_role | regression | s6 | username | test
- regress_test_role | regression | t1 | password | boo
- regress_test_role | regression | t1 | username | bob
-(5 rows)
-
-SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
----------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
-(3 rows)
-
-SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5;
- grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable
----------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+--------------
- regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO
- regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES
- regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES
-(3 rows)
-
-DROP USER MAPPING FOR current_user SERVER t1;
-SET ROLE regress_test_role2;
-SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4;
- authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value
---------------------------+------------------------+---------------------+-------------+--------------
- regress_test_role | regression | s6 | username |
-(1 row)
-
-RESET ROLE;
--- has_foreign_data_wrapper_privilege
-SELECT has_foreign_data_wrapper_privilege('regress_test_role',
- (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege('regress_test_role', 'foo', 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege(
- (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'),
- (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege(
- (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege(
- (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 'foo', 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-SELECT has_foreign_data_wrapper_privilege('foo', 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role;
-SELECT has_foreign_data_wrapper_privilege('regress_test_role', 'foo', 'USAGE');
- has_foreign_data_wrapper_privilege
-------------------------------------
- t
-(1 row)
-
--- has_server_privilege
-SELECT has_server_privilege('regress_test_role',
- (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE');
- has_server_privilege
-----------------------
- f
-(1 row)
-
-SELECT has_server_privilege('regress_test_role', 's8', 'USAGE');
- has_server_privilege
-----------------------
- f
-(1 row)
-
-SELECT has_server_privilege(
- (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'),
- (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE');
- has_server_privilege
-----------------------
- f
-(1 row)
-
-SELECT has_server_privilege(
- (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE');
- has_server_privilege
-----------------------
- t
-(1 row)
-
-SELECT has_server_privilege(
- (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 's8', 'USAGE');
- has_server_privilege
-----------------------
- f
-(1 row)
-
-SELECT has_server_privilege('s8', 'USAGE');
- has_server_privilege
-----------------------
- t
-(1 row)
-
-GRANT USAGE ON FOREIGN SERVER s8 TO regress_test_role;
-SELECT has_server_privilege('regress_test_role', 's8', 'USAGE');
- has_server_privilege
-----------------------
- t
-(1 row)
-
-REVOKE USAGE ON FOREIGN SERVER s8 FROM regress_test_role;
-GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role;
-DROP USER MAPPING FOR public SERVER s4;
-ALTER SERVER s6 OPTIONS (DROP host, DROP dbname);
-ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (DROP username);
-ALTER FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator;
-WARNING: changing the foreign-data wrapper validator can cause the options for dependent objects to become invalid
--- Privileges
-SET ROLE regress_unprivileged_role;
-CREATE FOREIGN DATA WRAPPER foobar; -- ERROR
-ERROR: permission denied to create foreign-data wrapper "foobar"
-HINT: Must be superuser to create a foreign-data wrapper.
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR
-ERROR: permission denied to alter foreign-data wrapper "foo"
-HINT: Must be superuser to alter a foreign-data wrapper.
-ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_unprivileged_role; -- ERROR
-ERROR: permission denied to change owner of foreign-data wrapper "foo"
-HINT: Must be superuser to change owner of a foreign-data wrapper.
-DROP FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: must be owner of foreign-data wrapper foo
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-CREATE SERVER s9 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-ALTER SERVER s4 VERSION '0.5'; -- ERROR
-ERROR: must be owner of foreign server s4
-ALTER SERVER s4 OWNER TO regress_unprivileged_role; -- ERROR
-ERROR: must be owner of foreign server s4
-DROP SERVER s4; -- ERROR
-ERROR: must be owner of foreign server s4
-GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role; -- ERROR
-ERROR: permission denied for foreign server s4
-CREATE USER MAPPING FOR public SERVER s4; -- ERROR
-ERROR: must be owner of foreign server s4
-ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: must be owner of foreign server s6
-DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR
-ERROR: must be owner of foreign server s6
-RESET ROLE;
-GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_unprivileged_role;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_unprivileged_role WITH GRANT OPTION;
-SET ROLE regress_unprivileged_role;
-CREATE FOREIGN DATA WRAPPER foobar; -- ERROR
-ERROR: permission denied to create foreign-data wrapper "foobar"
-HINT: Must be superuser to create a foreign-data wrapper.
-ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR
-ERROR: permission denied to alter foreign-data wrapper "foo"
-HINT: Must be superuser to alter a foreign-data wrapper.
-DROP FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: must be owner of foreign-data wrapper foo
-GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_test_role; -- WARNING
-WARNING: no privileges were granted for "postgresql"
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role;
-CREATE SERVER s9 FOREIGN DATA WRAPPER postgresql;
-ALTER SERVER s6 VERSION '0.5'; -- ERROR
-ERROR: must be owner of foreign server s6
-DROP SERVER s6; -- ERROR
-ERROR: must be owner of foreign server s6
-GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role; -- ERROR
-ERROR: permission denied for foreign server s6
-GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role;
-CREATE USER MAPPING FOR public SERVER s6; -- ERROR
-ERROR: must be owner of foreign server s6
-CREATE USER MAPPING FOR public SERVER s9;
-ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR
-ERROR: must be owner of foreign server s6
-DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR
-ERROR: must be owner of foreign server s6
-RESET ROLE;
-REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role; -- ERROR
-ERROR: dependent privileges exist
-HINT: Use CASCADE to revoke them too.
-REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role CASCADE;
-SET ROLE regress_unprivileged_role;
-GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-CREATE SERVER s10 FOREIGN DATA WRAPPER foo; -- ERROR
-ERROR: permission denied for foreign-data wrapper foo
-ALTER SERVER s9 VERSION '1.1';
-GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role;
-CREATE USER MAPPING FOR current_user SERVER s9;
-DROP SERVER s9 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to user mapping for public on server s9
-drop cascades to user mapping for regress_unprivileged_role on server s9
-RESET ROLE;
-CREATE SERVER s9 FOREIGN DATA WRAPPER foo;
-GRANT USAGE ON FOREIGN SERVER s9 TO regress_unprivileged_role;
-SET ROLE regress_unprivileged_role;
-ALTER SERVER s9 VERSION '1.2'; -- ERROR
-ERROR: must be owner of foreign server s9
-GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; -- WARNING
-WARNING: no privileges were granted for "s9"
-CREATE USER MAPPING FOR current_user SERVER s9;
-DROP SERVER s9 CASCADE; -- ERROR
-ERROR: must be owner of foreign server s9
--- Check visibility of user mapping data
-SET ROLE regress_test_role;
-CREATE SERVER s10 FOREIGN DATA WRAPPER foo;
-CREATE USER MAPPING FOR public SERVER s10 OPTIONS (user 'secret');
-CREATE USER MAPPING FOR regress_unprivileged_role SERVER s10 OPTIONS (user 'secret');
--- owner of server can see some option fields
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+---------------------------+-------------------
- s10 | public | ("user" 'secret')
- s10 | regress_unprivileged_role |
- s4 | regress_foreign_data_user |
- s5 | regress_test_role | (modified '1')
- s6 | regress_test_role |
- s8 | public |
- s8 | regress_foreign_data_user |
- s9 | regress_unprivileged_role |
- t1 | public | (modified '1')
-(9 rows)
-
-RESET ROLE;
--- superuser can see all option fields
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+---------------------------+---------------------
- s10 | public | ("user" 'secret')
- s10 | regress_unprivileged_role | ("user" 'secret')
- s4 | regress_foreign_data_user |
- s5 | regress_test_role | (modified '1')
- s6 | regress_test_role |
- s8 | public |
- s8 | regress_foreign_data_user | (password 'public')
- s9 | regress_unprivileged_role |
- t1 | public | (modified '1')
-(9 rows)
-
--- unprivileged user cannot see any option field
-SET ROLE regress_unprivileged_role;
-\deu+
- List of user mappings
- Server | User name | FDW options
---------+---------------------------+-------------
- s10 | public |
- s10 | regress_unprivileged_role |
- s4 | regress_foreign_data_user |
- s5 | regress_test_role |
- s6 | regress_test_role |
- s8 | public |
- s8 | regress_foreign_data_user |
- s9 | regress_unprivileged_role |
- t1 | public |
-(9 rows)
-
-RESET ROLE;
-DROP SERVER s10 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to user mapping for public on server s10
-drop cascades to user mapping for regress_unprivileged_role on server s10
--- Triggers
-CREATE FUNCTION dummy_trigger() RETURNS TRIGGER AS $$
- BEGIN
- RETURN NULL;
- END
-$$ language plpgsql;
-CREATE TRIGGER trigtest_before_stmt BEFORE INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH STATEMENT
-EXECUTE PROCEDURE dummy_trigger();
-CREATE TRIGGER trigtest_after_stmt AFTER INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH STATEMENT
-EXECUTE PROCEDURE dummy_trigger();
-CREATE TRIGGER trigtest_after_stmt_tt AFTER INSERT OR UPDATE OR DELETE -- ERROR
-ON foreign_schema.foreign_table_1
-REFERENCING NEW TABLE AS new_table
-FOR EACH STATEMENT
-EXECUTE PROCEDURE dummy_trigger();
-ERROR: "foreign_table_1" is a foreign table
-DETAIL: Triggers on foreign tables cannot have transition tables.
-CREATE TRIGGER trigtest_before_row BEFORE INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH ROW
-EXECUTE PROCEDURE dummy_trigger();
-CREATE TRIGGER trigtest_after_row AFTER INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH ROW
-EXECUTE PROCEDURE dummy_trigger();
-CREATE CONSTRAINT TRIGGER trigtest_constraint AFTER INSERT OR UPDATE OR DELETE
-ON foreign_schema.foreign_table_1
-FOR EACH ROW
-EXECUTE PROCEDURE dummy_trigger();
-ERROR: "foreign_table_1" is a foreign table
-DETAIL: Foreign tables cannot have constraint triggers.
-ALTER FOREIGN TABLE foreign_schema.foreign_table_1
- DISABLE TRIGGER trigtest_before_stmt;
-ALTER FOREIGN TABLE foreign_schema.foreign_table_1
- ENABLE TRIGGER trigtest_before_stmt;
-DROP TRIGGER trigtest_before_stmt ON foreign_schema.foreign_table_1;
-DROP TRIGGER trigtest_before_row ON foreign_schema.foreign_table_1;
-DROP TRIGGER trigtest_after_stmt ON foreign_schema.foreign_table_1;
-DROP TRIGGER trigtest_after_row ON foreign_schema.foreign_table_1;
-DROP FUNCTION dummy_trigger();
--- Table inheritance
-CREATE TABLE fd_pt1 (
- c1 integer NOT NULL,
- c2 text,
- c3 date
-);
-CREATE FOREIGN TABLE ft2 () INHERITS (fd_pt1)
- SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1" (inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-
-DROP FOREIGN TABLE ft2;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-
-CREATE FOREIGN TABLE ft2 (
- c1 integer NOT NULL,
- c2 text,
- c3 date
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
-ALTER FOREIGN TABLE ft2 INHERIT fd_pt1;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-
-CREATE TABLE ct3() INHERITS(ft2);
-CREATE FOREIGN TABLE ft3 (
- c1 integer NOT NULL,
- c2 text,
- c3 date
-) INHERITS(ft2)
- SERVER s0;
-NOTICE: merging column "c1" with inherited definition
-NOTICE: merging column "c2" with inherited definition
-NOTICE: merging column "c3" with inherited definition
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-Child tables: ct3,
- ft3, FOREIGN
-
-\d+ ct3
- Table "public.ct3"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (inherited)
-Inherits: ft2
-
-\d+ ft3
- Foreign table "public.ft3"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft3_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-Inherits: ft2
-
--- add attributes recursively
-ALTER TABLE fd_pt1 ADD COLUMN c4 integer;
-ALTER TABLE fd_pt1 ADD COLUMN c5 integer DEFAULT 0;
-ALTER TABLE fd_pt1 ADD COLUMN c6 integer;
-ALTER TABLE fd_pt1 ADD COLUMN c7 integer NOT NULL;
-ALTER TABLE fd_pt1 ADD COLUMN c8 integer;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
- c4 | integer | | | | plain | |
- c5 | integer | | | 0 | plain | |
- c6 | integer | | | | plain | |
- c7 | integer | | not null | | plain | |
- c8 | integer | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
- "fd_pt1_c7_not_null" NOT NULL "c7"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
- c4 | integer | | | | | plain | |
- c5 | integer | | | 0 | | plain | |
- c6 | integer | | | | | plain | |
- c7 | integer | | not null | | | plain | |
- c8 | integer | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
- "fd_pt1_c7_not_null" NOT NULL "c7" (inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-Child tables: ct3,
- ft3, FOREIGN
-
-\d+ ct3
- Table "public.ct3"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
- c4 | integer | | | | plain | |
- c5 | integer | | | 0 | plain | |
- c6 | integer | | | | plain | |
- c7 | integer | | not null | | plain | |
- c8 | integer | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (inherited)
- "fd_pt1_c7_not_null" NOT NULL "c7" (inherited)
-Inherits: ft2
-
-\d+ ft3
- Foreign table "public.ft3"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
- c4 | integer | | | | | plain | |
- c5 | integer | | | 0 | | plain | |
- c6 | integer | | | | | plain | |
- c7 | integer | | not null | | | plain | |
- c8 | integer | | | | | plain | |
-Not-null constraints:
- "ft3_c1_not_null" NOT NULL "c1" (local, inherited)
- "fd_pt1_c7_not_null" NOT NULL "c7" (inherited)
-Server: s0
-Inherits: ft2
-
--- alter attributes recursively
-ALTER TABLE fd_pt1 ALTER COLUMN c4 SET DEFAULT 0;
-ALTER TABLE fd_pt1 ALTER COLUMN c5 DROP DEFAULT;
-ALTER TABLE fd_pt1 ALTER COLUMN c6 SET NOT NULL;
-ALTER TABLE fd_pt1 ALTER COLUMN c7 DROP NOT NULL;
-ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR
-ERROR: "ft2" is not a table
-ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10);
-ALTER TABLE fd_pt1 ALTER COLUMN c8 SET DATA TYPE text;
-ALTER TABLE fd_pt1 ALTER COLUMN c1 SET STATISTICS 10000;
-ALTER TABLE fd_pt1 ALTER COLUMN c1 SET (n_distinct = 100);
-ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STATISTICS -1;
-ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STORAGE EXTERNAL;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | 10000 |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
- c4 | integer | | | 0 | plain | |
- c5 | integer | | | | plain | |
- c6 | integer | | not null | | plain | |
- c7 | integer | | | | plain | |
- c8 | text | | | | external | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
- "fd_pt1_c6_not_null" NOT NULL "c6"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | 10000 |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
- c4 | integer | | | 0 | | plain | |
- c5 | integer | | | | | plain | |
- c6 | integer | | not null | | | plain | |
- c7 | integer | | | | | plain | |
- c8 | text | | | | | external | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
- "fd_pt1_c6_not_null" NOT NULL "c6" (inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-Child tables: ct3,
- ft3, FOREIGN
-
--- drop attributes recursively
-ALTER TABLE fd_pt1 DROP COLUMN c4;
-ALTER TABLE fd_pt1 DROP COLUMN c5;
-ALTER TABLE fd_pt1 DROP COLUMN c6;
-ALTER TABLE fd_pt1 DROP COLUMN c7;
-ALTER TABLE fd_pt1 DROP COLUMN c8;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | 10000 |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | 10000 |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-Child tables: ct3,
- ft3, FOREIGN
-
--- add constraints recursively
-ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk1 CHECK (c1 > 0) NO INHERIT;
-ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> '');
--- connoinherit should be true for NO INHERIT constraint
-SELECT relname, conname, contype, conislocal, coninhcount, connoinherit
- FROM pg_class AS pc JOIN pg_constraint AS pgc ON (conrelid = pc.oid)
- WHERE pc.relname = 'fd_pt1'
- ORDER BY 1,2;
- relname | conname | contype | conislocal | coninhcount | connoinherit
----------+--------------------+---------+------------+-------------+--------------
- fd_pt1 | fd_pt1_c1_not_null | n | t | 0 | f
- fd_pt1 | fd_pt1chk1 | c | t | 0 | t
- fd_pt1 | fd_pt1chk2 | c | t | 0 | f
-(3 rows)
-
--- child does not inherit NO INHERIT constraints
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | 10000 |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Check constraints:
- "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT
- "fd_pt1chk2" CHECK (c2 <> ''::text)
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | 10000 |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Check constraints:
- "fd_pt1chk2" CHECK (c2 <> ''::text)
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-Child tables: ct3,
- ft3, FOREIGN
-
-DROP FOREIGN TABLE ft2; -- ERROR
-ERROR: cannot drop foreign table ft2 because other objects depend on it
-DETAIL: table ct3 depends on foreign table ft2
-foreign table ft3 depends on foreign table ft2
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP FOREIGN TABLE ft2 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table ct3
-drop cascades to foreign table ft3
-CREATE FOREIGN TABLE ft2 (
- c1 integer NOT NULL,
- c2 text,
- c3 date
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
--- child must have parent's INHERIT constraints
-ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- ERROR
-ERROR: child table is missing constraint "fd_pt1chk2"
-ALTER FOREIGN TABLE ft2 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> '');
-ALTER FOREIGN TABLE ft2 INHERIT fd_pt1;
--- child does not inherit NO INHERIT constraints
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | 10000 |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Check constraints:
- "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT
- "fd_pt1chk2" CHECK (c2 <> ''::text)
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Check constraints:
- "fd_pt1chk2" CHECK (c2 <> ''::text)
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-
--- drop constraints recursively
-ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk1 CASCADE;
-ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk2 CASCADE;
--- NOT VALID case
-INSERT INTO fd_pt1 VALUES (1, 'fd_pt1'::text, '1994-01-01'::date);
-ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk3 CHECK (c2 <> '') NOT VALID;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | 10000 |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Check constraints:
- "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Check constraints:
- "fd_pt1chk2" CHECK (c2 <> ''::text)
- "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-
--- VALIDATE CONSTRAINT need do nothing on foreign tables
-ALTER TABLE fd_pt1 VALIDATE CONSTRAINT fd_pt1chk3;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | 10000 |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Check constraints:
- "fd_pt1chk3" CHECK (c2 <> ''::text)
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "c1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Check constraints:
- "fd_pt1chk2" CHECK (c2 <> ''::text)
- "fd_pt1chk3" CHECK (c2 <> ''::text)
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "c1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-
--- changes name of an attribute recursively
-ALTER TABLE fd_pt1 RENAME COLUMN c1 TO f1;
-ALTER TABLE fd_pt1 RENAME COLUMN c2 TO f2;
-ALTER TABLE fd_pt1 RENAME COLUMN c3 TO f3;
--- changes name of a constraint recursively
-ALTER TABLE fd_pt1 RENAME CONSTRAINT fd_pt1chk3 TO f2_check;
-\d+ fd_pt1
- Table "public.fd_pt1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- f1 | integer | | not null | | plain | 10000 |
- f2 | text | | | | extended | |
- f3 | date | | | | plain | |
-Check constraints:
- "f2_check" CHECK (f2 <> ''::text)
-Not-null constraints:
- "fd_pt1_c1_not_null" NOT NULL "f1"
-Child tables: ft2, FOREIGN
-
-\d+ ft2
- Foreign table "public.ft2"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- f1 | integer | | not null | | | plain | |
- f2 | text | | | | | extended | |
- f3 | date | | | | | plain | |
-Check constraints:
- "f2_check" CHECK (f2 <> ''::text)
- "fd_pt1chk2" CHECK (f2 <> ''::text)
-Not-null constraints:
- "ft2_c1_not_null" NOT NULL "f1" (local, inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-Inherits: fd_pt1
-
-DROP TABLE fd_pt1 CASCADE;
-NOTICE: drop cascades to foreign table ft2
--- IMPORT FOREIGN SCHEMA
-IMPORT FOREIGN SCHEMA s1 FROM SERVER s9 INTO public; -- ERROR
-ERROR: foreign-data wrapper "foo" has no handler
-IMPORT FOREIGN SCHEMA s1 LIMIT TO (t1) FROM SERVER s9 INTO public; --ERROR
-ERROR: foreign-data wrapper "foo" has no handler
-IMPORT FOREIGN SCHEMA s1 EXCEPT (t1) FROM SERVER s9 INTO public; -- ERROR
-ERROR: foreign-data wrapper "foo" has no handler
-IMPORT FOREIGN SCHEMA s1 EXCEPT (t1, t2) FROM SERVER s9 INTO public
-OPTIONS (option1 'value1', option2 'value2'); -- ERROR
-ERROR: foreign-data wrapper "foo" has no handler
--- DROP FOREIGN TABLE
-DROP FOREIGN TABLE no_table; -- ERROR
-ERROR: foreign table "no_table" does not exist
-DROP FOREIGN TABLE IF EXISTS no_table;
-NOTICE: foreign table "no_table" does not exist, skipping
-DROP FOREIGN TABLE foreign_schema.foreign_table_1;
--- REASSIGN OWNED/DROP OWNED of foreign objects
-REASSIGN OWNED BY regress_test_role TO regress_test_role2;
-DROP OWNED BY regress_test_role2;
-ERROR: cannot drop desired object(s) because other objects depend on them
-DETAIL: user mapping for regress_test_role on server s5 depends on server s5
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP OWNED BY regress_test_role2 CASCADE;
-NOTICE: drop cascades to user mapping for regress_test_role on server s5
--- Foreign partition DDL stuff
-CREATE TABLE fd_pt2 (
- c1 integer NOT NULL,
- c2 text,
- c3 date
-) PARTITION BY LIST (c1);
-CREATE FOREIGN TABLE fd_pt2_1 PARTITION OF fd_pt2 FOR VALUES IN (1)
- SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
-\d+ fd_pt2
- Partitioned table "public.fd_pt2"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Partition key: LIST (c1)
-Not-null constraints:
- "fd_pt2_c1_not_null" NOT NULL "c1"
-Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN
-
-\d+ fd_pt2_1
- Foreign table "public.fd_pt2_1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Partition of: fd_pt2 FOR VALUES IN (1)
-Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1))
-Not-null constraints:
- "fd_pt2_c1_not_null" NOT NULL "c1" (inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
--- partition cannot have additional columns
-DROP FOREIGN TABLE fd_pt2_1;
-CREATE FOREIGN TABLE fd_pt2_1 (
- c1 integer NOT NULL,
- c2 text,
- c3 date,
- c4 char
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
-\d+ fd_pt2_1
- Foreign table "public.fd_pt2_1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+--------------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
- c4 | character(1) | | | | | extended | |
-Not-null constraints:
- "fd_pt2_1_c1_not_null" NOT NULL "c1"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR
-ERROR: table "fd_pt2_1" contains column "c4" not found in parent "fd_pt2"
-DETAIL: The new partition may contain only the columns present in parent.
-DROP FOREIGN TABLE fd_pt2_1;
-\d+ fd_pt2
- Partitioned table "public.fd_pt2"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Partition key: LIST (c1)
-Not-null constraints:
- "fd_pt2_c1_not_null" NOT NULL "c1"
-Number of partitions: 0
-
-CREATE FOREIGN TABLE fd_pt2_1 (
- c1 integer NOT NULL,
- c2 text,
- c3 date
-) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value');
-\d+ fd_pt2_1
- Foreign table "public.fd_pt2_1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Not-null constraints:
- "fd_pt2_1_c1_not_null" NOT NULL "c1"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
--- no attach partition validation occurs for foreign tables
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1);
-\d+ fd_pt2
- Partitioned table "public.fd_pt2"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Partition key: LIST (c1)
-Not-null constraints:
- "fd_pt2_c1_not_null" NOT NULL "c1"
-Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN
-
-\d+ fd_pt2_1
- Foreign table "public.fd_pt2_1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | | | | plain | |
-Partition of: fd_pt2 FOR VALUES IN (1)
-Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1))
-Not-null constraints:
- "fd_pt2_1_c1_not_null" NOT NULL "c1" (inherited)
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
--- cannot add column to a partition
-ALTER TABLE fd_pt2_1 ADD c4 char;
-ERROR: cannot add column to a partition
--- ok to have a partition's own constraints though
-ALTER TABLE fd_pt2_1 ALTER c3 SET NOT NULL;
-ALTER TABLE fd_pt2_1 ADD CONSTRAINT p21chk CHECK (c2 <> '');
-\d+ fd_pt2
- Partitioned table "public.fd_pt2"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | | | extended | |
- c3 | date | | | | plain | |
-Partition key: LIST (c1)
-Not-null constraints:
- "fd_pt2_c1_not_null" NOT NULL "c1"
-Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN
-
-\d+ fd_pt2_1
- Foreign table "public.fd_pt2_1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | not null | | | plain | |
-Partition of: fd_pt2 FOR VALUES IN (1)
-Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1))
-Check constraints:
- "p21chk" CHECK (c2 <> ''::text)
-Not-null constraints:
- "fd_pt2_1_c1_not_null" NOT NULL "c1" (inherited)
- "fd_pt2_1_c3_not_null" NOT NULL "c3"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
--- cannot drop inherited NOT NULL constraint from a partition
-ALTER TABLE fd_pt2_1 ALTER c1 DROP NOT NULL;
-ERROR: column "c1" is marked NOT NULL in parent table
--- partition must have parent's constraints
-ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1;
-ALTER TABLE fd_pt2 ALTER c2 SET NOT NULL;
-\d+ fd_pt2
- Partitioned table "public.fd_pt2"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | not null | | extended | |
- c3 | date | | | | plain | |
-Partition key: LIST (c1)
-Not-null constraints:
- "fd_pt2_c1_not_null" NOT NULL "c1"
- "fd_pt2_c2_not_null" NOT NULL "c2"
-Number of partitions: 0
-
-\d+ fd_pt2_1
- Foreign table "public.fd_pt2_1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | | | | extended | |
- c3 | date | | not null | | | plain | |
-Check constraints:
- "p21chk" CHECK (c2 <> ''::text)
-Not-null constraints:
- "fd_pt2_1_c1_not_null" NOT NULL "c1"
- "fd_pt2_1_c3_not_null" NOT NULL "c3"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR
-ERROR: column "c2" in child table "fd_pt2_1" must be marked NOT NULL
-ALTER FOREIGN TABLE fd_pt2_1 ALTER c2 SET NOT NULL;
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1);
-ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1;
-ALTER TABLE fd_pt2 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0);
-\d+ fd_pt2
- Partitioned table "public.fd_pt2"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+----------+--------------+-------------
- c1 | integer | | not null | | plain | |
- c2 | text | | not null | | extended | |
- c3 | date | | | | plain | |
-Partition key: LIST (c1)
-Check constraints:
- "fd_pt2chk1" CHECK (c1 > 0)
-Not-null constraints:
- "fd_pt2_c1_not_null" NOT NULL "c1"
- "fd_pt2_c2_not_null" NOT NULL "c2"
-Number of partitions: 0
-
-\d+ fd_pt2_1
- Foreign table "public.fd_pt2_1"
- Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description
---------+---------+-----------+----------+---------+-------------+----------+--------------+-------------
- c1 | integer | | not null | | | plain | |
- c2 | text | | not null | | | extended | |
- c3 | date | | not null | | | plain | |
-Check constraints:
- "p21chk" CHECK (c2 <> ''::text)
-Not-null constraints:
- "fd_pt2_1_c1_not_null" NOT NULL "c1"
- "fd_pt2_1_c2_not_null" NOT NULL "c2"
- "fd_pt2_1_c3_not_null" NOT NULL "c3"
-Server: s0
-FDW options: (delimiter ',', quote '"', "be quoted" 'value')
-
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR
-ERROR: child table is missing constraint "fd_pt2chk1"
-ALTER FOREIGN TABLE fd_pt2_1 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0);
-ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1);
-DROP FOREIGN TABLE fd_pt2_1;
-DROP TABLE fd_pt2;
--- foreign table cannot be part of partition tree made of temporary
--- relations.
-CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a);
-CREATE FOREIGN TABLE foreign_part PARTITION OF temp_parted DEFAULT
- SERVER s0; -- ERROR
-ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted"
-CREATE FOREIGN TABLE foreign_part (a int) SERVER s0;
-ALTER TABLE temp_parted ATTACH PARTITION foreign_part DEFAULT; -- ERROR
-ERROR: cannot attach a permanent relation as partition of temporary relation "temp_parted"
-DROP FOREIGN TABLE foreign_part;
-DROP TABLE temp_parted;
--- Cleanup
-DROP SCHEMA foreign_schema CASCADE;
-DROP ROLE regress_test_role; -- ERROR
-ERROR: role "regress_test_role" cannot be dropped because some objects depend on it
-DETAIL: privileges for foreign-data wrapper foo
-privileges for server s4
-owner of user mapping for regress_test_role on server s6
-DROP SERVER t1 CASCADE;
-NOTICE: drop cascades to user mapping for public on server t1
-DROP USER MAPPING FOR regress_test_role SERVER s6;
-DROP FOREIGN DATA WRAPPER foo CASCADE;
-NOTICE: drop cascades to 5 other objects
-DETAIL: drop cascades to server s4
-drop cascades to user mapping for regress_foreign_data_user on server s4
-drop cascades to server s6
-drop cascades to server s9
-drop cascades to user mapping for regress_unprivileged_role on server s9
-DROP SERVER s8 CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to user mapping for regress_foreign_data_user on server s8
-drop cascades to user mapping for public on server s8
-DROP ROLE regress_test_indirect;
-DROP ROLE regress_test_role;
-DROP ROLE regress_unprivileged_role; -- ERROR
-ERROR: role "regress_unprivileged_role" cannot be dropped because some objects depend on it
-DETAIL: privileges for foreign-data wrapper postgresql
-REVOKE ALL ON FOREIGN DATA WRAPPER postgresql FROM regress_unprivileged_role;
-DROP ROLE regress_unprivileged_role;
-DROP ROLE regress_test_role2;
-DROP FOREIGN DATA WRAPPER postgresql CASCADE;
-DROP FOREIGN DATA WRAPPER dummy CASCADE;
-NOTICE: drop cascades to server s0
-\c
-DROP ROLE regress_foreign_data_user;
--- At this point we should have no wrappers, no servers, and no mappings.
-SELECT fdwname, fdwhandler, fdwvalidator, fdwoptions FROM pg_foreign_data_wrapper;
- fdwname | fdwhandler | fdwvalidator | fdwoptions
----------+------------+--------------+------------
-(0 rows)
-
-SELECT srvname, srvoptions FROM pg_foreign_server;
- srvname | srvoptions
----------+------------
-(0 rows)
-
-SELECT * FROM pg_user_mapping;
- oid | umuser | umserver | umoptions
------+--------+----------+-----------
-(0 rows)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/window.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/window.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/window.out 2024-11-15 02:50:52.521996385 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/window.out 2024-11-15 02:59:17.217115686 +0000
@@ -1,5395 +1,2 @@
---
--- WINDOW FUNCTIONS
---
-CREATE TEMPORARY TABLE empsalary (
- depname varchar,
- empno bigint,
- salary int,
- enroll_date date
-);
-INSERT INTO empsalary VALUES
-('develop', 10, 5200, '2007-08-01'),
-('sales', 1, 5000, '2006-10-01'),
-('personnel', 5, 3500, '2007-12-10'),
-('sales', 4, 4800, '2007-08-08'),
-('personnel', 2, 3900, '2006-12-23'),
-('develop', 7, 4200, '2008-01-01'),
-('develop', 9, 4500, '2008-01-01'),
-('sales', 3, 4800, '2007-08-01'),
-('develop', 8, 6000, '2006-10-01'),
-('develop', 11, 5200, '2007-08-15');
-SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname) FROM empsalary ORDER BY depname, salary;
- depname | empno | salary | sum
------------+-------+--------+-------
- develop | 7 | 4200 | 25100
- develop | 9 | 4500 | 25100
- develop | 11 | 5200 | 25100
- develop | 10 | 5200 | 25100
- develop | 8 | 6000 | 25100
- personnel | 5 | 3500 | 7400
- personnel | 2 | 3900 | 7400
- sales | 3 | 4800 | 14600
- sales | 4 | 4800 | 14600
- sales | 1 | 5000 | 14600
-(10 rows)
-
-SELECT depname, empno, salary, rank() OVER (PARTITION BY depname ORDER BY salary) FROM empsalary;
- depname | empno | salary | rank
------------+-------+--------+------
- develop | 7 | 4200 | 1
- develop | 9 | 4500 | 2
- develop | 11 | 5200 | 3
- develop | 10 | 5200 | 3
- develop | 8 | 6000 | 5
- personnel | 5 | 3500 | 1
- personnel | 2 | 3900 | 2
- sales | 3 | 4800 | 1
- sales | 4 | 4800 | 1
- sales | 1 | 5000 | 3
-(10 rows)
-
--- with GROUP BY
-SELECT four, ten, SUM(SUM(four)) OVER (PARTITION BY four), AVG(ten) FROM tenk1
-GROUP BY four, ten ORDER BY four, ten;
- four | ten | sum | avg
-------+-----+------+------------------------
- 0 | 0 | 0 | 0.00000000000000000000
- 0 | 2 | 0 | 2.0000000000000000
- 0 | 4 | 0 | 4.0000000000000000
- 0 | 6 | 0 | 6.0000000000000000
- 0 | 8 | 0 | 8.0000000000000000
- 1 | 1 | 2500 | 1.00000000000000000000
- 1 | 3 | 2500 | 3.0000000000000000
- 1 | 5 | 2500 | 5.0000000000000000
- 1 | 7 | 2500 | 7.0000000000000000
- 1 | 9 | 2500 | 9.0000000000000000
- 2 | 0 | 5000 | 0.00000000000000000000
- 2 | 2 | 5000 | 2.0000000000000000
- 2 | 4 | 5000 | 4.0000000000000000
- 2 | 6 | 5000 | 6.0000000000000000
- 2 | 8 | 5000 | 8.0000000000000000
- 3 | 1 | 7500 | 1.00000000000000000000
- 3 | 3 | 7500 | 3.0000000000000000
- 3 | 5 | 7500 | 5.0000000000000000
- 3 | 7 | 7500 | 7.0000000000000000
- 3 | 9 | 7500 | 9.0000000000000000
-(20 rows)
-
-SELECT depname, empno, salary, sum(salary) OVER w FROM empsalary WINDOW w AS (PARTITION BY depname);
- depname | empno | salary | sum
------------+-------+--------+-------
- develop | 11 | 5200 | 25100
- develop | 7 | 4200 | 25100
- develop | 9 | 4500 | 25100
- develop | 8 | 6000 | 25100
- develop | 10 | 5200 | 25100
- personnel | 5 | 3500 | 7400
- personnel | 2 | 3900 | 7400
- sales | 3 | 4800 | 14600
- sales | 1 | 5000 | 14600
- sales | 4 | 4800 | 14600
-(10 rows)
-
-SELECT depname, empno, salary, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary) ORDER BY rank() OVER w;
- depname | empno | salary | rank
------------+-------+--------+------
- develop | 7 | 4200 | 1
- personnel | 5 | 3500 | 1
- sales | 3 | 4800 | 1
- sales | 4 | 4800 | 1
- personnel | 2 | 3900 | 2
- develop | 9 | 4500 | 2
- sales | 1 | 5000 | 3
- develop | 11 | 5200 | 3
- develop | 10 | 5200 | 3
- develop | 8 | 6000 | 5
-(10 rows)
-
--- empty window specification
-SELECT COUNT(*) OVER () FROM tenk1 WHERE unique2 < 10;
- count
--------
- 10
- 10
- 10
- 10
- 10
- 10
- 10
- 10
- 10
- 10
-(10 rows)
-
-SELECT COUNT(*) OVER w FROM tenk1 WHERE unique2 < 10 WINDOW w AS ();
- count
--------
- 10
- 10
- 10
- 10
- 10
- 10
- 10
- 10
- 10
- 10
-(10 rows)
-
--- no window operation
-SELECT four FROM tenk1 WHERE FALSE WINDOW w AS (PARTITION BY ten);
- four
-------
-(0 rows)
-
--- cumulative aggregate
-SELECT sum(four) OVER (PARTITION BY ten ORDER BY unique2) AS sum_1, ten, four FROM tenk1 WHERE unique2 < 10;
- sum_1 | ten | four
--------+-----+------
- 0 | 0 | 0
- 0 | 0 | 0
- 2 | 0 | 2
- 3 | 1 | 3
- 4 | 1 | 1
- 5 | 1 | 1
- 3 | 3 | 3
- 0 | 4 | 0
- 1 | 7 | 1
- 1 | 9 | 1
-(10 rows)
-
-SELECT row_number() OVER (ORDER BY unique2) FROM tenk1 WHERE unique2 < 10;
- row_number
-------------
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
-(10 rows)
-
-SELECT rank() OVER (PARTITION BY four ORDER BY ten) AS rank_1, ten, four FROM tenk1 WHERE unique2 < 10;
- rank_1 | ten | four
---------+-----+------
- 1 | 0 | 0
- 1 | 0 | 0
- 3 | 4 | 0
- 1 | 1 | 1
- 1 | 1 | 1
- 3 | 7 | 1
- 4 | 9 | 1
- 1 | 0 | 2
- 1 | 1 | 3
- 2 | 3 | 3
-(10 rows)
-
-SELECT dense_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- dense_rank | ten | four
-------------+-----+------
- 1 | 0 | 0
- 1 | 0 | 0
- 2 | 4 | 0
- 1 | 1 | 1
- 1 | 1 | 1
- 2 | 7 | 1
- 3 | 9 | 1
- 1 | 0 | 2
- 1 | 1 | 3
- 2 | 3 | 3
-(10 rows)
-
-SELECT percent_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- percent_rank | ten | four
---------------------+-----+------
- 0 | 0 | 0
- 0 | 0 | 0
- 1 | 4 | 0
- 0 | 1 | 1
- 0 | 1 | 1
- 0.6666666666666666 | 7 | 1
- 1 | 9 | 1
- 0 | 0 | 2
- 0 | 1 | 3
- 1 | 3 | 3
-(10 rows)
-
-SELECT cume_dist() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- cume_dist | ten | four
---------------------+-----+------
- 0.6666666666666666 | 0 | 0
- 0.6666666666666666 | 0 | 0
- 1 | 4 | 0
- 0.5 | 1 | 1
- 0.5 | 1 | 1
- 0.75 | 7 | 1
- 1 | 9 | 1
- 1 | 0 | 2
- 0.5 | 1 | 3
- 1 | 3 | 3
-(10 rows)
-
-SELECT ntile(3) OVER (ORDER BY ten, four), ten, four FROM tenk1 WHERE unique2 < 10;
- ntile | ten | four
--------+-----+------
- 1 | 0 | 0
- 1 | 0 | 0
- 1 | 0 | 2
- 1 | 1 | 1
- 2 | 1 | 1
- 2 | 1 | 3
- 2 | 3 | 3
- 3 | 4 | 0
- 3 | 7 | 1
- 3 | 9 | 1
-(10 rows)
-
-SELECT ntile(NULL) OVER (ORDER BY ten, four), ten, four FROM tenk1 LIMIT 2;
- ntile | ten | four
--------+-----+------
- | 0 | 0
- | 0 | 0
-(2 rows)
-
-SELECT lag(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- lag | ten | four
------+-----+------
- | 0 | 0
- 0 | 0 | 0
- 0 | 4 | 0
- | 1 | 1
- 1 | 1 | 1
- 1 | 7 | 1
- 7 | 9 | 1
- | 0 | 2
- | 1 | 3
- 1 | 3 | 3
-(10 rows)
-
-SELECT lag(ten, four) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- lag | ten | four
------+-----+------
- 0 | 0 | 0
- 0 | 0 | 0
- 4 | 4 | 0
- | 1 | 1
- 1 | 1 | 1
- 1 | 7 | 1
- 7 | 9 | 1
- | 0 | 2
- | 1 | 3
- | 3 | 3
-(10 rows)
-
-SELECT lag(ten, four, 0) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- lag | ten | four
------+-----+------
- 0 | 0 | 0
- 0 | 0 | 0
- 4 | 4 | 0
- 0 | 1 | 1
- 1 | 1 | 1
- 1 | 7 | 1
- 7 | 9 | 1
- 0 | 0 | 2
- 0 | 1 | 3
- 0 | 3 | 3
-(10 rows)
-
-SELECT lag(ten, four, 0.7) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten;
- lag | ten | four
------+-----+------
- 0 | 0 | 0
- 0 | 0 | 0
- 4 | 4 | 0
- 0.7 | 1 | 1
- 1 | 1 | 1
- 1 | 7 | 1
- 7 | 9 | 1
- 0.7 | 0 | 2
- 0.7 | 1 | 3
- 0.7 | 3 | 3
-(10 rows)
-
-SELECT lead(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- lead | ten | four
-------+-----+------
- 0 | 0 | 0
- 4 | 0 | 0
- | 4 | 0
- 1 | 1 | 1
- 7 | 1 | 1
- 9 | 7 | 1
- | 9 | 1
- | 0 | 2
- 3 | 1 | 3
- | 3 | 3
-(10 rows)
-
-SELECT lead(ten * 2, 1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- lead | ten | four
-------+-----+------
- 0 | 0 | 0
- 8 | 0 | 0
- | 4 | 0
- 2 | 1 | 1
- 14 | 1 | 1
- 18 | 7 | 1
- | 9 | 1
- | 0 | 2
- 6 | 1 | 3
- | 3 | 3
-(10 rows)
-
-SELECT lead(ten * 2, 1, -1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- lead | ten | four
-------+-----+------
- 0 | 0 | 0
- 8 | 0 | 0
- -1 | 4 | 0
- 2 | 1 | 1
- 14 | 1 | 1
- 18 | 7 | 1
- -1 | 9 | 1
- -1 | 0 | 2
- 6 | 1 | 3
- -1 | 3 | 3
-(10 rows)
-
-SELECT lead(ten * 2, 1, -1.4) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten;
- lead | ten | four
-------+-----+------
- 0 | 0 | 0
- 8 | 0 | 0
- -1.4 | 4 | 0
- 2 | 1 | 1
- 14 | 1 | 1
- 18 | 7 | 1
- -1.4 | 9 | 1
- -1.4 | 0 | 2
- 6 | 1 | 3
- -1.4 | 3 | 3
-(10 rows)
-
-SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- first_value | ten | four
--------------+-----+------
- 0 | 0 | 0
- 0 | 0 | 0
- 0 | 4 | 0
- 1 | 1 | 1
- 1 | 1 | 1
- 1 | 7 | 1
- 1 | 9 | 1
- 0 | 0 | 2
- 1 | 1 | 3
- 1 | 3 | 3
-(10 rows)
-
--- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window.
-SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
- last_value | ten | four
-------------+-----+------
- 0 | 0 | 0
- 0 | 0 | 2
- 0 | 0 | 0
- 1 | 1 | 1
- 1 | 1 | 3
- 1 | 1 | 1
- 3 | 3 | 3
- 0 | 4 | 0
- 1 | 7 | 1
- 1 | 9 | 1
-(10 rows)
-
-SELECT last_value(ten) OVER (PARTITION BY four), ten, four FROM
- (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s
- ORDER BY four, ten;
- last_value | ten | four
-------------+-----+------
- 4 | 0 | 0
- 4 | 0 | 0
- 4 | 4 | 0
- 9 | 1 | 1
- 9 | 1 | 1
- 9 | 7 | 1
- 9 | 9 | 1
- 0 | 0 | 2
- 3 | 1 | 3
- 3 | 3 | 3
-(10 rows)
-
-SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four
- FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s;
- nth_value | ten | four
------------+-----+------
- 0 | 0 | 0
- 0 | 0 | 0
- 0 | 4 | 0
- 1 | 1 | 1
- 1 | 1 | 1
- 1 | 7 | 1
- 1 | 9 | 1
- | 0 | 2
- | 1 | 3
- | 3 | 3
-(10 rows)
-
-SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum
-FROM tenk1 GROUP BY ten, two;
- ten | two | gsum | wsum
------+-----+-------+--------
- 0 | 0 | 45000 | 45000
- 2 | 0 | 47000 | 92000
- 4 | 0 | 49000 | 141000
- 6 | 0 | 51000 | 192000
- 8 | 0 | 53000 | 245000
- 1 | 1 | 46000 | 46000
- 3 | 1 | 48000 | 94000
- 5 | 1 | 50000 | 144000
- 7 | 1 | 52000 | 196000
- 9 | 1 | 54000 | 250000
-(10 rows)
-
-SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE two = 1)s WHERE unique2 < 10;
- count | four
--------+------
- 4 | 1
- 4 | 1
- 4 | 1
- 4 | 1
- 2 | 3
- 2 | 3
-(6 rows)
-
-SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) +
- sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum
- FROM tenk1 WHERE unique2 < 10;
- cntsum
---------
- 22
- 22
- 87
- 24
- 24
- 82
- 92
- 51
- 92
- 136
-(10 rows)
-
--- opexpr with different windows evaluation.
-SELECT * FROM(
- SELECT count(*) OVER (PARTITION BY four ORDER BY ten) +
- sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total,
- count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount,
- sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum
- FROM tenk1
-)sub
-WHERE total <> fourcount + twosum;
- total | fourcount | twosum
--------+-----------+--------
-(0 rows)
-
-SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1 WHERE unique2 < 10;
- avg
-------------------------
- 0.00000000000000000000
- 0.00000000000000000000
- 0.00000000000000000000
- 1.00000000000000000000
- 1.00000000000000000000
- 1.00000000000000000000
- 1.00000000000000000000
- 2.0000000000000000
- 3.0000000000000000
- 3.0000000000000000
-(10 rows)
-
-SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum
-FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten);
- ten | two | gsum | wsum
------+-----+-------+--------
- 0 | 0 | 45000 | 45000
- 2 | 0 | 47000 | 92000
- 4 | 0 | 49000 | 141000
- 6 | 0 | 51000 | 192000
- 8 | 0 | 53000 | 245000
- 1 | 1 | 46000 | 46000
- 3 | 1 | 48000 | 94000
- 5 | 1 | 50000 | 144000
- 7 | 1 | 52000 | 196000
- 9 | 1 | 54000 | 250000
-(10 rows)
-
--- more than one window with GROUP BY
-SELECT sum(salary),
- row_number() OVER (ORDER BY depname),
- sum(sum(salary)) OVER (ORDER BY depname DESC)
-FROM empsalary GROUP BY depname;
- sum | row_number | sum
--------+------------+-------
- 25100 | 1 | 47100
- 7400 | 2 | 22000
- 14600 | 3 | 14600
-(3 rows)
-
--- identical windows with different names
-SELECT sum(salary) OVER w1, count(*) OVER w2
-FROM empsalary WINDOW w1 AS (ORDER BY salary), w2 AS (ORDER BY salary);
- sum | count
--------+-------
- 3500 | 1
- 7400 | 2
- 11600 | 3
- 16100 | 4
- 25700 | 6
- 25700 | 6
- 30700 | 7
- 41100 | 9
- 41100 | 9
- 47100 | 10
-(10 rows)
-
--- subplan
-SELECT lead(ten, (SELECT two FROM tenk1 WHERE s.unique2 = unique2)) OVER (PARTITION BY four ORDER BY ten)
-FROM tenk1 s WHERE unique2 < 10;
- lead
-------
- 0
- 0
- 4
- 1
- 7
- 9
-
- 0
- 3
-
-(10 rows)
-
--- empty table
-SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 WHERE FALSE)s;
- count
--------
-(0 rows)
-
--- mixture of agg/wfunc in the same window
-SELECT sum(salary) OVER w, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary DESC);
- sum | rank
--------+------
- 6000 | 1
- 16400 | 2
- 16400 | 2
- 20900 | 4
- 25100 | 5
- 3900 | 1
- 7400 | 2
- 5000 | 1
- 14600 | 2
- 14600 | 2
-(10 rows)
-
--- strict aggs
-SELECT empno, depname, salary, bonus, depadj, MIN(bonus) OVER (ORDER BY empno), MAX(depadj) OVER () FROM(
- SELECT *,
- CASE WHEN enroll_date < '2008-01-01' THEN 2008 - extract(YEAR FROM enroll_date) END * 500 AS bonus,
- CASE WHEN
- AVG(salary) OVER (PARTITION BY depname) < salary
- THEN 200 END AS depadj FROM empsalary
-)s;
- empno | depname | salary | bonus | depadj | min | max
--------+-----------+--------+-------+--------+------+-----
- 1 | sales | 5000 | 1000 | 200 | 1000 | 200
- 2 | personnel | 3900 | 1000 | 200 | 1000 | 200
- 3 | sales | 4800 | 500 | | 500 | 200
- 4 | sales | 4800 | 500 | | 500 | 200
- 5 | personnel | 3500 | 500 | | 500 | 200
- 7 | develop | 4200 | | | 500 | 200
- 8 | develop | 6000 | 1000 | 200 | 500 | 200
- 9 | develop | 4500 | | | 500 | 200
- 10 | develop | 5200 | 500 | 200 | 500 | 200
- 11 | develop | 5200 | 500 | 200 | 500 | 200
-(10 rows)
-
--- window function over ungrouped agg over empty row set (bug before 9.1)
-SELECT SUM(COUNT(f1)) OVER () FROM int4_tbl WHERE f1=42;
- sum
------
- 0
-(1 row)
-
--- window function with ORDER BY an expression involving aggregates (9.1 bug)
-select ten,
- sum(unique1) + sum(unique2) as res,
- rank() over (order by sum(unique1) + sum(unique2)) as rank
-from tenk1
-group by ten order by ten;
- ten | res | rank
------+----------+------
- 0 | 9976146 | 4
- 1 | 10114187 | 9
- 2 | 10059554 | 8
- 3 | 9878541 | 1
- 4 | 9881005 | 2
- 5 | 9981670 | 5
- 6 | 9947099 | 3
- 7 | 10120309 | 10
- 8 | 9991305 | 6
- 9 | 10040184 | 7
-(10 rows)
-
--- window and aggregate with GROUP BY expression (9.2 bug)
-explain (costs off)
-select first_value(max(x)) over (), y
- from (select unique1 as x, ten+four as y from tenk1) ss
- group by y;
- QUERY PLAN
----------------------------------------------
- WindowAgg
- -> HashAggregate
- Group Key: (tenk1.ten + tenk1.four)
- -> Seq Scan on tenk1
-(4 rows)
-
--- window functions returning pass-by-ref values from different rows
-select x, lag(x, 1) over (order by x), lead(x, 3) over (order by x)
-from (select x::numeric as x from generate_series(1,10) x);
- x | lag | lead
-----+-----+------
- 1 | | 4
- 2 | 1 | 5
- 3 | 2 | 6
- 4 | 3 | 7
- 5 | 4 | 8
- 6 | 5 | 9
- 7 | 6 | 10
- 8 | 7 |
- 9 | 8 |
- 10 | 9 |
-(10 rows)
-
--- test non-default frame specifications
-SELECT four, ten,
- sum(ten) over (partition by four order by ten),
- last_value(ten) over (partition by four order by ten)
-FROM (select distinct ten, four from tenk1) ss;
- four | ten | sum | last_value
-------+-----+-----+------------
- 0 | 0 | 0 | 0
- 0 | 2 | 2 | 2
- 0 | 4 | 6 | 4
- 0 | 6 | 12 | 6
- 0 | 8 | 20 | 8
- 1 | 1 | 1 | 1
- 1 | 3 | 4 | 3
- 1 | 5 | 9 | 5
- 1 | 7 | 16 | 7
- 1 | 9 | 25 | 9
- 2 | 0 | 0 | 0
- 2 | 2 | 2 | 2
- 2 | 4 | 6 | 4
- 2 | 6 | 12 | 6
- 2 | 8 | 20 | 8
- 3 | 1 | 1 | 1
- 3 | 3 | 4 | 3
- 3 | 5 | 9 | 5
- 3 | 7 | 16 | 7
- 3 | 9 | 25 | 9
-(20 rows)
-
-SELECT four, ten,
- sum(ten) over (partition by four order by ten range between unbounded preceding and current row),
- last_value(ten) over (partition by four order by ten range between unbounded preceding and current row)
-FROM (select distinct ten, four from tenk1) ss;
- four | ten | sum | last_value
-------+-----+-----+------------
- 0 | 0 | 0 | 0
- 0 | 2 | 2 | 2
- 0 | 4 | 6 | 4
- 0 | 6 | 12 | 6
- 0 | 8 | 20 | 8
- 1 | 1 | 1 | 1
- 1 | 3 | 4 | 3
- 1 | 5 | 9 | 5
- 1 | 7 | 16 | 7
- 1 | 9 | 25 | 9
- 2 | 0 | 0 | 0
- 2 | 2 | 2 | 2
- 2 | 4 | 6 | 4
- 2 | 6 | 12 | 6
- 2 | 8 | 20 | 8
- 3 | 1 | 1 | 1
- 3 | 3 | 4 | 3
- 3 | 5 | 9 | 5
- 3 | 7 | 16 | 7
- 3 | 9 | 25 | 9
-(20 rows)
-
-SELECT four, ten,
- sum(ten) over (partition by four order by ten range between unbounded preceding and unbounded following),
- last_value(ten) over (partition by four order by ten range between unbounded preceding and unbounded following)
-FROM (select distinct ten, four from tenk1) ss;
- four | ten | sum | last_value
-------+-----+-----+------------
- 0 | 0 | 20 | 8
- 0 | 2 | 20 | 8
- 0 | 4 | 20 | 8
- 0 | 6 | 20 | 8
- 0 | 8 | 20 | 8
- 1 | 1 | 25 | 9
- 1 | 3 | 25 | 9
- 1 | 5 | 25 | 9
- 1 | 7 | 25 | 9
- 1 | 9 | 25 | 9
- 2 | 0 | 20 | 8
- 2 | 2 | 20 | 8
- 2 | 4 | 20 | 8
- 2 | 6 | 20 | 8
- 2 | 8 | 20 | 8
- 3 | 1 | 25 | 9
- 3 | 3 | 25 | 9
- 3 | 5 | 25 | 9
- 3 | 7 | 25 | 9
- 3 | 9 | 25 | 9
-(20 rows)
-
-SELECT four, ten/4 as two,
- sum(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row),
- last_value(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row)
-FROM (select distinct ten, four from tenk1) ss;
- four | two | sum | last_value
-------+-----+-----+------------
- 0 | 0 | 0 | 0
- 0 | 0 | 0 | 0
- 0 | 1 | 2 | 1
- 0 | 1 | 2 | 1
- 0 | 2 | 4 | 2
- 1 | 0 | 0 | 0
- 1 | 0 | 0 | 0
- 1 | 1 | 2 | 1
- 1 | 1 | 2 | 1
- 1 | 2 | 4 | 2
- 2 | 0 | 0 | 0
- 2 | 0 | 0 | 0
- 2 | 1 | 2 | 1
- 2 | 1 | 2 | 1
- 2 | 2 | 4 | 2
- 3 | 0 | 0 | 0
- 3 | 0 | 0 | 0
- 3 | 1 | 2 | 1
- 3 | 1 | 2 | 1
- 3 | 2 | 4 | 2
-(20 rows)
-
-SELECT four, ten/4 as two,
- sum(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row),
- last_value(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row)
-FROM (select distinct ten, four from tenk1) ss;
- four | two | sum | last_value
-------+-----+-----+------------
- 0 | 0 | 0 | 0
- 0 | 0 | 0 | 0
- 0 | 1 | 1 | 1
- 0 | 1 | 2 | 1
- 0 | 2 | 4 | 2
- 1 | 0 | 0 | 0
- 1 | 0 | 0 | 0
- 1 | 1 | 1 | 1
- 1 | 1 | 2 | 1
- 1 | 2 | 4 | 2
- 2 | 0 | 0 | 0
- 2 | 0 | 0 | 0
- 2 | 1 | 1 | 1
- 2 | 1 | 2 | 1
- 2 | 2 | 4 | 2
- 3 | 0 | 0 | 0
- 3 | 0 | 0 | 0
- 3 | 1 | 1 | 1
- 3 | 1 | 2 | 1
- 3 | 2 | 4 | 2
-(20 rows)
-
-SELECT sum(unique1) over (order by four range between current row and unbounded following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 45 | 0 | 0
- 45 | 8 | 0
- 45 | 4 | 0
- 33 | 5 | 1
- 33 | 9 | 1
- 33 | 1 | 1
- 18 | 6 | 2
- 18 | 2 | 2
- 10 | 3 | 3
- 10 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (rows between current row and unbounded following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 45 | 4 | 0
- 41 | 2 | 2
- 39 | 1 | 1
- 38 | 6 | 2
- 32 | 9 | 1
- 23 | 8 | 0
- 15 | 5 | 1
- 10 | 3 | 3
- 7 | 7 | 3
- 0 | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (rows between 2 preceding and 2 following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 7 | 4 | 0
- 13 | 2 | 2
- 22 | 1 | 1
- 26 | 6 | 2
- 29 | 9 | 1
- 31 | 8 | 0
- 32 | 5 | 1
- 23 | 3 | 3
- 15 | 7 | 3
- 10 | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 7 | 4 | 0
- 13 | 2 | 2
- 22 | 1 | 1
- 26 | 6 | 2
- 29 | 9 | 1
- 31 | 8 | 0
- 32 | 5 | 1
- 23 | 3 | 3
- 15 | 7 | 3
- 10 | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude current row),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 3 | 4 | 0
- 11 | 2 | 2
- 21 | 1 | 1
- 20 | 6 | 2
- 20 | 9 | 1
- 23 | 8 | 0
- 27 | 5 | 1
- 20 | 3 | 3
- 8 | 7 | 3
- 10 | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude group),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- | 4 | 0
- | 2 | 2
- | 1 | 1
- | 6 | 2
- | 9 | 1
- | 8 | 0
- | 5 | 1
- | 3 | 3
- | 7 | 3
- | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude ties),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 4 | 4 | 0
- 2 | 2 | 2
- 1 | 1 | 1
- 6 | 6 | 2
- 9 | 9 | 1
- 8 | 8 | 0
- 5 | 5 | 1
- 3 | 3 | 3
- 7 | 7 | 3
- 0 | 0 | 0
-(10 rows)
-
-SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- first_value | unique1 | four
--------------+---------+------
- 8 | 0 | 0
- 4 | 8 | 0
- 5 | 4 | 0
- 9 | 5 | 1
- 1 | 9 | 1
- 6 | 1 | 1
- 2 | 6 | 2
- 3 | 2 | 2
- 7 | 3 | 3
- | 7 | 3
-(10 rows)
-
-SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- first_value | unique1 | four
--------------+---------+------
- | 0 | 0
- 5 | 8 | 0
- 5 | 4 | 0
- | 5 | 1
- 6 | 9 | 1
- 6 | 1 | 1
- 3 | 6 | 2
- 3 | 2 | 2
- | 3 | 3
- | 7 | 3
-(10 rows)
-
-SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- first_value | unique1 | four
--------------+---------+------
- 0 | 0 | 0
- 8 | 8 | 0
- 4 | 4 | 0
- 5 | 5 | 1
- 9 | 9 | 1
- 1 | 1 | 1
- 6 | 6 | 2
- 2 | 2 | 2
- 3 | 3 | 3
- 7 | 7 | 3
-(10 rows)
-
-SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- last_value | unique1 | four
-------------+---------+------
- 4 | 0 | 0
- 5 | 8 | 0
- 9 | 4 | 0
- 1 | 5 | 1
- 6 | 9 | 1
- 2 | 1 | 1
- 3 | 6 | 2
- 7 | 2 | 2
- 7 | 3 | 3
- | 7 | 3
-(10 rows)
-
-SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- last_value | unique1 | four
-------------+---------+------
- | 0 | 0
- 5 | 8 | 0
- 9 | 4 | 0
- | 5 | 1
- 6 | 9 | 1
- 2 | 1 | 1
- 3 | 6 | 2
- 7 | 2 | 2
- | 3 | 3
- | 7 | 3
-(10 rows)
-
-SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- last_value | unique1 | four
-------------+---------+------
- 0 | 0 | 0
- 5 | 8 | 0
- 9 | 4 | 0
- 5 | 5 | 1
- 6 | 9 | 1
- 2 | 1 | 1
- 3 | 6 | 2
- 7 | 2 | 2
- 3 | 3 | 3
- 7 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (rows between 2 preceding and 1 preceding),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- | 4 | 0
- 4 | 2 | 2
- 6 | 1 | 1
- 3 | 6 | 2
- 7 | 9 | 1
- 15 | 8 | 0
- 17 | 5 | 1
- 13 | 3 | 3
- 8 | 7 | 3
- 10 | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (rows between 1 following and 3 following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 9 | 4 | 0
- 16 | 2 | 2
- 23 | 1 | 1
- 22 | 6 | 2
- 16 | 9 | 1
- 15 | 8 | 0
- 10 | 5 | 1
- 7 | 3 | 3
- 0 | 7 | 3
- | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (rows between unbounded preceding and 1 following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 6 | 4 | 0
- 7 | 2 | 2
- 13 | 1 | 1
- 22 | 6 | 2
- 30 | 9 | 1
- 35 | 8 | 0
- 38 | 5 | 1
- 45 | 3 | 3
- 45 | 7 | 3
- 45 | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (w range between current row and unbounded following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
- sum | unique1 | four
------+---------+------
- 45 | 0 | 0
- 45 | 8 | 0
- 45 | 4 | 0
- 33 | 5 | 1
- 33 | 9 | 1
- 33 | 1 | 1
- 18 | 6 | 2
- 18 | 2 | 2
- 10 | 3 | 3
- 10 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (w range between unbounded preceding and current row exclude current row),
- unique1, four
-FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
- sum | unique1 | four
------+---------+------
- 12 | 0 | 0
- 4 | 8 | 0
- 8 | 4 | 0
- 22 | 5 | 1
- 18 | 9 | 1
- 26 | 1 | 1
- 29 | 6 | 2
- 33 | 2 | 2
- 42 | 3 | 3
- 38 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (w range between unbounded preceding and current row exclude group),
- unique1, four
-FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
- sum | unique1 | four
------+---------+------
- | 0 | 0
- | 8 | 0
- | 4 | 0
- 12 | 5 | 1
- 12 | 9 | 1
- 12 | 1 | 1
- 27 | 6 | 2
- 27 | 2 | 2
- 35 | 3 | 3
- 35 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (w range between unbounded preceding and current row exclude ties),
- unique1, four
-FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
- sum | unique1 | four
------+---------+------
- 0 | 0 | 0
- 8 | 8 | 0
- 4 | 4 | 0
- 17 | 5 | 1
- 21 | 9 | 1
- 13 | 1 | 1
- 33 | 6 | 2
- 29 | 2 | 2
- 38 | 3 | 3
- 42 | 7 | 3
-(10 rows)
-
-SELECT first_value(unique1) over w,
- nth_value(unique1, 2) over w AS nth_2,
- last_value(unique1) over w, unique1, four
-FROM tenk1 WHERE unique1 < 10
-WINDOW w AS (order by four range between current row and unbounded following);
- first_value | nth_2 | last_value | unique1 | four
--------------+-------+------------+---------+------
- 0 | 8 | 7 | 0 | 0
- 0 | 8 | 7 | 8 | 0
- 0 | 8 | 7 | 4 | 0
- 5 | 9 | 7 | 5 | 1
- 5 | 9 | 7 | 9 | 1
- 5 | 9 | 7 | 1 | 1
- 6 | 2 | 7 | 6 | 2
- 6 | 2 | 7 | 2 | 2
- 3 | 7 | 7 | 3 | 3
- 3 | 7 | 7 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over
- (order by unique1
- rows (SELECT unique1 FROM tenk1 ORDER BY unique1 LIMIT 1) + 1 PRECEDING),
- unique1
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1
------+---------
- 0 | 0
- 1 | 1
- 3 | 2
- 5 | 3
- 7 | 4
- 9 | 5
- 11 | 6
- 13 | 7
- 15 | 8
- 17 | 9
-(10 rows)
-
-CREATE TEMP VIEW v_window AS
- SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows
- FROM generate_series(1, 10) i;
-SELECT * FROM v_window;
- i | sum_rows
-----+----------
- 1 | 3
- 2 | 6
- 3 | 9
- 4 | 12
- 5 | 15
- 6 | 18
- 7 | 21
- 8 | 24
- 9 | 27
- 10 | 19
-(10 rows)
-
-SELECT pg_get_viewdef('v_window');
- pg_get_viewdef
------------------------------------------------------------------------------------
- SELECT i, +
- sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+
- FROM generate_series(1, 10) i(i);
-(1 row)
-
-CREATE OR REPLACE TEMP VIEW v_window AS
- SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
- exclude current row) as sum_rows FROM generate_series(1, 10) i;
-SELECT * FROM v_window;
- i | sum_rows
-----+----------
- 1 | 2
- 2 | 4
- 3 | 6
- 4 | 8
- 5 | 10
- 6 | 12
- 7 | 14
- 8 | 16
- 9 | 18
- 10 | 9
-(10 rows)
-
-SELECT pg_get_viewdef('v_window');
- pg_get_viewdef
--------------------------------------------------------------------------------------------------------
- SELECT i, +
- sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) AS sum_rows+
- FROM generate_series(1, 10) i(i);
-(1 row)
-
-CREATE OR REPLACE TEMP VIEW v_window AS
- SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
- exclude group) as sum_rows FROM generate_series(1, 10) i;
-SELECT * FROM v_window;
- i | sum_rows
-----+----------
- 1 | 2
- 2 | 4
- 3 | 6
- 4 | 8
- 5 | 10
- 6 | 12
- 7 | 14
- 8 | 16
- 9 | 18
- 10 | 9
-(10 rows)
-
-SELECT pg_get_viewdef('v_window');
- pg_get_viewdef
--------------------------------------------------------------------------------------------------
- SELECT i, +
- sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE GROUP) AS sum_rows+
- FROM generate_series(1, 10) i(i);
-(1 row)
-
-CREATE OR REPLACE TEMP VIEW v_window AS
- SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
- exclude ties) as sum_rows FROM generate_series(1, 10) i;
-SELECT * FROM v_window;
- i | sum_rows
-----+----------
- 1 | 3
- 2 | 6
- 3 | 9
- 4 | 12
- 5 | 15
- 6 | 18
- 7 | 21
- 8 | 24
- 9 | 27
- 10 | 19
-(10 rows)
-
-SELECT pg_get_viewdef('v_window');
- pg_get_viewdef
-------------------------------------------------------------------------------------------------
- SELECT i, +
- sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE TIES) AS sum_rows+
- FROM generate_series(1, 10) i(i);
-(1 row)
-
-CREATE OR REPLACE TEMP VIEW v_window AS
- SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
- exclude no others) as sum_rows FROM generate_series(1, 10) i;
-SELECT * FROM v_window;
- i | sum_rows
-----+----------
- 1 | 3
- 2 | 6
- 3 | 9
- 4 | 12
- 5 | 15
- 6 | 18
- 7 | 21
- 8 | 24
- 9 | 27
- 10 | 19
-(10 rows)
-
-SELECT pg_get_viewdef('v_window');
- pg_get_viewdef
------------------------------------------------------------------------------------
- SELECT i, +
- sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+
- FROM generate_series(1, 10) i(i);
-(1 row)
-
-CREATE OR REPLACE TEMP VIEW v_window AS
- SELECT i, sum(i) over (order by i groups between 1 preceding and 1 following) as sum_rows FROM generate_series(1, 10) i;
-SELECT * FROM v_window;
- i | sum_rows
-----+----------
- 1 | 3
- 2 | 6
- 3 | 9
- 4 | 12
- 5 | 15
- 6 | 18
- 7 | 21
- 8 | 24
- 9 | 27
- 10 | 19
-(10 rows)
-
-SELECT pg_get_viewdef('v_window');
- pg_get_viewdef
--------------------------------------------------------------------------------------
- SELECT i, +
- sum(i) OVER (ORDER BY i GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+
- FROM generate_series(1, 10) i(i);
-(1 row)
-
-DROP VIEW v_window;
-CREATE TEMP VIEW v_window AS
- SELECT i, min(i) over (order by i range between '1 day' preceding and '10 days' following) as min_i
- FROM generate_series(now(), now()+'100 days'::interval, '1 hour') i;
-SELECT pg_get_viewdef('v_window');
- pg_get_viewdef
------------------------------------------------------------------------------------------------------------------------
- SELECT i, +
- min(i) OVER (ORDER BY i RANGE BETWEEN '@ 1 day'::interval PRECEDING AND '@ 10 days'::interval FOLLOWING) AS min_i+
- FROM generate_series(now(), (now() + '@ 100 days'::interval), '@ 1 hour'::interval) i(i);
-(1 row)
-
--- RANGE offset PRECEDING/FOLLOWING tests
-SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- | 0 | 0
- | 8 | 0
- | 4 | 0
- 12 | 5 | 1
- 12 | 9 | 1
- 12 | 1 | 1
- 27 | 6 | 2
- 27 | 2 | 2
- 23 | 3 | 3
- 23 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four desc range between 2::int8 preceding and 1::int2 preceding),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- | 3 | 3
- | 7 | 3
- 10 | 6 | 2
- 10 | 2 | 2
- 18 | 9 | 1
- 18 | 5 | 1
- 18 | 1 | 1
- 23 | 0 | 0
- 23 | 8 | 0
- 23 | 4 | 0
-(10 rows)
-
-SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude no others),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- | 0 | 0
- | 8 | 0
- | 4 | 0
- 12 | 5 | 1
- 12 | 9 | 1
- 12 | 1 | 1
- 27 | 6 | 2
- 27 | 2 | 2
- 23 | 3 | 3
- 23 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude current row),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- | 0 | 0
- | 8 | 0
- | 4 | 0
- 12 | 5 | 1
- 12 | 9 | 1
- 12 | 1 | 1
- 27 | 6 | 2
- 27 | 2 | 2
- 23 | 3 | 3
- 23 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude group),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- | 0 | 0
- | 8 | 0
- | 4 | 0
- 12 | 5 | 1
- 12 | 9 | 1
- 12 | 1 | 1
- 27 | 6 | 2
- 27 | 2 | 2
- 23 | 3 | 3
- 23 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude ties),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- | 0 | 0
- | 8 | 0
- | 4 | 0
- 12 | 5 | 1
- 12 | 9 | 1
- 12 | 1 | 1
- 27 | 6 | 2
- 27 | 2 | 2
- 23 | 3 | 3
- 23 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude ties),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 33 | 0 | 0
- 41 | 8 | 0
- 37 | 4 | 0
- 35 | 5 | 1
- 39 | 9 | 1
- 31 | 1 | 1
- 43 | 6 | 2
- 39 | 2 | 2
- 26 | 3 | 3
- 30 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude group),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 33 | 0 | 0
- 33 | 8 | 0
- 33 | 4 | 0
- 30 | 5 | 1
- 30 | 9 | 1
- 30 | 1 | 1
- 37 | 6 | 2
- 37 | 2 | 2
- 23 | 3 | 3
- 23 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 4 | 0 | 0
- 12 | 4 | 0
- 12 | 8 | 0
- 6 | 1 | 1
- 15 | 5 | 1
- 14 | 9 | 1
- 8 | 2 | 2
- 8 | 6 | 2
- 10 | 3 | 3
- 10 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following
- exclude current row),unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 4 | 0 | 0
- 8 | 4 | 0
- 4 | 8 | 0
- 5 | 1 | 1
- 10 | 5 | 1
- 5 | 9 | 1
- 6 | 2 | 2
- 2 | 6 | 2
- 7 | 3 | 3
- 3 | 7 | 3
-(10 rows)
-
-select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following),
- salary, enroll_date from empsalary;
- sum | salary | enroll_date
--------+--------+-------------
- 34900 | 5000 | 10-01-2006
- 34900 | 6000 | 10-01-2006
- 38400 | 3900 | 12-23-2006
- 47100 | 4800 | 08-01-2007
- 47100 | 5200 | 08-01-2007
- 47100 | 4800 | 08-08-2007
- 47100 | 5200 | 08-15-2007
- 36100 | 3500 | 12-10-2007
- 32200 | 4500 | 01-01-2008
- 32200 | 4200 | 01-01-2008
-(10 rows)
-
-select sum(salary) over (order by enroll_date desc range between '1 year'::interval preceding and '1 year'::interval following),
- salary, enroll_date from empsalary;
- sum | salary | enroll_date
--------+--------+-------------
- 32200 | 4200 | 01-01-2008
- 32200 | 4500 | 01-01-2008
- 36100 | 3500 | 12-10-2007
- 47100 | 5200 | 08-15-2007
- 47100 | 4800 | 08-08-2007
- 47100 | 4800 | 08-01-2007
- 47100 | 5200 | 08-01-2007
- 38400 | 3900 | 12-23-2006
- 34900 | 5000 | 10-01-2006
- 34900 | 6000 | 10-01-2006
-(10 rows)
-
-select sum(salary) over (order by enroll_date desc range between '1 year'::interval following and '1 year'::interval following),
- salary, enroll_date from empsalary;
- sum | salary | enroll_date
------+--------+-------------
- | 4200 | 01-01-2008
- | 4500 | 01-01-2008
- | 3500 | 12-10-2007
- | 5200 | 08-15-2007
- | 4800 | 08-08-2007
- | 4800 | 08-01-2007
- | 5200 | 08-01-2007
- | 3900 | 12-23-2006
- | 5000 | 10-01-2006
- | 6000 | 10-01-2006
-(10 rows)
-
-select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following
- exclude current row), salary, enroll_date from empsalary;
- sum | salary | enroll_date
--------+--------+-------------
- 29900 | 5000 | 10-01-2006
- 28900 | 6000 | 10-01-2006
- 34500 | 3900 | 12-23-2006
- 42300 | 4800 | 08-01-2007
- 41900 | 5200 | 08-01-2007
- 42300 | 4800 | 08-08-2007
- 41900 | 5200 | 08-15-2007
- 32600 | 3500 | 12-10-2007
- 27700 | 4500 | 01-01-2008
- 28000 | 4200 | 01-01-2008
-(10 rows)
-
-select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following
- exclude group), salary, enroll_date from empsalary;
- sum | salary | enroll_date
--------+--------+-------------
- 23900 | 5000 | 10-01-2006
- 23900 | 6000 | 10-01-2006
- 34500 | 3900 | 12-23-2006
- 37100 | 4800 | 08-01-2007
- 37100 | 5200 | 08-01-2007
- 42300 | 4800 | 08-08-2007
- 41900 | 5200 | 08-15-2007
- 32600 | 3500 | 12-10-2007
- 23500 | 4500 | 01-01-2008
- 23500 | 4200 | 01-01-2008
-(10 rows)
-
-select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following
- exclude ties), salary, enroll_date from empsalary;
- sum | salary | enroll_date
--------+--------+-------------
- 28900 | 5000 | 10-01-2006
- 29900 | 6000 | 10-01-2006
- 38400 | 3900 | 12-23-2006
- 41900 | 4800 | 08-01-2007
- 42300 | 5200 | 08-01-2007
- 47100 | 4800 | 08-08-2007
- 47100 | 5200 | 08-15-2007
- 36100 | 3500 | 12-10-2007
- 28000 | 4500 | 01-01-2008
- 27700 | 4200 | 01-01-2008
-(10 rows)
-
-select first_value(salary) over(order by salary range between 1000 preceding and 1000 following),
- lead(salary) over(order by salary range between 1000 preceding and 1000 following),
- nth_value(salary, 1) over(order by salary range between 1000 preceding and 1000 following),
- salary from empsalary;
- first_value | lead | nth_value | salary
--------------+------+-----------+--------
- 3500 | 3900 | 3500 | 3500
- 3500 | 4200 | 3500 | 3900
- 3500 | 4500 | 3500 | 4200
- 3500 | 4800 | 3500 | 4500
- 3900 | 4800 | 3900 | 4800
- 3900 | 5000 | 3900 | 4800
- 4200 | 5200 | 4200 | 5000
- 4200 | 5200 | 4200 | 5200
- 4200 | 6000 | 4200 | 5200
- 5000 | | 5000 | 6000
-(10 rows)
-
-select last_value(salary) over(order by salary range between 1000 preceding and 1000 following),
- lag(salary) over(order by salary range between 1000 preceding and 1000 following),
- salary from empsalary;
- last_value | lag | salary
-------------+------+--------
- 4500 | | 3500
- 4800 | 3500 | 3900
- 5200 | 3900 | 4200
- 5200 | 4200 | 4500
- 5200 | 4500 | 4800
- 5200 | 4800 | 4800
- 6000 | 4800 | 5000
- 6000 | 5000 | 5200
- 6000 | 5200 | 5200
- 6000 | 5200 | 6000
-(10 rows)
-
-select first_value(salary) over(order by salary range between 1000 following and 3000 following
- exclude current row),
- lead(salary) over(order by salary range between 1000 following and 3000 following exclude ties),
- nth_value(salary, 1) over(order by salary range between 1000 following and 3000 following
- exclude ties),
- salary from empsalary;
- first_value | lead | nth_value | salary
--------------+------+-----------+--------
- 4500 | 3900 | 4500 | 3500
- 5000 | 4200 | 5000 | 3900
- 5200 | 4500 | 5200 | 4200
- 6000 | 4800 | 6000 | 4500
- 6000 | 4800 | 6000 | 4800
- 6000 | 5000 | 6000 | 4800
- 6000 | 5200 | 6000 | 5000
- | 5200 | | 5200
- | 6000 | | 5200
- | | | 6000
-(10 rows)
-
-select last_value(salary) over(order by salary range between 1000 following and 3000 following
- exclude group),
- lag(salary) over(order by salary range between 1000 following and 3000 following exclude group),
- salary from empsalary;
- last_value | lag | salary
-------------+------+--------
- 6000 | | 3500
- 6000 | 3500 | 3900
- 6000 | 3900 | 4200
- 6000 | 4200 | 4500
- 6000 | 4500 | 4800
- 6000 | 4800 | 4800
- 6000 | 4800 | 5000
- | 5000 | 5200
- | 5200 | 5200
- | 5200 | 6000
-(10 rows)
-
-select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
- exclude ties),
- last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following),
- salary, enroll_date from empsalary;
- first_value | last_value | salary | enroll_date
--------------+------------+--------+-------------
- 5000 | 5200 | 5000 | 10-01-2006
- 6000 | 5200 | 6000 | 10-01-2006
- 5000 | 3500 | 3900 | 12-23-2006
- 5000 | 4200 | 4800 | 08-01-2007
- 5000 | 4200 | 5200 | 08-01-2007
- 5000 | 4200 | 4800 | 08-08-2007
- 5000 | 4200 | 5200 | 08-15-2007
- 5000 | 4200 | 3500 | 12-10-2007
- 5000 | 4200 | 4500 | 01-01-2008
- 5000 | 4200 | 4200 | 01-01-2008
-(10 rows)
-
-select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
- exclude ties),
- last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
- exclude ties),
- salary, enroll_date from empsalary;
- first_value | last_value | salary | enroll_date
--------------+------------+--------+-------------
- 5000 | 5200 | 5000 | 10-01-2006
- 6000 | 5200 | 6000 | 10-01-2006
- 5000 | 3500 | 3900 | 12-23-2006
- 5000 | 4200 | 4800 | 08-01-2007
- 5000 | 4200 | 5200 | 08-01-2007
- 5000 | 4200 | 4800 | 08-08-2007
- 5000 | 4200 | 5200 | 08-15-2007
- 5000 | 4200 | 3500 | 12-10-2007
- 5000 | 4500 | 4500 | 01-01-2008
- 5000 | 4200 | 4200 | 01-01-2008
-(10 rows)
-
-select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
- exclude group),
- last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
- exclude group),
- salary, enroll_date from empsalary;
- first_value | last_value | salary | enroll_date
--------------+------------+--------+-------------
- 3900 | 5200 | 5000 | 10-01-2006
- 3900 | 5200 | 6000 | 10-01-2006
- 5000 | 3500 | 3900 | 12-23-2006
- 5000 | 4200 | 4800 | 08-01-2007
- 5000 | 4200 | 5200 | 08-01-2007
- 5000 | 4200 | 4800 | 08-08-2007
- 5000 | 4200 | 5200 | 08-15-2007
- 5000 | 4200 | 3500 | 12-10-2007
- 5000 | 3500 | 4500 | 01-01-2008
- 5000 | 3500 | 4200 | 01-01-2008
-(10 rows)
-
-select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
- exclude current row),
- last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
- exclude current row),
- salary, enroll_date from empsalary;
- first_value | last_value | salary | enroll_date
--------------+------------+--------+-------------
- 6000 | 5200 | 5000 | 10-01-2006
- 5000 | 5200 | 6000 | 10-01-2006
- 5000 | 3500 | 3900 | 12-23-2006
- 5000 | 4200 | 4800 | 08-01-2007
- 5000 | 4200 | 5200 | 08-01-2007
- 5000 | 4200 | 4800 | 08-08-2007
- 5000 | 4200 | 5200 | 08-15-2007
- 5000 | 4200 | 3500 | 12-10-2007
- 5000 | 4200 | 4500 | 01-01-2008
- 5000 | 4500 | 4200 | 01-01-2008
-(10 rows)
-
--- RANGE offset PRECEDING/FOLLOWING with null values
-select x, y,
- first_value(y) over w,
- last_value(y) over w
-from
- (select x, x as y from generate_series(1,5) as x
- union all select null, 42
- union all select null, 43) ss
-window w as
- (order by x asc nulls first range between 2 preceding and 2 following);
- x | y | first_value | last_value
----+----+-------------+------------
- | 42 | 42 | 43
- | 43 | 42 | 43
- 1 | 1 | 1 | 3
- 2 | 2 | 1 | 4
- 3 | 3 | 1 | 5
- 4 | 4 | 2 | 5
- 5 | 5 | 3 | 5
-(7 rows)
-
-select x, y,
- first_value(y) over w,
- last_value(y) over w
-from
- (select x, x as y from generate_series(1,5) as x
- union all select null, 42
- union all select null, 43) ss
-window w as
- (order by x asc nulls last range between 2 preceding and 2 following);
- x | y | first_value | last_value
----+----+-------------+------------
- 1 | 1 | 1 | 3
- 2 | 2 | 1 | 4
- 3 | 3 | 1 | 5
- 4 | 4 | 2 | 5
- 5 | 5 | 3 | 5
- | 42 | 42 | 43
- | 43 | 42 | 43
-(7 rows)
-
-select x, y,
- first_value(y) over w,
- last_value(y) over w
-from
- (select x, x as y from generate_series(1,5) as x
- union all select null, 42
- union all select null, 43) ss
-window w as
- (order by x desc nulls first range between 2 preceding and 2 following);
- x | y | first_value | last_value
----+----+-------------+------------
- | 43 | 43 | 42
- | 42 | 43 | 42
- 5 | 5 | 5 | 3
- 4 | 4 | 5 | 2
- 3 | 3 | 5 | 1
- 2 | 2 | 4 | 1
- 1 | 1 | 3 | 1
-(7 rows)
-
-select x, y,
- first_value(y) over w,
- last_value(y) over w
-from
- (select x, x as y from generate_series(1,5) as x
- union all select null, 42
- union all select null, 43) ss
-window w as
- (order by x desc nulls last range between 2 preceding and 2 following);
- x | y | first_value | last_value
----+----+-------------+------------
- 5 | 5 | 5 | 3
- 4 | 4 | 5 | 2
- 3 | 3 | 5 | 1
- 2 | 2 | 4 | 1
- 1 | 1 | 3 | 1
- | 42 | 42 | 43
- | 43 | 42 | 43
-(7 rows)
-
--- There is a syntactic ambiguity in the SQL standard. Since
--- UNBOUNDED is a non-reserved word, it could be the name of a
--- function parameter and be used as an expression. There is a
--- grammar hack to resolve such cases as the keyword. The following
--- tests record this behavior.
-CREATE FUNCTION unbounded_syntax_test1a(x int) RETURNS TABLE (a int, b int, c int)
-LANGUAGE SQL
-BEGIN ATOMIC
- SELECT sum(unique1) over (rows between x preceding and x following),
- unique1, four
- FROM tenk1 WHERE unique1 < 10;
-END;
-CREATE FUNCTION unbounded_syntax_test1b(x int) RETURNS TABLE (a int, b int, c int)
-LANGUAGE SQL
-AS $$
- SELECT sum(unique1) over (rows between x preceding and x following),
- unique1, four
- FROM tenk1 WHERE unique1 < 10;
-$$;
--- These will apply the argument to the window specification inside the function.
-SELECT * FROM unbounded_syntax_test1a(2);
- a | b | c
-----+---+---
- 7 | 4 | 0
- 13 | 2 | 2
- 22 | 1 | 1
- 26 | 6 | 2
- 29 | 9 | 1
- 31 | 8 | 0
- 32 | 5 | 1
- 23 | 3 | 3
- 15 | 7 | 3
- 10 | 0 | 0
-(10 rows)
-
-SELECT * FROM unbounded_syntax_test1b(2);
- a | b | c
-----+---+---
- 7 | 4 | 0
- 13 | 2 | 2
- 22 | 1 | 1
- 26 | 6 | 2
- 29 | 9 | 1
- 31 | 8 | 0
- 32 | 5 | 1
- 23 | 3 | 3
- 15 | 7 | 3
- 10 | 0 | 0
-(10 rows)
-
-CREATE FUNCTION unbounded_syntax_test2a(unbounded int) RETURNS TABLE (a int, b int, c int)
-LANGUAGE SQL
-BEGIN ATOMIC
- SELECT sum(unique1) over (rows between unbounded preceding and unbounded following),
- unique1, four
- FROM tenk1 WHERE unique1 < 10;
-END;
-CREATE FUNCTION unbounded_syntax_test2b(unbounded int) RETURNS TABLE (a int, b int, c int)
-LANGUAGE SQL
-AS $$
- SELECT sum(unique1) over (rows between unbounded preceding and unbounded following),
- unique1, four
- FROM tenk1 WHERE unique1 < 10;
-$$;
--- These will not apply the argument but instead treat UNBOUNDED as a keyword.
-SELECT * FROM unbounded_syntax_test2a(2);
- a | b | c
-----+---+---
- 45 | 4 | 0
- 45 | 2 | 2
- 45 | 1 | 1
- 45 | 6 | 2
- 45 | 9 | 1
- 45 | 8 | 0
- 45 | 5 | 1
- 45 | 3 | 3
- 45 | 7 | 3
- 45 | 0 | 0
-(10 rows)
-
-SELECT * FROM unbounded_syntax_test2b(2);
- a | b | c
-----+---+---
- 45 | 4 | 0
- 45 | 2 | 2
- 45 | 1 | 1
- 45 | 6 | 2
- 45 | 9 | 1
- 45 | 8 | 0
- 45 | 5 | 1
- 45 | 3 | 3
- 45 | 7 | 3
- 45 | 0 | 0
-(10 rows)
-
-DROP FUNCTION unbounded_syntax_test1a, unbounded_syntax_test1b,
- unbounded_syntax_test2a, unbounded_syntax_test2b;
--- Other tests with token UNBOUNDED in potentially problematic position
-CREATE FUNCTION unbounded(x int) RETURNS int LANGUAGE SQL IMMUTABLE RETURN x;
-SELECT sum(unique1) over (rows between 1 preceding and 1 following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 6 | 4 | 0
- 7 | 2 | 2
- 9 | 1 | 1
- 16 | 6 | 2
- 23 | 9 | 1
- 22 | 8 | 0
- 16 | 5 | 1
- 15 | 3 | 3
- 10 | 7 | 3
- 7 | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (rows between unbounded(1) preceding and unbounded(1) following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 6 | 4 | 0
- 7 | 2 | 2
- 9 | 1 | 1
- 16 | 6 | 2
- 23 | 9 | 1
- 22 | 8 | 0
- 16 | 5 | 1
- 15 | 3 | 3
- 10 | 7 | 3
- 7 | 0 | 0
-(10 rows)
-
-SELECT sum(unique1) over (rows between unbounded.x preceding and unbounded.x following),
- unique1, four
-FROM tenk1, (values (1)) as unbounded(x) WHERE unique1 < 10;
-ERROR: argument of ROWS must not contain variables
-LINE 1: SELECT sum(unique1) over (rows between unbounded.x preceding...
- ^
-DROP FUNCTION unbounded;
--- Check overflow behavior for various integer sizes
-select x, last_value(x) over (order by x::smallint range between current row and 2147450884 following)
-from generate_series(32764, 32766) x;
- x | last_value
--------+------------
- 32764 | 32766
- 32765 | 32766
- 32766 | 32766
-(3 rows)
-
-select x, last_value(x) over (order by x::smallint desc range between current row and 2147450885 following)
-from generate_series(-32766, -32764) x;
- x | last_value
---------+------------
- -32764 | -32766
- -32765 | -32766
- -32766 | -32766
-(3 rows)
-
-select x, last_value(x) over (order by x range between current row and 4 following)
-from generate_series(2147483644, 2147483646) x;
- x | last_value
-------------+------------
- 2147483644 | 2147483646
- 2147483645 | 2147483646
- 2147483646 | 2147483646
-(3 rows)
-
-select x, last_value(x) over (order by x desc range between current row and 5 following)
-from generate_series(-2147483646, -2147483644) x;
- x | last_value
--------------+-------------
- -2147483644 | -2147483646
- -2147483645 | -2147483646
- -2147483646 | -2147483646
-(3 rows)
-
-select x, last_value(x) over (order by x range between current row and 4 following)
-from generate_series(9223372036854775804, 9223372036854775806) x;
- x | last_value
----------------------+---------------------
- 9223372036854775804 | 9223372036854775806
- 9223372036854775805 | 9223372036854775806
- 9223372036854775806 | 9223372036854775806
-(3 rows)
-
-select x, last_value(x) over (order by x desc range between current row and 5 following)
-from generate_series(-9223372036854775806, -9223372036854775804) x;
- x | last_value
-----------------------+----------------------
- -9223372036854775804 | -9223372036854775806
- -9223372036854775805 | -9223372036854775806
- -9223372036854775806 | -9223372036854775806
-(3 rows)
-
--- Test in_range for other numeric datatypes
-create temp table numerics(
- id int,
- f_float4 float4,
- f_float8 float8,
- f_numeric numeric
-);
-insert into numerics values
-(0, '-infinity', '-infinity', '-infinity'),
-(1, -3, -3, -3),
-(2, -1, -1, -1),
-(3, 0, 0, 0),
-(4, 1.1, 1.1, 1.1),
-(5, 1.12, 1.12, 1.12),
-(6, 2, 2, 2),
-(7, 100, 100, 100),
-(8, 'infinity', 'infinity', 'infinity'),
-(9, 'NaN', 'NaN', 'NaN');
-select id, f_float4, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float4 range between
- 1 preceding and 1 following);
- id | f_float4 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 0
- 1 | -3 | 1 | 1
- 2 | -1 | 2 | 3
- 3 | 0 | 2 | 3
- 4 | 1.1 | 4 | 6
- 5 | 1.12 | 4 | 6
- 6 | 2 | 4 | 6
- 7 | 100 | 7 | 7
- 8 | Infinity | 8 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float4, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float4 range between
- 1 preceding and 1.1::float4 following);
- id | f_float4 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 0
- 1 | -3 | 1 | 1
- 2 | -1 | 2 | 3
- 3 | 0 | 2 | 4
- 4 | 1.1 | 4 | 6
- 5 | 1.12 | 4 | 6
- 6 | 2 | 4 | 6
- 7 | 100 | 7 | 7
- 8 | Infinity | 8 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float4, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float4 range between
- 'inf' preceding and 'inf' following);
- id | f_float4 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 8
- 1 | -3 | 0 | 8
- 2 | -1 | 0 | 8
- 3 | 0 | 0 | 8
- 4 | 1.1 | 0 | 8
- 5 | 1.12 | 0 | 8
- 6 | 2 | 0 | 8
- 7 | 100 | 0 | 8
- 8 | Infinity | 0 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float4, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float4 range between
- 'inf' preceding and 'inf' preceding);
- id | f_float4 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 0
- 1 | -3 | 0 | 0
- 2 | -1 | 0 | 0
- 3 | 0 | 0 | 0
- 4 | 1.1 | 0 | 0
- 5 | 1.12 | 0 | 0
- 6 | 2 | 0 | 0
- 7 | 100 | 0 | 0
- 8 | Infinity | 0 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float4, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float4 range between
- 'inf' following and 'inf' following);
- id | f_float4 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 8
- 1 | -3 | 8 | 8
- 2 | -1 | 8 | 8
- 3 | 0 | 8 | 8
- 4 | 1.1 | 8 | 8
- 5 | 1.12 | 8 | 8
- 6 | 2 | 8 | 8
- 7 | 100 | 8 | 8
- 8 | Infinity | 8 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float4, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float4 range between
- 1.1 preceding and 'NaN' following); -- error, NaN disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_float8, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float8 range between
- 1 preceding and 1 following);
- id | f_float8 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 0
- 1 | -3 | 1 | 1
- 2 | -1 | 2 | 3
- 3 | 0 | 2 | 3
- 4 | 1.1 | 4 | 6
- 5 | 1.12 | 4 | 6
- 6 | 2 | 4 | 6
- 7 | 100 | 7 | 7
- 8 | Infinity | 8 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float8, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float8 range between
- 1 preceding and 1.1::float8 following);
- id | f_float8 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 0
- 1 | -3 | 1 | 1
- 2 | -1 | 2 | 3
- 3 | 0 | 2 | 4
- 4 | 1.1 | 4 | 6
- 5 | 1.12 | 4 | 6
- 6 | 2 | 4 | 6
- 7 | 100 | 7 | 7
- 8 | Infinity | 8 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float8, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float8 range between
- 'inf' preceding and 'inf' following);
- id | f_float8 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 8
- 1 | -3 | 0 | 8
- 2 | -1 | 0 | 8
- 3 | 0 | 0 | 8
- 4 | 1.1 | 0 | 8
- 5 | 1.12 | 0 | 8
- 6 | 2 | 0 | 8
- 7 | 100 | 0 | 8
- 8 | Infinity | 0 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float8, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float8 range between
- 'inf' preceding and 'inf' preceding);
- id | f_float8 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 0
- 1 | -3 | 0 | 0
- 2 | -1 | 0 | 0
- 3 | 0 | 0 | 0
- 4 | 1.1 | 0 | 0
- 5 | 1.12 | 0 | 0
- 6 | 2 | 0 | 0
- 7 | 100 | 0 | 0
- 8 | Infinity | 0 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float8, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float8 range between
- 'inf' following and 'inf' following);
- id | f_float8 | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 8
- 1 | -3 | 8 | 8
- 2 | -1 | 8 | 8
- 3 | 0 | 8 | 8
- 4 | 1.1 | 8 | 8
- 5 | 1.12 | 8 | 8
- 6 | 2 | 8 | 8
- 7 | 100 | 8 | 8
- 8 | Infinity | 8 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_float8, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_float8 range between
- 1.1 preceding and 'NaN' following); -- error, NaN disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_numeric, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_numeric range between
- 1 preceding and 1 following);
- id | f_numeric | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 0
- 1 | -3 | 1 | 1
- 2 | -1 | 2 | 3
- 3 | 0 | 2 | 3
- 4 | 1.1 | 4 | 6
- 5 | 1.12 | 4 | 6
- 6 | 2 | 4 | 6
- 7 | 100 | 7 | 7
- 8 | Infinity | 8 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_numeric, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_numeric range between
- 1 preceding and 1.1::numeric following);
- id | f_numeric | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 0
- 1 | -3 | 1 | 1
- 2 | -1 | 2 | 3
- 3 | 0 | 2 | 4
- 4 | 1.1 | 4 | 6
- 5 | 1.12 | 4 | 6
- 6 | 2 | 4 | 6
- 7 | 100 | 7 | 7
- 8 | Infinity | 8 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_numeric, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_numeric range between
- 1 preceding and 1.1::float8 following); -- currently unsupported
-ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type numeric and offset type double precision
-LINE 4: 1 preceding and 1.1::float8 following);
- ^
-HINT: Cast the offset value to an appropriate type.
-select id, f_numeric, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_numeric range between
- 'inf' preceding and 'inf' following);
- id | f_numeric | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 8
- 1 | -3 | 0 | 8
- 2 | -1 | 0 | 8
- 3 | 0 | 0 | 8
- 4 | 1.1 | 0 | 8
- 5 | 1.12 | 0 | 8
- 6 | 2 | 0 | 8
- 7 | 100 | 0 | 8
- 8 | Infinity | 0 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_numeric, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_numeric range between
- 'inf' preceding and 'inf' preceding);
- id | f_numeric | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 0
- 1 | -3 | 0 | 0
- 2 | -1 | 0 | 0
- 3 | 0 | 0 | 0
- 4 | 1.1 | 0 | 0
- 5 | 1.12 | 0 | 0
- 6 | 2 | 0 | 0
- 7 | 100 | 0 | 0
- 8 | Infinity | 0 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_numeric, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_numeric range between
- 'inf' following and 'inf' following);
- id | f_numeric | first_value | last_value
-----+-----------+-------------+------------
- 0 | -Infinity | 0 | 8
- 1 | -3 | 8 | 8
- 2 | -1 | 8 | 8
- 3 | 0 | 8 | 8
- 4 | 1.1 | 8 | 8
- 5 | 1.12 | 8 | 8
- 6 | 2 | 8 | 8
- 7 | 100 | 8 | 8
- 8 | Infinity | 8 | 8
- 9 | NaN | 9 | 9
-(10 rows)
-
-select id, f_numeric, first_value(id) over w, last_value(id) over w
-from numerics
-window w as (order by f_numeric range between
- 1.1 preceding and 'NaN' following); -- error, NaN disallowed
-ERROR: invalid preceding or following size in window function
--- Test in_range for other datetime datatypes
-create temp table datetimes(
- id int,
- f_time time,
- f_timetz timetz,
- f_interval interval,
- f_timestamptz timestamptz,
- f_timestamp timestamp
-);
-insert into datetimes values
-(0, '10:00', '10:00 BST', '-infinity', '-infinity', '-infinity'),
-(1, '11:00', '11:00 BST', '1 year', '2000-10-19 10:23:54+01', '2000-10-19 10:23:54'),
-(2, '12:00', '12:00 BST', '2 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'),
-(3, '13:00', '13:00 BST', '3 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'),
-(4, '14:00', '14:00 BST', '4 years', '2002-10-19 10:23:54+01', '2002-10-19 10:23:54'),
-(5, '15:00', '15:00 BST', '5 years', '2003-10-19 10:23:54+01', '2003-10-19 10:23:54'),
-(6, '15:00', '15:00 BST', '5 years', '2004-10-19 10:23:54+01', '2004-10-19 10:23:54'),
-(7, '17:00', '17:00 BST', '7 years', '2005-10-19 10:23:54+01', '2005-10-19 10:23:54'),
-(8, '18:00', '18:00 BST', '8 years', '2006-10-19 10:23:54+01', '2006-10-19 10:23:54'),
-(9, '19:00', '19:00 BST', '9 years', '2007-10-19 10:23:54+01', '2007-10-19 10:23:54'),
-(10, '20:00', '20:00 BST', '10 years', '2008-10-19 10:23:54+01', '2008-10-19 10:23:54'),
-(11, '21:00', '21:00 BST', 'infinity', 'infinity', 'infinity');
-select id, f_time, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_time range between
- '70 min'::interval preceding and '2 hours'::interval following);
- id | f_time | first_value | last_value
-----+----------+-------------+------------
- 0 | 10:00:00 | 0 | 2
- 1 | 11:00:00 | 0 | 3
- 2 | 12:00:00 | 1 | 4
- 3 | 13:00:00 | 2 | 6
- 4 | 14:00:00 | 3 | 6
- 5 | 15:00:00 | 4 | 7
- 6 | 15:00:00 | 4 | 7
- 7 | 17:00:00 | 7 | 9
- 8 | 18:00:00 | 7 | 10
- 9 | 19:00:00 | 8 | 11
- 10 | 20:00:00 | 9 | 11
- 11 | 21:00:00 | 10 | 11
-(12 rows)
-
-select id, f_time, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_time desc range between
- '70 min' preceding and '2 hours' following);
- id | f_time | first_value | last_value
-----+----------+-------------+------------
- 11 | 21:00:00 | 11 | 9
- 10 | 20:00:00 | 11 | 8
- 9 | 19:00:00 | 10 | 7
- 8 | 18:00:00 | 9 | 7
- 7 | 17:00:00 | 8 | 5
- 6 | 15:00:00 | 6 | 3
- 5 | 15:00:00 | 6 | 3
- 4 | 14:00:00 | 6 | 2
- 3 | 13:00:00 | 4 | 1
- 2 | 12:00:00 | 3 | 0
- 1 | 11:00:00 | 2 | 0
- 0 | 10:00:00 | 1 | 0
-(12 rows)
-
-select id, f_time, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_time desc range between
- '-70 min' preceding and '2 hours' following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_time, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_time range between
- 'infinity'::interval preceding and 'infinity'::interval following);
- id | f_time | first_value | last_value
-----+----------+-------------+------------
- 0 | 10:00:00 | 0 | 11
- 1 | 11:00:00 | 0 | 11
- 2 | 12:00:00 | 0 | 11
- 3 | 13:00:00 | 0 | 11
- 4 | 14:00:00 | 0 | 11
- 5 | 15:00:00 | 0 | 11
- 6 | 15:00:00 | 0 | 11
- 7 | 17:00:00 | 0 | 11
- 8 | 18:00:00 | 0 | 11
- 9 | 19:00:00 | 0 | 11
- 10 | 20:00:00 | 0 | 11
- 11 | 21:00:00 | 0 | 11
-(12 rows)
-
-select id, f_time, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_time range between
- 'infinity'::interval preceding and 'infinity'::interval preceding);
- id | f_time | first_value | last_value
-----+----------+-------------+------------
- 0 | 10:00:00 | |
- 1 | 11:00:00 | |
- 2 | 12:00:00 | |
- 3 | 13:00:00 | |
- 4 | 14:00:00 | |
- 5 | 15:00:00 | |
- 6 | 15:00:00 | |
- 7 | 17:00:00 | |
- 8 | 18:00:00 | |
- 9 | 19:00:00 | |
- 10 | 20:00:00 | |
- 11 | 21:00:00 | |
-(12 rows)
-
-select id, f_time, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_time range between
- 'infinity'::interval following and 'infinity'::interval following);
- id | f_time | first_value | last_value
-----+----------+-------------+------------
- 0 | 10:00:00 | |
- 1 | 11:00:00 | |
- 2 | 12:00:00 | |
- 3 | 13:00:00 | |
- 4 | 14:00:00 | |
- 5 | 15:00:00 | |
- 6 | 15:00:00 | |
- 7 | 17:00:00 | |
- 8 | 18:00:00 | |
- 9 | 19:00:00 | |
- 10 | 20:00:00 | |
- 11 | 21:00:00 | |
-(12 rows)
-
-select id, f_time, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_time range between
- '-infinity'::interval following and
- 'infinity'::interval following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_timetz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timetz range between
- '70 min'::interval preceding and '2 hours'::interval following);
- id | f_timetz | first_value | last_value
-----+-------------+-------------+------------
- 0 | 10:00:00+01 | 0 | 2
- 1 | 11:00:00+01 | 0 | 3
- 2 | 12:00:00+01 | 1 | 4
- 3 | 13:00:00+01 | 2 | 6
- 4 | 14:00:00+01 | 3 | 6
- 5 | 15:00:00+01 | 4 | 7
- 6 | 15:00:00+01 | 4 | 7
- 7 | 17:00:00+01 | 7 | 9
- 8 | 18:00:00+01 | 7 | 10
- 9 | 19:00:00+01 | 8 | 11
- 10 | 20:00:00+01 | 9 | 11
- 11 | 21:00:00+01 | 10 | 11
-(12 rows)
-
-select id, f_timetz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timetz desc range between
- '70 min' preceding and '2 hours' following);
- id | f_timetz | first_value | last_value
-----+-------------+-------------+------------
- 11 | 21:00:00+01 | 11 | 9
- 10 | 20:00:00+01 | 11 | 8
- 9 | 19:00:00+01 | 10 | 7
- 8 | 18:00:00+01 | 9 | 7
- 7 | 17:00:00+01 | 8 | 5
- 6 | 15:00:00+01 | 6 | 3
- 5 | 15:00:00+01 | 6 | 3
- 4 | 14:00:00+01 | 6 | 2
- 3 | 13:00:00+01 | 4 | 1
- 2 | 12:00:00+01 | 3 | 0
- 1 | 11:00:00+01 | 2 | 0
- 0 | 10:00:00+01 | 1 | 0
-(12 rows)
-
-select id, f_timetz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timetz desc range between
- '70 min' preceding and '-2 hours' following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_timetz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timetz range between
- 'infinity'::interval preceding and 'infinity'::interval following);
- id | f_timetz | first_value | last_value
-----+-------------+-------------+------------
- 0 | 10:00:00+01 | 0 | 11
- 1 | 11:00:00+01 | 0 | 11
- 2 | 12:00:00+01 | 0 | 11
- 3 | 13:00:00+01 | 0 | 11
- 4 | 14:00:00+01 | 0 | 11
- 5 | 15:00:00+01 | 0 | 11
- 6 | 15:00:00+01 | 0 | 11
- 7 | 17:00:00+01 | 0 | 11
- 8 | 18:00:00+01 | 0 | 11
- 9 | 19:00:00+01 | 0 | 11
- 10 | 20:00:00+01 | 0 | 11
- 11 | 21:00:00+01 | 0 | 11
-(12 rows)
-
-select id, f_timetz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timetz range between
- 'infinity'::interval preceding and 'infinity'::interval preceding);
- id | f_timetz | first_value | last_value
-----+-------------+-------------+------------
- 0 | 10:00:00+01 | |
- 1 | 11:00:00+01 | |
- 2 | 12:00:00+01 | |
- 3 | 13:00:00+01 | |
- 4 | 14:00:00+01 | |
- 5 | 15:00:00+01 | |
- 6 | 15:00:00+01 | |
- 7 | 17:00:00+01 | |
- 8 | 18:00:00+01 | |
- 9 | 19:00:00+01 | |
- 10 | 20:00:00+01 | |
- 11 | 21:00:00+01 | |
-(12 rows)
-
-select id, f_timetz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timetz range between
- 'infinity'::interval following and 'infinity'::interval following);
- id | f_timetz | first_value | last_value
-----+-------------+-------------+------------
- 0 | 10:00:00+01 | |
- 1 | 11:00:00+01 | |
- 2 | 12:00:00+01 | |
- 3 | 13:00:00+01 | |
- 4 | 14:00:00+01 | |
- 5 | 15:00:00+01 | |
- 6 | 15:00:00+01 | |
- 7 | 17:00:00+01 | |
- 8 | 18:00:00+01 | |
- 9 | 19:00:00+01 | |
- 10 | 20:00:00+01 | |
- 11 | 21:00:00+01 | |
-(12 rows)
-
-select id, f_timetz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timetz range between
- 'infinity'::interval following and
- '-infinity'::interval following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_interval, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_interval range between
- '1 year'::interval preceding and '1 year'::interval following);
- id | f_interval | first_value | last_value
-----+------------+-------------+------------
- 0 | -infinity | 0 | 0
- 1 | @ 1 year | 1 | 2
- 2 | @ 2 years | 1 | 3
- 3 | @ 3 years | 2 | 4
- 4 | @ 4 years | 3 | 6
- 5 | @ 5 years | 4 | 6
- 6 | @ 5 years | 4 | 6
- 7 | @ 7 years | 7 | 8
- 8 | @ 8 years | 7 | 9
- 9 | @ 9 years | 8 | 10
- 10 | @ 10 years | 9 | 10
- 11 | infinity | 11 | 11
-(12 rows)
-
-select id, f_interval, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_interval desc range between
- '1 year' preceding and '1 year' following);
- id | f_interval | first_value | last_value
-----+------------+-------------+------------
- 11 | infinity | 11 | 11
- 10 | @ 10 years | 10 | 9
- 9 | @ 9 years | 10 | 8
- 8 | @ 8 years | 9 | 7
- 7 | @ 7 years | 8 | 7
- 6 | @ 5 years | 6 | 4
- 5 | @ 5 years | 6 | 4
- 4 | @ 4 years | 6 | 3
- 3 | @ 3 years | 4 | 2
- 2 | @ 2 years | 3 | 1
- 1 | @ 1 year | 2 | 1
- 0 | -infinity | 0 | 0
-(12 rows)
-
-select id, f_interval, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_interval desc range between
- '-1 year' preceding and '1 year' following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_interval, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_interval range between
- 'infinity'::interval preceding and 'infinity'::interval following);
- id | f_interval | first_value | last_value
-----+------------+-------------+------------
- 0 | -infinity | 0 | 11
- 1 | @ 1 year | 0 | 11
- 2 | @ 2 years | 0 | 11
- 3 | @ 3 years | 0 | 11
- 4 | @ 4 years | 0 | 11
- 5 | @ 5 years | 0 | 11
- 6 | @ 5 years | 0 | 11
- 7 | @ 7 years | 0 | 11
- 8 | @ 8 years | 0 | 11
- 9 | @ 9 years | 0 | 11
- 10 | @ 10 years | 0 | 11
- 11 | infinity | 0 | 11
-(12 rows)
-
-select id, f_interval, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_interval range between
- 'infinity'::interval preceding and 'infinity'::interval preceding);
- id | f_interval | first_value | last_value
-----+------------+-------------+------------
- 0 | -infinity | 0 | 0
- 1 | @ 1 year | 0 | 0
- 2 | @ 2 years | 0 | 0
- 3 | @ 3 years | 0 | 0
- 4 | @ 4 years | 0 | 0
- 5 | @ 5 years | 0 | 0
- 6 | @ 5 years | 0 | 0
- 7 | @ 7 years | 0 | 0
- 8 | @ 8 years | 0 | 0
- 9 | @ 9 years | 0 | 0
- 10 | @ 10 years | 0 | 0
- 11 | infinity | 0 | 11
-(12 rows)
-
-select id, f_interval, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_interval range between
- 'infinity'::interval following and 'infinity'::interval following);
- id | f_interval | first_value | last_value
-----+------------+-------------+------------
- 0 | -infinity | 0 | 11
- 1 | @ 1 year | 11 | 11
- 2 | @ 2 years | 11 | 11
- 3 | @ 3 years | 11 | 11
- 4 | @ 4 years | 11 | 11
- 5 | @ 5 years | 11 | 11
- 6 | @ 5 years | 11 | 11
- 7 | @ 7 years | 11 | 11
- 8 | @ 8 years | 11 | 11
- 9 | @ 9 years | 11 | 11
- 10 | @ 10 years | 11 | 11
- 11 | infinity | 11 | 11
-(12 rows)
-
-select id, f_interval, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_interval range between
- '-infinity'::interval following and
- 'infinity'::interval following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_timestamptz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamptz range between
- '1 year'::interval preceding and '1 year'::interval following);
- id | f_timestamptz | first_value | last_value
-----+------------------------------+-------------+------------
- 0 | -infinity | 0 | 0
- 1 | Thu Oct 19 02:23:54 2000 PDT | 1 | 3
- 2 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4
- 3 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4
- 4 | Sat Oct 19 02:23:54 2002 PDT | 2 | 5
- 5 | Sun Oct 19 02:23:54 2003 PDT | 4 | 6
- 6 | Tue Oct 19 02:23:54 2004 PDT | 5 | 7
- 7 | Wed Oct 19 02:23:54 2005 PDT | 6 | 8
- 8 | Thu Oct 19 02:23:54 2006 PDT | 7 | 9
- 9 | Fri Oct 19 02:23:54 2007 PDT | 8 | 10
- 10 | Sun Oct 19 02:23:54 2008 PDT | 9 | 10
- 11 | infinity | 11 | 11
-(12 rows)
-
-select id, f_timestamptz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamptz desc range between
- '1 year' preceding and '1 year' following);
- id | f_timestamptz | first_value | last_value
-----+------------------------------+-------------+------------
- 11 | infinity | 11 | 11
- 10 | Sun Oct 19 02:23:54 2008 PDT | 10 | 9
- 9 | Fri Oct 19 02:23:54 2007 PDT | 10 | 8
- 8 | Thu Oct 19 02:23:54 2006 PDT | 9 | 7
- 7 | Wed Oct 19 02:23:54 2005 PDT | 8 | 6
- 6 | Tue Oct 19 02:23:54 2004 PDT | 7 | 5
- 5 | Sun Oct 19 02:23:54 2003 PDT | 6 | 4
- 4 | Sat Oct 19 02:23:54 2002 PDT | 5 | 2
- 3 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1
- 2 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1
- 1 | Thu Oct 19 02:23:54 2000 PDT | 3 | 1
- 0 | -infinity | 0 | 0
-(12 rows)
-
-select id, f_timestamptz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamptz desc range between
- '1 year' preceding and '-1 year' following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_timestamptz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamptz range between
- 'infinity'::interval preceding and 'infinity'::interval following);
- id | f_timestamptz | first_value | last_value
-----+------------------------------+-------------+------------
- 0 | -infinity | 0 | 11
- 1 | Thu Oct 19 02:23:54 2000 PDT | 0 | 11
- 2 | Fri Oct 19 02:23:54 2001 PDT | 0 | 11
- 3 | Fri Oct 19 02:23:54 2001 PDT | 0 | 11
- 4 | Sat Oct 19 02:23:54 2002 PDT | 0 | 11
- 5 | Sun Oct 19 02:23:54 2003 PDT | 0 | 11
- 6 | Tue Oct 19 02:23:54 2004 PDT | 0 | 11
- 7 | Wed Oct 19 02:23:54 2005 PDT | 0 | 11
- 8 | Thu Oct 19 02:23:54 2006 PDT | 0 | 11
- 9 | Fri Oct 19 02:23:54 2007 PDT | 0 | 11
- 10 | Sun Oct 19 02:23:54 2008 PDT | 0 | 11
- 11 | infinity | 0 | 11
-(12 rows)
-
-select id, f_timestamptz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamptz range between
- 'infinity'::interval preceding and 'infinity'::interval preceding);
- id | f_timestamptz | first_value | last_value
-----+------------------------------+-------------+------------
- 0 | -infinity | 0 | 0
- 1 | Thu Oct 19 02:23:54 2000 PDT | 0 | 0
- 2 | Fri Oct 19 02:23:54 2001 PDT | 0 | 0
- 3 | Fri Oct 19 02:23:54 2001 PDT | 0 | 0
- 4 | Sat Oct 19 02:23:54 2002 PDT | 0 | 0
- 5 | Sun Oct 19 02:23:54 2003 PDT | 0 | 0
- 6 | Tue Oct 19 02:23:54 2004 PDT | 0 | 0
- 7 | Wed Oct 19 02:23:54 2005 PDT | 0 | 0
- 8 | Thu Oct 19 02:23:54 2006 PDT | 0 | 0
- 9 | Fri Oct 19 02:23:54 2007 PDT | 0 | 0
- 10 | Sun Oct 19 02:23:54 2008 PDT | 0 | 0
- 11 | infinity | 0 | 11
-(12 rows)
-
-select id, f_timestamptz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamptz range between
- 'infinity'::interval following and 'infinity'::interval following);
- id | f_timestamptz | first_value | last_value
-----+------------------------------+-------------+------------
- 0 | -infinity | 0 | 11
- 1 | Thu Oct 19 02:23:54 2000 PDT | 11 | 11
- 2 | Fri Oct 19 02:23:54 2001 PDT | 11 | 11
- 3 | Fri Oct 19 02:23:54 2001 PDT | 11 | 11
- 4 | Sat Oct 19 02:23:54 2002 PDT | 11 | 11
- 5 | Sun Oct 19 02:23:54 2003 PDT | 11 | 11
- 6 | Tue Oct 19 02:23:54 2004 PDT | 11 | 11
- 7 | Wed Oct 19 02:23:54 2005 PDT | 11 | 11
- 8 | Thu Oct 19 02:23:54 2006 PDT | 11 | 11
- 9 | Fri Oct 19 02:23:54 2007 PDT | 11 | 11
- 10 | Sun Oct 19 02:23:54 2008 PDT | 11 | 11
- 11 | infinity | 11 | 11
-(12 rows)
-
-select id, f_timestamptz, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamptz range between
- '-infinity'::interval following and
- 'infinity'::interval following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_timestamp, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamp range between
- '1 year'::interval preceding and '1 year'::interval following);
- id | f_timestamp | first_value | last_value
-----+--------------------------+-------------+------------
- 0 | -infinity | 0 | 0
- 1 | Thu Oct 19 10:23:54 2000 | 1 | 3
- 2 | Fri Oct 19 10:23:54 2001 | 1 | 4
- 3 | Fri Oct 19 10:23:54 2001 | 1 | 4
- 4 | Sat Oct 19 10:23:54 2002 | 2 | 5
- 5 | Sun Oct 19 10:23:54 2003 | 4 | 6
- 6 | Tue Oct 19 10:23:54 2004 | 5 | 7
- 7 | Wed Oct 19 10:23:54 2005 | 6 | 8
- 8 | Thu Oct 19 10:23:54 2006 | 7 | 9
- 9 | Fri Oct 19 10:23:54 2007 | 8 | 10
- 10 | Sun Oct 19 10:23:54 2008 | 9 | 10
- 11 | infinity | 11 | 11
-(12 rows)
-
-select id, f_timestamp, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamp desc range between
- '1 year' preceding and '1 year' following);
- id | f_timestamp | first_value | last_value
-----+--------------------------+-------------+------------
- 11 | infinity | 11 | 11
- 10 | Sun Oct 19 10:23:54 2008 | 10 | 9
- 9 | Fri Oct 19 10:23:54 2007 | 10 | 8
- 8 | Thu Oct 19 10:23:54 2006 | 9 | 7
- 7 | Wed Oct 19 10:23:54 2005 | 8 | 6
- 6 | Tue Oct 19 10:23:54 2004 | 7 | 5
- 5 | Sun Oct 19 10:23:54 2003 | 6 | 4
- 4 | Sat Oct 19 10:23:54 2002 | 5 | 2
- 3 | Fri Oct 19 10:23:54 2001 | 4 | 1
- 2 | Fri Oct 19 10:23:54 2001 | 4 | 1
- 1 | Thu Oct 19 10:23:54 2000 | 3 | 1
- 0 | -infinity | 0 | 0
-(12 rows)
-
-select id, f_timestamp, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamp desc range between
- '-1 year' preceding and '1 year' following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
-select id, f_timestamp, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamp range between
- 'infinity'::interval preceding and 'infinity'::interval following);
- id | f_timestamp | first_value | last_value
-----+--------------------------+-------------+------------
- 0 | -infinity | 0 | 11
- 1 | Thu Oct 19 10:23:54 2000 | 0 | 11
- 2 | Fri Oct 19 10:23:54 2001 | 0 | 11
- 3 | Fri Oct 19 10:23:54 2001 | 0 | 11
- 4 | Sat Oct 19 10:23:54 2002 | 0 | 11
- 5 | Sun Oct 19 10:23:54 2003 | 0 | 11
- 6 | Tue Oct 19 10:23:54 2004 | 0 | 11
- 7 | Wed Oct 19 10:23:54 2005 | 0 | 11
- 8 | Thu Oct 19 10:23:54 2006 | 0 | 11
- 9 | Fri Oct 19 10:23:54 2007 | 0 | 11
- 10 | Sun Oct 19 10:23:54 2008 | 0 | 11
- 11 | infinity | 0 | 11
-(12 rows)
-
-select id, f_timestamp, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamp range between
- 'infinity'::interval preceding and 'infinity'::interval preceding);
- id | f_timestamp | first_value | last_value
-----+--------------------------+-------------+------------
- 0 | -infinity | 0 | 0
- 1 | Thu Oct 19 10:23:54 2000 | 0 | 0
- 2 | Fri Oct 19 10:23:54 2001 | 0 | 0
- 3 | Fri Oct 19 10:23:54 2001 | 0 | 0
- 4 | Sat Oct 19 10:23:54 2002 | 0 | 0
- 5 | Sun Oct 19 10:23:54 2003 | 0 | 0
- 6 | Tue Oct 19 10:23:54 2004 | 0 | 0
- 7 | Wed Oct 19 10:23:54 2005 | 0 | 0
- 8 | Thu Oct 19 10:23:54 2006 | 0 | 0
- 9 | Fri Oct 19 10:23:54 2007 | 0 | 0
- 10 | Sun Oct 19 10:23:54 2008 | 0 | 0
- 11 | infinity | 0 | 11
-(12 rows)
-
-select id, f_timestamp, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamp range between
- 'infinity'::interval following and 'infinity'::interval following);
- id | f_timestamp | first_value | last_value
-----+--------------------------+-------------+------------
- 0 | -infinity | 0 | 11
- 1 | Thu Oct 19 10:23:54 2000 | 11 | 11
- 2 | Fri Oct 19 10:23:54 2001 | 11 | 11
- 3 | Fri Oct 19 10:23:54 2001 | 11 | 11
- 4 | Sat Oct 19 10:23:54 2002 | 11 | 11
- 5 | Sun Oct 19 10:23:54 2003 | 11 | 11
- 6 | Tue Oct 19 10:23:54 2004 | 11 | 11
- 7 | Wed Oct 19 10:23:54 2005 | 11 | 11
- 8 | Thu Oct 19 10:23:54 2006 | 11 | 11
- 9 | Fri Oct 19 10:23:54 2007 | 11 | 11
- 10 | Sun Oct 19 10:23:54 2008 | 11 | 11
- 11 | infinity | 11 | 11
-(12 rows)
-
-select id, f_timestamp, first_value(id) over w, last_value(id) over w
-from datetimes
-window w as (order by f_timestamp range between
- '-infinity'::interval following and
- 'infinity'::interval following); -- error, negative offset disallowed
-ERROR: invalid preceding or following size in window function
--- RANGE offset PRECEDING/FOLLOWING error cases
-select sum(salary) over (order by enroll_date, salary range between '1 year'::interval preceding and '2 years'::interval following
- exclude ties), salary, enroll_date from empsalary;
-ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column
-LINE 1: select sum(salary) over (order by enroll_date, salary range ...
- ^
-select sum(salary) over (range between '1 year'::interval preceding and '2 years'::interval following
- exclude ties), salary, enroll_date from empsalary;
-ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column
-LINE 1: select sum(salary) over (range between '1 year'::interval pr...
- ^
-select sum(salary) over (order by depname range between '1 year'::interval preceding and '2 years'::interval following
- exclude ties), salary, enroll_date from empsalary;
-ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type text
-LINE 1: ... sum(salary) over (order by depname range between '1 year'::...
- ^
-select max(enroll_date) over (order by enroll_date range between 1 preceding and 2 following
- exclude ties), salary, enroll_date from empsalary;
-ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type date and offset type integer
-LINE 1: ...ll_date) over (order by enroll_date range between 1 precedin...
- ^
-HINT: Cast the offset value to an appropriate type.
-select max(enroll_date) over (order by salary range between -1 preceding and 2 following
- exclude ties), salary, enroll_date from empsalary;
-ERROR: invalid preceding or following size in window function
-select max(enroll_date) over (order by salary range between 1 preceding and -2 following
- exclude ties), salary, enroll_date from empsalary;
-ERROR: invalid preceding or following size in window function
-select max(enroll_date) over (order by salary range between '1 year'::interval preceding and '2 years'::interval following
- exclude ties), salary, enroll_date from empsalary;
-ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type integer and offset type interval
-LINE 1: ...(enroll_date) over (order by salary range between '1 year'::...
- ^
-HINT: Cast the offset value to an appropriate type.
-select max(enroll_date) over (order by enroll_date range between '1 year'::interval preceding and '-2 years'::interval following
- exclude ties), salary, enroll_date from empsalary;
-ERROR: invalid preceding or following size in window function
--- GROUPS tests
-SELECT sum(unique1) over (order by four groups between unbounded preceding and current row),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 12 | 0 | 0
- 12 | 8 | 0
- 12 | 4 | 0
- 27 | 5 | 1
- 27 | 9 | 1
- 27 | 1 | 1
- 35 | 6 | 2
- 35 | 2 | 2
- 45 | 3 | 3
- 45 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between unbounded preceding and unbounded following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 45 | 0 | 0
- 45 | 8 | 0
- 45 | 4 | 0
- 45 | 5 | 1
- 45 | 9 | 1
- 45 | 1 | 1
- 45 | 6 | 2
- 45 | 2 | 2
- 45 | 3 | 3
- 45 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between current row and unbounded following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 45 | 0 | 0
- 45 | 8 | 0
- 45 | 4 | 0
- 33 | 5 | 1
- 33 | 9 | 1
- 33 | 1 | 1
- 18 | 6 | 2
- 18 | 2 | 2
- 10 | 3 | 3
- 10 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between 1 preceding and unbounded following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 45 | 0 | 0
- 45 | 8 | 0
- 45 | 4 | 0
- 45 | 5 | 1
- 45 | 9 | 1
- 45 | 1 | 1
- 33 | 6 | 2
- 33 | 2 | 2
- 18 | 3 | 3
- 18 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between 1 following and unbounded following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 33 | 0 | 0
- 33 | 8 | 0
- 33 | 4 | 0
- 18 | 5 | 1
- 18 | 9 | 1
- 18 | 1 | 1
- 10 | 6 | 2
- 10 | 2 | 2
- | 3 | 3
- | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between unbounded preceding and 2 following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 35 | 0 | 0
- 35 | 8 | 0
- 35 | 4 | 0
- 45 | 5 | 1
- 45 | 9 | 1
- 45 | 1 | 1
- 45 | 6 | 2
- 45 | 2 | 2
- 45 | 3 | 3
- 45 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between 2 preceding and 1 preceding),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- | 0 | 0
- | 8 | 0
- | 4 | 0
- 12 | 5 | 1
- 12 | 9 | 1
- 12 | 1 | 1
- 27 | 6 | 2
- 27 | 2 | 2
- 23 | 3 | 3
- 23 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 27 | 0 | 0
- 27 | 8 | 0
- 27 | 4 | 0
- 35 | 5 | 1
- 35 | 9 | 1
- 35 | 1 | 1
- 45 | 6 | 2
- 45 | 2 | 2
- 33 | 3 | 3
- 33 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between 0 preceding and 0 following),
- unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 12 | 0 | 0
- 12 | 8 | 0
- 12 | 4 | 0
- 15 | 5 | 1
- 15 | 9 | 1
- 15 | 1 | 1
- 8 | 6 | 2
- 8 | 2 | 2
- 10 | 3 | 3
- 10 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following
- exclude current row), unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 27 | 0 | 0
- 19 | 8 | 0
- 23 | 4 | 0
- 30 | 5 | 1
- 26 | 9 | 1
- 34 | 1 | 1
- 39 | 6 | 2
- 43 | 2 | 2
- 30 | 3 | 3
- 26 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following
- exclude group), unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 15 | 0 | 0
- 15 | 8 | 0
- 15 | 4 | 0
- 20 | 5 | 1
- 20 | 9 | 1
- 20 | 1 | 1
- 37 | 6 | 2
- 37 | 2 | 2
- 23 | 3 | 3
- 23 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following
- exclude ties), unique1, four
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four
------+---------+------
- 15 | 0 | 0
- 23 | 8 | 0
- 19 | 4 | 0
- 25 | 5 | 1
- 29 | 9 | 1
- 21 | 1 | 1
- 43 | 6 | 2
- 39 | 2 | 2
- 26 | 3 | 3
- 30 | 7 | 3
-(10 rows)
-
-SELECT sum(unique1) over (partition by ten
- order by four groups between 0 preceding and 0 following),unique1, four, ten
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four | ten
------+---------+------+-----
- 0 | 0 | 0 | 0
- 1 | 1 | 1 | 1
- 2 | 2 | 2 | 2
- 3 | 3 | 3 | 3
- 4 | 4 | 0 | 4
- 5 | 5 | 1 | 5
- 6 | 6 | 2 | 6
- 7 | 7 | 3 | 7
- 8 | 8 | 0 | 8
- 9 | 9 | 1 | 9
-(10 rows)
-
-SELECT sum(unique1) over (partition by ten
- order by four groups between 0 preceding and 0 following exclude current row), unique1, four, ten
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four | ten
------+---------+------+-----
- | 0 | 0 | 0
- | 1 | 1 | 1
- | 2 | 2 | 2
- | 3 | 3 | 3
- | 4 | 0 | 4
- | 5 | 1 | 5
- | 6 | 2 | 6
- | 7 | 3 | 7
- | 8 | 0 | 8
- | 9 | 1 | 9
-(10 rows)
-
-SELECT sum(unique1) over (partition by ten
- order by four groups between 0 preceding and 0 following exclude group), unique1, four, ten
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four | ten
------+---------+------+-----
- | 0 | 0 | 0
- | 1 | 1 | 1
- | 2 | 2 | 2
- | 3 | 3 | 3
- | 4 | 0 | 4
- | 5 | 1 | 5
- | 6 | 2 | 6
- | 7 | 3 | 7
- | 8 | 0 | 8
- | 9 | 1 | 9
-(10 rows)
-
-SELECT sum(unique1) over (partition by ten
- order by four groups between 0 preceding and 0 following exclude ties), unique1, four, ten
-FROM tenk1 WHERE unique1 < 10;
- sum | unique1 | four | ten
------+---------+------+-----
- 0 | 0 | 0 | 0
- 1 | 1 | 1 | 1
- 2 | 2 | 2 | 2
- 3 | 3 | 3 | 3
- 4 | 4 | 0 | 4
- 5 | 5 | 1 | 5
- 6 | 6 | 2 | 6
- 7 | 7 | 3 | 7
- 8 | 8 | 0 | 8
- 9 | 9 | 1 | 9
-(10 rows)
-
-select first_value(salary) over(order by enroll_date groups between 1 preceding and 1 following),
- lead(salary) over(order by enroll_date groups between 1 preceding and 1 following),
- nth_value(salary, 1) over(order by enroll_date groups between 1 preceding and 1 following),
- salary, enroll_date from empsalary;
- first_value | lead | nth_value | salary | enroll_date
--------------+------+-----------+--------+-------------
- 5000 | 6000 | 5000 | 5000 | 10-01-2006
- 5000 | 3900 | 5000 | 6000 | 10-01-2006
- 5000 | 4800 | 5000 | 3900 | 12-23-2006
- 3900 | 5200 | 3900 | 4800 | 08-01-2007
- 3900 | 4800 | 3900 | 5200 | 08-01-2007
- 4800 | 5200 | 4800 | 4800 | 08-08-2007
- 4800 | 3500 | 4800 | 5200 | 08-15-2007
- 5200 | 4500 | 5200 | 3500 | 12-10-2007
- 3500 | 4200 | 3500 | 4500 | 01-01-2008
- 3500 | | 3500 | 4200 | 01-01-2008
-(10 rows)
-
-select last_value(salary) over(order by enroll_date groups between 1 preceding and 1 following),
- lag(salary) over(order by enroll_date groups between 1 preceding and 1 following),
- salary, enroll_date from empsalary;
- last_value | lag | salary | enroll_date
-------------+------+--------+-------------
- 3900 | | 5000 | 10-01-2006
- 3900 | 5000 | 6000 | 10-01-2006
- 5200 | 6000 | 3900 | 12-23-2006
- 4800 | 3900 | 4800 | 08-01-2007
- 4800 | 4800 | 5200 | 08-01-2007
- 5200 | 5200 | 4800 | 08-08-2007
- 3500 | 4800 | 5200 | 08-15-2007
- 4200 | 5200 | 3500 | 12-10-2007
- 4200 | 3500 | 4500 | 01-01-2008
- 4200 | 4500 | 4200 | 01-01-2008
-(10 rows)
-
-select first_value(salary) over(order by enroll_date groups between 1 following and 3 following
- exclude current row),
- lead(salary) over(order by enroll_date groups between 1 following and 3 following exclude ties),
- nth_value(salary, 1) over(order by enroll_date groups between 1 following and 3 following
- exclude ties),
- salary, enroll_date from empsalary;
- first_value | lead | nth_value | salary | enroll_date
--------------+------+-----------+--------+-------------
- 3900 | 6000 | 3900 | 5000 | 10-01-2006
- 3900 | 3900 | 3900 | 6000 | 10-01-2006
- 4800 | 4800 | 4800 | 3900 | 12-23-2006
- 4800 | 5200 | 4800 | 4800 | 08-01-2007
- 4800 | 4800 | 4800 | 5200 | 08-01-2007
- 5200 | 5200 | 5200 | 4800 | 08-08-2007
- 3500 | 3500 | 3500 | 5200 | 08-15-2007
- 4500 | 4500 | 4500 | 3500 | 12-10-2007
- | 4200 | | 4500 | 01-01-2008
- | | | 4200 | 01-01-2008
-(10 rows)
-
-select last_value(salary) over(order by enroll_date groups between 1 following and 3 following
- exclude group),
- lag(salary) over(order by enroll_date groups between 1 following and 3 following exclude group),
- salary, enroll_date from empsalary;
- last_value | lag | salary | enroll_date
-------------+------+--------+-------------
- 4800 | | 5000 | 10-01-2006
- 4800 | 5000 | 6000 | 10-01-2006
- 5200 | 6000 | 3900 | 12-23-2006
- 3500 | 3900 | 4800 | 08-01-2007
- 3500 | 4800 | 5200 | 08-01-2007
- 4200 | 5200 | 4800 | 08-08-2007
- 4200 | 4800 | 5200 | 08-15-2007
- 4200 | 5200 | 3500 | 12-10-2007
- | 3500 | 4500 | 01-01-2008
- | 4500 | 4200 | 01-01-2008
-(10 rows)
-
--- Show differences in offset interpretation between ROWS, RANGE, and GROUPS
-WITH cte (x) AS (
- SELECT * FROM generate_series(1, 35, 2)
-)
-SELECT x, (sum(x) over w)
-FROM cte
-WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following);
- x | sum
-----+-----
- 1 | 4
- 3 | 9
- 5 | 15
- 7 | 21
- 9 | 27
- 11 | 33
- 13 | 39
- 15 | 45
- 17 | 51
- 19 | 57
- 21 | 63
- 23 | 69
- 25 | 75
- 27 | 81
- 29 | 87
- 31 | 93
- 33 | 99
- 35 | 68
-(18 rows)
-
-WITH cte (x) AS (
- SELECT * FROM generate_series(1, 35, 2)
-)
-SELECT x, (sum(x) over w)
-FROM cte
-WINDOW w AS (ORDER BY x range between 1 preceding and 1 following);
- x | sum
-----+-----
- 1 | 1
- 3 | 3
- 5 | 5
- 7 | 7
- 9 | 9
- 11 | 11
- 13 | 13
- 15 | 15
- 17 | 17
- 19 | 19
- 21 | 21
- 23 | 23
- 25 | 25
- 27 | 27
- 29 | 29
- 31 | 31
- 33 | 33
- 35 | 35
-(18 rows)
-
-WITH cte (x) AS (
- SELECT * FROM generate_series(1, 35, 2)
-)
-SELECT x, (sum(x) over w)
-FROM cte
-WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following);
- x | sum
-----+-----
- 1 | 4
- 3 | 9
- 5 | 15
- 7 | 21
- 9 | 27
- 11 | 33
- 13 | 39
- 15 | 45
- 17 | 51
- 19 | 57
- 21 | 63
- 23 | 69
- 25 | 75
- 27 | 81
- 29 | 87
- 31 | 93
- 33 | 99
- 35 | 68
-(18 rows)
-
-WITH cte (x) AS (
- select 1 union all select 1 union all select 1 union all
- SELECT * FROM generate_series(5, 49, 2)
-)
-SELECT x, (sum(x) over w)
-FROM cte
-WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following);
- x | sum
-----+-----
- 1 | 2
- 1 | 3
- 1 | 7
- 5 | 13
- 7 | 21
- 9 | 27
- 11 | 33
- 13 | 39
- 15 | 45
- 17 | 51
- 19 | 57
- 21 | 63
- 23 | 69
- 25 | 75
- 27 | 81
- 29 | 87
- 31 | 93
- 33 | 99
- 35 | 105
- 37 | 111
- 39 | 117
- 41 | 123
- 43 | 129
- 45 | 135
- 47 | 141
- 49 | 96
-(26 rows)
-
-WITH cte (x) AS (
- select 1 union all select 1 union all select 1 union all
- SELECT * FROM generate_series(5, 49, 2)
-)
-SELECT x, (sum(x) over w)
-FROM cte
-WINDOW w AS (ORDER BY x range between 1 preceding and 1 following);
- x | sum
-----+-----
- 1 | 3
- 1 | 3
- 1 | 3
- 5 | 5
- 7 | 7
- 9 | 9
- 11 | 11
- 13 | 13
- 15 | 15
- 17 | 17
- 19 | 19
- 21 | 21
- 23 | 23
- 25 | 25
- 27 | 27
- 29 | 29
- 31 | 31
- 33 | 33
- 35 | 35
- 37 | 37
- 39 | 39
- 41 | 41
- 43 | 43
- 45 | 45
- 47 | 47
- 49 | 49
-(26 rows)
-
-WITH cte (x) AS (
- select 1 union all select 1 union all select 1 union all
- SELECT * FROM generate_series(5, 49, 2)
-)
-SELECT x, (sum(x) over w)
-FROM cte
-WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following);
- x | sum
-----+-----
- 1 | 8
- 1 | 8
- 1 | 8
- 5 | 15
- 7 | 21
- 9 | 27
- 11 | 33
- 13 | 39
- 15 | 45
- 17 | 51
- 19 | 57
- 21 | 63
- 23 | 69
- 25 | 75
- 27 | 81
- 29 | 87
- 31 | 93
- 33 | 99
- 35 | 105
- 37 | 111
- 39 | 117
- 41 | 123
- 43 | 129
- 45 | 135
- 47 | 141
- 49 | 96
-(26 rows)
-
--- with UNION
-SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0;
- count
--------
-(0 rows)
-
--- check some degenerate cases
-create temp table t1 (f1 int, f2 int8);
-insert into t1 values (1,1),(1,2),(2,2);
-select f1, sum(f1) over (partition by f1
- range between 1 preceding and 1 following)
-from t1 where f1 = f2; -- error, must have order by
-ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column
-LINE 1: select f1, sum(f1) over (partition by f1
- ^
-explain (costs off)
-select f1, sum(f1) over (partition by f1 order by f2
- range between 1 preceding and 1 following)
-from t1 where f1 = f2;
- QUERY PLAN
----------------------------------
- WindowAgg
- -> Sort
- Sort Key: f1
- -> Seq Scan on t1
- Filter: (f1 = f2)
-(5 rows)
-
-select f1, sum(f1) over (partition by f1 order by f2
- range between 1 preceding and 1 following)
-from t1 where f1 = f2;
- f1 | sum
-----+-----
- 1 | 1
- 2 | 2
-(2 rows)
-
-select f1, sum(f1) over (partition by f1, f1 order by f2
- range between 2 preceding and 1 preceding)
-from t1 where f1 = f2;
- f1 | sum
-----+-----
- 1 |
- 2 |
-(2 rows)
-
-select f1, sum(f1) over (partition by f1, f2 order by f2
- range between 1 following and 2 following)
-from t1 where f1 = f2;
- f1 | sum
-----+-----
- 1 |
- 2 |
-(2 rows)
-
-select f1, sum(f1) over (partition by f1
- groups between 1 preceding and 1 following)
-from t1 where f1 = f2; -- error, must have order by
-ERROR: GROUPS mode requires an ORDER BY clause
-LINE 1: select f1, sum(f1) over (partition by f1
- ^
-explain (costs off)
-select f1, sum(f1) over (partition by f1 order by f2
- groups between 1 preceding and 1 following)
-from t1 where f1 = f2;
- QUERY PLAN
----------------------------------
- WindowAgg
- -> Sort
- Sort Key: f1
- -> Seq Scan on t1
- Filter: (f1 = f2)
-(5 rows)
-
-select f1, sum(f1) over (partition by f1 order by f2
- groups between 1 preceding and 1 following)
-from t1 where f1 = f2;
- f1 | sum
-----+-----
- 1 | 1
- 2 | 2
-(2 rows)
-
-select f1, sum(f1) over (partition by f1, f1 order by f2
- groups between 2 preceding and 1 preceding)
-from t1 where f1 = f2;
- f1 | sum
-----+-----
- 1 |
- 2 |
-(2 rows)
-
-select f1, sum(f1) over (partition by f1, f2 order by f2
- groups between 1 following and 2 following)
-from t1 where f1 = f2;
- f1 | sum
-----+-----
- 1 |
- 2 |
-(2 rows)
-
--- ordering by a non-integer constant is allowed
-SELECT rank() OVER (ORDER BY length('abc'));
- rank
-------
- 1
-(1 row)
-
--- can't order by another window function
-SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random()));
-ERROR: window functions are not allowed in window definitions
-LINE 1: SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())...
- ^
--- some other errors
-SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10;
-ERROR: window functions are not allowed in WHERE
-LINE 1: SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY sa...
- ^
-SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10;
-ERROR: window functions are not allowed in JOIN conditions
-LINE 1: SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVE...
- ^
-SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1;
-ERROR: window functions are not allowed in GROUP BY
-LINE 1: SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GRO...
- ^
-SELECT * FROM rank() OVER (ORDER BY random());
-ERROR: syntax error at or near "ORDER"
-LINE 1: SELECT * FROM rank() OVER (ORDER BY random());
- ^
-DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10;
-ERROR: window functions are not allowed in WHERE
-LINE 1: DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())...
- ^
-DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random());
-ERROR: window functions are not allowed in RETURNING
-LINE 1: DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random...
- ^
-SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1);
-ERROR: window "w" is already defined
-LINE 1: ...w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY ...
- ^
-SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1;
-ERROR: syntax error at or near "ORDER"
-LINE 1: SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM te...
- ^
-SELECT count() OVER () FROM tenk1;
-ERROR: count(*) must be used to call a parameterless aggregate function
-LINE 1: SELECT count() OVER () FROM tenk1;
- ^
-SELECT generate_series(1, 100) OVER () FROM empsalary;
-ERROR: OVER specified, but generate_series is not a window function nor an aggregate function
-LINE 1: SELECT generate_series(1, 100) OVER () FROM empsalary;
- ^
-SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1;
-ERROR: argument of ntile must be greater than zero
-SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1;
-ERROR: argument of nth_value must be greater than zero
--- filter
-SELECT sum(salary), row_number() OVER (ORDER BY depname), sum(
- sum(salary) FILTER (WHERE enroll_date > '2007-01-01')
-) FILTER (WHERE depname <> 'sales') OVER (ORDER BY depname DESC) AS "filtered_sum",
- depname
-FROM empsalary GROUP BY depname;
- sum | row_number | filtered_sum | depname
--------+------------+--------------+-----------
- 25100 | 1 | 22600 | develop
- 7400 | 2 | 3500 | personnel
- 14600 | 3 | | sales
-(3 rows)
-
---
--- Test SupportRequestOptimizeWindowClause's ability to de-duplicate
--- WindowClauses
---
--- Ensure WindowClause frameOptions are changed so that only a single
--- WindowAgg exists in the plan.
-EXPLAIN (COSTS OFF)
-SELECT
- empno,
- depname,
- row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn,
- rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN
- UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk,
- dense_rank() OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN
- CURRENT ROW AND CURRENT ROW) drnk,
- ntile(10) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN
- CURRENT ROW AND UNBOUNDED FOLLOWING) nt,
- percent_rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN
- CURRENT ROW AND UNBOUNDED FOLLOWING) pr,
- cume_dist() OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN
- CURRENT ROW AND UNBOUNDED FOLLOWING) cd
-FROM empsalary;
- QUERY PLAN
-----------------------------------------
- WindowAgg
- -> Sort
- Sort Key: depname, enroll_date
- -> Seq Scan on empsalary
-(4 rows)
-
--- Ensure WindowFuncs which cannot support their WindowClause's frameOptions
--- being changed are untouched
-EXPLAIN (COSTS OFF, VERBOSE)
-SELECT
- empno,
- depname,
- row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn,
- rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN
- UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk,
- count(*) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN
- CURRENT ROW AND CURRENT ROW) cnt
-FROM empsalary;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------
- WindowAgg
- Output: empno, depname, (row_number() OVER (?)), (rank() OVER (?)), count(*) OVER (?), enroll_date
- -> WindowAgg
- Output: depname, enroll_date, empno, row_number() OVER (?), rank() OVER (?)
- -> Sort
- Output: depname, enroll_date, empno
- Sort Key: empsalary.depname, empsalary.enroll_date
- -> Seq Scan on pg_temp.empsalary
- Output: depname, enroll_date, empno
-(9 rows)
-
--- Ensure the above query gives us the expected results
-SELECT
- empno,
- depname,
- row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn,
- rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN
- UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk,
- count(*) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN
- CURRENT ROW AND CURRENT ROW) cnt
-FROM empsalary;
- empno | depname | rn | rnk | cnt
--------+-----------+----+-----+-----
- 8 | develop | 1 | 1 | 1
- 10 | develop | 2 | 2 | 1
- 11 | develop | 3 | 3 | 1
- 9 | develop | 4 | 4 | 2
- 7 | develop | 5 | 4 | 2
- 2 | personnel | 1 | 1 | 1
- 5 | personnel | 2 | 2 | 1
- 1 | sales | 1 | 1 | 1
- 3 | sales | 2 | 2 | 1
- 4 | sales | 3 | 3 | 1
-(10 rows)
-
--- Test pushdown of quals into a subquery containing window functions
--- pushdown is safe because all PARTITION BY clauses include depname:
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT depname,
- sum(salary) OVER (PARTITION BY depname) depsalary,
- min(salary) OVER (PARTITION BY depname || 'A', depname) depminsalary
- FROM empsalary) emp
-WHERE depname = 'sales';
- QUERY PLAN
---------------------------------------------------------------------------
- Subquery Scan on emp
- -> WindowAgg
- -> WindowAgg
- -> Sort
- Sort Key: (((empsalary.depname)::text || 'A'::text))
- -> Seq Scan on empsalary
- Filter: ((depname)::text = 'sales'::text)
-(7 rows)
-
--- pushdown is unsafe because there's a PARTITION BY clause without depname:
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT depname,
- sum(salary) OVER (PARTITION BY enroll_date) enroll_salary,
- min(salary) OVER (PARTITION BY depname) depminsalary
- FROM empsalary) emp
-WHERE depname = 'sales';
- QUERY PLAN
--------------------------------------------------------
- Subquery Scan on emp
- Filter: ((emp.depname)::text = 'sales'::text)
- -> WindowAgg
- -> Sort
- Sort Key: empsalary.enroll_date
- -> WindowAgg
- -> Sort
- Sort Key: empsalary.depname
- -> Seq Scan on empsalary
-(9 rows)
-
--- Test window function run conditions are properly pushed down into the
--- WindowAgg
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- row_number() OVER (ORDER BY empno) rn
- FROM empsalary) emp
-WHERE rn < 3;
- QUERY PLAN
-----------------------------------------------
- WindowAgg
- Run Condition: (row_number() OVER (?) < 3)
- -> Sort
- Sort Key: empsalary.empno
- -> Seq Scan on empsalary
-(5 rows)
-
--- The following 3 statements should result the same result.
-SELECT * FROM
- (SELECT empno,
- row_number() OVER (ORDER BY empno) rn
- FROM empsalary) emp
-WHERE rn < 3;
- empno | rn
--------+----
- 1 | 1
- 2 | 2
-(2 rows)
-
-SELECT * FROM
- (SELECT empno,
- row_number() OVER (ORDER BY empno) rn
- FROM empsalary) emp
-WHERE 3 > rn;
- empno | rn
--------+----
- 1 | 1
- 2 | 2
-(2 rows)
-
-SELECT * FROM
- (SELECT empno,
- row_number() OVER (ORDER BY empno) rn
- FROM empsalary) emp
-WHERE 2 >= rn;
- empno | rn
--------+----
- 1 | 1
- 2 | 2
-(2 rows)
-
--- Ensure r <= 3 is pushed down into the run condition of the window agg
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- rank() OVER (ORDER BY salary DESC) r
- FROM empsalary) emp
-WHERE r <= 3;
- QUERY PLAN
------------------------------------------
- WindowAgg
- Run Condition: (rank() OVER (?) <= 3)
- -> Sort
- Sort Key: empsalary.salary DESC
- -> Seq Scan on empsalary
-(5 rows)
-
-SELECT * FROM
- (SELECT empno,
- salary,
- rank() OVER (ORDER BY salary DESC) r
- FROM empsalary) emp
-WHERE r <= 3;
- empno | salary | r
--------+--------+---
- 8 | 6000 | 1
- 10 | 5200 | 2
- 11 | 5200 | 2
-(3 rows)
-
--- Ensure dr = 1 is converted to dr <= 1 to get all rows leading up to dr = 1
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- dense_rank() OVER (ORDER BY salary DESC) dr
- FROM empsalary) emp
-WHERE dr = 1;
- QUERY PLAN
------------------------------------------------------
- Subquery Scan on emp
- Filter: (emp.dr = 1)
- -> WindowAgg
- Run Condition: (dense_rank() OVER (?) <= 1)
- -> Sort
- Sort Key: empsalary.salary DESC
- -> Seq Scan on empsalary
-(7 rows)
-
-SELECT * FROM
- (SELECT empno,
- salary,
- dense_rank() OVER (ORDER BY salary DESC) dr
- FROM empsalary) emp
-WHERE dr = 1;
- empno | salary | dr
--------+--------+----
- 8 | 6000 | 1
-(1 row)
-
--- Check COUNT() and COUNT(*)
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- count(*) OVER (ORDER BY salary DESC) c
- FROM empsalary) emp
-WHERE c <= 3;
- QUERY PLAN
--------------------------------------------
- WindowAgg
- Run Condition: (count(*) OVER (?) <= 3)
- -> Sort
- Sort Key: empsalary.salary DESC
- -> Seq Scan on empsalary
-(5 rows)
-
-SELECT * FROM
- (SELECT empno,
- salary,
- count(*) OVER (ORDER BY salary DESC) c
- FROM empsalary) emp
-WHERE c <= 3;
- empno | salary | c
--------+--------+---
- 8 | 6000 | 1
- 10 | 5200 | 3
- 11 | 5200 | 3
-(3 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- count(empno) OVER (ORDER BY salary DESC) c
- FROM empsalary) emp
-WHERE c <= 3;
- QUERY PLAN
----------------------------------------------------------
- WindowAgg
- Run Condition: (count(empsalary.empno) OVER (?) <= 3)
- -> Sort
- Sort Key: empsalary.salary DESC
- -> Seq Scan on empsalary
-(5 rows)
-
-SELECT * FROM
- (SELECT empno,
- salary,
- count(empno) OVER (ORDER BY salary DESC) c
- FROM empsalary) emp
-WHERE c <= 3;
- empno | salary | c
--------+--------+---
- 8 | 6000 | 1
- 10 | 5200 | 3
- 11 | 5200 | 3
-(3 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- count(*) OVER (ORDER BY salary DESC ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) c
- FROM empsalary) emp
-WHERE c >= 3;
- QUERY PLAN
--------------------------------------------
- WindowAgg
- Run Condition: (count(*) OVER (?) >= 3)
- -> Sort
- Sort Key: empsalary.salary DESC
- -> Seq Scan on empsalary
-(5 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- count(*) OVER () c
- FROM empsalary) emp
-WHERE 11 <= c;
- QUERY PLAN
---------------------------------------------
- WindowAgg
- Run Condition: (11 <= count(*) OVER (?))
- -> Seq Scan on empsalary
-(3 rows)
-
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- count(*) OVER (ORDER BY salary DESC) c,
- dense_rank() OVER (ORDER BY salary DESC) dr
- FROM empsalary) emp
-WHERE dr = 1;
- QUERY PLAN
------------------------------------------------------
- Subquery Scan on emp
- Filter: (emp.dr = 1)
- -> WindowAgg
- Run Condition: (dense_rank() OVER (?) <= 1)
- -> Sort
- Sort Key: empsalary.salary DESC
- -> Seq Scan on empsalary
-(7 rows)
-
--- Ensure we get a run condition when there's a PARTITION BY clause
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- depname,
- row_number() OVER (PARTITION BY depname ORDER BY empno) rn
- FROM empsalary) emp
-WHERE rn < 3;
- QUERY PLAN
-------------------------------------------------------
- WindowAgg
- Run Condition: (row_number() OVER (?) < 3)
- -> Sort
- Sort Key: empsalary.depname, empsalary.empno
- -> Seq Scan on empsalary
-(5 rows)
-
--- and ensure we get the correct results from the above plan
-SELECT * FROM
- (SELECT empno,
- depname,
- row_number() OVER (PARTITION BY depname ORDER BY empno) rn
- FROM empsalary) emp
-WHERE rn < 3;
- empno | depname | rn
--------+-----------+----
- 7 | develop | 1
- 8 | develop | 2
- 2 | personnel | 1
- 5 | personnel | 2
- 1 | sales | 1
- 3 | sales | 2
-(6 rows)
-
--- ensure that "unused" subquery columns are not removed when the column only
--- exists in the run condition
-EXPLAIN (COSTS OFF)
-SELECT empno, depname FROM
- (SELECT empno,
- depname,
- row_number() OVER (PARTITION BY depname ORDER BY empno) rn
- FROM empsalary) emp
-WHERE rn < 3;
- QUERY PLAN
-------------------------------------------------------------
- Subquery Scan on emp
- -> WindowAgg
- Run Condition: (row_number() OVER (?) < 3)
- -> Sort
- Sort Key: empsalary.depname, empsalary.empno
- -> Seq Scan on empsalary
-(6 rows)
-
--- likewise with count(empno) instead of row_number()
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- depname,
- salary,
- count(empno) OVER (PARTITION BY depname ORDER BY salary DESC) c
- FROM empsalary) emp
-WHERE c <= 3;
- QUERY PLAN
-------------------------------------------------------------
- WindowAgg
- Run Condition: (count(empsalary.empno) OVER (?) <= 3)
- -> Sort
- Sort Key: empsalary.depname, empsalary.salary DESC
- -> Seq Scan on empsalary
-(5 rows)
-
--- and again, check the results are what we expect.
-SELECT * FROM
- (SELECT empno,
- depname,
- salary,
- count(empno) OVER (PARTITION BY depname ORDER BY salary DESC) c
- FROM empsalary) emp
-WHERE c <= 3;
- empno | depname | salary | c
--------+-----------+--------+---
- 8 | develop | 6000 | 1
- 10 | develop | 5200 | 3
- 11 | develop | 5200 | 3
- 2 | personnel | 3900 | 1
- 5 | personnel | 3500 | 2
- 1 | sales | 5000 | 1
- 4 | sales | 4800 | 3
- 3 | sales | 4800 | 3
-(8 rows)
-
--- Ensure we get the correct run condition when the window function is both
--- monotonically increasing and decreasing.
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- depname,
- salary,
- count(empno) OVER () c
- FROM empsalary) emp
-WHERE c = 1;
- QUERY PLAN
---------------------------------------------------------
- WindowAgg
- Run Condition: (count(empsalary.empno) OVER (?) = 1)
- -> Seq Scan on empsalary
-(3 rows)
-
--- Some more complex cases with multiple window clauses
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT *,
- count(salary) OVER (PARTITION BY depname || '') c1, -- w1
- row_number() OVER (PARTITION BY depname) rn, -- w2
- count(*) OVER (PARTITION BY depname) c2, -- w2
- count(*) OVER (PARTITION BY '' || depname) c3, -- w3
- ntile(2) OVER (PARTITION BY depname) nt -- w2
- FROM empsalary
-) e WHERE rn <= 1 AND c1 <= 3 AND nt < 2;
- QUERY PLAN
------------------------------------------------------------------------------------------------
- Subquery Scan on e
- -> WindowAgg
- Filter: (((row_number() OVER (?)) <= 1) AND ((ntile(2) OVER (?)) < 2))
- Run Condition: (count(empsalary.salary) OVER (?) <= 3)
- -> Sort
- Sort Key: (((empsalary.depname)::text || ''::text))
- -> WindowAgg
- Run Condition: ((row_number() OVER (?) <= 1) AND (ntile(2) OVER (?) < 2))
- -> Sort
- Sort Key: empsalary.depname
- -> WindowAgg
- -> Sort
- Sort Key: ((''::text || (empsalary.depname)::text))
- -> Seq Scan on empsalary
-(14 rows)
-
--- Ensure we correctly filter out all of the run conditions from each window
-SELECT * FROM
- (SELECT *,
- count(salary) OVER (PARTITION BY depname || '') c1, -- w1
- row_number() OVER (PARTITION BY depname) rn, -- w2
- count(*) OVER (PARTITION BY depname) c2, -- w2
- count(*) OVER (PARTITION BY '' || depname) c3, -- w3
- ntile(2) OVER (PARTITION BY depname) nt -- w2
- FROM empsalary
-) e WHERE rn <= 1 AND c1 <= 3 AND nt < 2;
- depname | empno | salary | enroll_date | c1 | rn | c2 | c3 | nt
------------+-------+--------+-------------+----+----+----+----+----
- personnel | 5 | 3500 | 12-10-2007 | 2 | 1 | 2 | 2 | 1
- sales | 3 | 4800 | 08-01-2007 | 3 | 1 | 3 | 3 | 1
-(2 rows)
-
--- Ensure we remove references to reduced outer joins as nulling rels in run
--- conditions
-EXPLAIN (COSTS OFF)
-SELECT 1 FROM
- (SELECT ntile(e2.salary) OVER (PARTITION BY e1.depname) AS c
- FROM empsalary e1 LEFT JOIN empsalary e2 ON TRUE
- WHERE e1.empno = e2.empno) s
-WHERE s.c = 1;
- QUERY PLAN
----------------------------------------------------------
- Subquery Scan on s
- Filter: (s.c = 1)
- -> WindowAgg
- Run Condition: (ntile(e2.salary) OVER (?) <= 1)
- -> Sort
- Sort Key: e1.depname
- -> Merge Join
- Merge Cond: (e1.empno = e2.empno)
- -> Sort
- Sort Key: e1.empno
- -> Seq Scan on empsalary e1
- -> Sort
- Sort Key: e2.empno
- -> Seq Scan on empsalary e2
-(14 rows)
-
--- Ensure the run condition optimization is used in cases where the WindowFunc
--- has a Var from another query level
-EXPLAIN (COSTS OFF)
-SELECT 1 FROM
- (SELECT ntile(s1.x) OVER () AS c
- FROM (SELECT (SELECT 1) AS x) AS s1) s
-WHERE s.c = 1;
- QUERY PLAN
------------------------------------------------------------------
- Subquery Scan on s
- Filter: (s.c = 1)
- -> WindowAgg
- Run Condition: (ntile((InitPlan 1).col1) OVER (?) <= 1)
- InitPlan 1
- -> Result
- -> Result
-(7 rows)
-
--- Tests to ensure we don't push down the run condition when it's not valid to
--- do so.
--- Ensure we don't push down when the frame options show that the window
--- function is not monotonically increasing
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- count(*) OVER (ORDER BY salary DESC ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) c
- FROM empsalary) emp
-WHERE c <= 3;
- QUERY PLAN
------------------------------------------------
- Subquery Scan on emp
- Filter: (emp.c <= 3)
- -> WindowAgg
- -> Sort
- Sort Key: empsalary.salary DESC
- -> Seq Scan on empsalary
-(6 rows)
-
--- Ensure we don't push down when the window function's monotonic properties
--- don't match that of the clauses.
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- count(*) OVER (ORDER BY salary) c
- FROM empsalary) emp
-WHERE 3 <= c;
- QUERY PLAN
-------------------------------------------
- Subquery Scan on emp
- Filter: (3 <= emp.c)
- -> WindowAgg
- -> Sort
- Sort Key: empsalary.salary
- -> Seq Scan on empsalary
-(6 rows)
-
--- Ensure we don't use a run condition when there's a volatile function in the
--- WindowFunc
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- count(random()) OVER (ORDER BY empno DESC) c
- FROM empsalary) emp
-WHERE c = 1;
- QUERY PLAN
-----------------------------------------------
- Subquery Scan on emp
- Filter: (emp.c = 1)
- -> WindowAgg
- -> Sort
- Sort Key: empsalary.empno DESC
- -> Seq Scan on empsalary
-(6 rows)
-
--- Ensure we don't use a run condition when the WindowFunc contains subplans
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT empno,
- salary,
- count((SELECT 1)) OVER (ORDER BY empno DESC) c
- FROM empsalary) emp
-WHERE c = 1;
- QUERY PLAN
-----------------------------------------------
- Subquery Scan on emp
- Filter: (emp.c = 1)
- -> WindowAgg
- InitPlan 1
- -> Result
- -> Sort
- Sort Key: empsalary.empno DESC
- -> Seq Scan on empsalary
-(8 rows)
-
--- Test Sort node collapsing
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT depname,
- sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
- min(salary) OVER (PARTITION BY depname, empno order by enroll_date) depminsalary
- FROM empsalary) emp
-WHERE depname = 'sales';
- QUERY PLAN
-----------------------------------------------------------------------
- Subquery Scan on emp
- -> WindowAgg
- -> WindowAgg
- -> Sort
- Sort Key: empsalary.empno, empsalary.enroll_date
- -> Seq Scan on empsalary
- Filter: ((depname)::text = 'sales'::text)
-(7 rows)
-
--- Ensure that the evaluation order of the WindowAggs results in the WindowAgg
--- with the same sort order that's required by the ORDER BY is evaluated last.
-EXPLAIN (COSTS OFF)
-SELECT empno,
- enroll_date,
- depname,
- sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
- min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary
-FROM empsalary
-ORDER BY depname, empno;
- QUERY PLAN
-----------------------------------------------------
- WindowAgg
- -> Incremental Sort
- Sort Key: depname, empno
- Presorted Key: depname
- -> WindowAgg
- -> Sort
- Sort Key: depname, enroll_date
- -> Seq Scan on empsalary
-(8 rows)
-
--- As above, but with an adjusted ORDER BY to ensure the above plan didn't
--- perform only 2 sorts by accident.
-EXPLAIN (COSTS OFF)
-SELECT empno,
- enroll_date,
- depname,
- sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
- min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary
-FROM empsalary
-ORDER BY depname, enroll_date;
- QUERY PLAN
------------------------------------------------
- WindowAgg
- -> Incremental Sort
- Sort Key: depname, enroll_date
- Presorted Key: depname
- -> WindowAgg
- -> Sort
- Sort Key: depname, empno
- -> Seq Scan on empsalary
-(8 rows)
-
-SET enable_hashagg TO off;
--- Ensure we don't get a sort for both DISTINCT and ORDER BY. We expect the
--- sort for the DISTINCT to provide presorted input for the ORDER BY.
-EXPLAIN (COSTS OFF)
-SELECT DISTINCT
- empno,
- enroll_date,
- depname,
- sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
- min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary
-FROM empsalary
-ORDER BY depname, enroll_date;
- QUERY PLAN
------------------------------------------------------------------------------------------------
- Unique
- -> Incremental Sort
- Sort Key: depname, enroll_date, empno, (sum(salary) OVER (?)), (min(salary) OVER (?))
- Presorted Key: depname, enroll_date
- -> WindowAgg
- -> Incremental Sort
- Sort Key: depname, enroll_date
- Presorted Key: depname
- -> WindowAgg
- -> Sort
- Sort Key: depname, empno
- -> Seq Scan on empsalary
-(12 rows)
-
--- As above but adjust the ORDER BY clause to help ensure the plan with the
--- minimum amount of sorting wasn't a fluke.
-EXPLAIN (COSTS OFF)
-SELECT DISTINCT
- empno,
- enroll_date,
- depname,
- sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
- min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary
-FROM empsalary
-ORDER BY depname, empno;
- QUERY PLAN
------------------------------------------------------------------------------------------------
- Unique
- -> Incremental Sort
- Sort Key: depname, empno, enroll_date, (sum(salary) OVER (?)), (min(salary) OVER (?))
- Presorted Key: depname, empno
- -> WindowAgg
- -> Incremental Sort
- Sort Key: depname, empno
- Presorted Key: depname
- -> WindowAgg
- -> Sort
- Sort Key: depname, enroll_date
- -> Seq Scan on empsalary
-(12 rows)
-
-RESET enable_hashagg;
--- Test Sort node reordering
-EXPLAIN (COSTS OFF)
-SELECT
- lead(1) OVER (PARTITION BY depname ORDER BY salary, enroll_date),
- lag(1) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno)
-FROM empsalary;
- QUERY PLAN
--------------------------------------------------------------
- WindowAgg
- -> WindowAgg
- -> Sort
- Sort Key: depname, salary, enroll_date, empno
- -> Seq Scan on empsalary
-(5 rows)
-
--- Test incremental sorting
-EXPLAIN (COSTS OFF)
-SELECT * FROM
- (SELECT depname,
- empno,
- salary,
- enroll_date,
- row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp,
- row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp
- FROM empsalary) emp
-WHERE first_emp = 1 OR last_emp = 1;
- QUERY PLAN
------------------------------------------------------------------------------------
- Subquery Scan on emp
- Filter: ((emp.first_emp = 1) OR (emp.last_emp = 1))
- -> WindowAgg
- -> Incremental Sort
- Sort Key: empsalary.depname, empsalary.enroll_date
- Presorted Key: empsalary.depname
- -> WindowAgg
- -> Sort
- Sort Key: empsalary.depname, empsalary.enroll_date DESC
- -> Seq Scan on empsalary
-(10 rows)
-
-SELECT * FROM
- (SELECT depname,
- empno,
- salary,
- enroll_date,
- row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp,
- row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp
- FROM empsalary) emp
-WHERE first_emp = 1 OR last_emp = 1;
- depname | empno | salary | enroll_date | first_emp | last_emp
------------+-------+--------+-------------+-----------+----------
- develop | 8 | 6000 | 10-01-2006 | 1 | 5
- develop | 7 | 4200 | 01-01-2008 | 5 | 1
- personnel | 2 | 3900 | 12-23-2006 | 1 | 2
- personnel | 5 | 3500 | 12-10-2007 | 2 | 1
- sales | 1 | 5000 | 10-01-2006 | 1 | 3
- sales | 4 | 4800 | 08-08-2007 | 3 | 1
-(6 rows)
-
--- cleanup
-DROP TABLE empsalary;
--- test user-defined window function with named args and default args
-CREATE FUNCTION nth_value_def(val anyelement, n integer = 1) RETURNS anyelement
- LANGUAGE internal WINDOW IMMUTABLE STRICT AS 'window_nth_value';
-SELECT nth_value_def(n := 2, val := ten) OVER (PARTITION BY four), ten, four
- FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s;
- nth_value_def | ten | four
----------------+-----+------
- 0 | 0 | 0
- 0 | 0 | 0
- 0 | 4 | 0
- 1 | 1 | 1
- 1 | 1 | 1
- 1 | 7 | 1
- 1 | 9 | 1
- | 0 | 2
- 3 | 1 | 3
- 3 | 3 | 3
-(10 rows)
-
-SELECT nth_value_def(ten) OVER (PARTITION BY four), ten, four
- FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s;
- nth_value_def | ten | four
----------------+-----+------
- 0 | 0 | 0
- 0 | 0 | 0
- 0 | 4 | 0
- 1 | 1 | 1
- 1 | 1 | 1
- 1 | 7 | 1
- 1 | 9 | 1
- 0 | 0 | 2
- 1 | 1 | 3
- 1 | 3 | 3
-(10 rows)
-
---
--- Test the basic moving-aggregate machinery
---
--- create aggregates that record the series of transform calls (these are
--- intentionally not true inverses)
-CREATE FUNCTION logging_sfunc_nonstrict(text, anyelement) RETURNS text AS
-$$ SELECT COALESCE($1, '') || '*' || quote_nullable($2) $$
-LANGUAGE SQL IMMUTABLE;
-CREATE FUNCTION logging_msfunc_nonstrict(text, anyelement) RETURNS text AS
-$$ SELECT COALESCE($1, '') || '+' || quote_nullable($2) $$
-LANGUAGE SQL IMMUTABLE;
-CREATE FUNCTION logging_minvfunc_nonstrict(text, anyelement) RETURNS text AS
-$$ SELECT $1 || '-' || quote_nullable($2) $$
-LANGUAGE SQL IMMUTABLE;
-CREATE AGGREGATE logging_agg_nonstrict (anyelement)
-(
- stype = text,
- sfunc = logging_sfunc_nonstrict,
- mstype = text,
- msfunc = logging_msfunc_nonstrict,
- minvfunc = logging_minvfunc_nonstrict
-);
-CREATE AGGREGATE logging_agg_nonstrict_initcond (anyelement)
-(
- stype = text,
- sfunc = logging_sfunc_nonstrict,
- mstype = text,
- msfunc = logging_msfunc_nonstrict,
- minvfunc = logging_minvfunc_nonstrict,
- initcond = 'I',
- minitcond = 'MI'
-);
-CREATE FUNCTION logging_sfunc_strict(text, anyelement) RETURNS text AS
-$$ SELECT $1 || '*' || quote_nullable($2) $$
-LANGUAGE SQL STRICT IMMUTABLE;
-CREATE FUNCTION logging_msfunc_strict(text, anyelement) RETURNS text AS
-$$ SELECT $1 || '+' || quote_nullable($2) $$
-LANGUAGE SQL STRICT IMMUTABLE;
-CREATE FUNCTION logging_minvfunc_strict(text, anyelement) RETURNS text AS
-$$ SELECT $1 || '-' || quote_nullable($2) $$
-LANGUAGE SQL STRICT IMMUTABLE;
-CREATE AGGREGATE logging_agg_strict (text)
-(
- stype = text,
- sfunc = logging_sfunc_strict,
- mstype = text,
- msfunc = logging_msfunc_strict,
- minvfunc = logging_minvfunc_strict
-);
-CREATE AGGREGATE logging_agg_strict_initcond (anyelement)
-(
- stype = text,
- sfunc = logging_sfunc_strict,
- mstype = text,
- msfunc = logging_msfunc_strict,
- minvfunc = logging_minvfunc_strict,
- initcond = 'I',
- minitcond = 'MI'
-);
--- test strict and non-strict cases
-SELECT
- p::text || ',' || i::text || ':' || COALESCE(v::text, 'NULL') AS row,
- logging_agg_nonstrict(v) over wnd as nstrict,
- logging_agg_nonstrict_initcond(v) over wnd as nstrict_init,
- logging_agg_strict(v::text) over wnd as strict,
- logging_agg_strict_initcond(v) over wnd as strict_init
-FROM (VALUES
- (1, 1, NULL),
- (1, 2, 'a'),
- (1, 3, 'b'),
- (1, 4, NULL),
- (1, 5, NULL),
- (1, 6, 'c'),
- (2, 1, NULL),
- (2, 2, 'x'),
- (3, 1, 'z')
-) AS t(p, i, v)
-WINDOW wnd AS (PARTITION BY P ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
-ORDER BY p, i;
- row | nstrict | nstrict_init | strict | strict_init
-----------+-----------------------------------------------+-------------------------------------------------+-----------+----------------
- 1,1:NULL | +NULL | MI+NULL | | MI
- 1,2:a | +NULL+'a' | MI+NULL+'a' | a | MI+'a'
- 1,3:b | +NULL+'a'-NULL+'b' | MI+NULL+'a'-NULL+'b' | a+'b' | MI+'a'+'b'
- 1,4:NULL | +NULL+'a'-NULL+'b'-'a'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL | a+'b'-'a' | MI+'a'+'b'-'a'
- 1,5:NULL | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | | MI
- 1,6:c | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | c | MI+'c'
- 2,1:NULL | +NULL | MI+NULL | | MI
- 2,2:x | +NULL+'x' | MI+NULL+'x' | x | MI+'x'
- 3,1:z | +'z' | MI+'z' | z | MI+'z'
-(9 rows)
-
--- and again, but with filter
-SELECT
- p::text || ',' || i::text || ':' ||
- CASE WHEN f THEN COALESCE(v::text, 'NULL') ELSE '-' END as row,
- logging_agg_nonstrict(v) filter(where f) over wnd as nstrict_filt,
- logging_agg_nonstrict_initcond(v) filter(where f) over wnd as nstrict_init_filt,
- logging_agg_strict(v::text) filter(where f) over wnd as strict_filt,
- logging_agg_strict_initcond(v) filter(where f) over wnd as strict_init_filt
-FROM (VALUES
- (1, 1, true, NULL),
- (1, 2, false, 'a'),
- (1, 3, true, 'b'),
- (1, 4, false, NULL),
- (1, 5, false, NULL),
- (1, 6, false, 'c'),
- (2, 1, false, NULL),
- (2, 2, true, 'x'),
- (3, 1, true, 'z')
-) AS t(p, i, f, v)
-WINDOW wnd AS (PARTITION BY p ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
-ORDER BY p, i;
- row | nstrict_filt | nstrict_init_filt | strict_filt | strict_init_filt
-----------+--------------+-------------------+-------------+------------------
- 1,1:NULL | +NULL | MI+NULL | | MI
- 1,2:- | +NULL | MI+NULL | | MI
- 1,3:b | +'b' | MI+'b' | b | MI+'b'
- 1,4:- | +'b' | MI+'b' | b | MI+'b'
- 1,5:- | | MI | | MI
- 1,6:- | | MI | | MI
- 2,1:- | | MI | | MI
- 2,2:x | +'x' | MI+'x' | x | MI+'x'
- 3,1:z | +'z' | MI+'z' | z | MI+'z'
-(9 rows)
-
--- test that volatile arguments disable moving-aggregate mode
-SELECT
- i::text || ':' || COALESCE(v::text, 'NULL') as row,
- logging_agg_strict(v::text)
- over wnd as inverse,
- logging_agg_strict(v::text || CASE WHEN random() < 0 then '?' ELSE '' END)
- over wnd as noinverse
-FROM (VALUES
- (1, 'a'),
- (2, 'b'),
- (3, 'c')
-) AS t(i, v)
-WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
-ORDER BY i;
- row | inverse | noinverse
------+---------------+-----------
- 1:a | a | a
- 2:b | a+'b' | a*'b'
- 3:c | a+'b'-'a'+'c' | b*'c'
-(3 rows)
-
-SELECT
- i::text || ':' || COALESCE(v::text, 'NULL') as row,
- logging_agg_strict(v::text) filter(where true)
- over wnd as inverse,
- logging_agg_strict(v::text) filter(where random() >= 0)
- over wnd as noinverse
-FROM (VALUES
- (1, 'a'),
- (2, 'b'),
- (3, 'c')
-) AS t(i, v)
-WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
-ORDER BY i;
- row | inverse | noinverse
------+---------------+-----------
- 1:a | a | a
- 2:b | a+'b' | a*'b'
- 3:c | a+'b'-'a'+'c' | b*'c'
-(3 rows)
-
--- test that non-overlapping windows don't use inverse transitions
-SELECT
- logging_agg_strict(v::text) OVER wnd
-FROM (VALUES
- (1, 'a'),
- (2, 'b'),
- (3, 'c')
-) AS t(i, v)
-WINDOW wnd AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW)
-ORDER BY i;
- logging_agg_strict
---------------------
- a
- b
- c
-(3 rows)
-
--- test that returning NULL from the inverse transition functions
--- restarts the aggregation from scratch. The second aggregate is supposed
--- to test cases where only some aggregates restart, the third one checks
--- that one aggregate restarting doesn't cause others to restart.
-CREATE FUNCTION sum_int_randrestart_minvfunc(int4, int4) RETURNS int4 AS
-$$ SELECT CASE WHEN random() < 0.2 THEN NULL ELSE $1 - $2 END $$
-LANGUAGE SQL STRICT;
-CREATE AGGREGATE sum_int_randomrestart (int4)
-(
- stype = int4,
- sfunc = int4pl,
- mstype = int4,
- msfunc = int4pl,
- minvfunc = sum_int_randrestart_minvfunc
-);
-WITH
-vs AS (
- SELECT i, (random() * 100)::int4 AS v
- FROM generate_series(1, 100) AS i
-),
-sum_following AS (
- SELECT i, SUM(v) OVER
- (ORDER BY i DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS s
- FROM vs
-)
-SELECT DISTINCT
- sum_following.s = sum_int_randomrestart(v) OVER fwd AS eq1,
- -sum_following.s = sum_int_randomrestart(-v) OVER fwd AS eq2,
- 100*3+(vs.i-1)*3 = length(logging_agg_nonstrict(''::text) OVER fwd) AS eq3
-FROM vs
-JOIN sum_following ON sum_following.i = vs.i
-WINDOW fwd AS (
- ORDER BY vs.i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
-);
- eq1 | eq2 | eq3
------+-----+-----
- t | t | t
-(1 row)
-
---
--- Test various built-in aggregates that have moving-aggregate support
---
--- test inverse transition functions handle NULLs properly
-SELECT i,AVG(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | avg
----+--------------------
- 1 | 1.5000000000000000
- 2 | 2.0000000000000000
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,AVG(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | avg
----+--------------------
- 1 | 1.5000000000000000
- 2 | 2.0000000000000000
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,AVG(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | avg
----+--------------------
- 1 | 1.5000000000000000
- 2 | 2.0000000000000000
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,AVG(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1.5),(2,2.5),(3,NULL),(4,NULL)) t(i,v);
- i | avg
----+--------------------
- 1 | 2.0000000000000000
- 2 | 2.5000000000000000
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,AVG(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v);
- i | avg
----+------------
- 1 | @ 1.5 secs
- 2 | @ 2 secs
- 3 |
- 4 |
-(4 rows)
-
--- moving aggregates over infinite intervals
-SELECT x
- ,avg(x) OVER(ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING ) as curr_next_avg
- ,avg(x) OVER(ROWS BETWEEN 1 PRECEDING AND CURRENT ROW ) as prev_curr_avg
- ,sum(x) OVER(ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING ) as curr_next_sum
- ,sum(x) OVER(ROWS BETWEEN 1 PRECEDING AND CURRENT ROW ) as prev_curr_sum
-FROM (VALUES (NULL::interval),
- ('infinity'::interval),
- ('-2147483648 days -2147483648 months -9223372036854775807 usecs'), -- extreme interval value
- ('-infinity'::interval),
- ('2147483647 days 2147483647 months 9223372036854775806 usecs'), -- extreme interval value
- ('infinity'::interval),
- ('6 days'::interval),
- ('7 days'::interval),
- (NULL::interval),
- ('-infinity'::interval)) v(x);
- x | curr_next_avg | prev_curr_avg | curr_next_sum | prev_curr_sum
-------------------------------------------------------------------------------+-------------------+-------------------+---------------+---------------
- | infinity | | infinity |
- infinity | infinity | infinity | infinity | infinity
- @ 178956970 years 8 mons 2147483648 days 2562047788 hours 54.775807 secs ago | -infinity | infinity | -infinity | infinity
- -infinity | -infinity | -infinity | -infinity | -infinity
- @ 178956970 years 7 mons 2147483647 days 2562047788 hours 54.775806 secs | infinity | -infinity | infinity | -infinity
- infinity | infinity | infinity | infinity | infinity
- @ 6 days | @ 6 days 12 hours | infinity | @ 13 days | infinity
- @ 7 days | @ 7 days | @ 6 days 12 hours | @ 7 days | @ 13 days
- | -infinity | @ 7 days | -infinity | @ 7 days
- -infinity | -infinity | -infinity | -infinity | -infinity
-(10 rows)
-
---should fail.
-SELECT x, avg(x) OVER(ROWS BETWEEN CURRENT ROW AND 2 FOLLOWING)
-FROM (VALUES (NULL::interval),
- ('3 days'::interval),
- ('infinity'::timestamptz - now()),
- ('6 days'::interval),
- ('-infinity'::interval)) v(x);
-ERROR: interval out of range
---should fail.
-SELECT x, sum(x) OVER(ROWS BETWEEN CURRENT ROW AND 2 FOLLOWING)
-FROM (VALUES (NULL::interval),
- ('3 days'::interval),
- ('infinity'::timestamptz - now()),
- ('6 days'::interval),
- ('-infinity'::interval)) v(x);
-ERROR: interval out of range
-SELECT i,SUM(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 | 3
- 2 | 2
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 | 3
- 2 | 2
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 | 3
- 2 | 2
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::money) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,'1.10'),(2,'2.20'),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-------
- 1 | $3.30
- 2 | $2.20
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+----------
- 1 | @ 3 secs
- 2 | @ 2 secs
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1.1),(2,2.2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 | 3.3
- 2 | 2.2
- 3 |
- 4 |
-(4 rows)
-
-SELECT SUM(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1.01),(2,2),(3,3)) v(i,n);
- sum
-------
- 6.01
- 5
- 3
-(3 rows)
-
-SELECT i,COUNT(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | count
----+-------
- 1 | 2
- 2 | 1
- 3 | 0
- 4 | 0
-(4 rows)
-
-SELECT i,COUNT(*) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | count
----+-------
- 1 | 4
- 2 | 3
- 3 | 2
- 4 | 1
-(4 rows)
-
-SELECT VAR_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- var_pop
------------------------
- 21704.000000000000
- 13868.750000000000
- 11266.666666666667
- 4225.0000000000000000
- 0
-(5 rows)
-
-SELECT VAR_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- var_pop
------------------------
- 21704.000000000000
- 13868.750000000000
- 11266.666666666667
- 4225.0000000000000000
- 0
-(5 rows)
-
-SELECT VAR_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- var_pop
------------------------
- 21704.000000000000
- 13868.750000000000
- 11266.666666666667
- 4225.0000000000000000
- 0
-(5 rows)
-
-SELECT VAR_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- var_pop
------------------------
- 21704.000000000000
- 13868.750000000000
- 11266.666666666667
- 4225.0000000000000000
- 0
-(5 rows)
-
-SELECT VAR_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- var_samp
------------------------
- 27130.000000000000
- 18491.666666666667
- 16900.000000000000
- 8450.0000000000000000
-
-(5 rows)
-
-SELECT VAR_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- var_samp
------------------------
- 27130.000000000000
- 18491.666666666667
- 16900.000000000000
- 8450.0000000000000000
-
-(5 rows)
-
-SELECT VAR_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- var_samp
------------------------
- 27130.000000000000
- 18491.666666666667
- 16900.000000000000
- 8450.0000000000000000
-
-(5 rows)
-
-SELECT VAR_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- var_samp
------------------------
- 27130.000000000000
- 18491.666666666667
- 16900.000000000000
- 8450.0000000000000000
-
-(5 rows)
-
-SELECT VARIANCE(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- variance
------------------------
- 27130.000000000000
- 18491.666666666667
- 16900.000000000000
- 8450.0000000000000000
-
-(5 rows)
-
-SELECT VARIANCE(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- variance
------------------------
- 27130.000000000000
- 18491.666666666667
- 16900.000000000000
- 8450.0000000000000000
-
-(5 rows)
-
-SELECT VARIANCE(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- variance
------------------------
- 27130.000000000000
- 18491.666666666667
- 16900.000000000000
- 8450.0000000000000000
-
-(5 rows)
-
-SELECT VARIANCE(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- variance
------------------------
- 27130.000000000000
- 18491.666666666667
- 16900.000000000000
- 8450.0000000000000000
-
-(5 rows)
-
-SELECT STDDEV_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
- stddev_pop
----------------------
- 147.322774885623
- 147.322774885623
- 117.765657133139
- 106.144555520604
- 65.0000000000000000
- 0
-(6 rows)
-
-SELECT STDDEV_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
- stddev_pop
----------------------
- 147.322774885623
- 147.322774885623
- 117.765657133139
- 106.144555520604
- 65.0000000000000000
- 0
-(6 rows)
-
-SELECT STDDEV_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
- stddev_pop
----------------------
- 147.322774885623
- 147.322774885623
- 117.765657133139
- 106.144555520604
- 65.0000000000000000
- 0
-(6 rows)
-
-SELECT STDDEV_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
- stddev_pop
----------------------
- 147.322774885623
- 147.322774885623
- 117.765657133139
- 106.144555520604
- 65.0000000000000000
- 0
-(6 rows)
-
-SELECT STDDEV_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
- stddev_samp
----------------------
- 164.711869639076
- 164.711869639076
- 135.984067694222
- 130.000000000000
- 91.9238815542511782
-
-(6 rows)
-
-SELECT STDDEV_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
- stddev_samp
----------------------
- 164.711869639076
- 164.711869639076
- 135.984067694222
- 130.000000000000
- 91.9238815542511782
-
-(6 rows)
-
-SELECT STDDEV_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
- stddev_samp
----------------------
- 164.711869639076
- 164.711869639076
- 135.984067694222
- 130.000000000000
- 91.9238815542511782
-
-(6 rows)
-
-SELECT STDDEV_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
- stddev_samp
----------------------
- 164.711869639076
- 164.711869639076
- 135.984067694222
- 130.000000000000
- 91.9238815542511782
-
-(6 rows)
-
-SELECT STDDEV(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- stddev
----------------------
- 164.711869639076
- 164.711869639076
- 135.984067694222
- 130.000000000000
- 91.9238815542511782
-
-(6 rows)
-
-SELECT STDDEV(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- stddev
----------------------
- 164.711869639076
- 164.711869639076
- 135.984067694222
- 130.000000000000
- 91.9238815542511782
-
-(6 rows)
-
-SELECT STDDEV(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- stddev
----------------------
- 164.711869639076
- 164.711869639076
- 135.984067694222
- 130.000000000000
- 91.9238815542511782
-
-(6 rows)
-
-SELECT STDDEV(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
- FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
- stddev
----------------------
- 164.711869639076
- 164.711869639076
- 135.984067694222
- 130.000000000000
- 91.9238815542511782
-
-(6 rows)
-
--- test that inverse transition functions work with various frame options
-SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 | 1
- 2 | 2
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
- i | sum
----+-----
- 1 | 3
- 2 | 2
- 3 |
- 4 |
-(4 rows)
-
-SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
- FROM (VALUES(1,1),(2,2),(3,3),(4,4)) t(i,v);
- i | sum
----+-----
- 1 | 3
- 2 | 6
- 3 | 9
- 4 | 7
-(4 rows)
-
--- ensure aggregate over numeric properly recovers from NaN values
-SELECT a, b,
- SUM(b) OVER(ORDER BY A ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
-FROM (VALUES(1,1::numeric),(2,2),(3,'NaN'),(4,3),(5,4)) t(a,b);
- a | b | sum
----+-----+-----
- 1 | 1 | 1
- 2 | 2 | 3
- 3 | NaN | NaN
- 4 | 3 | NaN
- 5 | 4 | 7
-(5 rows)
-
--- It might be tempting for someone to add an inverse trans function for
--- float and double precision. This should not be done as it can give incorrect
--- results. This test should fail if anyone ever does this without thinking too
--- hard about it.
-SELECT to_char(SUM(n::float8) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING),'999999999999999999999D9')
- FROM (VALUES(1,1e20),(2,1)) n(i,n);
- to_char
---------------------------
- 100000000000000000000
- 1.0
-(2 rows)
-
-SELECT i, b, bool_and(b) OVER w, bool_or(b) OVER w
- FROM (VALUES (1,true), (2,true), (3,false), (4,false), (5,true)) v(i,b)
- WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING);
- i | b | bool_and | bool_or
----+---+----------+---------
- 1 | t | t | t
- 2 | t | f | t
- 3 | f | f | f
- 4 | f | f | t
- 5 | t | t | t
-(5 rows)
-
---
--- Test WindowAgg costing takes into account the number of rows that need to
--- be fetched before the first row can be output.
---
--- Ensure we get a cheap start up plan as the WindowAgg can output the first
--- row after reading 1 row from the join.
-EXPLAIN (COSTS OFF)
-SELECT COUNT(*) OVER (ORDER BY t1.unique1)
-FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous
-LIMIT 1;
- QUERY PLAN
---------------------------------------------------------------------------
- Limit
- -> WindowAgg
- -> Nested Loop
- -> Index Only Scan using tenk1_unique1 on tenk1 t1
- -> Index Only Scan using tenk1_thous_tenthous on tenk1 t2
- Index Cond: (tenthous = t1.unique1)
-(6 rows)
-
--- Ensure we get a cheap total plan. Lack of ORDER BY in the WindowClause
--- means that all rows must be read from the join, so a cheap startup plan
--- isn't a good choice.
-EXPLAIN (COSTS OFF)
-SELECT COUNT(*) OVER ()
-FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous
-WHERE t2.two = 1
-LIMIT 1;
- QUERY PLAN
--------------------------------------------------------------------
- Limit
- -> WindowAgg
- -> Hash Join
- Hash Cond: (t1.unique1 = t2.tenthous)
- -> Index Only Scan using tenk1_unique1 on tenk1 t1
- -> Hash
- -> Seq Scan on tenk1 t2
- Filter: (two = 1)
-(8 rows)
-
--- Ensure we get a cheap total plan. This time use UNBOUNDED FOLLOWING, which
--- needs to read all join rows to output the first WindowAgg row.
-EXPLAIN (COSTS OFF)
-SELECT COUNT(*) OVER (ORDER BY t1.unique1 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
-FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous
-LIMIT 1;
- QUERY PLAN
---------------------------------------------------------------------------------
- Limit
- -> WindowAgg
- -> Merge Join
- Merge Cond: (t1.unique1 = t2.tenthous)
- -> Index Only Scan using tenk1_unique1 on tenk1 t1
- -> Sort
- Sort Key: t2.tenthous
- -> Index Only Scan using tenk1_thous_tenthous on tenk1 t2
-(8 rows)
-
--- Ensure we get a cheap total plan. This time use 10000 FOLLOWING so we need
--- to read all join rows.
-EXPLAIN (COSTS OFF)
-SELECT COUNT(*) OVER (ORDER BY t1.unique1 ROWS BETWEEN UNBOUNDED PRECEDING AND 10000 FOLLOWING)
-FROM tenk1 t1 INNER JOIN tenk1 t2 ON t1.unique1 = t2.tenthous
-LIMIT 1;
- QUERY PLAN
---------------------------------------------------------------------------------
- Limit
- -> WindowAgg
- -> Merge Join
- Merge Cond: (t1.unique1 = t2.tenthous)
- -> Index Only Scan using tenk1_unique1 on tenk1 t1
- -> Sort
- Sort Key: t2.tenthous
- -> Index Only Scan using tenk1_thous_tenthous on tenk1 t2
-(8 rows)
-
--- Tests for problems with failure to walk or mutate expressions
--- within window frame clauses.
--- test walker (fails with collation error if expressions are not walked)
-SELECT array_agg(i) OVER w
- FROM generate_series(1,5) i
-WINDOW w AS (ORDER BY i ROWS BETWEEN (('foo' < 'foobar')::integer) PRECEDING AND CURRENT ROW);
- array_agg
------------
- {1}
- {1,2}
- {2,3}
- {3,4}
- {4,5}
-(5 rows)
-
--- test mutator (fails when inlined if expressions are not mutated)
-CREATE FUNCTION pg_temp.f(group_size BIGINT) RETURNS SETOF integer[]
-AS $$
- SELECT array_agg(s) OVER w
- FROM generate_series(1,5) s
- WINDOW w AS (ORDER BY s ROWS BETWEEN CURRENT ROW AND GROUP_SIZE FOLLOWING)
-$$ LANGUAGE SQL STABLE;
-EXPLAIN (costs off) SELECT * FROM pg_temp.f(2);
- QUERY PLAN
-------------------------------------------------------
- Subquery Scan on f
- -> WindowAgg
- -> Sort
- Sort Key: s.s
- -> Function Scan on generate_series s
-(5 rows)
-
-SELECT * FROM pg_temp.f(2);
- f
----------
- {1,2,3}
- {2,3,4}
- {3,4,5}
- {4,5}
- {5}
-(5 rows)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/xmlmap_1.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/xmlmap.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/xmlmap_1.out 2024-11-15 02:50:52.525989802 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/xmlmap.out 2024-11-15 02:59:17.221115691 +0000
@@ -1,107 +1,2 @@
-CREATE SCHEMA testxmlschema;
-CREATE TABLE testxmlschema.test1 (a int, b text);
-INSERT INTO testxmlschema.test1 VALUES (1, 'one'), (2, 'two'), (-1, null);
-CREATE DOMAIN testxmldomain AS varchar;
-CREATE TABLE testxmlschema.test2 (z int, y varchar(500), x char(6),
- w numeric(9,2), v smallint, u bigint, t real,
- s time, stz timetz, r timestamp, rtz timestamptz, q date,
- p xml, o testxmldomain, n bool, m bytea, aaa text);
-ALTER TABLE testxmlschema.test2 DROP COLUMN aaa;
-INSERT INTO testxmlschema.test2 VALUES (55, 'abc', 'def',
- 98.6, 2, 999, 0,
- '21:07', '21:11 +05', '2009-06-08 21:07:30', '2009-06-08 21:07:30 -07', '2009-06-08',
- NULL, 'ABC', true, 'XYZ');
-SELECT table_to_xml('testxmlschema.test1', false, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xml('testxmlschema.test1', true, false, 'foo');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xml('testxmlschema.test1', false, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xml('testxmlschema.test1', true, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xml('testxmlschema.test2', false, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xmlschema('testxmlschema.test1', false, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xmlschema('testxmlschema.test1', true, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xmlschema('testxmlschema.test1', false, true, 'foo');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xmlschema('testxmlschema.test1', true, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xmlschema('testxmlschema.test2', false, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, true, 'foo');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT query_to_xml('SELECT * FROM testxmlschema.test1', false, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT query_to_xmlschema('SELECT * FROM testxmlschema.test1', false, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT query_to_xml_and_xmlschema('SELECT * FROM testxmlschema.test1', true, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-DECLARE xc CURSOR WITH HOLD FOR SELECT * FROM testxmlschema.test1 ORDER BY 1, 2;
-SELECT cursor_to_xml('xc'::refcursor, 5, false, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT cursor_to_xmlschema('xc'::refcursor, false, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-MOVE BACKWARD ALL IN xc;
-SELECT cursor_to_xml('xc'::refcursor, 5, true, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT cursor_to_xmlschema('xc'::refcursor, true, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT schema_to_xml('testxmlschema', false, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT schema_to_xml('testxmlschema', true, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT schema_to_xmlschema('testxmlschema', false, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT schema_to_xmlschema('testxmlschema', true, false, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT schema_to_xml_and_xmlschema('testxmlschema', true, true, 'foo');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
--- test that domains are transformed like their base types
-CREATE DOMAIN testboolxmldomain AS bool;
-CREATE DOMAIN testdatexmldomain AS date;
-CREATE TABLE testxmlschema.test3
- AS SELECT true c1,
- true::testboolxmldomain c2,
- '2013-02-21'::date c3,
- '2013-02-21'::testdatexmldomain c4;
-SELECT xmlforest(c1, c2, c3, c4) FROM testxmlschema.test3;
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_to_xml('testxmlschema.test3', true, true, '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/functional_deps.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/functional_deps.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/functional_deps.out 2024-11-15 02:50:52.442128045 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/functional_deps.out 2024-11-15 02:59:17.233115707 +0000
@@ -1,232 +1,2 @@
--- from http://www.depesz.com/index.php/2010/04/19/getting-unique-elements/
-CREATE TEMP TABLE articles (
- id int CONSTRAINT articles_pkey PRIMARY KEY,
- keywords text,
- title text UNIQUE NOT NULL,
- body text UNIQUE,
- created date
-);
-CREATE TEMP TABLE articles_in_category (
- article_id int,
- category_id int,
- changed date,
- PRIMARY KEY (article_id, category_id)
-);
--- test functional dependencies based on primary keys/unique constraints
--- base tables
--- group by primary key (OK)
-SELECT id, keywords, title, body, created
-FROM articles
-GROUP BY id;
- id | keywords | title | body | created
-----+----------+-------+------+---------
-(0 rows)
-
--- group by unique not null (fail/todo)
-SELECT id, keywords, title, body, created
-FROM articles
-GROUP BY title;
-ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function
-LINE 1: SELECT id, keywords, title, body, created
- ^
--- group by unique nullable (fail)
-SELECT id, keywords, title, body, created
-FROM articles
-GROUP BY body;
-ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function
-LINE 1: SELECT id, keywords, title, body, created
- ^
--- group by something else (fail)
-SELECT id, keywords, title, body, created
-FROM articles
-GROUP BY keywords;
-ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function
-LINE 1: SELECT id, keywords, title, body, created
- ^
--- multiple tables
--- group by primary key (OK)
-SELECT a.id, a.keywords, a.title, a.body, a.created
-FROM articles AS a, articles_in_category AS aic
-WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138)
-GROUP BY a.id;
- id | keywords | title | body | created
-----+----------+-------+------+---------
-(0 rows)
-
--- group by something else (fail)
-SELECT a.id, a.keywords, a.title, a.body, a.created
-FROM articles AS a, articles_in_category AS aic
-WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138)
-GROUP BY aic.article_id, aic.category_id;
-ERROR: column "a.id" must appear in the GROUP BY clause or be used in an aggregate function
-LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created
- ^
--- JOIN syntax
--- group by left table's primary key (OK)
-SELECT a.id, a.keywords, a.title, a.body, a.created
-FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
-WHERE aic.category_id in (14,62,70,53,138)
-GROUP BY a.id;
- id | keywords | title | body | created
-----+----------+-------+------+---------
-(0 rows)
-
--- group by something else (fail)
-SELECT a.id, a.keywords, a.title, a.body, a.created
-FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
-WHERE aic.category_id in (14,62,70,53,138)
-GROUP BY aic.article_id, aic.category_id;
-ERROR: column "a.id" must appear in the GROUP BY clause or be used in an aggregate function
-LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created
- ^
--- group by right table's (composite) primary key (OK)
-SELECT aic.changed
-FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
-WHERE aic.category_id in (14,62,70,53,138)
-GROUP BY aic.category_id, aic.article_id;
- changed
----------
-(0 rows)
-
--- group by right table's partial primary key (fail)
-SELECT aic.changed
-FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
-WHERE aic.category_id in (14,62,70,53,138)
-GROUP BY aic.article_id;
-ERROR: column "aic.changed" must appear in the GROUP BY clause or be used in an aggregate function
-LINE 1: SELECT aic.changed
- ^
--- example from documentation
-CREATE TEMP TABLE products (product_id int, name text, price numeric);
-CREATE TEMP TABLE sales (product_id int, units int);
--- OK
-SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
- FROM products p LEFT JOIN sales s USING (product_id)
- GROUP BY product_id, p.name, p.price;
- product_id | name | sales
-------------+------+-------
-(0 rows)
-
--- fail
-SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
- FROM products p LEFT JOIN sales s USING (product_id)
- GROUP BY product_id;
-ERROR: column "p.name" must appear in the GROUP BY clause or be used in an aggregate function
-LINE 1: SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
- ^
-ALTER TABLE products ADD PRIMARY KEY (product_id);
--- OK now
-SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
- FROM products p LEFT JOIN sales s USING (product_id)
- GROUP BY product_id;
- product_id | name | sales
-------------+------+-------
-(0 rows)
-
--- Drupal example, http://drupal.org/node/555530
-CREATE TEMP TABLE node (
- nid SERIAL,
- vid integer NOT NULL default '0',
- type varchar(32) NOT NULL default '',
- title varchar(128) NOT NULL default '',
- uid integer NOT NULL default '0',
- status integer NOT NULL default '1',
- created integer NOT NULL default '0',
- -- snip
- PRIMARY KEY (nid, vid)
-);
-CREATE TEMP TABLE users (
- uid integer NOT NULL default '0',
- name varchar(60) NOT NULL default '',
- pass varchar(32) NOT NULL default '',
- -- snip
- PRIMARY KEY (uid),
- UNIQUE (name)
-);
--- OK
-SELECT u.uid, u.name FROM node n
-INNER JOIN users u ON u.uid = n.uid
-WHERE n.type = 'blog' AND n.status = 1
-GROUP BY u.uid, u.name;
- uid | name
------+------
-(0 rows)
-
--- OK
-SELECT u.uid, u.name FROM node n
-INNER JOIN users u ON u.uid = n.uid
-WHERE n.type = 'blog' AND n.status = 1
-GROUP BY u.uid;
- uid | name
------+------
-(0 rows)
-
--- Check views and dependencies
--- fail
-CREATE TEMP VIEW fdv1 AS
-SELECT id, keywords, title, body, created
-FROM articles
-GROUP BY body;
-ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function
-LINE 2: SELECT id, keywords, title, body, created
- ^
--- OK
-CREATE TEMP VIEW fdv1 AS
-SELECT id, keywords, title, body, created
-FROM articles
-GROUP BY id;
--- fail
-ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT;
-ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it
-DETAIL: view fdv1 depends on constraint articles_pkey on table articles
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP VIEW fdv1;
--- multiple dependencies
-CREATE TEMP VIEW fdv2 AS
-SELECT a.id, a.keywords, a.title, aic.category_id, aic.changed
-FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
-WHERE aic.category_id in (14,62,70,53,138)
-GROUP BY a.id, aic.category_id, aic.article_id;
-ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail
-ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it
-DETAIL: view fdv2 depends on constraint articles_pkey on table articles
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-ALTER TABLE articles_in_category DROP CONSTRAINT articles_in_category_pkey RESTRICT; --fail
-ERROR: cannot drop constraint articles_in_category_pkey on table articles_in_category because other objects depend on it
-DETAIL: view fdv2 depends on constraint articles_in_category_pkey on table articles_in_category
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP VIEW fdv2;
--- nested queries
-CREATE TEMP VIEW fdv3 AS
-SELECT id, keywords, title, body, created
-FROM articles
-GROUP BY id
-UNION
-SELECT id, keywords, title, body, created
-FROM articles
-GROUP BY id;
-ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail
-ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it
-DETAIL: view fdv3 depends on constraint articles_pkey on table articles
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP VIEW fdv3;
-CREATE TEMP VIEW fdv4 AS
-SELECT * FROM articles WHERE title IN (SELECT title FROM articles GROUP BY id);
-ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail
-ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it
-DETAIL: view fdv4 depends on constraint articles_pkey on table articles
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP VIEW fdv4;
--- prepared query plans: this results in failure on reuse
-PREPARE foo AS
- SELECT id, keywords, title, body, created
- FROM articles
- GROUP BY id;
-EXECUTE foo;
- id | keywords | title | body | created
-----+----------+-------+------+---------
-(0 rows)
-
-ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT;
-EXECUTE foo; -- fail
-ERROR: column "articles.keywords" must appear in the GROUP BY clause or be used in an aggregate function
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/advisory_lock.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/advisory_lock.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/advisory_lock.out 2024-11-15 02:50:52.414174126 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/advisory_lock.out 2024-11-15 02:59:17.233115707 +0000
@@ -1,276 +1,2 @@
---
--- ADVISORY LOCKS
---
-SELECT oid AS datoid FROM pg_database WHERE datname = current_database() \gset
-BEGIN;
-SELECT
- pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2),
- pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2);
- pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared
------------------------+------------------------------+-----------------------+------------------------------
- | | |
-(1 row)
-
-SELECT locktype, classid, objid, objsubid, mode, granted
- FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid
- ORDER BY classid, objid, objsubid;
- locktype | classid | objid | objsubid | mode | granted
-----------+---------+-------+----------+---------------+---------
- advisory | 0 | 1 | 1 | ExclusiveLock | t
- advisory | 0 | 2 | 1 | ShareLock | t
- advisory | 1 | 1 | 2 | ExclusiveLock | t
- advisory | 2 | 2 | 2 | ShareLock | t
-(4 rows)
-
--- pg_advisory_unlock_all() shouldn't release xact locks
-SELECT pg_advisory_unlock_all();
- pg_advisory_unlock_all
-------------------------
-
-(1 row)
-
-SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid;
- count
--------
- 4
-(1 row)
-
--- can't unlock xact locks
-SELECT
- pg_advisory_unlock(1), pg_advisory_unlock_shared(2),
- pg_advisory_unlock(1, 1), pg_advisory_unlock_shared(2, 2);
-WARNING: you don't own a lock of type ExclusiveLock
-WARNING: you don't own a lock of type ShareLock
-WARNING: you don't own a lock of type ExclusiveLock
-WARNING: you don't own a lock of type ShareLock
- pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock_shared
---------------------+---------------------------+--------------------+---------------------------
- f | f | f | f
-(1 row)
-
--- automatically release xact locks at commit
-COMMIT;
-SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid;
- count
--------
- 0
-(1 row)
-
-BEGIN;
--- holding both session and xact locks on the same objects, xact first
-SELECT
- pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2),
- pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2);
- pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared
------------------------+------------------------------+-----------------------+------------------------------
- | | |
-(1 row)
-
-SELECT locktype, classid, objid, objsubid, mode, granted
- FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid
- ORDER BY classid, objid, objsubid;
- locktype | classid | objid | objsubid | mode | granted
-----------+---------+-------+----------+---------------+---------
- advisory | 0 | 1 | 1 | ExclusiveLock | t
- advisory | 0 | 2 | 1 | ShareLock | t
- advisory | 1 | 1 | 2 | ExclusiveLock | t
- advisory | 2 | 2 | 2 | ShareLock | t
-(4 rows)
-
-SELECT
- pg_advisory_lock(1), pg_advisory_lock_shared(2),
- pg_advisory_lock(1, 1), pg_advisory_lock_shared(2, 2);
- pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock_shared
-------------------+-------------------------+------------------+-------------------------
- | | |
-(1 row)
-
-ROLLBACK;
-SELECT locktype, classid, objid, objsubid, mode, granted
- FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid
- ORDER BY classid, objid, objsubid;
- locktype | classid | objid | objsubid | mode | granted
-----------+---------+-------+----------+---------------+---------
- advisory | 0 | 1 | 1 | ExclusiveLock | t
- advisory | 0 | 2 | 1 | ShareLock | t
- advisory | 1 | 1 | 2 | ExclusiveLock | t
- advisory | 2 | 2 | 2 | ShareLock | t
-(4 rows)
-
--- unlocking session locks
-SELECT
- pg_advisory_unlock(1), pg_advisory_unlock(1),
- pg_advisory_unlock_shared(2), pg_advisory_unlock_shared(2),
- pg_advisory_unlock(1, 1), pg_advisory_unlock(1, 1),
- pg_advisory_unlock_shared(2, 2), pg_advisory_unlock_shared(2, 2);
-WARNING: you don't own a lock of type ExclusiveLock
-WARNING: you don't own a lock of type ShareLock
-WARNING: you don't own a lock of type ExclusiveLock
-WARNING: you don't own a lock of type ShareLock
- pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared
---------------------+--------------------+---------------------------+---------------------------+--------------------+--------------------+---------------------------+---------------------------
- t | f | t | f | t | f | t | f
-(1 row)
-
-SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid;
- count
--------
- 0
-(1 row)
-
-BEGIN;
--- holding both session and xact locks on the same objects, session first
-SELECT
- pg_advisory_lock(1), pg_advisory_lock_shared(2),
- pg_advisory_lock(1, 1), pg_advisory_lock_shared(2, 2);
- pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock_shared
-------------------+-------------------------+------------------+-------------------------
- | | |
-(1 row)
-
-SELECT locktype, classid, objid, objsubid, mode, granted
- FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid
- ORDER BY classid, objid, objsubid;
- locktype | classid | objid | objsubid | mode | granted
-----------+---------+-------+----------+---------------+---------
- advisory | 0 | 1 | 1 | ExclusiveLock | t
- advisory | 0 | 2 | 1 | ShareLock | t
- advisory | 1 | 1 | 2 | ExclusiveLock | t
- advisory | 2 | 2 | 2 | ShareLock | t
-(4 rows)
-
-SELECT
- pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2),
- pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2);
- pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared
------------------------+------------------------------+-----------------------+------------------------------
- | | |
-(1 row)
-
-ROLLBACK;
-SELECT locktype, classid, objid, objsubid, mode, granted
- FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid
- ORDER BY classid, objid, objsubid;
- locktype | classid | objid | objsubid | mode | granted
-----------+---------+-------+----------+---------------+---------
- advisory | 0 | 1 | 1 | ExclusiveLock | t
- advisory | 0 | 2 | 1 | ShareLock | t
- advisory | 1 | 1 | 2 | ExclusiveLock | t
- advisory | 2 | 2 | 2 | ShareLock | t
-(4 rows)
-
--- releasing all session locks
-SELECT pg_advisory_unlock_all();
- pg_advisory_unlock_all
-------------------------
-
-(1 row)
-
-SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid;
- count
--------
- 0
-(1 row)
-
-BEGIN;
--- grabbing txn locks multiple times
-SELECT
- pg_advisory_xact_lock(1), pg_advisory_xact_lock(1),
- pg_advisory_xact_lock_shared(2), pg_advisory_xact_lock_shared(2),
- pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock(1, 1),
- pg_advisory_xact_lock_shared(2, 2), pg_advisory_xact_lock_shared(2, 2);
- pg_advisory_xact_lock | pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock_shared
------------------------+-----------------------+------------------------------+------------------------------+-----------------------+-----------------------+------------------------------+------------------------------
- | | | | | | |
-(1 row)
-
-SELECT locktype, classid, objid, objsubid, mode, granted
- FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid
- ORDER BY classid, objid, objsubid;
- locktype | classid | objid | objsubid | mode | granted
-----------+---------+-------+----------+---------------+---------
- advisory | 0 | 1 | 1 | ExclusiveLock | t
- advisory | 0 | 2 | 1 | ShareLock | t
- advisory | 1 | 1 | 2 | ExclusiveLock | t
- advisory | 2 | 2 | 2 | ShareLock | t
-(4 rows)
-
-COMMIT;
-SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid;
- count
--------
- 0
-(1 row)
-
--- grabbing session locks multiple times
-SELECT
- pg_advisory_lock(1), pg_advisory_lock(1),
- pg_advisory_lock_shared(2), pg_advisory_lock_shared(2),
- pg_advisory_lock(1, 1), pg_advisory_lock(1, 1),
- pg_advisory_lock_shared(2, 2), pg_advisory_lock_shared(2, 2);
- pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared
-------------------+------------------+-------------------------+-------------------------+------------------+------------------+-------------------------+-------------------------
- | | | | | | |
-(1 row)
-
-SELECT locktype, classid, objid, objsubid, mode, granted
- FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid
- ORDER BY classid, objid, objsubid;
- locktype | classid | objid | objsubid | mode | granted
-----------+---------+-------+----------+---------------+---------
- advisory | 0 | 1 | 1 | ExclusiveLock | t
- advisory | 0 | 2 | 1 | ShareLock | t
- advisory | 1 | 1 | 2 | ExclusiveLock | t
- advisory | 2 | 2 | 2 | ShareLock | t
-(4 rows)
-
-SELECT
- pg_advisory_unlock(1), pg_advisory_unlock(1),
- pg_advisory_unlock_shared(2), pg_advisory_unlock_shared(2),
- pg_advisory_unlock(1, 1), pg_advisory_unlock(1, 1),
- pg_advisory_unlock_shared(2, 2), pg_advisory_unlock_shared(2, 2);
- pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared
---------------------+--------------------+---------------------------+---------------------------+--------------------+--------------------+---------------------------+---------------------------
- t | t | t | t | t | t | t | t
-(1 row)
-
-SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid;
- count
--------
- 0
-(1 row)
-
--- .. and releasing them all at once
-SELECT
- pg_advisory_lock(1), pg_advisory_lock(1),
- pg_advisory_lock_shared(2), pg_advisory_lock_shared(2),
- pg_advisory_lock(1, 1), pg_advisory_lock(1, 1),
- pg_advisory_lock_shared(2, 2), pg_advisory_lock_shared(2, 2);
- pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared
-------------------+------------------+-------------------------+-------------------------+------------------+------------------+-------------------------+-------------------------
- | | | | | | |
-(1 row)
-
-SELECT locktype, classid, objid, objsubid, mode, granted
- FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid
- ORDER BY classid, objid, objsubid;
- locktype | classid | objid | objsubid | mode | granted
-----------+---------+-------+----------+---------------+---------
- advisory | 0 | 1 | 1 | ExclusiveLock | t
- advisory | 0 | 2 | 1 | ShareLock | t
- advisory | 1 | 1 | 2 | ExclusiveLock | t
- advisory | 2 | 2 | 2 | ShareLock | t
-(4 rows)
-
-SELECT pg_advisory_unlock_all();
- pg_advisory_unlock_all
-------------------------
-
-(1 row)
-
-SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid;
- count
--------
- 0
-(1 row)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/indirect_toast.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/indirect_toast.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/indirect_toast.out 2024-11-15 02:50:52.450114879 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/indirect_toast.out 2024-11-15 02:59:17.237115713 +0000
@@ -1,166 +1,2 @@
---
--- Tests for external toast datums
---
--- directory paths and dlsuffix are passed to us in environment variables
-\getenv libdir PG_LIBDIR
-\getenv dlsuffix PG_DLSUFFIX
-\set regresslib :libdir '/regress' :dlsuffix
-CREATE FUNCTION make_tuple_indirect (record)
- RETURNS record
- AS :'regresslib'
- LANGUAGE C STRICT;
--- Other compression algorithms may cause the compressed data to be stored
--- inline. pglz guarantees that the data is externalized, so stick to it.
-SET default_toast_compression = 'pglz';
-CREATE TABLE indtoasttest(descr text, cnt int DEFAULT 0, f1 text, f2 text);
-INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-compressed', repeat('1234567890',1000), repeat('1234567890',1000));
-INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-toasted', repeat('1234567890',30000), repeat('1234567890',50000));
-INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-compressed,one-null', NULL, repeat('1234567890',1000));
-INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null', NULL, repeat('1234567890',50000));
--- check whether indirect tuples works on the most basic level
-SELECT descr, substring(make_tuple_indirect(indtoasttest)::text, 1, 200) FROM indtoasttest;
- descr | substring
--------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- two-compressed | (two-compressed,0,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012
- two-toasted | (two-toasted,0,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345
- one-compressed,one-null | ("one-compressed,one-null",0,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- one-toasted,one-null | ("one-toasted,one-null",0,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
--- modification without changing varlenas
-UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200);
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,1,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012
- (two-toasted,1,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345
- ("one-compressed,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
--- modification without modifying assigned value
-UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200);
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,2,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012
- (two-toasted,2,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345
- ("one-compressed,one-null",2,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",2,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
--- modification modifying, but effectively not changing
-UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200);
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,3,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012
- (two-toasted,3,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345
- ("one-compressed,one-null",3,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",3,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
-UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200);
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901
- (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234
- ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
-SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest;
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901
- (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234
- ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
--- check we didn't screw with main/toast tuple visibility
-VACUUM FREEZE indtoasttest;
-SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest;
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901
- (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234
- ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
--- now create a trigger that forces all Datums to be indirect ones
-CREATE FUNCTION update_using_indirect()
- RETURNS trigger
- LANGUAGE plpgsql AS $$
-BEGIN
- NEW := make_tuple_indirect(NEW);
- RETURN NEW;
-END$$;
-CREATE TRIGGER indtoasttest_update_indirect
- BEFORE INSERT OR UPDATE
- ON indtoasttest
- FOR EACH ROW
- EXECUTE PROCEDURE update_using_indirect();
--- modification without changing varlenas
-UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200);
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,5,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901
- (two-toasted,5,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234
- ("one-compressed,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
--- modification without modifying assigned value
-UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200);
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,6,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901
- (two-toasted,6,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234
- ("one-compressed,one-null",6,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",6,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
--- modification modifying, but effectively not changing
-UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200);
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,7,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901
- (two-toasted,7,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234
- ("one-compressed,one-null",7,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",7,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
-UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200);
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
- ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
-(4 rows)
-
-INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL);
-SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest;
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
- ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
- ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
-(5 rows)
-
--- check we didn't screw with main/toast tuple visibility
-VACUUM FREEZE indtoasttest;
-SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest;
- substring
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
- ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
- ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
- ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
-(5 rows)
-
-DROP TABLE indtoasttest;
-DROP FUNCTION update_using_indirect();
-RESET default_toast_compression;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/equivclass.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/equivclass.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/equivclass.out 2024-11-15 02:50:52.434141211 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/equivclass.out 2024-11-15 02:59:17.237115713 +0000
@@ -1,505 +1,2 @@
---
--- Tests for the planner's "equivalence class" mechanism
---
--- One thing that's not tested well during normal querying is the logic
--- for handling "broken" ECs. This is because an EC can only become broken
--- if its underlying btree operator family doesn't include a complete set
--- of cross-type equality operators. There are not (and should not be)
--- any such families built into Postgres; so we have to hack things up
--- to create one. We do this by making two alias types that are really
--- int8 (so we need no new C code) and adding only some operators for them
--- into the standard integer_ops opfamily.
-create type int8alias1;
-create function int8alias1in(cstring) returns int8alias1
- strict immutable language internal as 'int8in';
-NOTICE: return type int8alias1 is only a shell
-create function int8alias1out(int8alias1) returns cstring
- strict immutable language internal as 'int8out';
-NOTICE: argument type int8alias1 is only a shell
-LINE 1: create function int8alias1out(int8alias1) returns cstring
- ^
-create type int8alias1 (
- input = int8alias1in,
- output = int8alias1out,
- like = int8
-);
-create type int8alias2;
-create function int8alias2in(cstring) returns int8alias2
- strict immutable language internal as 'int8in';
-NOTICE: return type int8alias2 is only a shell
-create function int8alias2out(int8alias2) returns cstring
- strict immutable language internal as 'int8out';
-NOTICE: argument type int8alias2 is only a shell
-LINE 1: create function int8alias2out(int8alias2) returns cstring
- ^
-create type int8alias2 (
- input = int8alias2in,
- output = int8alias2out,
- like = int8
-);
-create cast (int8 as int8alias1) without function;
-create cast (int8 as int8alias2) without function;
-create cast (int8alias1 as int8) without function;
-create cast (int8alias2 as int8) without function;
-create function int8alias1eq(int8alias1, int8alias1) returns bool
- strict immutable language internal as 'int8eq';
-create operator = (
- procedure = int8alias1eq,
- leftarg = int8alias1, rightarg = int8alias1,
- commutator = =,
- restrict = eqsel, join = eqjoinsel,
- merges
-);
-alter operator family integer_ops using btree add
- operator 3 = (int8alias1, int8alias1);
-create function int8alias2eq(int8alias2, int8alias2) returns bool
- strict immutable language internal as 'int8eq';
-create operator = (
- procedure = int8alias2eq,
- leftarg = int8alias2, rightarg = int8alias2,
- commutator = =,
- restrict = eqsel, join = eqjoinsel,
- merges
-);
-alter operator family integer_ops using btree add
- operator 3 = (int8alias2, int8alias2);
-create function int8alias1eq(int8, int8alias1) returns bool
- strict immutable language internal as 'int8eq';
-create operator = (
- procedure = int8alias1eq,
- leftarg = int8, rightarg = int8alias1,
- restrict = eqsel, join = eqjoinsel,
- merges
-);
-alter operator family integer_ops using btree add
- operator 3 = (int8, int8alias1);
-create function int8alias1eq(int8alias1, int8alias2) returns bool
- strict immutable language internal as 'int8eq';
-create operator = (
- procedure = int8alias1eq,
- leftarg = int8alias1, rightarg = int8alias2,
- restrict = eqsel, join = eqjoinsel,
- merges
-);
-alter operator family integer_ops using btree add
- operator 3 = (int8alias1, int8alias2);
-create function int8alias1lt(int8alias1, int8alias1) returns bool
- strict immutable language internal as 'int8lt';
-create operator < (
- procedure = int8alias1lt,
- leftarg = int8alias1, rightarg = int8alias1
-);
-alter operator family integer_ops using btree add
- operator 1 < (int8alias1, int8alias1);
-create function int8alias1cmp(int8, int8alias1) returns int
- strict immutable language internal as 'btint8cmp';
-alter operator family integer_ops using btree add
- function 1 int8alias1cmp (int8, int8alias1);
-create table ec0 (ff int8 primary key, f1 int8, f2 int8);
-create table ec1 (ff int8 primary key, f1 int8alias1, f2 int8alias2);
-create table ec2 (xf int8 primary key, x1 int8alias1, x2 int8alias2);
--- for the moment we only want to look at nestloop plans
-set enable_hashjoin = off;
-set enable_mergejoin = off;
---
--- Note that for cases where there's a missing operator, we don't care so
--- much whether the plan is ideal as that we don't fail or generate an
--- outright incorrect plan.
---
-explain (costs off)
- select * from ec0 where ff = f1 and f1 = '42'::int8;
- QUERY PLAN
------------------------------------
- Index Scan using ec0_pkey on ec0
- Index Cond: (ff = '42'::bigint)
- Filter: (f1 = '42'::bigint)
-(3 rows)
-
-explain (costs off)
- select * from ec0 where ff = f1 and f1 = '42'::int8alias1;
- QUERY PLAN
----------------------------------------
- Index Scan using ec0_pkey on ec0
- Index Cond: (ff = '42'::int8alias1)
- Filter: (f1 = '42'::int8alias1)
-(3 rows)
-
-explain (costs off)
- select * from ec1 where ff = f1 and f1 = '42'::int8alias1;
- QUERY PLAN
----------------------------------------
- Index Scan using ec1_pkey on ec1
- Index Cond: (ff = '42'::int8alias1)
- Filter: (f1 = '42'::int8alias1)
-(3 rows)
-
-explain (costs off)
- select * from ec1 where ff = f1 and f1 = '42'::int8alias2;
- QUERY PLAN
----------------------------------------------------
- Seq Scan on ec1
- Filter: ((ff = f1) AND (f1 = '42'::int8alias2))
-(2 rows)
-
-explain (costs off)
- select * from ec1, ec2 where ff = x1 and ff = '42'::int8;
- QUERY PLAN
--------------------------------------------------------------------
- Nested Loop
- Join Filter: (ec1.ff = ec2.x1)
- -> Index Scan using ec1_pkey on ec1
- Index Cond: ((ff = '42'::bigint) AND (ff = '42'::bigint))
- -> Seq Scan on ec2
-(5 rows)
-
-explain (costs off)
- select * from ec1, ec2 where ff = x1 and ff = '42'::int8alias1;
- QUERY PLAN
----------------------------------------------
- Nested Loop
- -> Index Scan using ec1_pkey on ec1
- Index Cond: (ff = '42'::int8alias1)
- -> Seq Scan on ec2
- Filter: (x1 = '42'::int8alias1)
-(5 rows)
-
-explain (costs off)
- select * from ec1, ec2 where ff = x1 and '42'::int8 = x1;
- QUERY PLAN
------------------------------------------
- Nested Loop
- Join Filter: (ec1.ff = ec2.x1)
- -> Index Scan using ec1_pkey on ec1
- Index Cond: (ff = '42'::bigint)
- -> Seq Scan on ec2
- Filter: ('42'::bigint = x1)
-(6 rows)
-
-explain (costs off)
- select * from ec1, ec2 where ff = x1 and x1 = '42'::int8alias1;
- QUERY PLAN
----------------------------------------------
- Nested Loop
- -> Index Scan using ec1_pkey on ec1
- Index Cond: (ff = '42'::int8alias1)
- -> Seq Scan on ec2
- Filter: (x1 = '42'::int8alias1)
-(5 rows)
-
-explain (costs off)
- select * from ec1, ec2 where ff = x1 and x1 = '42'::int8alias2;
- QUERY PLAN
------------------------------------------
- Nested Loop
- -> Seq Scan on ec2
- Filter: (x1 = '42'::int8alias2)
- -> Index Scan using ec1_pkey on ec1
- Index Cond: (ff = ec2.x1)
-(5 rows)
-
-create unique index ec1_expr1 on ec1((ff + 1));
-create unique index ec1_expr2 on ec1((ff + 2 + 1));
-create unique index ec1_expr3 on ec1((ff + 3 + 1));
-create unique index ec1_expr4 on ec1((ff + 4));
-explain (costs off)
- select * from ec1,
- (select ff + 1 as x from
- (select ff + 2 as ff from ec1
- union all
- select ff + 3 as ff from ec1) ss0
- union all
- select ff + 4 as x from ec1) as ss1
- where ss1.x = ec1.f1 and ec1.ff = 42::int8;
- QUERY PLAN
------------------------------------------------------
- Nested Loop
- -> Index Scan using ec1_pkey on ec1
- Index Cond: (ff = '42'::bigint)
- -> Append
- -> Index Scan using ec1_expr2 on ec1 ec1_1
- Index Cond: (((ff + 2) + 1) = ec1.f1)
- -> Index Scan using ec1_expr3 on ec1 ec1_2
- Index Cond: (((ff + 3) + 1) = ec1.f1)
- -> Index Scan using ec1_expr4 on ec1 ec1_3
- Index Cond: ((ff + 4) = ec1.f1)
-(10 rows)
-
-explain (costs off)
- select * from ec1,
- (select ff + 1 as x from
- (select ff + 2 as ff from ec1
- union all
- select ff + 3 as ff from ec1) ss0
- union all
- select ff + 4 as x from ec1) as ss1
- where ss1.x = ec1.f1 and ec1.ff = 42::int8 and ec1.ff = ec1.f1;
- QUERY PLAN
--------------------------------------------------------------------
- Nested Loop
- Join Filter: ((((ec1_1.ff + 2) + 1)) = ec1.f1)
- -> Index Scan using ec1_pkey on ec1
- Index Cond: ((ff = '42'::bigint) AND (ff = '42'::bigint))
- Filter: (ff = f1)
- -> Append
- -> Index Scan using ec1_expr2 on ec1 ec1_1
- Index Cond: (((ff + 2) + 1) = '42'::bigint)
- -> Index Scan using ec1_expr3 on ec1 ec1_2
- Index Cond: (((ff + 3) + 1) = '42'::bigint)
- -> Index Scan using ec1_expr4 on ec1 ec1_3
- Index Cond: ((ff + 4) = '42'::bigint)
-(12 rows)
-
-explain (costs off)
- select * from ec1,
- (select ff + 1 as x from
- (select ff + 2 as ff from ec1
- union all
- select ff + 3 as ff from ec1) ss0
- union all
- select ff + 4 as x from ec1) as ss1,
- (select ff + 1 as x from
- (select ff + 2 as ff from ec1
- union all
- select ff + 3 as ff from ec1) ss0
- union all
- select ff + 4 as x from ec1) as ss2
- where ss1.x = ec1.f1 and ss1.x = ss2.x and ec1.ff = 42::int8;
- QUERY PLAN
----------------------------------------------------------------------
- Nested Loop
- -> Nested Loop
- -> Index Scan using ec1_pkey on ec1
- Index Cond: (ff = '42'::bigint)
- -> Append
- -> Index Scan using ec1_expr2 on ec1 ec1_1
- Index Cond: (((ff + 2) + 1) = ec1.f1)
- -> Index Scan using ec1_expr3 on ec1 ec1_2
- Index Cond: (((ff + 3) + 1) = ec1.f1)
- -> Index Scan using ec1_expr4 on ec1 ec1_3
- Index Cond: ((ff + 4) = ec1.f1)
- -> Append
- -> Index Scan using ec1_expr2 on ec1 ec1_4
- Index Cond: (((ff + 2) + 1) = (((ec1_1.ff + 2) + 1)))
- -> Index Scan using ec1_expr3 on ec1 ec1_5
- Index Cond: (((ff + 3) + 1) = (((ec1_1.ff + 2) + 1)))
- -> Index Scan using ec1_expr4 on ec1 ec1_6
- Index Cond: ((ff + 4) = (((ec1_1.ff + 2) + 1)))
-(18 rows)
-
--- let's try that as a mergejoin
-set enable_mergejoin = on;
-set enable_nestloop = off;
-explain (costs off)
- select * from ec1,
- (select ff + 1 as x from
- (select ff + 2 as ff from ec1
- union all
- select ff + 3 as ff from ec1) ss0
- union all
- select ff + 4 as x from ec1) as ss1,
- (select ff + 1 as x from
- (select ff + 2 as ff from ec1
- union all
- select ff + 3 as ff from ec1) ss0
- union all
- select ff + 4 as x from ec1) as ss2
- where ss1.x = ec1.f1 and ss1.x = ss2.x and ec1.ff = 42::int8;
- QUERY PLAN
------------------------------------------------------------------
- Merge Join
- Merge Cond: ((((ec1_4.ff + 2) + 1)) = (((ec1_1.ff + 2) + 1)))
- -> Merge Append
- Sort Key: (((ec1_4.ff + 2) + 1))
- -> Index Scan using ec1_expr2 on ec1 ec1_4
- -> Index Scan using ec1_expr3 on ec1 ec1_5
- -> Index Scan using ec1_expr4 on ec1 ec1_6
- -> Materialize
- -> Merge Join
- Merge Cond: ((((ec1_1.ff + 2) + 1)) = ec1.f1)
- -> Merge Append
- Sort Key: (((ec1_1.ff + 2) + 1))
- -> Index Scan using ec1_expr2 on ec1 ec1_1
- -> Index Scan using ec1_expr3 on ec1 ec1_2
- -> Index Scan using ec1_expr4 on ec1 ec1_3
- -> Sort
- Sort Key: ec1.f1 USING <
- -> Index Scan using ec1_pkey on ec1
- Index Cond: (ff = '42'::bigint)
-(19 rows)
-
--- check partially indexed scan
-set enable_nestloop = on;
-set enable_mergejoin = off;
-drop index ec1_expr3;
-explain (costs off)
- select * from ec1,
- (select ff + 1 as x from
- (select ff + 2 as ff from ec1
- union all
- select ff + 3 as ff from ec1) ss0
- union all
- select ff + 4 as x from ec1) as ss1
- where ss1.x = ec1.f1 and ec1.ff = 42::int8;
- QUERY PLAN
------------------------------------------------------
- Nested Loop
- -> Index Scan using ec1_pkey on ec1
- Index Cond: (ff = '42'::bigint)
- -> Append
- -> Index Scan using ec1_expr2 on ec1 ec1_1
- Index Cond: (((ff + 2) + 1) = ec1.f1)
- -> Seq Scan on ec1 ec1_2
- Filter: (((ff + 3) + 1) = ec1.f1)
- -> Index Scan using ec1_expr4 on ec1 ec1_3
- Index Cond: ((ff + 4) = ec1.f1)
-(10 rows)
-
--- let's try that as a mergejoin
-set enable_mergejoin = on;
-set enable_nestloop = off;
-explain (costs off)
- select * from ec1,
- (select ff + 1 as x from
- (select ff + 2 as ff from ec1
- union all
- select ff + 3 as ff from ec1) ss0
- union all
- select ff + 4 as x from ec1) as ss1
- where ss1.x = ec1.f1 and ec1.ff = 42::int8;
- QUERY PLAN
------------------------------------------------------
- Merge Join
- Merge Cond: ((((ec1_1.ff + 2) + 1)) = ec1.f1)
- -> Merge Append
- Sort Key: (((ec1_1.ff + 2) + 1))
- -> Index Scan using ec1_expr2 on ec1 ec1_1
- -> Sort
- Sort Key: (((ec1_2.ff + 3) + 1))
- -> Seq Scan on ec1 ec1_2
- -> Index Scan using ec1_expr4 on ec1 ec1_3
- -> Sort
- Sort Key: ec1.f1 USING <
- -> Index Scan using ec1_pkey on ec1
- Index Cond: (ff = '42'::bigint)
-(13 rows)
-
--- check effects of row-level security
-set enable_nestloop = on;
-set enable_mergejoin = off;
-alter table ec1 enable row level security;
-create policy p1 on ec1 using (f1 < '5'::int8alias1);
-create user regress_user_ectest;
-grant select on ec0 to regress_user_ectest;
-grant select on ec1 to regress_user_ectest;
--- without any RLS, we'll treat {a.ff, b.ff, 43} as an EquivalenceClass
-explain (costs off)
- select * from ec0 a, ec1 b
- where a.ff = b.ff and a.ff = 43::bigint::int8alias1;
- QUERY PLAN
----------------------------------------------
- Nested Loop
- -> Index Scan using ec0_pkey on ec0 a
- Index Cond: (ff = '43'::int8alias1)
- -> Index Scan using ec1_pkey on ec1 b
- Index Cond: (ff = '43'::int8alias1)
-(5 rows)
-
-set session authorization regress_user_ectest;
--- with RLS active, the non-leakproof a.ff = 43 clause is not treated
--- as a suitable source for an EquivalenceClass; currently, this is true
--- even though the RLS clause has nothing to do directly with the EC
-explain (costs off)
- select * from ec0 a, ec1 b
- where a.ff = b.ff and a.ff = 43::bigint::int8alias1;
- QUERY PLAN
----------------------------------------------
- Nested Loop
- -> Index Scan using ec0_pkey on ec0 a
- Index Cond: (ff = '43'::int8alias1)
- -> Index Scan using ec1_pkey on ec1 b
- Index Cond: (ff = a.ff)
- Filter: (f1 < '5'::int8alias1)
-(6 rows)
-
-reset session authorization;
-revoke select on ec0 from regress_user_ectest;
-revoke select on ec1 from regress_user_ectest;
-drop user regress_user_ectest;
--- check that X=X is converted to X IS NOT NULL when appropriate
-explain (costs off)
- select * from tenk1 where unique1 = unique1 and unique2 = unique2;
- QUERY PLAN
--------------------------------------------------------------
- Seq Scan on tenk1
- Filter: ((unique1 IS NOT NULL) AND (unique2 IS NOT NULL))
-(2 rows)
-
--- this could be converted, but isn't at present
-explain (costs off)
- select * from tenk1 where unique1 = unique1 or unique2 = unique2;
- QUERY PLAN
---------------------------------------------------------
- Seq Scan on tenk1
- Filter: ((unique1 = unique1) OR (unique2 = unique2))
-(2 rows)
-
--- check that we recognize equivalence with dummy domains in the way
-create temp table undername (f1 name, f2 int);
-create temp view overview as
- select f1::information_schema.sql_identifier as sqli, f2 from undername;
-explain (costs off) -- this should not require a sort
- select * from overview where sqli = 'foo' order by sqli;
- QUERY PLAN
-------------------------------
- Seq Scan on undername
- Filter: (f1 = 'foo'::name)
-(2 rows)
-
---
--- test handling of merge/hash clauses that do not have valid commutators
---
--- There are not (and should not be) any such operators built into Postgres
--- that are mergejoinable or hashable but have no commutators; so we leverage
--- the alias type 'int8alias1' created in this file to conduct the tests.
--- That's why this test is included here rather than in join.sql.
-begin;
-create table tbl_nocom(a int8, b int8alias1);
--- check that non-commutable merge clauses do not lead to error
-set enable_hashjoin to off;
-set enable_mergejoin to on;
-explain (costs off)
-select * from tbl_nocom t1 full join tbl_nocom t2 on t2.a = t1.b;
- QUERY PLAN
---------------------------------------
- Merge Full Join
- Merge Cond: (t2.a = t1.b)
- -> Sort
- Sort Key: t2.a
- -> Seq Scan on tbl_nocom t2
- -> Sort
- Sort Key: t1.b USING <
- -> Seq Scan on tbl_nocom t1
-(8 rows)
-
--- check that non-commutable hash clauses do not lead to error
-alter operator = (int8, int8alias1) set (hashes);
-alter operator family integer_ops using hash add
- operator 1 = (int8, int8alias1);
-create function hashint8alias1(int8alias1) returns int
- strict immutable language internal as 'hashint8';
-alter operator family integer_ops using hash add
- function 1 hashint8alias1(int8alias1);
-set enable_hashjoin to on;
-set enable_mergejoin to off;
-explain (costs off)
-select * from tbl_nocom t1 full join tbl_nocom t2 on t2.a = t1.b;
- QUERY PLAN
---------------------------------------
- Hash Full Join
- Hash Cond: (t2.a = t1.b)
- -> Seq Scan on tbl_nocom t2
- -> Hash
- -> Seq Scan on tbl_nocom t1
-(5 rows)
-
-abort;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/json.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/json.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/json.out 2024-11-15 02:50:52.458101712 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/json.out 2024-11-15 02:59:17.849116529 +0000
@@ -1,2718 +1,2 @@
--- Strings.
-SELECT '""'::json; -- OK.
- json
-------
- ""
-(1 row)
-
-SELECT $$''$$::json; -- ERROR, single quotes are not allowed
-ERROR: invalid input syntax for type json
-LINE 1: SELECT $$''$$::json;
- ^
-DETAIL: Token "'" is invalid.
-CONTEXT: JSON data, line 1: '...
-SELECT '"abc"'::json; -- OK
- json
--------
- "abc"
-(1 row)
-
-SELECT '"abc'::json; -- ERROR, quotes not closed
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '"abc'::json;
- ^
-DETAIL: Token ""abc" is invalid.
-CONTEXT: JSON data, line 1: "abc
-SELECT '"abc
-def"'::json; -- ERROR, unescaped newline in string constant
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '"abc
- ^
-DETAIL: Character with value 0x0a must be escaped.
-CONTEXT: JSON data, line 1: "abc
-SELECT '"\n\"\\"'::json; -- OK, legal escapes
- json
-----------
- "\n\"\\"
-(1 row)
-
-SELECT '"\v"'::json; -- ERROR, not a valid JSON escape
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '"\v"'::json;
- ^
-DETAIL: Escape sequence "\v" is invalid.
-CONTEXT: JSON data, line 1: "\v...
--- Check fast path for longer strings (at least 16 bytes long)
-SELECT ('"'||repeat('.', 12)||'abc"')::json; -- OK
- json
--------------------
- "............abc"
-(1 row)
-
-SELECT ('"'||repeat('.', 12)||'abc\n"')::json; -- OK, legal escapes
- json
----------------------
- "............abc\n"
-(1 row)
-
--- Test various lengths of strings to validate SIMD processing to escape
--- special chars in the JSON.
-SELECT row_to_json(j)::jsonb FROM (
- SELECT left(E'abcdefghijklmnopqrstuv"\twxyz012345678', a) AS a
- FROM generate_series(0,37) a
-) j;
- row_to_json
---------------------------------------------------
- {"a": ""}
- {"a": "a"}
- {"a": "ab"}
- {"a": "abc"}
- {"a": "abcd"}
- {"a": "abcde"}
- {"a": "abcdef"}
- {"a": "abcdefg"}
- {"a": "abcdefgh"}
- {"a": "abcdefghi"}
- {"a": "abcdefghij"}
- {"a": "abcdefghijk"}
- {"a": "abcdefghijkl"}
- {"a": "abcdefghijklm"}
- {"a": "abcdefghijklmn"}
- {"a": "abcdefghijklmno"}
- {"a": "abcdefghijklmnop"}
- {"a": "abcdefghijklmnopq"}
- {"a": "abcdefghijklmnopqr"}
- {"a": "abcdefghijklmnopqrs"}
- {"a": "abcdefghijklmnopqrst"}
- {"a": "abcdefghijklmnopqrstu"}
- {"a": "abcdefghijklmnopqrstuv"}
- {"a": "abcdefghijklmnopqrstuv\""}
- {"a": "abcdefghijklmnopqrstuv\"\t"}
- {"a": "abcdefghijklmnopqrstuv\"\tw"}
- {"a": "abcdefghijklmnopqrstuv\"\twx"}
- {"a": "abcdefghijklmnopqrstuv\"\twxy"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz0"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz01"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz012"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz0123"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz01234"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz012345"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz0123456"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz01234567"}
- {"a": "abcdefghijklmnopqrstuv\"\twxyz012345678"}
-(38 rows)
-
--- see json_encoding test for input with unicode escapes
--- Numbers.
-SELECT '1'::json; -- OK
- json
-------
- 1
-(1 row)
-
-SELECT '0'::json; -- OK
- json
-------
- 0
-(1 row)
-
-SELECT '01'::json; -- ERROR, not valid according to JSON spec
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '01'::json;
- ^
-DETAIL: Token "01" is invalid.
-CONTEXT: JSON data, line 1: 01
-SELECT '0.1'::json; -- OK
- json
-------
- 0.1
-(1 row)
-
-SELECT '9223372036854775808'::json; -- OK, even though it's too large for int8
- json
----------------------
- 9223372036854775808
-(1 row)
-
-SELECT '1e100'::json; -- OK
- json
--------
- 1e100
-(1 row)
-
-SELECT '1.3e100'::json; -- OK
- json
----------
- 1.3e100
-(1 row)
-
-SELECT '1f2'::json; -- ERROR
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '1f2'::json;
- ^
-DETAIL: Token "1f2" is invalid.
-CONTEXT: JSON data, line 1: 1f2
-SELECT '0.x1'::json; -- ERROR
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '0.x1'::json;
- ^
-DETAIL: Token "0.x1" is invalid.
-CONTEXT: JSON data, line 1: 0.x1
-SELECT '1.3ex100'::json; -- ERROR
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '1.3ex100'::json;
- ^
-DETAIL: Token "1.3ex100" is invalid.
-CONTEXT: JSON data, line 1: 1.3ex100
--- Arrays.
-SELECT '[]'::json; -- OK
- json
-------
- []
-(1 row)
-
-SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::json; -- OK
- json
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
-(1 row)
-
-SELECT '[1,2]'::json; -- OK
- json
--------
- [1,2]
-(1 row)
-
-SELECT '[1,2,]'::json; -- ERROR, trailing comma
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '[1,2,]'::json;
- ^
-DETAIL: Expected JSON value, but found "]".
-CONTEXT: JSON data, line 1: [1,2,]
-SELECT '[1,2'::json; -- ERROR, no closing bracket
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '[1,2'::json;
- ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1: [1,2
-SELECT '[1,[2]'::json; -- ERROR, no closing bracket
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '[1,[2]'::json;
- ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1: [1,[2]
--- Objects.
-SELECT '{}'::json; -- OK
- json
-------
- {}
-(1 row)
-
-SELECT '{"abc"}'::json; -- ERROR, no value
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc"}'::json;
- ^
-DETAIL: Expected ":", but found "}".
-CONTEXT: JSON data, line 1: {"abc"}
-SELECT '{"abc":1}'::json; -- OK
- json
------------
- {"abc":1}
-(1 row)
-
-SELECT '{1:"abc"}'::json; -- ERROR, keys must be strings
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{1:"abc"}'::json;
- ^
-DETAIL: Expected string or "}", but found "1".
-CONTEXT: JSON data, line 1: {1...
-SELECT '{"abc",1}'::json; -- ERROR, wrong separator
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc",1}'::json;
- ^
-DETAIL: Expected ":", but found ",".
-CONTEXT: JSON data, line 1: {"abc",...
-SELECT '{"abc"=1}'::json; -- ERROR, totally wrong separator
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc"=1}'::json;
- ^
-DETAIL: Token "=" is invalid.
-CONTEXT: JSON data, line 1: {"abc"=...
-SELECT '{"abc"::1}'::json; -- ERROR, another wrong separator
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc"::1}'::json;
- ^
-DETAIL: Expected JSON value, but found ":".
-CONTEXT: JSON data, line 1: {"abc"::...
-SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::json; -- OK
- json
----------------------------------------------------------
- {"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}
-(1 row)
-
-SELECT '{"abc":1:2}'::json; -- ERROR, colon in wrong spot
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc":1:2}'::json;
- ^
-DETAIL: Expected "," or "}", but found ":".
-CONTEXT: JSON data, line 1: {"abc":1:...
-SELECT '{"abc":1,3}'::json; -- ERROR, no value
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc":1,3}'::json;
- ^
-DETAIL: Expected string, but found "3".
-CONTEXT: JSON data, line 1: {"abc":1,3...
--- Recursion.
-SET max_stack_depth = '100kB';
-SELECT repeat('[', 10000)::json;
-ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
-SELECT repeat('{"a":', 10000)::json;
-ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
-RESET max_stack_depth;
--- Miscellaneous stuff.
-SELECT 'true'::json; -- OK
- json
-------
- true
-(1 row)
-
-SELECT 'false'::json; -- OK
- json
--------
- false
-(1 row)
-
-SELECT 'null'::json; -- OK
- json
-------
- null
-(1 row)
-
-SELECT ' true '::json; -- OK, even with extra whitespace
- json
---------
- true
-(1 row)
-
-SELECT 'true false'::json; -- ERROR, too many values
-ERROR: invalid input syntax for type json
-LINE 1: SELECT 'true false'::json;
- ^
-DETAIL: Expected end of input, but found "false".
-CONTEXT: JSON data, line 1: true false
-SELECT 'true, false'::json; -- ERROR, too many values
-ERROR: invalid input syntax for type json
-LINE 1: SELECT 'true, false'::json;
- ^
-DETAIL: Expected end of input, but found ",".
-CONTEXT: JSON data, line 1: true,...
-SELECT 'truf'::json; -- ERROR, not a keyword
-ERROR: invalid input syntax for type json
-LINE 1: SELECT 'truf'::json;
- ^
-DETAIL: Token "truf" is invalid.
-CONTEXT: JSON data, line 1: truf
-SELECT 'trues'::json; -- ERROR, not a keyword
-ERROR: invalid input syntax for type json
-LINE 1: SELECT 'trues'::json;
- ^
-DETAIL: Token "trues" is invalid.
-CONTEXT: JSON data, line 1: trues
-SELECT ''::json; -- ERROR, no value
-ERROR: invalid input syntax for type json
-LINE 1: SELECT ''::json;
- ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1:
-SELECT ' '::json; -- ERROR, no value
-ERROR: invalid input syntax for type json
-LINE 1: SELECT ' '::json;
- ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1:
--- Multi-line JSON input to check ERROR reporting
-SELECT '{
- "one": 1,
- "two":"two",
- "three":
- true}'::json; -- OK
- json
-------------------------------
- { +
- "one": 1, +
- "two":"two",+
- "three": +
- true}
-(1 row)
-
-SELECT '{
- "one": 1,
- "two":,"two", -- ERROR extraneous comma before field "two"
- "three":
- true}'::json;
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{
- ^
-DETAIL: Expected JSON value, but found ",".
-CONTEXT: JSON data, line 3: "two":,...
-SELECT '{
- "one": 1,
- "two":"two",
- "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::json;
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{
- ^
-DETAIL: Expected JSON value, but found "}".
-CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":}
--- ERROR missing value for last field
--- test non-error-throwing input
-select pg_input_is_valid('{"a":true}', 'json');
- pg_input_is_valid
--------------------
- t
-(1 row)
-
-select pg_input_is_valid('{"a":true', 'json');
- pg_input_is_valid
--------------------
- f
-(1 row)
-
-select * from pg_input_error_info('{"a":true', 'json');
- message | detail | hint | sql_error_code
-------------------------------------+--------------------------------------+------+----------------
- invalid input syntax for type json | The input string ended unexpectedly. | | 22P02
-(1 row)
-
---constructors
--- array_to_json
-SELECT array_to_json(array(select 1 as a));
- array_to_json
----------------
- [1]
-(1 row)
-
-SELECT array_to_json(array_agg(q),false) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
- array_to_json
----------------------------------------------
- [{"b":1,"c":2},{"b":2,"c":4},{"b":3,"c":6}]
-(1 row)
-
-SELECT array_to_json(array_agg(q),true) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
- array_to_json
------------------
- [{"b":1,"c":2},+
- {"b":2,"c":4},+
- {"b":3,"c":6}]
-(1 row)
-
-SELECT array_to_json(array_agg(q),false)
- FROM ( SELECT $$a$$ || x AS b, y AS c,
- ARRAY[ROW(x.*,ARRAY[1,2,3]),
- ROW(y.*,ARRAY[4,5,6])] AS z
- FROM generate_series(1,2) x,
- generate_series(4,5) y) q;
- array_to_json
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]},{"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}]
-(1 row)
-
-SELECT array_to_json(array_agg(x),false) from generate_series(5,10) x;
- array_to_json
-----------------
- [5,6,7,8,9,10]
-(1 row)
-
-SELECT array_to_json('{{1,5},{99,100}}'::int[]);
- array_to_json
-------------------
- [[1,5],[99,100]]
-(1 row)
-
--- row_to_json
-SELECT row_to_json(row(1,'foo'));
- row_to_json
----------------------
- {"f1":1,"f2":"foo"}
-(1 row)
-
-SELECT row_to_json(q)
-FROM (SELECT $$a$$ || x AS b,
- y AS c,
- ARRAY[ROW(x.*,ARRAY[1,2,3]),
- ROW(y.*,ARRAY[4,5,6])] AS z
- FROM generate_series(1,2) x,
- generate_series(4,5) y) q;
- row_to_json
---------------------------------------------------------------------
- {"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
- {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
- {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
- {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
-(4 rows)
-
-SELECT row_to_json(q,true)
-FROM (SELECT $$a$$ || x AS b,
- y AS c,
- ARRAY[ROW(x.*,ARRAY[1,2,3]),
- ROW(y.*,ARRAY[4,5,6])] AS z
- FROM generate_series(1,2) x,
- generate_series(4,5) y) q;
- row_to_json
------------------------------------------------------
- {"b":"a1", +
- "c":4, +
- "z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
- {"b":"a1", +
- "c":5, +
- "z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
- {"b":"a2", +
- "c":4, +
- "z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
- {"b":"a2", +
- "c":5, +
- "z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
-(4 rows)
-
-CREATE TEMP TABLE rows AS
-SELECT x, 'txt' || x as y
-FROM generate_series(1,3) AS x;
-SELECT row_to_json(q,true)
-FROM rows q;
- row_to_json
---------------
- {"x":1, +
- "y":"txt1"}
- {"x":2, +
- "y":"txt2"}
- {"x":3, +
- "y":"txt3"}
-(3 rows)
-
-SELECT row_to_json(row((select array_agg(x) as d from generate_series(5,10) x)),false);
- row_to_json
------------------------
- {"f1":[5,6,7,8,9,10]}
-(1 row)
-
--- anyarray column
-analyze rows;
-select attname, to_json(histogram_bounds) histogram_bounds
-from pg_stats
-where tablename = 'rows' and
- schemaname = pg_my_temp_schema()::regnamespace::text
-order by 1;
- attname | histogram_bounds
----------+------------------------
- x | [1,2,3]
- y | ["txt1","txt2","txt3"]
-(2 rows)
-
--- to_json, timestamps
-select to_json(timestamp '2014-05-28 12:22:35.614298');
- to_json
-------------------------------
- "2014-05-28T12:22:35.614298"
-(1 row)
-
-BEGIN;
-SET LOCAL TIME ZONE 10.5;
-select to_json(timestamptz '2014-05-28 12:22:35.614298-04');
- to_json
-------------------------------------
- "2014-05-29T02:52:35.614298+10:30"
-(1 row)
-
-SET LOCAL TIME ZONE -8;
-select to_json(timestamptz '2014-05-28 12:22:35.614298-04');
- to_json
-------------------------------------
- "2014-05-28T08:22:35.614298-08:00"
-(1 row)
-
-COMMIT;
-select to_json(date '2014-05-28');
- to_json
---------------
- "2014-05-28"
-(1 row)
-
-select to_json(date 'Infinity');
- to_json
-------------
- "infinity"
-(1 row)
-
-select to_json(date '-Infinity');
- to_json
--------------
- "-infinity"
-(1 row)
-
-select to_json(timestamp 'Infinity');
- to_json
-------------
- "infinity"
-(1 row)
-
-select to_json(timestamp '-Infinity');
- to_json
--------------
- "-infinity"
-(1 row)
-
-select to_json(timestamptz 'Infinity');
- to_json
-------------
- "infinity"
-(1 row)
-
-select to_json(timestamptz '-Infinity');
- to_json
--------------
- "-infinity"
-(1 row)
-
---json_agg
-SELECT json_agg(q)
- FROM ( SELECT $$a$$ || x AS b, y AS c,
- ARRAY[ROW(x.*,ARRAY[1,2,3]),
- ROW(y.*,ARRAY[4,5,6])] AS z
- FROM generate_series(1,2) x,
- generate_series(4,5) y) q;
- json_agg
------------------------------------------------------------------------
- [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, +
- {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}, +
- {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, +
- {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}]
-(1 row)
-
-SELECT json_agg(q ORDER BY x, y)
- FROM rows q;
- json_agg
------------------------
- [{"x":1,"y":"txt1"}, +
- {"x":2,"y":"txt2"}, +
- {"x":3,"y":"txt3"}]
-(1 row)
-
-UPDATE rows SET x = NULL WHERE x = 1;
-SELECT json_agg(q ORDER BY x NULLS FIRST, y)
- FROM rows q;
- json_agg
---------------------------
- [{"x":null,"y":"txt1"}, +
- {"x":2,"y":"txt2"}, +
- {"x":3,"y":"txt3"}]
-(1 row)
-
--- non-numeric output
-SELECT row_to_json(q)
-FROM (SELECT 'NaN'::float8 AS "float8field") q;
- row_to_json
------------------------
- {"float8field":"NaN"}
-(1 row)
-
-SELECT row_to_json(q)
-FROM (SELECT 'Infinity'::float8 AS "float8field") q;
- row_to_json
-----------------------------
- {"float8field":"Infinity"}
-(1 row)
-
-SELECT row_to_json(q)
-FROM (SELECT '-Infinity'::float8 AS "float8field") q;
- row_to_json
------------------------------
- {"float8field":"-Infinity"}
-(1 row)
-
--- json input
-SELECT row_to_json(q)
-FROM (SELECT '{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}'::json AS "jsonfield") q;
- row_to_json
-------------------------------------------------------------------
- {"jsonfield":{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}}
-(1 row)
-
--- json extraction functions
-CREATE TEMP TABLE test_json (
- json_type text,
- test_json json
-);
-INSERT INTO test_json VALUES
-('scalar','"a scalar"'),
-('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
-('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
-SELECT test_json -> 'x'
-FROM test_json
-WHERE json_type = 'scalar';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json -> 'x'
-FROM test_json
-WHERE json_type = 'array';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json -> 'x'
-FROM test_json
-WHERE json_type = 'object';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json->'field2'
-FROM test_json
-WHERE json_type = 'object';
- ?column?
-----------
- "val2"
-(1 row)
-
-SELECT test_json->>'field2'
-FROM test_json
-WHERE json_type = 'object';
- ?column?
-----------
- val2
-(1 row)
-
-SELECT test_json -> 2
-FROM test_json
-WHERE json_type = 'scalar';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json -> 2
-FROM test_json
-WHERE json_type = 'array';
- ?column?
-----------
- "two"
-(1 row)
-
-SELECT test_json -> -1
-FROM test_json
-WHERE json_type = 'array';
- ?column?
-----------
- {"f1":9}
-(1 row)
-
-SELECT test_json -> 2
-FROM test_json
-WHERE json_type = 'object';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json->>2
-FROM test_json
-WHERE json_type = 'array';
- ?column?
-----------
- two
-(1 row)
-
-SELECT test_json ->> 6 FROM test_json WHERE json_type = 'array';
- ?column?
-----------
- [1,2,3]
-(1 row)
-
-SELECT test_json ->> 7 FROM test_json WHERE json_type = 'array';
- ?column?
-----------
- {"f1":9}
-(1 row)
-
-SELECT test_json ->> 'field4' FROM test_json WHERE json_type = 'object';
- ?column?
-----------
- 4
-(1 row)
-
-SELECT test_json ->> 'field5' FROM test_json WHERE json_type = 'object';
- ?column?
-----------
- [1,2,3]
-(1 row)
-
-SELECT test_json ->> 'field6' FROM test_json WHERE json_type = 'object';
- ?column?
-----------
- {"f1":9}
-(1 row)
-
-SELECT json_object_keys(test_json)
-FROM test_json
-WHERE json_type = 'scalar';
-ERROR: cannot call json_object_keys on a scalar
-SELECT json_object_keys(test_json)
-FROM test_json
-WHERE json_type = 'array';
-ERROR: cannot call json_object_keys on an array
-SELECT json_object_keys(test_json)
-FROM test_json
-WHERE json_type = 'object';
- json_object_keys
-------------------
- field1
- field2
- field3
- field4
- field5
- field6
-(6 rows)
-
--- test extending object_keys resultset - initial resultset size is 256
-select count(*) from
- (select json_object_keys(json_object(array_agg(g)))
- from (select unnest(array['f'||n,n::text])as g
- from generate_series(1,300) as n) x ) y;
- count
--------
- 300
-(1 row)
-
--- nulls
-select (test_json->'field3') is null as expect_false
-from test_json
-where json_type = 'object';
- expect_false
---------------
- f
-(1 row)
-
-select (test_json->>'field3') is null as expect_true
-from test_json
-where json_type = 'object';
- expect_true
--------------
- t
-(1 row)
-
-select (test_json->3) is null as expect_false
-from test_json
-where json_type = 'array';
- expect_false
---------------
- f
-(1 row)
-
-select (test_json->>3) is null as expect_true
-from test_json
-where json_type = 'array';
- expect_true
--------------
- t
-(1 row)
-
--- corner cases
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::text;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::int;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 1;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> -1;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> '';
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json -> 1;
- ?column?
--------------
- {"b": "cc"}
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json -> 3;
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json -> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": "c", "b": null}'::json -> 'b';
- ?column?
-----------
- null
-(1 row)
-
-select '"foo"'::json -> 1;
- ?column?
-----------
-
-(1 row)
-
-select '"foo"'::json -> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::text;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::int;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 1;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> '';
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json ->> 1;
- ?column?
--------------
- {"b": "cc"}
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json ->> 3;
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json ->> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": "c", "b": null}'::json ->> 'b';
- ?column?
-----------
-
-(1 row)
-
-select '"foo"'::json ->> 1;
- ?column?
-----------
-
-(1 row)
-
-select '"foo"'::json ->> 'z';
- ?column?
-----------
-
-(1 row)
-
--- array length
-SELECT json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]');
- json_array_length
--------------------
- 5
-(1 row)
-
-SELECT json_array_length('[]');
- json_array_length
--------------------
- 0
-(1 row)
-
-SELECT json_array_length('{"f1":1,"f2":[5,6]}');
-ERROR: cannot get array length of a non-array
-SELECT json_array_length('4');
-ERROR: cannot get array length of a scalar
--- each
-select json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}');
- json_each
--------------------
- (f1,"[1,2,3]")
- (f2,"{""f3"":1}")
- (f4,null)
-(3 rows)
-
-select * from json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
- key | value
------+-----------
- f1 | [1,2,3]
- f2 | {"f3":1}
- f4 | null
- f5 | 99
- f6 | "stringy"
-(5 rows)
-
-select json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}');
- json_each_text
--------------------
- (f1,"[1,2,3]")
- (f2,"{""f3"":1}")
- (f4,)
- (f5,null)
-(4 rows)
-
-select * from json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
- key | value
------+----------
- f1 | [1,2,3]
- f2 | {"f3":1}
- f4 |
- f5 | 99
- f6 | stringy
-(5 rows)
-
--- extract_path, extract_path_as_text
-select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
- json_extract_path
--------------------
- "stringy"
-(1 row)
-
-select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
- json_extract_path
--------------------
- {"f3":1}
-(1 row)
-
-select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
- json_extract_path
--------------------
- "f3"
-(1 row)
-
-select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
- json_extract_path
--------------------
- 1
-(1 row)
-
-select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
- json_extract_path_text
-------------------------
- stringy
-(1 row)
-
-select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
- json_extract_path_text
-------------------------
- {"f3":1}
-(1 row)
-
-select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
- json_extract_path_text
-------------------------
- f3
-(1 row)
-
-select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
- json_extract_path_text
-------------------------
- 1
-(1 row)
-
--- extract_path nulls
-select json_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_false;
- expect_false
---------------
- f
-(1 row)
-
-select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_true;
- expect_true
--------------
- t
-(1 row)
-
-select json_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_false;
- expect_false
---------------
- f
-(1 row)
-
-select json_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_true;
- expect_true
--------------
- t
-(1 row)
-
--- extract_path operators
-select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f4','f6'];
- ?column?
------------
- "stringy"
-(1 row)
-
-select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2'];
- ?column?
-----------
- {"f3":1}
-(1 row)
-
-select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','0'];
- ?column?
-----------
- "f3"
-(1 row)
-
-select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','1'];
- ?column?
-----------
- 1
-(1 row)
-
-select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f4','f6'];
- ?column?
-----------
- stringy
-(1 row)
-
-select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2'];
- ?column?
-----------
- {"f3":1}
-(1 row)
-
-select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','0'];
- ?column?
-----------
- f3
-(1 row)
-
-select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','1'];
- ?column?
-----------
- 1
-(1 row)
-
--- corner cases for same
-select '{"a": {"b":{"c": "foo"}}}'::json #> '{}';
- ?column?
----------------------------
- {"a": {"b":{"c": "foo"}}}
-(1 row)
-
-select '[1,2,3]'::json #> '{}';
- ?column?
-----------
- [1,2,3]
-(1 row)
-
-select '"foo"'::json #> '{}';
- ?column?
-----------
- "foo"
-(1 row)
-
-select '42'::json #> '{}';
- ?column?
-----------
- 42
-(1 row)
-
-select 'null'::json #> '{}';
- ?column?
-----------
- null
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a'];
- ?column?
---------------------
- {"b":{"c": "foo"}}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', null];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', ''];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b'];
- ?column?
---------------
- {"c": "foo"}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c'];
- ?column?
-----------
- "foo"
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c','d'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','z','c'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','1','b'];
- ?column?
-----------
- "cc"
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','z','b'];
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json #> array['1','b'];
- ?column?
-----------
- "cc"
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json #> array['z','b'];
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": null}]'::json #> array['1','b'];
- ?column?
-----------
- null
-(1 row)
-
-select '"foo"'::json #> array['z'];
- ?column?
-----------
-
-(1 row)
-
-select '42'::json #> array['f2'];
- ?column?
-----------
-
-(1 row)
-
-select '42'::json #> array['0'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> '{}';
- ?column?
----------------------------
- {"a": {"b":{"c": "foo"}}}
-(1 row)
-
-select '[1,2,3]'::json #>> '{}';
- ?column?
-----------
- [1,2,3]
-(1 row)
-
-select '"foo"'::json #>> '{}';
- ?column?
-----------
- foo
-(1 row)
-
-select '42'::json #>> '{}';
- ?column?
-----------
- 42
-(1 row)
-
-select 'null'::json #>> '{}';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a'];
- ?column?
---------------------
- {"b":{"c": "foo"}}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', null];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', ''];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b'];
- ?column?
---------------
- {"c": "foo"}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c'];
- ?column?
-----------
- foo
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c','d'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','z','c'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','1','b'];
- ?column?
-----------
- cc
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','z','b'];
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['1','b'];
- ?column?
-----------
- cc
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['z','b'];
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": null}]'::json #>> array['1','b'];
- ?column?
-----------
-
-(1 row)
-
-select '"foo"'::json #>> array['z'];
- ?column?
-----------
-
-(1 row)
-
-select '42'::json #>> array['f2'];
- ?column?
-----------
-
-(1 row)
-
-select '42'::json #>> array['0'];
- ?column?
-----------
-
-(1 row)
-
--- array_elements
-select json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
- json_array_elements
------------------------
- 1
- true
- [1,[2,3]]
- null
- {"f1":1,"f2":[7,8,9]}
- false
- "stringy"
-(7 rows)
-
-select * from json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
- value
------------------------
- 1
- true
- [1,[2,3]]
- null
- {"f1":1,"f2":[7,8,9]}
- false
- "stringy"
-(7 rows)
-
-select json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
- json_array_elements_text
---------------------------
- 1
- true
- [1,[2,3]]
-
- {"f1":1,"f2":[7,8,9]}
- false
- stringy
-(7 rows)
-
-select * from json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
- value
------------------------
- 1
- true
- [1,[2,3]]
-
- {"f1":1,"f2":[7,8,9]}
- false
- stringy
-(7 rows)
-
--- populate_record
-create type jpop as (a text, b int, c timestamp);
-CREATE DOMAIN js_int_not_null AS int NOT NULL;
-CREATE DOMAIN js_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3);
-CREATE DOMAIN js_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3);
-create type j_unordered_pair as (x int, y int);
-create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y);
-CREATE TYPE jsrec AS (
- i int,
- ia _int4,
- ia1 int[],
- ia2 int[][],
- ia3 int[][][],
- ia1d js_int_array_1d,
- ia2d js_int_array_2d,
- t text,
- ta text[],
- c char(10),
- ca char(10)[],
- ts timestamp,
- js json,
- jsb jsonb,
- jsa json[],
- rec jpop,
- reca jpop[]
-);
-CREATE TYPE jsrec_i_not_null AS (
- i js_int_not_null
-);
-select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q;
- a | b | c
---------+---+---
- blurfl | |
-(1 row)
-
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q;
- a | b | c
---------+---+--------------------------
- blurfl | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q;
- a | b | c
---------+---+---
- blurfl | |
-(1 row)
-
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q;
- a | b | c
---------+---+--------------------------
- blurfl | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-select * from json_populate_record(null::jpop,'{"a":[100,200,false],"x":43.2}') q;
- a | b | c
------------------+---+---
- [100,200,false] | |
-(1 row)
-
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":[100,200,false],"x":43.2}') q;
- a | b | c
------------------+---+--------------------------
- [100,200,false] | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"c":[100,200,false],"x":43.2}') q;
-ERROR: invalid input syntax for type timestamp: "[100,200,false]"
-select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{}') q;
- a | b | c
----+---+--------------------------
- x | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"x": 43.2}') q;
-ERROR: domain js_int_not_null does not allow null values
-SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": null}') q;
-ERROR: domain js_int_not_null does not allow null values
-SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": 12345}') q;
- i
--------
- 12345
-(1 row)
-
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": null}') q;
- ia
-----
-
-(1 row)
-
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ia".
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [1, "2", null, 4]}') q;
- ia
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1, 2], [3, 4]]}') q;
- ia
----------------
- {{1,2},{3,4}}
-(1 row)
-
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], 2]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ia".
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], [2, 3]]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": "{1,2,3}"}') q;
- ia
----------
- {1,2,3}
-(1 row)
-
-SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": null}') q;
- ia1
------
-
-(1 row)
-
-SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ia1".
-SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [1, "2", null, 4]}') q;
- ia1
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [[1, 2, 3]]}') q;
- ia1
------------
- {{1,2,3}}
-(1 row)
-
-SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": null}') q;
- ia1d
-------
-
-(1 row)
-
-SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ia1d".
-SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null, 4]}') q;
-ERROR: value for domain js_int_array_1d violates check constraint "js_int_array_1d_check"
-SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null]}') q;
- ia1d
-------------
- {1,2,NULL}
-(1 row)
-
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [1, "2", null, 4]}') q;
- ia2
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [null, 4]]}') q;
- ia2
-------------------
- {{1,2},{NULL,4}}
-(1 row)
-
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[], []]}') q;
- ia2
------
- {}
-(1 row)
-
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [3]]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], 3, 4]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ia2".
-SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2"], [null, 4]]}') q;
-ERROR: value for domain js_int_array_2d violates check constraint "js_int_array_2d_check"
-SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q;
- ia2d
-----------------------
- {{1,2,3},{NULL,5,6}}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [1, "2", null, 4]}') q;
- ia3
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [[1, 2], [null, 4]]}') q;
- ia3
-------------------
- {{1,2},{NULL,4}}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q;
- ia3
------
- {}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q;
- ia3
--------------------
- {{{1,2}},{{3,4}}}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q;
- ia3
--------------------------------
- {{{1,2},{3,4}},{{5,6},{7,8}}}
-(1 row)
-
-SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": null}') q;
- ta
-----
-
-(1 row)
-
-SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ta".
-SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [1, "2", null, 4]}') q;
- ta
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ta".
-SELECT c FROM json_populate_record(NULL::jsrec, '{"c": null}') q;
- c
----
-
-(1 row)
-
-SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaa"}') q;
- c
-------------
- aaa
-(1 row)
-
-SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaa"}') q;
- c
-------------
- aaaaaaaaaa
-(1 row)
-
-SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaaaaa"}') q;
-ERROR: value too long for type character(10)
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": null}') q;
- ca
-----
-
-(1 row)
-
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ca".
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [1, "2", null, 4]}') q;
- ca
------------------------------------------------
- {"1 ","2 ",NULL,"4 "}
-(1 row)
-
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q;
-ERROR: value too long for type character(10)
-SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ca".
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": null}') q;
- js
-----
-
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": true}') q;
- js
-------
- true
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": 123.45}') q;
- js
---------
- 123.45
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "123.45"}') q;
- js
-----------
- "123.45"
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "abc"}') q;
- js
--------
- "abc"
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": [123, "123", null, {"key": "value"}]}') q;
- js
---------------------------------------
- [123, "123", null, {"key": "value"}]
-(1 row)
-
-SELECT js FROM json_populate_record(NULL::jsrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q;
- js
---------------------------------------
- {"a": "bbb", "b": null, "c": 123.45}
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": null}') q;
- jsb
------
-
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": true}') q;
- jsb
-------
- true
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": 123.45}') q;
- jsb
---------
- 123.45
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "123.45"}') q;
- jsb
-----------
- "123.45"
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "abc"}') q;
- jsb
--------
- "abc"
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q;
- jsb
---------------------------------------
- [123, "123", null, {"key": "value"}]
-(1 row)
-
-SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q;
- jsb
---------------------------------------
- {"a": "bbb", "b": null, "c": 123.45}
-(1 row)
-
-SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": null}') q;
- jsa
------
-
-(1 row)
-
-SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "jsa".
-SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": [1, "2", null, 4]}') q;
- jsa
---------------------
- {1,"\"2\"",NULL,4}
-(1 row)
-
-SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q;
- jsa
-----------------------------------------------------------
- {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{ \"k\" : \"v\" }"}
-(1 row)
-
-SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": 123}') q;
-ERROR: cannot call populate_composite on a scalar
-SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": [1, 2]}') q;
-ERROR: cannot call populate_composite on an array
-SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q;
- rec
------------------------------------
- (abc,,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
-SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": "(abc,42,01.02.2003)"}') q;
- rec
--------------------------------------
- (abc,42,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "reca".
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [1, 2]}') q;
-ERROR: cannot call populate_composite on a scalar
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q;
- reca
---------------------------------------------------------
- {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
-(1 row)
-
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": ["(abc,42,01.02.2003)"]}') q;
- reca
--------------------------------------------
- {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
-(1 row)
-
-SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q;
- reca
--------------------------------------------
- {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
-(1 row)
-
-SELECT rec FROM json_populate_record(
- row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
- row('x',3,'2012-12-31 15:30:56')::jpop,NULL)::jsrec,
- '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}'
-) q;
- rec
-------------------------------------
- (abc,3,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
--- anonymous record type
-SELECT json_populate_record(null::record, '{"x": 0, "y": 1}');
-ERROR: could not determine row type for result of json_populate_record
-HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
-SELECT json_populate_record(row(1,2), '{"f1": 0, "f2": 1}');
- json_populate_record
-----------------------
- (0,1)
-(1 row)
-
-SELECT * FROM
- json_populate_record(null::record, '{"x": 776}') AS (x int, y int);
- x | y
------+---
- 776 |
-(1 row)
-
--- composite domain
-SELECT json_populate_record(null::j_ordered_pair, '{"x": 0, "y": 1}');
- json_populate_record
-----------------------
- (0,1)
-(1 row)
-
-SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 0}');
- json_populate_record
-----------------------
- (0,2)
-(1 row)
-
-SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 1, "y": 0}');
-ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check"
--- populate_recordset
-select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+---+--------------------------
- blurfl | |
- | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+----+--------------------------
- blurfl | 99 |
- def | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+---+--------------------------
- blurfl | |
- | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+----+--------------------------
- blurfl | 99 |
- def | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
----------------+----+--------------------------
- [100,200,300] | 99 |
- {"z":true} | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
-ERROR: invalid input syntax for type timestamp: "[100,200,300]"
-create type jpop2 as (a int, b json, c int, d int);
-select * from json_populate_recordset(null::jpop2, '[{"a":2,"c":3,"b":{"z":4},"d":6}]') q;
- a | b | c | d
----+---------+---+---
- 2 | {"z":4} | 3 | 6
-(1 row)
-
-select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+---+--------------------------
- blurfl | |
- | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+----+--------------------------
- blurfl | 99 |
- def | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
----------------+----+--------------------------
- [100,200,300] | 99 |
- {"z":true} | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
--- anonymous record type
-SELECT json_populate_recordset(null::record, '[{"x": 0, "y": 1}]');
-ERROR: could not determine row type for result of json_populate_recordset
-HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
-SELECT json_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]');
- json_populate_recordset
--------------------------
- (0,1)
-(1 row)
-
-SELECT i, json_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]')
-FROM (VALUES (1),(2)) v(i);
- i | json_populate_recordset
----+-------------------------
- 1 | (42,50)
- 1 | (1,43)
- 2 | (42,50)
- 2 | (2,43)
-(4 rows)
-
-SELECT * FROM
- json_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int);
- x | y
------+---
- 776 |
-(1 row)
-
--- empty array is a corner case
-SELECT json_populate_recordset(null::record, '[]');
-ERROR: could not determine row type for result of json_populate_recordset
-HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
-SELECT json_populate_recordset(row(1,2), '[]');
- json_populate_recordset
--------------------------
-(0 rows)
-
-SELECT * FROM json_populate_recordset(NULL::jpop,'[]') q;
- a | b | c
----+---+---
-(0 rows)
-
-SELECT * FROM
- json_populate_recordset(null::record, '[]') AS (x int, y int);
- x | y
----+---
-(0 rows)
-
--- composite domain
-SELECT json_populate_recordset(null::j_ordered_pair, '[{"x": 0, "y": 1}]');
- json_populate_recordset
--------------------------
- (0,1)
-(1 row)
-
-SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 0}, {"y": 3}]');
- json_populate_recordset
--------------------------
- (0,2)
- (1,3)
-(2 rows)
-
-SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 1, "y": 0}]');
-ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check"
--- negative cases where the wrong record type is supplied
-select * from json_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned row contains 1 attribute, but query expects 2.
-select * from json_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned type integer at ordinal position 1, but query expects text.
-select * from json_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned row contains 3 attributes, but query expects 2.
-select * from json_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned type integer at ordinal position 1, but query expects text.
--- test type info caching in json_populate_record()
-CREATE TEMP TABLE jspoptest (js json);
-INSERT INTO jspoptest
-SELECT '{
- "jsa": [1, "2", null, 4],
- "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2},
- "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]
-}'::json
-FROM generate_series(1, 3);
-SELECT (json_populate_record(NULL::jsrec, js)).* FROM jspoptest;
- i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca
----+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+--------------------------------------------------------
- | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
- | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
- | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
-(3 rows)
-
-DROP TYPE jsrec;
-DROP TYPE jsrec_i_not_null;
-DROP DOMAIN js_int_not_null;
-DROP DOMAIN js_int_array_1d;
-DROP DOMAIN js_int_array_2d;
-DROP DOMAIN j_ordered_pair;
-DROP TYPE j_unordered_pair;
---json_typeof() function
-select value, json_typeof(value)
- from (values (json '123.4'),
- (json '-1'),
- (json '"foo"'),
- (json 'true'),
- (json 'false'),
- (json 'null'),
- (json '[1, 2, 3]'),
- (json '[]'),
- (json '{"x":"foo", "y":123}'),
- (json '{}'),
- (NULL::json))
- as data(value);
- value | json_typeof
-----------------------+-------------
- 123.4 | number
- -1 | number
- "foo" | string
- true | boolean
- false | boolean
- null | null
- [1, 2, 3] | array
- [] | array
- {"x":"foo", "y":123} | object
- {} | object
- |
-(11 rows)
-
--- json_build_array, json_build_object, json_object_agg
-SELECT json_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
- json_build_array
------------------------------------------------------------------------
- ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1,2,3]}]
-(1 row)
-
-SELECT json_build_array('a', NULL); -- ok
- json_build_array
-------------------
- ["a", null]
-(1 row)
-
-SELECT json_build_array(VARIADIC NULL::text[]); -- ok
- json_build_array
-------------------
-
-(1 row)
-
-SELECT json_build_array(VARIADIC '{}'::text[]); -- ok
- json_build_array
-------------------
- []
-(1 row)
-
-SELECT json_build_array(VARIADIC '{a,b,c}'::text[]); -- ok
- json_build_array
-------------------
- ["a", "b", "c"]
-(1 row)
-
-SELECT json_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok
- json_build_array
-------------------
- ["a", null]
-(1 row)
-
-SELECT json_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok
- json_build_array
-----------------------
- ["1", "2", "3", "4"]
-(1 row)
-
-SELECT json_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok
- json_build_array
-------------------
- [1, 2, 3, 4]
-(1 row)
-
-SELECT json_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
- json_build_array
---------------------
- [1, 4, 2, 5, 3, 6]
-(1 row)
-
-SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
- json_build_object
-----------------------------------------------------------------------------
- {"a" : 1, "b" : 1.2, "c" : true, "d" : null, "e" : {"x": 3, "y": [1,2,3]}}
-(1 row)
-
-SELECT json_build_object(
- 'a', json_build_object('b',false,'c',99),
- 'd', json_build_object('e',array[9,8,7]::int[],
- 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r)));
- json_build_object
--------------------------------------------------------------------------------------------------
- {"a" : {"b" : false, "c" : 99}, "d" : {"e" : [9,8,7], "f" : {"relkind":"r","name":"pg_class"}}}
-(1 row)
-
-SELECT json_build_object('{a,b,c}'::text[]); -- error
-ERROR: argument list must have even number of elements
-HINT: The arguments of json_build_object() must consist of alternating keys and values.
-SELECT json_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array
-ERROR: key value must be scalar, not array, composite, or json
-SELECT json_build_object('a', 'b', 'c'); -- error
-ERROR: argument list must have even number of elements
-HINT: The arguments of json_build_object() must consist of alternating keys and values.
-SELECT json_build_object(NULL, 'a'); -- error, key cannot be NULL
-ERROR: null value not allowed for object key
-SELECT json_build_object('a', NULL); -- ok
- json_build_object
--------------------
- {"a" : null}
-(1 row)
-
-SELECT json_build_object(VARIADIC NULL::text[]); -- ok
- json_build_object
--------------------
-
-(1 row)
-
-SELECT json_build_object(VARIADIC '{}'::text[]); -- ok
- json_build_object
--------------------
- {}
-(1 row)
-
-SELECT json_build_object(VARIADIC '{a,b,c}'::text[]); -- error
-ERROR: argument list must have even number of elements
-HINT: The arguments of json_build_object() must consist of alternating keys and values.
-SELECT json_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok
- json_build_object
--------------------
- {"a" : null}
-(1 row)
-
-SELECT json_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL
-ERROR: null value not allowed for object key
-SELECT json_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok
- json_build_object
-------------------------
- {"1" : "2", "3" : "4"}
-(1 row)
-
-SELECT json_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok
- json_build_object
---------------------
- {"1" : 2, "3" : 4}
-(1 row)
-
-SELECT json_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
- json_build_object
------------------------------
- {"1" : 4, "2" : 5, "3" : 6}
-(1 row)
-
--- empty objects/arrays
-SELECT json_build_array();
- json_build_array
-------------------
- []
-(1 row)
-
-SELECT json_build_object();
- json_build_object
--------------------
- {}
-(1 row)
-
--- make sure keys are quoted
-SELECT json_build_object(1,2);
- json_build_object
--------------------
- {"1" : 2}
-(1 row)
-
--- keys must be scalar and not null
-SELECT json_build_object(null,2);
-ERROR: null value not allowed for object key
-SELECT json_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r;
-ERROR: key value must be scalar, not array, composite, or json
-SELECT json_build_object(json '{"a":1,"b":2}', 3);
-ERROR: key value must be scalar, not array, composite, or json
-SELECT json_build_object('{1,2,3}'::int[], 3);
-ERROR: key value must be scalar, not array, composite, or json
-CREATE TEMP TABLE foo (serial_num int, name text, type text);
-INSERT INTO foo VALUES (847001,'t15','GE1043');
-INSERT INTO foo VALUES (847002,'t16','GE1043');
-INSERT INTO foo VALUES (847003,'sub-alpha','GESS90');
-SELECT json_build_object('turbines',json_object_agg(serial_num,json_build_object('name',name,'type',type)))
-FROM foo;
- json_build_object
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- {"turbines" : { "847001" : {"name" : "t15", "type" : "GE1043"}, "847002" : {"name" : "t16", "type" : "GE1043"}, "847003" : {"name" : "sub-alpha", "type" : "GESS90"} }}
-(1 row)
-
-SELECT json_object_agg(name, type) FROM foo;
- json_object_agg
-----------------------------------------------------------------
- { "t15" : "GE1043", "t16" : "GE1043", "sub-alpha" : "GESS90" }
-(1 row)
-
-INSERT INTO foo VALUES (999999, NULL, 'bar');
-SELECT json_object_agg(name, type) FROM foo;
-ERROR: null value not allowed for object key
--- json_object
--- empty object, one dimension
-SELECT json_object('{}');
- json_object
--------------
- {}
-(1 row)
-
--- empty object, two dimensions
-SELECT json_object('{}', '{}');
- json_object
--------------
- {}
-(1 row)
-
--- one dimension
-SELECT json_object('{a,1,b,2,3,NULL,"d e f","a b c"}');
- json_object
--------------------------------------------------------
- {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"}
-(1 row)
-
--- same but with two dimensions
-SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
- json_object
--------------------------------------------------------
- {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"}
-(1 row)
-
--- odd number error
-SELECT json_object('{a,b,c}');
-ERROR: array must have even number of elements
--- one column error
-SELECT json_object('{{a},{b}}');
-ERROR: array must have two columns
--- too many columns error
-SELECT json_object('{{a,b,c},{b,c,d}}');
-ERROR: array must have two columns
--- too many dimensions error
-SELECT json_object('{{{a,b},{c,d}},{{b,c},{d,e}}}');
-ERROR: wrong number of array subscripts
---two argument form of json_object
-select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}');
- json_object
-------------------------------------------------------
- {"a" : "1", "b" : "2", "c" : "3", "d e f" : "a b c"}
-(1 row)
-
--- too many dimensions
-SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
-ERROR: wrong number of array subscripts
--- mismatched dimensions
-select json_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}');
-ERROR: mismatched array dimensions
-select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}');
-ERROR: mismatched array dimensions
--- null key error
-select json_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}');
-ERROR: null value not allowed for object key
--- empty key is allowed
-select json_object('{a,b,"","d e f"}','{1,2,3,"a b c"}');
- json_object
------------------------------------------------------
- {"a" : "1", "b" : "2", "" : "3", "d e f" : "a b c"}
-(1 row)
-
--- json_object_agg_unique requires unique keys
-select json_object_agg_unique(mod(i,100), i) from generate_series(0, 199) i;
-ERROR: duplicate JSON object key value: "0"
--- json_to_record and json_to_recordset
-select * from json_to_record('{"a":1,"b":"foo","c":"bar"}')
- as x(a int, b text, d text);
- a | b | d
----+-----+---
- 1 | foo |
-(1 row)
-
-select * from json_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]')
- as x(a int, b text, c boolean);
- a | b | c
----+-----+---
- 1 | foo |
- 2 | bar | t
-(2 rows)
-
-select * from json_to_recordset('[{"a":1,"b":{"d":"foo"},"c":true},{"a":2,"c":false,"b":{"d":"bar"}}]')
- as x(a int, b json, c boolean);
- a | b | c
----+-------------+---
- 1 | {"d":"foo"} | t
- 2 | {"d":"bar"} | f
-(2 rows)
-
-select *, c is null as c_is_null
-from json_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::json)
- as t(a int, b json, c text, x int, ca char(5)[], ia int[][], r jpop);
- a | b | c | x | ca | ia | r | c_is_null
----+-----------------+---+---+-------------------+---------------+------------+-----------
- 1 | {"c":16, "d":2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t
-(1 row)
-
-select *, c is null as c_is_null
-from json_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::json)
- as t(a int, b json, c text, x int);
- a | b | c | x | c_is_null
----+-----------------+---+---+-----------
- 1 | {"c":16, "d":2} | | 8 | t
-(1 row)
-
-select * from json_to_record('{"ia": null}') as x(ia _int4);
- ia
-----
-
-(1 row)
-
-select * from json_to_record('{"ia": 123}') as x(ia _int4);
-ERROR: expected JSON array
-HINT: See the value of key "ia".
-select * from json_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4);
- ia
---------------
- {1,2,NULL,4}
-(1 row)
-
-select * from json_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4);
- ia
----------------
- {{1,2},{3,4}}
-(1 row)
-
-select * from json_to_record('{"ia": [[1], 2]}') as x(ia _int4);
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ia".
-select * from json_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4);
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-select * from json_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]);
- ia2
----------
- {1,2,3}
-(1 row)
-
-select * from json_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]);
- ia2
----------------
- {{1,2},{3,4}}
-(1 row)
-
-select * from json_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]);
- ia2
------------------
- {{{1},{2},{3}}}
-(1 row)
-
-select * from json_to_record('{"out": {"key": 1}}') as x(out json);
- out
-------------
- {"key": 1}
-(1 row)
-
-select * from json_to_record('{"out": [{"key": 1}]}') as x(out json);
- out
---------------
- [{"key": 1}]
-(1 row)
-
-select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out json);
- out
-----------------
- "{\"key\": 1}"
-(1 row)
-
-select * from json_to_record('{"out": {"key": 1}}') as x(out jsonb);
- out
-------------
- {"key": 1}
-(1 row)
-
-select * from json_to_record('{"out": [{"key": 1}]}') as x(out jsonb);
- out
---------------
- [{"key": 1}]
-(1 row)
-
-select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb);
- out
-----------------
- "{\"key\": 1}"
-(1 row)
-
--- json_strip_nulls
-select json_strip_nulls(null);
- json_strip_nulls
-------------------
-
-(1 row)
-
-select json_strip_nulls('1');
- json_strip_nulls
-------------------
- 1
-(1 row)
-
-select json_strip_nulls('"a string"');
- json_strip_nulls
-------------------
- "a string"
-(1 row)
-
-select json_strip_nulls('null');
- json_strip_nulls
-------------------
- null
-(1 row)
-
-select json_strip_nulls('[1,2,null,3,4]');
- json_strip_nulls
-------------------
- [1,2,null,3,4]
-(1 row)
-
-select json_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}');
- json_strip_nulls
-------------------------------------
- {"a":1,"c":[2,null,3],"d":{"e":4}}
-(1 row)
-
-select json_strip_nulls('[1,{"a":1,"b":null,"c":2},3]');
- json_strip_nulls
----------------------
- [1,{"a":1,"c":2},3]
-(1 row)
-
--- an empty object is not null and should not be stripped
-select json_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }');
- json_strip_nulls
-------------------
- {"a":{},"d":{}}
-(1 row)
-
--- json to tsvector
-select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json);
- to_tsvector
----------------------------------------------------------------------------
- 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11
-(1 row)
-
--- json to tsvector with config
-select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json);
- to_tsvector
----------------------------------------------------------------------------
- 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11
-(1 row)
-
--- json to tsvector with stop words
-select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. iii"}}'::json);
- to_tsvector
-----------------------------------------------------------------------------
- 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13
-(1 row)
-
--- json to tsvector with numeric values
-select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::json);
- to_tsvector
----------------------------------
- 'aaa':1 'bbb':3 'ccc':5 'ddd':4
-(1 row)
-
--- json_to_tsvector
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"');
- json_to_tsvector
-----------------------------------------------------------------------------------------
- '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"');
- json_to_tsvector
---------------------------------
- 'b':2 'c':4 'd':6 'f':8 'g':10
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"');
- json_to_tsvector
-------------------
- 'aaa':1 'bbb':3
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"');
- json_to_tsvector
-------------------
- '123':1 '456':3
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"');
- json_to_tsvector
--------------------
- 'fals':3 'true':1
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]');
- json_to_tsvector
----------------------------------
- '123':5 '456':7 'aaa':1 'bbb':3
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"');
- json_to_tsvector
-----------------------------------------------------------------------------------------
- '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"');
- json_to_tsvector
---------------------------------
- 'b':2 'c':4 'd':6 'f':8 'g':10
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"');
- json_to_tsvector
-------------------
- 'aaa':1 'bbb':3
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"');
- json_to_tsvector
-------------------
- '123':1 '456':3
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"');
- json_to_tsvector
--------------------
- 'fals':3 'true':1
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]');
- json_to_tsvector
----------------------------------
- '123':5 '456':7 'aaa':1 'bbb':3
-(1 row)
-
--- to_tsvector corner cases
-select to_tsvector('""'::json);
- to_tsvector
--------------
-
-(1 row)
-
-select to_tsvector('{}'::json);
- to_tsvector
--------------
-
-(1 row)
-
-select to_tsvector('[]'::json);
- to_tsvector
--------------
-
-(1 row)
-
-select to_tsvector('null'::json);
- to_tsvector
--------------
-
-(1 row)
-
--- json_to_tsvector corner cases
-select json_to_tsvector('""'::json, '"all"');
- json_to_tsvector
-------------------
-
-(1 row)
-
-select json_to_tsvector('{}'::json, '"all"');
- json_to_tsvector
-------------------
-
-(1 row)
-
-select json_to_tsvector('[]'::json, '"all"');
- json_to_tsvector
-------------------
-
-(1 row)
-
-select json_to_tsvector('null'::json, '"all"');
- json_to_tsvector
-------------------
-
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '""');
-ERROR: wrong flag in flag array: ""
-HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '{}');
-ERROR: wrong flag type, only arrays and scalars are allowed
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '[]');
- json_to_tsvector
-------------------
-
-(1 row)
-
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, 'null');
-ERROR: flag array element is not a string
-HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
-select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["all", null]');
-ERROR: flag array element is not a string
-HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
--- ts_headline for json
-select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'));
- ts_headline
----------------------------------------------------------------------------------------------------------
- {"a":"aaa bbb","b":{"c":"ccc ddd fff","c1":"ccc1 ddd1"},"d":["ggg hhh","iii jjj"]}
-(1 row)
-
-select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'));
- ts_headline
-----------------------------------------------------------------------------------------
- {"a":"aaa bbb","b":{"c":"ccc ddd fff"},"d":["ggg hhh","iii jjj"]}
-(1 row)
-
-select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
- ts_headline
-------------------------------------------------------------------------------------------
- {"a":"aaa ","b":{"c":"ccc fff","c1":"ccc1 ddd1"},"d":["ggg ","iii jjj"]}
-(1 row)
-
-select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
- ts_headline
-------------------------------------------------------------------------------------------
- {"a":"aaa ","b":{"c":"ccc fff","c1":"ccc1 ddd1"},"d":["ggg ","iii jjj"]}
-(1 row)
-
--- corner cases for ts_headline with json
-select ts_headline('null'::json, tsquery('aaa & bbb'));
- ts_headline
--------------
- null
-(1 row)
-
-select ts_headline('{}'::json, tsquery('aaa & bbb'));
- ts_headline
--------------
- {}
-(1 row)
-
-select ts_headline('[]'::json, tsquery('aaa & bbb'));
- ts_headline
--------------
- []
-(1 row)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/jsonb.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/jsonb.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/jsonb.out 2024-11-15 02:50:52.462095130 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/jsonb.out 2024-11-15 02:59:17.841116519 +0000
@@ -1,5717 +1,2 @@
--- directory paths are passed to us in environment variables
-\getenv abs_srcdir PG_ABS_SRCDIR
-CREATE TABLE testjsonb (
- j jsonb
-);
-\set filename :abs_srcdir '/data/jsonb.data'
-COPY testjsonb FROM :'filename';
--- Strings.
-SELECT '""'::jsonb; -- OK.
- jsonb
--------
- ""
-(1 row)
-
-SELECT $$''$$::jsonb; -- ERROR, single quotes are not allowed
-ERROR: invalid input syntax for type json
-LINE 1: SELECT $$''$$::jsonb;
- ^
-DETAIL: Token "'" is invalid.
-CONTEXT: JSON data, line 1: '...
-SELECT '"abc"'::jsonb; -- OK
- jsonb
--------
- "abc"
-(1 row)
-
-SELECT '"abc'::jsonb; -- ERROR, quotes not closed
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '"abc'::jsonb;
- ^
-DETAIL: Token ""abc" is invalid.
-CONTEXT: JSON data, line 1: "abc
-SELECT '"abc
-def"'::jsonb; -- ERROR, unescaped newline in string constant
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '"abc
- ^
-DETAIL: Character with value 0x0a must be escaped.
-CONTEXT: JSON data, line 1: "abc
-SELECT '"\n\"\\"'::jsonb; -- OK, legal escapes
- jsonb
-----------
- "\n\"\\"
-(1 row)
-
-SELECT '"\v"'::jsonb; -- ERROR, not a valid JSON escape
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '"\v"'::jsonb;
- ^
-DETAIL: Escape sequence "\v" is invalid.
-CONTEXT: JSON data, line 1: "\v...
--- see json_encoding test for input with unicode escapes
--- Numbers.
-SELECT '1'::jsonb; -- OK
- jsonb
--------
- 1
-(1 row)
-
-SELECT '0'::jsonb; -- OK
- jsonb
--------
- 0
-(1 row)
-
-SELECT '01'::jsonb; -- ERROR, not valid according to JSON spec
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '01'::jsonb;
- ^
-DETAIL: Token "01" is invalid.
-CONTEXT: JSON data, line 1: 01
-SELECT '0.1'::jsonb; -- OK
- jsonb
--------
- 0.1
-(1 row)
-
-SELECT '9223372036854775808'::jsonb; -- OK, even though it's too large for int8
- jsonb
----------------------
- 9223372036854775808
-(1 row)
-
-SELECT '1e100'::jsonb; -- OK
- jsonb
--------------------------------------------------------------------------------------------------------
- 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-(1 row)
-
-SELECT '1.3e100'::jsonb; -- OK
- jsonb
--------------------------------------------------------------------------------------------------------
- 13000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-(1 row)
-
-SELECT '1f2'::jsonb; -- ERROR
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '1f2'::jsonb;
- ^
-DETAIL: Token "1f2" is invalid.
-CONTEXT: JSON data, line 1: 1f2
-SELECT '0.x1'::jsonb; -- ERROR
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '0.x1'::jsonb;
- ^
-DETAIL: Token "0.x1" is invalid.
-CONTEXT: JSON data, line 1: 0.x1
-SELECT '1.3ex100'::jsonb; -- ERROR
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '1.3ex100'::jsonb;
- ^
-DETAIL: Token "1.3ex100" is invalid.
-CONTEXT: JSON data, line 1: 1.3ex100
--- Arrays.
-SELECT '[]'::jsonb; -- OK
- jsonb
--------
- []
-(1 row)
-
-SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::jsonb; -- OK
- jsonb
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
-(1 row)
-
-SELECT '[1,2]'::jsonb; -- OK
- jsonb
---------
- [1, 2]
-(1 row)
-
-SELECT '[1,2,]'::jsonb; -- ERROR, trailing comma
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '[1,2,]'::jsonb;
- ^
-DETAIL: Expected JSON value, but found "]".
-CONTEXT: JSON data, line 1: [1,2,]
-SELECT '[1,2'::jsonb; -- ERROR, no closing bracket
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '[1,2'::jsonb;
- ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1: [1,2
-SELECT '[1,[2]'::jsonb; -- ERROR, no closing bracket
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '[1,[2]'::jsonb;
- ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1: [1,[2]
--- Objects.
-SELECT '{}'::jsonb; -- OK
- jsonb
--------
- {}
-(1 row)
-
-SELECT '{"abc"}'::jsonb; -- ERROR, no value
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc"}'::jsonb;
- ^
-DETAIL: Expected ":", but found "}".
-CONTEXT: JSON data, line 1: {"abc"}
-SELECT '{"abc":1}'::jsonb; -- OK
- jsonb
-------------
- {"abc": 1}
-(1 row)
-
-SELECT '{1:"abc"}'::jsonb; -- ERROR, keys must be strings
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{1:"abc"}'::jsonb;
- ^
-DETAIL: Expected string or "}", but found "1".
-CONTEXT: JSON data, line 1: {1...
-SELECT '{"abc",1}'::jsonb; -- ERROR, wrong separator
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc",1}'::jsonb;
- ^
-DETAIL: Expected ":", but found ",".
-CONTEXT: JSON data, line 1: {"abc",...
-SELECT '{"abc"=1}'::jsonb; -- ERROR, totally wrong separator
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc"=1}'::jsonb;
- ^
-DETAIL: Token "=" is invalid.
-CONTEXT: JSON data, line 1: {"abc"=...
-SELECT '{"abc"::1}'::jsonb; -- ERROR, another wrong separator
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc"::1}'::jsonb;
- ^
-DETAIL: Expected JSON value, but found ":".
-CONTEXT: JSON data, line 1: {"abc"::...
-SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::jsonb; -- OK
- jsonb
---------------------------------------------------------------------
- {"abc": 1, "def": 2, "ghi": [3, 4], "hij": {"klm": 5, "nop": [6]}}
-(1 row)
-
-SELECT '{"abc":1:2}'::jsonb; -- ERROR, colon in wrong spot
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc":1:2}'::jsonb;
- ^
-DETAIL: Expected "," or "}", but found ":".
-CONTEXT: JSON data, line 1: {"abc":1:...
-SELECT '{"abc":1,3}'::jsonb; -- ERROR, no value
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{"abc":1,3}'::jsonb;
- ^
-DETAIL: Expected string, but found "3".
-CONTEXT: JSON data, line 1: {"abc":1,3...
--- Recursion.
-SET max_stack_depth = '100kB';
-SELECT repeat('[', 10000)::jsonb;
-ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
-SELECT repeat('{"a":', 10000)::jsonb;
-ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
-RESET max_stack_depth;
--- Miscellaneous stuff.
-SELECT 'true'::jsonb; -- OK
- jsonb
--------
- true
-(1 row)
-
-SELECT 'false'::jsonb; -- OK
- jsonb
--------
- false
-(1 row)
-
-SELECT 'null'::jsonb; -- OK
- jsonb
--------
- null
-(1 row)
-
-SELECT ' true '::jsonb; -- OK, even with extra whitespace
- jsonb
--------
- true
-(1 row)
-
-SELECT 'true false'::jsonb; -- ERROR, too many values
-ERROR: invalid input syntax for type json
-LINE 1: SELECT 'true false'::jsonb;
- ^
-DETAIL: Expected end of input, but found "false".
-CONTEXT: JSON data, line 1: true false
-SELECT 'true, false'::jsonb; -- ERROR, too many values
-ERROR: invalid input syntax for type json
-LINE 1: SELECT 'true, false'::jsonb;
- ^
-DETAIL: Expected end of input, but found ",".
-CONTEXT: JSON data, line 1: true,...
-SELECT 'truf'::jsonb; -- ERROR, not a keyword
-ERROR: invalid input syntax for type json
-LINE 1: SELECT 'truf'::jsonb;
- ^
-DETAIL: Token "truf" is invalid.
-CONTEXT: JSON data, line 1: truf
-SELECT 'trues'::jsonb; -- ERROR, not a keyword
-ERROR: invalid input syntax for type json
-LINE 1: SELECT 'trues'::jsonb;
- ^
-DETAIL: Token "trues" is invalid.
-CONTEXT: JSON data, line 1: trues
-SELECT ''::jsonb; -- ERROR, no value
-ERROR: invalid input syntax for type json
-LINE 1: SELECT ''::jsonb;
- ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1:
-SELECT ' '::jsonb; -- ERROR, no value
-ERROR: invalid input syntax for type json
-LINE 1: SELECT ' '::jsonb;
- ^
-DETAIL: The input string ended unexpectedly.
-CONTEXT: JSON data, line 1:
--- Multi-line JSON input to check ERROR reporting
-SELECT '{
- "one": 1,
- "two":"two",
- "three":
- true}'::jsonb; -- OK
- jsonb
------------------------------------------
- {"one": 1, "two": "two", "three": true}
-(1 row)
-
-SELECT '{
- "one": 1,
- "two":,"two", -- ERROR extraneous comma before field "two"
- "three":
- true}'::jsonb;
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{
- ^
-DETAIL: Expected JSON value, but found ",".
-CONTEXT: JSON data, line 3: "two":,...
-SELECT '{
- "one": 1,
- "two":"two",
- "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::jsonb;
-ERROR: invalid input syntax for type json
-LINE 1: SELECT '{
- ^
-DETAIL: Expected JSON value, but found "}".
-CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":}
--- ERROR missing value for last field
--- test non-error-throwing input
-select pg_input_is_valid('{"a":true}', 'jsonb');
- pg_input_is_valid
--------------------
- t
-(1 row)
-
-select pg_input_is_valid('{"a":true', 'jsonb');
- pg_input_is_valid
--------------------
- f
-(1 row)
-
-select * from pg_input_error_info('{"a":true', 'jsonb');
- message | detail | hint | sql_error_code
-------------------------------------+--------------------------------------+------+----------------
- invalid input syntax for type json | The input string ended unexpectedly. | | 22P02
-(1 row)
-
-select * from pg_input_error_info('{"a":1e1000000}', 'jsonb');
- message | detail | hint | sql_error_code
---------------------------------+--------+------+----------------
- value overflows numeric format | | | 22003
-(1 row)
-
--- make sure jsonb is passed through json generators without being escaped
-SELECT array_to_json(ARRAY [jsonb '{"a":1}', jsonb '{"b":[2,3]}']);
- array_to_json
---------------------------
- [{"a": 1},{"b": [2, 3]}]
-(1 row)
-
--- anyarray column
-CREATE TEMP TABLE rows AS
-SELECT x, 'txt' || x as y
-FROM generate_series(1,3) AS x;
-analyze rows;
-select attname, to_jsonb(histogram_bounds) histogram_bounds
-from pg_stats
-where tablename = 'rows' and
- schemaname = pg_my_temp_schema()::regnamespace::text
-order by 1;
- attname | histogram_bounds
----------+--------------------------
- x | [1, 2, 3]
- y | ["txt1", "txt2", "txt3"]
-(2 rows)
-
--- to_jsonb, timestamps
-select to_jsonb(timestamp '2014-05-28 12:22:35.614298');
- to_jsonb
-------------------------------
- "2014-05-28T12:22:35.614298"
-(1 row)
-
-BEGIN;
-SET LOCAL TIME ZONE 10.5;
-select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04');
- to_jsonb
-------------------------------------
- "2014-05-29T02:52:35.614298+10:30"
-(1 row)
-
-SET LOCAL TIME ZONE -8;
-select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04');
- to_jsonb
-------------------------------------
- "2014-05-28T08:22:35.614298-08:00"
-(1 row)
-
-COMMIT;
-select to_jsonb(date '2014-05-28');
- to_jsonb
---------------
- "2014-05-28"
-(1 row)
-
-select to_jsonb(date 'Infinity');
- to_jsonb
-------------
- "infinity"
-(1 row)
-
-select to_jsonb(date '-Infinity');
- to_jsonb
--------------
- "-infinity"
-(1 row)
-
-select to_jsonb(timestamp 'Infinity');
- to_jsonb
-------------
- "infinity"
-(1 row)
-
-select to_jsonb(timestamp '-Infinity');
- to_jsonb
--------------
- "-infinity"
-(1 row)
-
-select to_jsonb(timestamptz 'Infinity');
- to_jsonb
-------------
- "infinity"
-(1 row)
-
-select to_jsonb(timestamptz '-Infinity');
- to_jsonb
--------------
- "-infinity"
-(1 row)
-
---jsonb_agg
-SELECT jsonb_agg(q)
- FROM ( SELECT $$a$$ || x AS b, y AS c,
- ARRAY[ROW(x.*,ARRAY[1,2,3]),
- ROW(y.*,ARRAY[4,5,6])] AS z
- FROM generate_series(1,2) x,
- generate_series(4,5) y) q;
- jsonb_agg
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- [{"b": "a1", "c": 4, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a1", "c": 5, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 4, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 5, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}]
-(1 row)
-
-SELECT jsonb_agg(q ORDER BY x, y)
- FROM rows q;
- jsonb_agg
------------------------------------------------------------------------
- [{"x": 1, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}]
-(1 row)
-
-UPDATE rows SET x = NULL WHERE x = 1;
-SELECT jsonb_agg(q ORDER BY x NULLS FIRST, y)
- FROM rows q;
- jsonb_agg
---------------------------------------------------------------------------
- [{"x": null, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}]
-(1 row)
-
--- jsonb extraction functions
-CREATE TEMP TABLE test_jsonb (
- json_type text,
- test_json jsonb
-);
-INSERT INTO test_jsonb VALUES
-('scalar','"a scalar"'),
-('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
-('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
-SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'scalar';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'array';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'object';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json -> 'field2' FROM test_jsonb WHERE json_type = 'object';
- ?column?
-----------
- "val2"
-(1 row)
-
-SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'scalar';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'array';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'object';
- ?column?
-----------
- val2
-(1 row)
-
-SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'scalar';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'array';
- ?column?
-----------
- "two"
-(1 row)
-
-SELECT test_json -> 9 FROM test_jsonb WHERE json_type = 'array';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'object';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json ->> 6 FROM test_jsonb WHERE json_type = 'array';
- ?column?
------------
- [1, 2, 3]
-(1 row)
-
-SELECT test_json ->> 7 FROM test_jsonb WHERE json_type = 'array';
- ?column?
------------
- {"f1": 9}
-(1 row)
-
-SELECT test_json ->> 'field4' FROM test_jsonb WHERE json_type = 'object';
- ?column?
-----------
- 4
-(1 row)
-
-SELECT test_json ->> 'field5' FROM test_jsonb WHERE json_type = 'object';
- ?column?
------------
- [1, 2, 3]
-(1 row)
-
-SELECT test_json ->> 'field6' FROM test_jsonb WHERE json_type = 'object';
- ?column?
------------
- {"f1": 9}
-(1 row)
-
-SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'scalar';
- ?column?
-----------
-
-(1 row)
-
-SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'array';
- ?column?
-----------
- two
-(1 row)
-
-SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'object';
- ?column?
-----------
-
-(1 row)
-
-SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'scalar';
-ERROR: cannot call jsonb_object_keys on a scalar
-SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'array';
-ERROR: cannot call jsonb_object_keys on an array
-SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'object';
- jsonb_object_keys
--------------------
- field1
- field2
- field3
- field4
- field5
- field6
-(6 rows)
-
--- nulls
-SELECT (test_json->'field3') IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'object';
- expect_false
---------------
- f
-(1 row)
-
-SELECT (test_json->>'field3') IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'object';
- expect_true
--------------
- t
-(1 row)
-
-SELECT (test_json->3) IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'array';
- expect_false
---------------
- f
-(1 row)
-
-SELECT (test_json->>3) IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'array';
- expect_true
--------------
- t
-(1 row)
-
--- corner cases
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::text;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::int;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 1;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> '';
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 1;
- ?column?
--------------
- {"b": "cc"}
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 3;
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": "c", "b": null}'::jsonb -> 'b';
- ?column?
-----------
- null
-(1 row)
-
-select '"foo"'::jsonb -> 1;
- ?column?
-----------
-
-(1 row)
-
-select '"foo"'::jsonb -> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '[]'::jsonb -> -2147483648;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::text;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::int;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 1;
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> '';
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 1;
- ?column?
--------------
- {"b": "cc"}
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 3;
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": "c", "b": null}'::jsonb ->> 'b';
- ?column?
-----------
-
-(1 row)
-
-select '"foo"'::jsonb ->> 1;
- ?column?
-----------
-
-(1 row)
-
-select '"foo"'::jsonb ->> 'z';
- ?column?
-----------
-
-(1 row)
-
-select '[]'::jsonb ->> -2147483648;
- ?column?
-----------
-
-(1 row)
-
--- equality and inequality
-SELECT '{"x":"y"}'::jsonb = '{"x":"y"}'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"x":"y"}'::jsonb = '{"x":"z"}'::jsonb;
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"x":"y"}'::jsonb <> '{"x":"y"}'::jsonb;
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"x":"y"}'::jsonb <> '{"x":"z"}'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
--- containment
-SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}');
- jsonb_contains
-----------------
- t
-(1 row)
-
-SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":null}');
- jsonb_contains
-----------------
- t
-(1 row)
-
-SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "g":null}');
- jsonb_contains
-----------------
- f
-(1 row)
-
-SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"g":null}');
- jsonb_contains
-----------------
- f
-(1 row)
-
-SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"c"}');
- jsonb_contains
-----------------
- f
-(1 row)
-
-SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}');
- jsonb_contains
-----------------
- t
-(1 row)
-
-SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":"q"}');
- jsonb_contains
-----------------
- f
-(1 row)
-
-SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":null}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "g":null}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"g":null}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"c"}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":"q"}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '[1,2]'::jsonb @> '[1,2,2]'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
-SELECT '[1,1,2]'::jsonb @> '[1,2,2]'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
-SELECT '[[1,2]]'::jsonb @> '[[1,2,2]]'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
-SELECT '[1,2,2]'::jsonb <@ '[1,2]'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
-SELECT '[1,2,2]'::jsonb <@ '[1,1,2]'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
-SELECT '[[1,2,2]]'::jsonb <@ '[[1,2]]'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}');
- jsonb_contained
------------------
- t
-(1 row)
-
-SELECT jsonb_contained('{"a":"b", "c":null}', '{"a":"b", "b":1, "c":null}');
- jsonb_contained
------------------
- t
-(1 row)
-
-SELECT jsonb_contained('{"a":"b", "g":null}', '{"a":"b", "b":1, "c":null}');
- jsonb_contained
------------------
- f
-(1 row)
-
-SELECT jsonb_contained('{"g":null}', '{"a":"b", "b":1, "c":null}');
- jsonb_contained
------------------
- f
-(1 row)
-
-SELECT jsonb_contained('{"a":"c"}', '{"a":"b", "b":1, "c":null}');
- jsonb_contained
------------------
- f
-(1 row)
-
-SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}');
- jsonb_contained
------------------
- t
-(1 row)
-
-SELECT jsonb_contained('{"a":"b", "c":"q"}', '{"a":"b", "b":1, "c":null}');
- jsonb_contained
------------------
- f
-(1 row)
-
-SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":"b", "c":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":"b", "g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"a":"c"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":"b", "c":"q"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
- ?column?
-----------
- f
-(1 row)
-
--- Raw scalar may contain another raw scalar, array may contain a raw scalar
-SELECT '[5]'::jsonb @> '[5]';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '5'::jsonb @> '5';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '[5]'::jsonb @> '5';
- ?column?
-----------
- t
-(1 row)
-
--- But a raw scalar cannot contain an array
-SELECT '5'::jsonb @> '[5]';
- ?column?
-----------
- f
-(1 row)
-
--- In general, one thing should always contain itself. Test array containment:
-SELECT '["9", ["7", "3"], 1]'::jsonb @> '["9", ["7", "3"], 1]'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
-SELECT '["9", ["7", "3"], ["1"]]'::jsonb @> '["9", ["7", "3"], ["1"]]'::jsonb;
- ?column?
-----------
- t
-(1 row)
-
--- array containment string matching confusion bug
-SELECT '{ "name": "Bob", "tags": [ "enim", "qui"]}'::jsonb @> '{"tags":["qu"]}';
- ?column?
-----------
- f
-(1 row)
-
--- array length
-SELECT jsonb_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]');
- jsonb_array_length
---------------------
- 5
-(1 row)
-
-SELECT jsonb_array_length('[]');
- jsonb_array_length
---------------------
- 0
-(1 row)
-
-SELECT jsonb_array_length('{"f1":1,"f2":[5,6]}');
-ERROR: cannot get array length of a non-array
-SELECT jsonb_array_length('4');
-ERROR: cannot get array length of a scalar
--- each
-SELECT jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}');
- jsonb_each
---------------------
- (f1,"[1, 2, 3]")
- (f2,"{""f3"": 1}")
- (f4,null)
-(3 rows)
-
-SELECT jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
- q
-------------------------------------------------------
- (1,"""first""")
- (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}")
- (b,"[1, 2]")
- (c,"""cc""")
- (n,null)
-(5 rows)
-
-SELECT * FROM jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
- key | value
------+-----------
- f1 | [1, 2, 3]
- f2 | {"f3": 1}
- f4 | null
- f5 | 99
- f6 | "stringy"
-(5 rows)
-
-SELECT * FROM jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
- key | value
------+------------------------------------
- 1 | "first"
- a | {"1": "first", "b": "c", "c": "b"}
- b | [1, 2]
- c | "cc"
- n | null
-(5 rows)
-
-SELECT jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}');
- jsonb_each_text
---------------------
- (f1,"[1, 2, 3]")
- (f2,"{""f3"": 1}")
- (f4,)
- (f5,null)
-(4 rows)
-
-SELECT jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
- q
-------------------------------------------------------
- (1,first)
- (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}")
- (b,"[1, 2]")
- (c,cc)
- (n,)
-(5 rows)
-
-SELECT * FROM jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
- key | value
------+-----------
- f1 | [1, 2, 3]
- f2 | {"f3": 1}
- f4 |
- f5 | 99
- f6 | stringy
-(5 rows)
-
-SELECT * FROM jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
- key | value
------+------------------------------------
- 1 | first
- a | {"1": "first", "b": "c", "c": "b"}
- b | [1, 2]
- c | cc
- n |
-(5 rows)
-
--- exists
-SELECT jsonb_exists('{"a":null, "b":"qq"}', 'a');
- jsonb_exists
---------------
- t
-(1 row)
-
-SELECT jsonb_exists('{"a":null, "b":"qq"}', 'b');
- jsonb_exists
---------------
- t
-(1 row)
-
-SELECT jsonb_exists('{"a":null, "b":"qq"}', 'c');
- jsonb_exists
---------------
- f
-(1 row)
-
-SELECT jsonb_exists('{"a":"null", "b":"qq"}', 'a');
- jsonb_exists
---------------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ? 'a';
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ? 'b';
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ? 'c';
- ?column?
-----------
- f
-(1 row)
-
-SELECT jsonb '{"a":"null", "b":"qq"}' ? 'a';
- ?column?
-----------
- t
-(1 row)
-
--- array exists - array elements should behave as keys
-SELECT count(*) from testjsonb WHERE j->'array' ? 'bar';
- count
--------
- 3
-(1 row)
-
--- type sensitive array exists - should return no rows (since "exists" only
--- matches strings that are either object keys or array elements)
-SELECT count(*) from testjsonb WHERE j->'array' ? '5'::text;
- count
--------
- 0
-(1 row)
-
--- However, a raw scalar is *contained* within the array
-SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb;
- count
--------
- 1
-(1 row)
-
-SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['a','b']);
- jsonb_exists_any
-------------------
- t
-(1 row)
-
-SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['b','a']);
- jsonb_exists_any
-------------------
- t
-(1 row)
-
-SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','a']);
- jsonb_exists_any
-------------------
- t
-(1 row)
-
-SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','d']);
- jsonb_exists_any
-------------------
- f
-(1 row)
-
-SELECT jsonb_exists_any('{"a":null, "b":"qq"}', '{}'::text[]);
- jsonb_exists_any
-------------------
- f
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['a','b'];
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['b','a'];
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','a'];
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','d'];
- ?column?
-----------
- f
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?| '{}'::text[];
- ?column?
-----------
- f
-(1 row)
-
-SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['a','b']);
- jsonb_exists_all
-------------------
- t
-(1 row)
-
-SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['b','a']);
- jsonb_exists_all
-------------------
- t
-(1 row)
-
-SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','a']);
- jsonb_exists_all
-------------------
- f
-(1 row)
-
-SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','d']);
- jsonb_exists_all
-------------------
- f
-(1 row)
-
-SELECT jsonb_exists_all('{"a":null, "b":"qq"}', '{}'::text[]);
- jsonb_exists_all
-------------------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','b'];
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['b','a'];
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','a'];
- ?column?
-----------
- f
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','d'];
- ?column?
-----------
- f
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','a', 'b', 'b', 'b'];
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '{"a":null, "b":"qq"}' ?& '{}'::text[];
- ?column?
-----------
- t
-(1 row)
-
--- typeof
-SELECT jsonb_typeof('{}') AS object;
- object
---------
- object
-(1 row)
-
-SELECT jsonb_typeof('{"c":3,"p":"o"}') AS object;
- object
---------
- object
-(1 row)
-
-SELECT jsonb_typeof('[]') AS array;
- array
--------
- array
-(1 row)
-
-SELECT jsonb_typeof('["a", 1]') AS array;
- array
--------
- array
-(1 row)
-
-SELECT jsonb_typeof('null') AS "null";
- null
-------
- null
-(1 row)
-
-SELECT jsonb_typeof('1') AS number;
- number
---------
- number
-(1 row)
-
-SELECT jsonb_typeof('-1') AS number;
- number
---------
- number
-(1 row)
-
-SELECT jsonb_typeof('1.0') AS number;
- number
---------
- number
-(1 row)
-
-SELECT jsonb_typeof('1e2') AS number;
- number
---------
- number
-(1 row)
-
-SELECT jsonb_typeof('-1.0') AS number;
- number
---------
- number
-(1 row)
-
-SELECT jsonb_typeof('true') AS boolean;
- boolean
----------
- boolean
-(1 row)
-
-SELECT jsonb_typeof('false') AS boolean;
- boolean
----------
- boolean
-(1 row)
-
-SELECT jsonb_typeof('"hello"') AS string;
- string
---------
- string
-(1 row)
-
-SELECT jsonb_typeof('"true"') AS string;
- string
---------
- string
-(1 row)
-
-SELECT jsonb_typeof('"1.0"') AS string;
- string
---------
- string
-(1 row)
-
--- jsonb_build_array, jsonb_build_object, jsonb_object_agg
-SELECT jsonb_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
- jsonb_build_array
--------------------------------------------------------------------------
- ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1, 2, 3]}]
-(1 row)
-
-SELECT jsonb_build_array('a', NULL); -- ok
- jsonb_build_array
--------------------
- ["a", null]
-(1 row)
-
-SELECT jsonb_build_array(VARIADIC NULL::text[]); -- ok
- jsonb_build_array
--------------------
-
-(1 row)
-
-SELECT jsonb_build_array(VARIADIC '{}'::text[]); -- ok
- jsonb_build_array
--------------------
- []
-(1 row)
-
-SELECT jsonb_build_array(VARIADIC '{a,b,c}'::text[]); -- ok
- jsonb_build_array
--------------------
- ["a", "b", "c"]
-(1 row)
-
-SELECT jsonb_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok
- jsonb_build_array
--------------------
- ["a", null]
-(1 row)
-
-SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok
- jsonb_build_array
-----------------------
- ["1", "2", "3", "4"]
-(1 row)
-
-SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok
- jsonb_build_array
--------------------
- [1, 2, 3, 4]
-(1 row)
-
-SELECT jsonb_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
- jsonb_build_array
---------------------
- [1, 4, 2, 5, 3, 6]
-(1 row)
-
-SELECT jsonb_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
- jsonb_build_object
--------------------------------------------------------------------------
- {"a": 1, "b": 1.2, "c": true, "d": null, "e": {"x": 3, "y": [1, 2, 3]}}
-(1 row)
-
-SELECT jsonb_build_object(
- 'a', jsonb_build_object('b',false,'c',99),
- 'd', jsonb_build_object('e',array[9,8,7]::int[],
- 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r)));
- jsonb_build_object
-------------------------------------------------------------------------------------------------
- {"a": {"b": false, "c": 99}, "d": {"e": [9, 8, 7], "f": {"name": "pg_class", "relkind": "r"}}}
-(1 row)
-
-SELECT jsonb_build_object('{a,b,c}'::text[]); -- error
-ERROR: argument list must have even number of elements
-HINT: The arguments of jsonb_build_object() must consist of alternating keys and values.
-SELECT jsonb_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array
-ERROR: key value must be scalar, not array, composite, or json
-SELECT jsonb_build_object('a', 'b', 'c'); -- error
-ERROR: argument list must have even number of elements
-HINT: The arguments of jsonb_build_object() must consist of alternating keys and values.
-SELECT jsonb_build_object(NULL, 'a'); -- error, key cannot be NULL
-ERROR: argument 1: key must not be null
-SELECT jsonb_build_object('a', NULL); -- ok
- jsonb_build_object
---------------------
- {"a": null}
-(1 row)
-
-SELECT jsonb_build_object(VARIADIC NULL::text[]); -- ok
- jsonb_build_object
---------------------
-
-(1 row)
-
-SELECT jsonb_build_object(VARIADIC '{}'::text[]); -- ok
- jsonb_build_object
---------------------
- {}
-(1 row)
-
-SELECT jsonb_build_object(VARIADIC '{a,b,c}'::text[]); -- error
-ERROR: argument list must have even number of elements
-HINT: The arguments of jsonb_build_object() must consist of alternating keys and values.
-SELECT jsonb_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok
- jsonb_build_object
---------------------
- {"a": null}
-(1 row)
-
-SELECT jsonb_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL
-ERROR: argument 1: key must not be null
-SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok
- jsonb_build_object
-----------------------
- {"1": "2", "3": "4"}
-(1 row)
-
-SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok
- jsonb_build_object
---------------------
- {"1": 2, "3": 4}
-(1 row)
-
-SELECT jsonb_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
- jsonb_build_object
---------------------------
- {"1": 4, "2": 5, "3": 6}
-(1 row)
-
--- empty objects/arrays
-SELECT jsonb_build_array();
- jsonb_build_array
--------------------
- []
-(1 row)
-
-SELECT jsonb_build_object();
- jsonb_build_object
---------------------
- {}
-(1 row)
-
--- make sure keys are quoted
-SELECT jsonb_build_object(1,2);
- jsonb_build_object
---------------------
- {"1": 2}
-(1 row)
-
--- keys must be scalar and not null
-SELECT jsonb_build_object(null,2);
-ERROR: argument 1: key must not be null
-SELECT jsonb_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r;
-ERROR: key value must be scalar, not array, composite, or json
-SELECT jsonb_build_object(json '{"a":1,"b":2}', 3);
-ERROR: key value must be scalar, not array, composite, or json
-SELECT jsonb_build_object('{1,2,3}'::int[], 3);
-ERROR: key value must be scalar, not array, composite, or json
--- handling of NULL values
-SELECT jsonb_object_agg(1, NULL::jsonb);
- jsonb_object_agg
-------------------
- {"1": null}
-(1 row)
-
-SELECT jsonb_object_agg(NULL, '{"a":1}');
-ERROR: field name must not be null
-CREATE TEMP TABLE foo (serial_num int, name text, type text);
-INSERT INTO foo VALUES (847001,'t15','GE1043');
-INSERT INTO foo VALUES (847002,'t16','GE1043');
-INSERT INTO foo VALUES (847003,'sub-alpha','GESS90');
-SELECT jsonb_build_object('turbines',jsonb_object_agg(serial_num,jsonb_build_object('name',name,'type',type)))
-FROM foo;
- jsonb_build_object
--------------------------------------------------------------------------------------------------------------------------------------------------------------
- {"turbines": {"847001": {"name": "t15", "type": "GE1043"}, "847002": {"name": "t16", "type": "GE1043"}, "847003": {"name": "sub-alpha", "type": "GESS90"}}}
-(1 row)
-
-SELECT jsonb_object_agg(name, type) FROM foo;
- jsonb_object_agg
------------------------------------------------------------
- {"t15": "GE1043", "t16": "GE1043", "sub-alpha": "GESS90"}
-(1 row)
-
-INSERT INTO foo VALUES (999999, NULL, 'bar');
-SELECT jsonb_object_agg(name, type) FROM foo;
-ERROR: field name must not be null
--- edge case for parser
-SELECT jsonb_object_agg(DISTINCT 'a', 'abc');
- jsonb_object_agg
-------------------
- {"a": "abc"}
-(1 row)
-
--- jsonb_object
--- empty object, one dimension
-SELECT jsonb_object('{}');
- jsonb_object
---------------
- {}
-(1 row)
-
--- empty object, two dimensions
-SELECT jsonb_object('{}', '{}');
- jsonb_object
---------------
- {}
-(1 row)
-
--- one dimension
-SELECT jsonb_object('{a,1,b,2,3,NULL,"d e f","a b c"}');
- jsonb_object
----------------------------------------------------
- {"3": null, "a": "1", "b": "2", "d e f": "a b c"}
-(1 row)
-
--- same but with two dimensions
-SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
- jsonb_object
----------------------------------------------------
- {"3": null, "a": "1", "b": "2", "d e f": "a b c"}
-(1 row)
-
--- odd number error
-SELECT jsonb_object('{a,b,c}');
-ERROR: array must have even number of elements
--- one column error
-SELECT jsonb_object('{{a},{b}}');
-ERROR: array must have two columns
--- too many columns error
-SELECT jsonb_object('{{a,b,c},{b,c,d}}');
-ERROR: array must have two columns
--- too many dimensions error
-SELECT jsonb_object('{{{a,b},{c,d}},{{b,c},{d,e}}}');
-ERROR: wrong number of array subscripts
---two argument form of jsonb_object
-select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}');
- jsonb_object
---------------------------------------------------
- {"a": "1", "b": "2", "c": "3", "d e f": "a b c"}
-(1 row)
-
--- too many dimensions
-SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
-ERROR: wrong number of array subscripts
--- mismatched dimensions
-select jsonb_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}');
-ERROR: mismatched array dimensions
-select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}');
-ERROR: mismatched array dimensions
--- null key error
-select jsonb_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}');
-ERROR: null value not allowed for object key
--- empty key is allowed
-select jsonb_object('{a,b,"","d e f"}','{1,2,3,"a b c"}');
- jsonb_object
--------------------------------------------------
- {"": "3", "a": "1", "b": "2", "d e f": "a b c"}
-(1 row)
-
--- extract_path, extract_path_as_text
-SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
- jsonb_extract_path
---------------------
- "stringy"
-(1 row)
-
-SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
- jsonb_extract_path
---------------------
- {"f3": 1}
-(1 row)
-
-SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
- jsonb_extract_path
---------------------
- "f3"
-(1 row)
-
-SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
- jsonb_extract_path
---------------------
- 1
-(1 row)
-
-SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
- jsonb_extract_path_text
--------------------------
- stringy
-(1 row)
-
-SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
- jsonb_extract_path_text
--------------------------
- {"f3": 1}
-(1 row)
-
-SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
- jsonb_extract_path_text
--------------------------
- f3
-(1 row)
-
-SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
- jsonb_extract_path_text
--------------------------
- 1
-(1 row)
-
--- extract_path nulls
-SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_false;
- expect_false
---------------
- f
-(1 row)
-
-SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_true;
- expect_true
--------------
- t
-(1 row)
-
-SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_false;
- expect_false
---------------
- f
-(1 row)
-
-SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_true;
- expect_true
--------------
- t
-(1 row)
-
--- extract_path operators
-SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f4','f6'];
- ?column?
------------
- "stringy"
-(1 row)
-
-SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2'];
- ?column?
------------
- {"f3": 1}
-(1 row)
-
-SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','0'];
- ?column?
-----------
- "f3"
-(1 row)
-
-SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','1'];
- ?column?
-----------
- 1
-(1 row)
-
-SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f4','f6'];
- ?column?
-----------
- stringy
-(1 row)
-
-SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2'];
- ?column?
------------
- {"f3": 1}
-(1 row)
-
-SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','0'];
- ?column?
-----------
- f3
-(1 row)
-
-SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','1'];
- ?column?
-----------
- 1
-(1 row)
-
--- corner cases for same
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #> '{}';
- ?column?
-----------------------------
- {"a": {"b": {"c": "foo"}}}
-(1 row)
-
-select '[1,2,3]'::jsonb #> '{}';
- ?column?
------------
- [1, 2, 3]
-(1 row)
-
-select '"foo"'::jsonb #> '{}';
- ?column?
-----------
- "foo"
-(1 row)
-
-select '42'::jsonb #> '{}';
- ?column?
-----------
- 42
-(1 row)
-
-select 'null'::jsonb #> '{}';
- ?column?
-----------
- null
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a'];
- ?column?
----------------------
- {"b": {"c": "foo"}}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', null];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', ''];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b'];
- ?column?
---------------
- {"c": "foo"}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c'];
- ?column?
-----------
- "foo"
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c','d'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','z','c'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','1','b'];
- ?column?
-----------
- "cc"
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','z','b'];
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['1','b'];
- ?column?
-----------
- "cc"
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['z','b'];
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": null}]'::jsonb #> array['1','b'];
- ?column?
-----------
- null
-(1 row)
-
-select '"foo"'::jsonb #> array['z'];
- ?column?
-----------
-
-(1 row)
-
-select '42'::jsonb #> array['f2'];
- ?column?
-----------
-
-(1 row)
-
-select '42'::jsonb #> array['0'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> '{}';
- ?column?
-----------------------------
- {"a": {"b": {"c": "foo"}}}
-(1 row)
-
-select '[1,2,3]'::jsonb #>> '{}';
- ?column?
------------
- [1, 2, 3]
-(1 row)
-
-select '"foo"'::jsonb #>> '{}';
- ?column?
-----------
- foo
-(1 row)
-
-select '42'::jsonb #>> '{}';
- ?column?
-----------
- 42
-(1 row)
-
-select 'null'::jsonb #>> '{}';
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a'];
- ?column?
----------------------
- {"b": {"c": "foo"}}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', null];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', ''];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b'];
- ?column?
---------------
- {"c": "foo"}
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c'];
- ?column?
-----------
- foo
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c','d'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','z','c'];
- ?column?
-----------
-
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','1','b'];
- ?column?
-----------
- cc
-(1 row)
-
-select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','z','b'];
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['1','b'];
- ?column?
-----------
- cc
-(1 row)
-
-select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['z','b'];
- ?column?
-----------
-
-(1 row)
-
-select '[{"b": "c"}, {"b": null}]'::jsonb #>> array['1','b'];
- ?column?
-----------
-
-(1 row)
-
-select '"foo"'::jsonb #>> array['z'];
- ?column?
-----------
-
-(1 row)
-
-select '42'::jsonb #>> array['f2'];
- ?column?
-----------
-
-(1 row)
-
-select '42'::jsonb #>> array['0'];
- ?column?
-----------
-
-(1 row)
-
--- array_elements
-SELECT jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]');
- jsonb_array_elements
-----------------------------
- 1
- true
- [1, [2, 3]]
- null
- {"f1": 1, "f2": [7, 8, 9]}
- false
-(6 rows)
-
-SELECT * FROM jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]') q;
- value
-----------------------------
- 1
- true
- [1, [2, 3]]
- null
- {"f1": 1, "f2": [7, 8, 9]}
- false
-(6 rows)
-
-SELECT jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
- jsonb_array_elements_text
-----------------------------
- 1
- true
- [1, [2, 3]]
-
- {"f1": 1, "f2": [7, 8, 9]}
- false
- stringy
-(7 rows)
-
-SELECT * FROM jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
- value
-----------------------------
- 1
- true
- [1, [2, 3]]
-
- {"f1": 1, "f2": [7, 8, 9]}
- false
- stringy
-(7 rows)
-
--- populate_record
-CREATE TYPE jbpop AS (a text, b int, c timestamp);
-CREATE DOMAIN jsb_int_not_null AS int NOT NULL;
-CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3);
-CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3);
-create type jb_unordered_pair as (x int, y int);
-create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y);
-CREATE TYPE jsbrec AS (
- i int,
- ia _int4,
- ia1 int[],
- ia2 int[][],
- ia3 int[][][],
- ia1d jsb_int_array_1d,
- ia2d jsb_int_array_2d,
- t text,
- ta text[],
- c char(10),
- ca char(10)[],
- ts timestamp,
- js json,
- jsb jsonb,
- jsa json[],
- rec jbpop,
- reca jbpop[]
-);
-CREATE TYPE jsbrec_i_not_null AS (
- i jsb_int_not_null
-);
-SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q;
- a | b | c
---------+---+---
- blurfl | |
-(1 row)
-
-SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q;
- a | b | c
---------+---+--------------------------
- blurfl | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q;
- a | b | c
---------+---+---
- blurfl | |
-(1 row)
-
-SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q;
- a | b | c
---------+---+--------------------------
- blurfl | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":[100,200,false],"x":43.2}') q;
- a | b | c
--------------------+---+---
- [100, 200, false] | |
-(1 row)
-
-SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":[100,200,false],"x":43.2}') q;
- a | b | c
--------------------+---+--------------------------
- [100, 200, false] | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"c":[100,200,false],"x":43.2}') q;
-ERROR: invalid input syntax for type timestamp: "[100, 200, false]"
-SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop, '{}') q;
- a | b | c
----+---+--------------------------
- x | 3 | Mon Dec 31 15:30:56 2012
-(1 row)
-
-SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"x": 43.2}') q;
-ERROR: domain jsb_int_not_null does not allow null values
-SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": null}') q;
-ERROR: domain jsb_int_not_null does not allow null values
-SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": 12345}') q;
- i
--------
- 12345
-(1 row)
-
-SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": null}') q;
- ia
-----
-
-(1 row)
-
-SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ia".
-SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [1, "2", null, 4]}') q;
- ia
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1, 2], [3, 4]]}') q;
- ia
----------------
- {{1,2},{3,4}}
-(1 row)
-
-SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], 2]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ia".
-SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], [2, 3]]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": "{1,2,3}"}') q;
- ia
----------
- {1,2,3}
-(1 row)
-
-SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": null}') q;
- ia1
------
-
-(1 row)
-
-SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ia1".
-SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [1, "2", null, 4]}') q;
- ia1
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [[1, 2, 3]]}') q;
- ia1
------------
- {{1,2,3}}
-(1 row)
-
-SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": null}') q;
- ia1d
-------
-
-(1 row)
-
-SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ia1d".
-SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null, 4]}') q;
-ERROR: value for domain jsb_int_array_1d violates check constraint "jsb_int_array_1d_check"
-SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null]}') q;
- ia1d
-------------
- {1,2,NULL}
-(1 row)
-
-SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [1, "2", null, 4]}') q;
- ia2
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [null, 4]]}') q;
- ia2
-------------------
- {{1,2},{NULL,4}}
-(1 row)
-
-SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[], []]}') q;
- ia2
------
- {}
-(1 row)
-
-SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [3]]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], 3, 4]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ia2".
-SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2"], [null, 4]]}') q;
-ERROR: value for domain jsb_int_array_2d violates check constraint "jsb_int_array_2d_check"
-SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q;
- ia2d
-----------------------
- {{1,2,3},{NULL,5,6}}
-(1 row)
-
-SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [1, "2", null, 4]}') q;
- ia3
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [[1, 2], [null, 4]]}') q;
- ia3
-------------------
- {{1,2},{NULL,4}}
-(1 row)
-
-SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q;
- ia3
------
- {}
-(1 row)
-
-SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q;
- ia3
--------------------
- {{{1,2}},{{3,4}}}
-(1 row)
-
-SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q;
- ia3
--------------------------------
- {{{1,2},{3,4}},{{5,6},{7,8}}}
-(1 row)
-
-SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": null}') q;
- ta
-----
-
-(1 row)
-
-SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ta".
-SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [1, "2", null, 4]}') q;
- ta
---------------
- {1,2,NULL,4}
-(1 row)
-
-SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ta".
-SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": null}') q;
- c
----
-
-(1 row)
-
-SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaa"}') q;
- c
-------------
- aaa
-(1 row)
-
-SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaa"}') q;
- c
-------------
- aaaaaaaaaa
-(1 row)
-
-SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaaaaa"}') q;
-ERROR: value too long for type character(10)
-SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": null}') q;
- ca
-----
-
-(1 row)
-
-SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "ca".
-SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [1, "2", null, 4]}') q;
- ca
------------------------------------------------
- {"1 ","2 ",NULL,"4 "}
-(1 row)
-
-SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q;
-ERROR: value too long for type character(10)
-SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q;
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ca".
-SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": null}') q;
- js
-----
-
-(1 row)
-
-SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": true}') q;
- js
-------
- true
-(1 row)
-
-SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": 123.45}') q;
- js
---------
- 123.45
-(1 row)
-
-SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "123.45"}') q;
- js
-----------
- "123.45"
-(1 row)
-
-SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "abc"}') q;
- js
--------
- "abc"
-(1 row)
-
-SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": [123, "123", null, {"key": "value"}]}') q;
- js
---------------------------------------
- [123, "123", null, {"key": "value"}]
-(1 row)
-
-SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q;
- js
---------------------------------------
- {"a": "bbb", "b": null, "c": 123.45}
-(1 row)
-
-SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": null}') q;
- jsb
------
-
-(1 row)
-
-SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": true}') q;
- jsb
-------
- true
-(1 row)
-
-SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": 123.45}') q;
- jsb
---------
- 123.45
-(1 row)
-
-SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "123.45"}') q;
- jsb
-----------
- "123.45"
-(1 row)
-
-SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "abc"}') q;
- jsb
--------
- "abc"
-(1 row)
-
-SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q;
- jsb
---------------------------------------
- [123, "123", null, {"key": "value"}]
-(1 row)
-
-SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q;
- jsb
---------------------------------------
- {"a": "bbb", "b": null, "c": 123.45}
-(1 row)
-
-SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": null}') q;
- jsa
------
-
-(1 row)
-
-SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "jsa".
-SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": [1, "2", null, 4]}') q;
- jsa
---------------------
- {1,"\"2\"",NULL,4}
-(1 row)
-
-SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q;
- jsa
--------------------------------------------------------
- {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{\"k\": \"v\"}"}
-(1 row)
-
-SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": 123}') q;
-ERROR: cannot call populate_composite on a scalar
-SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": [1, 2]}') q;
-ERROR: cannot call populate_composite on an array
-SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q;
- rec
------------------------------------
- (abc,,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
-SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": "(abc,42,01.02.2003)"}') q;
- rec
--------------------------------------
- (abc,42,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
-SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": 123}') q;
-ERROR: expected JSON array
-HINT: See the value of key "reca".
-SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [1, 2]}') q;
-ERROR: cannot call populate_composite on a scalar
-SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q;
- reca
---------------------------------------------------------
- {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
-(1 row)
-
-SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": ["(abc,42,01.02.2003)"]}') q;
- reca
--------------------------------------------
- {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
-(1 row)
-
-SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q;
- reca
--------------------------------------------
- {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
-(1 row)
-
-SELECT rec FROM jsonb_populate_record(
- row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
- row('x',3,'2012-12-31 15:30:56')::jbpop,NULL)::jsbrec,
- '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}'
-) q;
- rec
-------------------------------------
- (abc,3,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
--- Tests to check soft-error support for populate_record_field()
--- populate_scalar()
-create type jsb_char2 as (a char(2));
-select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aaa"}');
- jsonb_populate_record_valid
------------------------------
- f
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aaa"}') q;
-ERROR: value too long for type character(2)
-select jsonb_populate_record_valid(NULL::jsb_char2, '{"a": "aa"}');
- jsonb_populate_record_valid
------------------------------
- t
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_char2, '{"a": "aa"}') q;
- a
-----
- aa
-(1 row)
-
--- populate_array()
-create type jsb_ia as (a int[]);
-create type jsb_ia2 as (a int[][]);
-select jsonb_populate_record_valid(NULL::jsb_ia, '{"a": 43.2}');
- jsonb_populate_record_valid
------------------------------
- f
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_ia, '{"a": 43.2}') q;
-ERROR: expected JSON array
-HINT: See the value of key "a".
-select jsonb_populate_record_valid(NULL::jsb_ia, '{"a": [1, 2]}');
- jsonb_populate_record_valid
------------------------------
- t
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_ia, '{"a": [1, 2]}') q;
- a
--------
- {1,2}
-(1 row)
-
-select jsonb_populate_record_valid(NULL::jsb_ia2, '{"a": [[1], [2, 3]]}');
- jsonb_populate_record_valid
------------------------------
- f
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_ia2, '{"a": [[1], [2, 3]]}') q;
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-select jsonb_populate_record_valid(NULL::jsb_ia2, '{"a": [[1, 0], [2, 3]]}');
- jsonb_populate_record_valid
------------------------------
- t
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_ia2, '{"a": [[1, 0], [2, 3]]}') q;
- a
----------------
- {{1,0},{2,3}}
-(1 row)
-
--- populate_domain()
-create domain jsb_i_not_null as int not null;
-create domain jsb_i_gt_1 as int check (value > 1);
-create type jsb_i_not_null_rec as (a jsb_i_not_null);
-create type jsb_i_gt_1_rec as (a jsb_i_gt_1);
-select jsonb_populate_record_valid(NULL::jsb_i_not_null_rec, '{"a": null}');
- jsonb_populate_record_valid
------------------------------
- f
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_i_not_null_rec, '{"a": null}') q;
-ERROR: domain jsb_i_not_null does not allow null values
-select jsonb_populate_record_valid(NULL::jsb_i_not_null_rec, '{"a": 1}');
- jsonb_populate_record_valid
------------------------------
- t
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_i_not_null_rec, '{"a": 1}') q;
- a
----
- 1
-(1 row)
-
-select jsonb_populate_record_valid(NULL::jsb_i_gt_1_rec, '{"a": 1}');
- jsonb_populate_record_valid
------------------------------
- f
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_i_gt_1_rec, '{"a": 1}') q;
-ERROR: value for domain jsb_i_gt_1 violates check constraint "jsb_i_gt_1_check"
-select jsonb_populate_record_valid(NULL::jsb_i_gt_1_rec, '{"a": 2}');
- jsonb_populate_record_valid
------------------------------
- t
-(1 row)
-
-select * from jsonb_populate_record(NULL::jsb_i_gt_1_rec, '{"a": 2}') q;
- a
----
- 2
-(1 row)
-
-drop type jsb_ia, jsb_ia2, jsb_char2, jsb_i_not_null_rec, jsb_i_gt_1_rec;
-drop domain jsb_i_not_null, jsb_i_gt_1;
--- anonymous record type
-SELECT jsonb_populate_record(null::record, '{"x": 0, "y": 1}');
-ERROR: could not determine row type for result of jsonb_populate_record
-HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
-SELECT jsonb_populate_record(row(1,2), '{"f1": 0, "f2": 1}');
- jsonb_populate_record
------------------------
- (0,1)
-(1 row)
-
-SELECT * FROM
- jsonb_populate_record(null::record, '{"x": 776}') AS (x int, y int);
- x | y
------+---
- 776 |
-(1 row)
-
--- composite domain
-SELECT jsonb_populate_record(null::jb_ordered_pair, '{"x": 0, "y": 1}');
- jsonb_populate_record
------------------------
- (0,1)
-(1 row)
-
-SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 0}');
- jsonb_populate_record
------------------------
- (0,2)
-(1 row)
-
-SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 1, "y": 0}');
-ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check"
--- populate_recordset
-SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+---+--------------------------
- blurfl | |
- | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+----+--------------------------
- blurfl | 99 |
- def | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+---+--------------------------
- blurfl | |
- | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+----+--------------------------
- blurfl | 99 |
- def | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
------------------+----+--------------------------
- [100, 200, 300] | 99 |
- {"z": true} | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
-ERROR: invalid input syntax for type timestamp: "[100, 200, 300]"
-SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+---+--------------------------
- blurfl | |
- | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
---------+----+--------------------------
- blurfl | 99 |
- def | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
-SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
- a | b | c
------------------+----+--------------------------
- [100, 200, 300] | 99 |
- {"z": true} | 3 | Fri Jan 20 10:42:53 2012
-(2 rows)
-
--- anonymous record type
-SELECT jsonb_populate_recordset(null::record, '[{"x": 0, "y": 1}]');
-ERROR: could not determine row type for result of jsonb_populate_recordset
-HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
-SELECT jsonb_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]');
- jsonb_populate_recordset
---------------------------
- (0,1)
-(1 row)
-
-SELECT i, jsonb_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]')
-FROM (VALUES (1),(2)) v(i);
- i | jsonb_populate_recordset
----+--------------------------
- 1 | (42,50)
- 1 | (1,43)
- 2 | (42,50)
- 2 | (2,43)
-(4 rows)
-
-SELECT * FROM
- jsonb_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int);
- x | y
------+---
- 776 |
-(1 row)
-
--- empty array is a corner case
-SELECT jsonb_populate_recordset(null::record, '[]');
-ERROR: could not determine row type for result of jsonb_populate_recordset
-HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
-SELECT jsonb_populate_recordset(row(1,2), '[]');
- jsonb_populate_recordset
---------------------------
-(0 rows)
-
-SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[]') q;
- a | b | c
----+---+---
-(0 rows)
-
-SELECT * FROM
- jsonb_populate_recordset(null::record, '[]') AS (x int, y int);
- x | y
----+---
-(0 rows)
-
--- composite domain
-SELECT jsonb_populate_recordset(null::jb_ordered_pair, '[{"x": 0, "y": 1}]');
- jsonb_populate_recordset
---------------------------
- (0,1)
-(1 row)
-
-SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 0}, {"y": 3}]');
- jsonb_populate_recordset
---------------------------
- (0,2)
- (1,3)
-(2 rows)
-
-SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 1, "y": 0}]');
-ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check"
--- negative cases where the wrong record type is supplied
-select * from jsonb_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned row contains 1 attribute, but query expects 2.
-select * from jsonb_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned type integer at ordinal position 1, but query expects text.
-select * from jsonb_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned row contains 3 attributes, but query expects 2.
-select * from jsonb_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text);
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned type integer at ordinal position 1, but query expects text.
--- jsonb_to_record and jsonb_to_recordset
-select * from jsonb_to_record('{"a":1,"b":"foo","c":"bar"}')
- as x(a int, b text, d text);
- a | b | d
----+-----+---
- 1 | foo |
-(1 row)
-
-select * from jsonb_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]')
- as x(a int, b text, c boolean);
- a | b | c
----+-----+---
- 1 | foo |
- 2 | bar | t
-(2 rows)
-
-select *, c is null as c_is_null
-from jsonb_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::jsonb)
- as t(a int, b jsonb, c text, x int, ca char(5)[], ia int[][], r jbpop);
- a | b | c | x | ca | ia | r | c_is_null
----+-------------------+---+---+-------------------+---------------+------------+-----------
- 1 | {"c": 16, "d": 2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t
-(1 row)
-
-select *, c is null as c_is_null
-from jsonb_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::jsonb)
- as t(a int, b jsonb, c text, x int);
- a | b | c | x | c_is_null
----+-------------------+---+---+-----------
- 1 | {"c": 16, "d": 2} | | 8 | t
-(1 row)
-
-select * from jsonb_to_record('{"ia": null}') as x(ia _int4);
- ia
-----
-
-(1 row)
-
-select * from jsonb_to_record('{"ia": 123}') as x(ia _int4);
-ERROR: expected JSON array
-HINT: See the value of key "ia".
-select * from jsonb_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4);
- ia
---------------
- {1,2,NULL,4}
-(1 row)
-
-select * from jsonb_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4);
- ia
----------------
- {{1,2},{3,4}}
-(1 row)
-
-select * from jsonb_to_record('{"ia": [[1], 2]}') as x(ia _int4);
-ERROR: expected JSON array
-HINT: See the array element [1] of key "ia".
-select * from jsonb_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4);
-ERROR: malformed JSON array
-DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
-select * from jsonb_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]);
- ia2
----------
- {1,2,3}
-(1 row)
-
-select * from jsonb_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]);
- ia2
----------------
- {{1,2},{3,4}}
-(1 row)
-
-select * from jsonb_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]);
- ia2
------------------
- {{{1},{2},{3}}}
-(1 row)
-
-select * from jsonb_to_record('{"out": {"key": 1}}') as x(out json);
- out
-------------
- {"key": 1}
-(1 row)
-
-select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out json);
- out
---------------
- [{"key": 1}]
-(1 row)
-
-select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out json);
- out
-----------------
- "{\"key\": 1}"
-(1 row)
-
-select * from jsonb_to_record('{"out": {"key": 1}}') as x(out jsonb);
- out
-------------
- {"key": 1}
-(1 row)
-
-select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out jsonb);
- out
---------------
- [{"key": 1}]
-(1 row)
-
-select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb);
- out
-----------------
- "{\"key\": 1}"
-(1 row)
-
--- test type info caching in jsonb_populate_record()
-CREATE TEMP TABLE jsbpoptest (js jsonb);
-INSERT INTO jsbpoptest
-SELECT '{
- "jsa": [1, "2", null, 4],
- "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2},
- "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]
-}'::jsonb
-FROM generate_series(1, 3);
-SELECT (jsonb_populate_record(NULL::jsbrec, js)).* FROM jsbpoptest;
- i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca
----+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+--------------------------------------------------------
- | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
- | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
- | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
-(3 rows)
-
-DROP TYPE jsbrec;
-DROP TYPE jsbrec_i_not_null;
-DROP DOMAIN jsb_int_not_null;
-DROP DOMAIN jsb_int_array_1d;
-DROP DOMAIN jsb_int_array_2d;
-DROP DOMAIN jb_ordered_pair;
-DROP TYPE jb_unordered_pair;
--- indexing
-SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
- count
--------
- 15
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j ? 'public';
- count
--------
- 194
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j ? 'bar';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled'];
- count
--------
- 337
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled'];
- count
--------
- 42
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait';
- count
--------
- 15
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
- count
--------
- 1012
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)';
- count
--------
- 194
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)';
- count
--------
- 337
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)';
- count
--------
- 42
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)';
- count
--------
- 15
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$';
- count
--------
- 1012
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.public';
- count
--------
- 194
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
- count
--------
- 0
-(1 row)
-
-CREATE INDEX jidx ON testjsonb USING gin (j);
-SET enable_seqscan = off;
-SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
- count
--------
- 15
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"array":["foo"]}';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}';
- count
--------
- 3
-(1 row)
-
--- exercise GIN_SEARCH_MODE_ALL
-SELECT count(*) FROM testjsonb WHERE j @> '{}';
- count
--------
- 1012
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j ? 'public';
- count
--------
- 194
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j ? 'bar';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled'];
- count
--------
- 337
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled'];
- count
--------
- 42
-(1 row)
-
-EXPLAIN (COSTS OFF)
-SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
- QUERY PLAN
------------------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on testjsonb
- Recheck Cond: (j @@ '($."wait" == null)'::jsonpath)
- -> Bitmap Index Scan on jidx
- Index Cond: (j @@ '($."wait" == null)'::jsonpath)
-(5 rows)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? (@ == null))';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait';
- count
--------
- 15
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
- count
--------
- 1012
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)';
- count
--------
- 194
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)';
- count
--------
- 0
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)';
- count
--------
- 337
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)';
- count
--------
- 42
-(1 row)
-
-EXPLAIN (COSTS OFF)
-SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
- QUERY PLAN
--------------------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on testjsonb
- Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
- -> Bitmap Index Scan on jidx
- Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
-(5 rows)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)';
- count
--------
- 15
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? (@ == "bar")';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$';
- count
--------
- 1012
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.public';
- count
--------
- 194
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
- count
--------
- 0
-(1 row)
-
--- array exists - array elements should behave as keys (for GIN index scans too)
-CREATE INDEX jidx_array ON testjsonb USING gin((j->'array'));
-SELECT count(*) from testjsonb WHERE j->'array' ? 'bar';
- count
--------
- 3
-(1 row)
-
--- type sensitive array exists - should return no rows (since "exists" only
--- matches strings that are either object keys or array elements)
-SELECT count(*) from testjsonb WHERE j->'array' ? '5'::text;
- count
--------
- 0
-(1 row)
-
--- However, a raw scalar is *contained* within the array
-SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb;
- count
--------
- 1
-(1 row)
-
-RESET enable_seqscan;
-SELECT count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow;
- count
--------
- 4791
-(1 row)
-
-SELECT key, count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow GROUP BY key ORDER BY count DESC, key;
- key | count
------------+-------
- line | 884
- query | 207
- pos | 203
- node | 202
- space | 197
- status | 195
- public | 194
- title | 190
- wait | 190
- org | 189
- user | 189
- coauthors | 188
- disabled | 185
- indexed | 184
- cleaned | 180
- bad | 179
- date | 179
- world | 176
- state | 172
- subtitle | 169
- auth | 168
- abstract | 161
- array | 5
- age | 2
- foo | 2
- fool | 1
-(26 rows)
-
--- sort/hash
-SELECT count(distinct j) FROM testjsonb;
- count
--------
- 894
-(1 row)
-
-SET enable_hashagg = off;
-SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2;
- count
--------
- 894
-(1 row)
-
-SET enable_hashagg = on;
-SET enable_sort = off;
-SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2;
- count
--------
- 894
-(1 row)
-
-SELECT distinct * FROM (values (jsonb '{}' || ''::text),('{}')) v(j);
- j
-----
- {}
-(1 row)
-
-SET enable_sort = on;
-RESET enable_hashagg;
-RESET enable_sort;
-DROP INDEX jidx;
-DROP INDEX jidx_array;
--- btree
-CREATE INDEX jidx ON testjsonb USING btree (j);
-SET enable_seqscan = off;
-SELECT count(*) FROM testjsonb WHERE j > '{"p":1}';
- count
--------
- 884
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j = '{"pos":98, "line":371, "node":"CBA", "indexed":true}';
- count
--------
- 1
-(1 row)
-
---gin path opclass
-DROP INDEX jidx;
-CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops);
-SET enable_seqscan = off;
-SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
- count
--------
- 15
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
- count
--------
- 2
-(1 row)
-
--- exercise GIN_SEARCH_MODE_ALL
-SELECT count(*) FROM testjsonb WHERE j @> '{}';
- count
--------
- 1012
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? (@ == null))';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait';
- count
--------
- 15
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
- count
--------
- 1012
-(1 row)
-
-EXPLAIN (COSTS OFF)
-SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
- QUERY PLAN
--------------------------------------------------------------------
- Aggregate
- -> Bitmap Heap Scan on testjsonb
- Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
- -> Bitmap Index Scan on jidx
- Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
-(5 rows)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
- count
--------
- 1
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)';
- count
--------
- 15
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)';
- count
--------
- 2
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? (@ == "bar")';
- count
--------
- 3
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$';
- count
--------
- 1012
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.public';
- count
--------
- 194
-(1 row)
-
-SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
- count
--------
- 0
-(1 row)
-
-RESET enable_seqscan;
-DROP INDEX jidx;
--- nested tests
-SELECT '{"ff":{"a":12,"b":16}}'::jsonb;
- jsonb
-----------------------------
- {"ff": {"a": 12, "b": 16}}
-(1 row)
-
-SELECT '{"ff":{"a":12,"b":16},"qq":123}'::jsonb;
- jsonb
----------------------------------------
- {"ff": {"a": 12, "b": 16}, "qq": 123}
-(1 row)
-
-SELECT '{"aa":["a","aaa"],"qq":{"a":12,"b":16,"c":["c1","c2"],"d":{"d1":"d1","d2":"d2","d1":"d3"}}}'::jsonb;
- jsonb
---------------------------------------------------------------------------------------------------
- {"aa": ["a", "aaa"], "qq": {"a": 12, "b": 16, "c": ["c1", "c2"], "d": {"d1": "d3", "d2": "d2"}}}
-(1 row)
-
-SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2"],"d":{"d1":"d1","d2":"d2"}}}'::jsonb;
- jsonb
-------------------------------------------------------------------------------------------------------
- {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2"], "d": {"d1": "d1", "d2": "d2"}}}
-(1 row)
-
-SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2",["c3"],{"c4":4}],"d":{"d1":"d1","d2":"d2"}}}'::jsonb;
- jsonb
--------------------------------------------------------------------------------------------------------------------------
- {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2", ["c3"], {"c4": 4}], "d": {"d1": "d1", "d2": "d2"}}}
-(1 row)
-
-SELECT '{"ff":["a","aaa"]}'::jsonb;
- jsonb
-----------------------
- {"ff": ["a", "aaa"]}
-(1 row)
-
-SELECT
- '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'ff',
- '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'qq',
- ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'Y') IS NULL AS f,
- ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb ->> 'Y') IS NULL AS t,
- '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'x';
- ?column? | ?column? | f | t | ?column?
---------------------+----------+---+---+----------
- {"a": 12, "b": 16} | 123 | f | t | [1, 2]
-(1 row)
-
--- nested containment
-SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1,2]}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":[2,1],"c":"b"}'::jsonb @> '{"a":[1,2]}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":[1,2]}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":[1,2]}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":{"1":2}}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":{"1":2}}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '["a","b"]'::jsonb @> '["a","b","c","b"]';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '["a","b","c","b"]'::jsonb @> '["a","b"]';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '["a","b","c",[1,2]]'::jsonb @> '["a",[1,2]]';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '["a","b","c",[1,2]]'::jsonb @> '["b",[1,2]]';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1]}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[2]}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[3]}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"c":3}]}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4}]}';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},3]}';
- ?column?
-----------
- f
-(1 row)
-
-SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},1]}';
- ?column?
-----------
- t
-(1 row)
-
--- check some corner cases for indexed nested containment (bug #13756)
-create temp table nestjsonb (j jsonb);
-insert into nestjsonb (j) values ('{"a":[["b",{"x":1}],["b",{"x":2}]],"c":3}');
-insert into nestjsonb (j) values ('[[14,2,3]]');
-insert into nestjsonb (j) values ('[1,[14,2,3]]');
-create index on nestjsonb using gin(j jsonb_path_ops);
-set enable_seqscan = on;
-set enable_bitmapscan = off;
-select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb;
- j
----------------------------------------------------
- {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3}
-(1 row)
-
-select * from nestjsonb where j @> '{"c":3}';
- j
----------------------------------------------------
- {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3}
-(1 row)
-
-select * from nestjsonb where j @> '[[14]]';
- j
------------------
- [[14, 2, 3]]
- [1, [14, 2, 3]]
-(2 rows)
-
-set enable_seqscan = off;
-set enable_bitmapscan = on;
-select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb;
- j
----------------------------------------------------
- {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3}
-(1 row)
-
-select * from nestjsonb where j @> '{"c":3}';
- j
----------------------------------------------------
- {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3}
-(1 row)
-
-select * from nestjsonb where j @> '[[14]]';
- j
------------------
- [[14, 2, 3]]
- [1, [14, 2, 3]]
-(2 rows)
-
-reset enable_seqscan;
-reset enable_bitmapscan;
--- nested object field / array index lookup
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'n';
- ?column?
-----------
- null
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'a';
- ?column?
-----------
- 1
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'b';
- ?column?
-----------
- [1, 2]
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'c';
- ?column?
-----------
- {"1": 2}
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd';
- ?column?
----------------
- {"1": [2, 3]}
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd' -> '1';
- ?column?
-----------
- [2, 3]
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'e';
- ?column?
-----------
-
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 0; --expecting error
- ?column?
-----------
-
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> 0;
- ?column?
-----------
- "a"
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> 1;
- ?column?
-----------
- "b"
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> 2;
- ?column?
-----------
- "c"
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> 3;
- ?column?
-----------
- [1, 2]
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> 3 -> 1;
- ?column?
-----------
- 2
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> 4;
- ?column?
-----------
- null
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> 5;
- ?column?
-----------
-
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> -1;
- ?column?
-----------
- null
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> -5;
- ?column?
-----------
- "a"
-(1 row)
-
-SELECT '["a","b","c",[1,2],null]'::jsonb -> -6;
- ?column?
-----------
-
-(1 row)
-
---nested path extraction
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{0}';
- ?column?
-----------
-
-(1 row)
-
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{a}';
- ?column?
-----------
- "b"
-(1 row)
-
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c}';
- ?column?
------------
- [1, 2, 3]
-(1 row)
-
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,0}';
- ?column?
-----------
- 1
-(1 row)
-
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,1}';
- ?column?
-----------
- 2
-(1 row)
-
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,2}';
- ?column?
-----------
- 3
-(1 row)
-
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,3}';
- ?column?
-----------
-
-(1 row)
-
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-1}';
- ?column?
-----------
- 3
-(1 row)
-
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-3}';
- ?column?
-----------
- 1
-(1 row)
-
-SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-4}';
- ?column?
-----------
-
-(1 row)
-
-SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{0}';
- ?column?
-----------
- 0
-(1 row)
-
-SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{3}';
- ?column?
-----------
- [3, 4]
-(1 row)
-
-SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4}';
- ?column?
----------------
- {"5": "five"}
-(1 row)
-
-SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4,5}';
- ?column?
-----------
- "five"
-(1 row)
-
---nested exists
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'n';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'a';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'b';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'c';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'd';
- ?column?
-----------
- t
-(1 row)
-
-SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'e';
- ?column?
-----------
- f
-(1 row)
-
--- jsonb_strip_nulls
-select jsonb_strip_nulls(null);
- jsonb_strip_nulls
--------------------
-
-(1 row)
-
-select jsonb_strip_nulls('1');
- jsonb_strip_nulls
--------------------
- 1
-(1 row)
-
-select jsonb_strip_nulls('"a string"');
- jsonb_strip_nulls
--------------------
- "a string"
-(1 row)
-
-select jsonb_strip_nulls('null');
- jsonb_strip_nulls
--------------------
- null
-(1 row)
-
-select jsonb_strip_nulls('[1,2,null,3,4]');
- jsonb_strip_nulls
---------------------
- [1, 2, null, 3, 4]
-(1 row)
-
-select jsonb_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}');
- jsonb_strip_nulls
---------------------------------------------
- {"a": 1, "c": [2, null, 3], "d": {"e": 4}}
-(1 row)
-
-select jsonb_strip_nulls('[1,{"a":1,"b":null,"c":2},3]');
- jsonb_strip_nulls
---------------------------
- [1, {"a": 1, "c": 2}, 3]
-(1 row)
-
--- an empty object is not null and should not be stripped
-select jsonb_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }');
- jsonb_strip_nulls
---------------------
- {"a": {}, "d": {}}
-(1 row)
-
-select jsonb_pretty('{"a": "test", "b": [1, 2, 3], "c": "test3", "d":{"dd": "test4", "dd2":{"ddd": "test5"}}}');
- jsonb_pretty
-----------------------------
- { +
- "a": "test", +
- "b": [ +
- 1, +
- 2, +
- 3 +
- ], +
- "c": "test3", +
- "d": { +
- "dd": "test4", +
- "dd2": { +
- "ddd": "test5"+
- } +
- } +
- }
-(1 row)
-
-select jsonb_pretty('[{"f1":1,"f2":null},2,null,[[{"x":true},6,7],8],3]');
- jsonb_pretty
----------------------------
- [ +
- { +
- "f1": 1, +
- "f2": null +
- }, +
- 2, +
- null, +
- [ +
- [ +
- { +
- "x": true+
- }, +
- 6, +
- 7 +
- ], +
- 8 +
- ], +
- 3 +
- ]
-(1 row)
-
-select jsonb_pretty('{"a":["b", "c"], "d": {"e":"f"}}');
- jsonb_pretty
-------------------
- { +
- "a": [ +
- "b", +
- "c" +
- ], +
- "d": { +
- "e": "f"+
- } +
- }
-(1 row)
-
-select jsonb_concat('{"d": "test", "a": [1, 2]}', '{"g": "test2", "c": {"c1":1, "c2":2}}');
- jsonb_concat
--------------------------------------------------------------------
- {"a": [1, 2], "c": {"c1": 1, "c2": 2}, "d": "test", "g": "test2"}
-(1 row)
-
-select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"cq":"l", "b":"g", "fg":false}';
- ?column?
----------------------------------------------
- {"b": "g", "aa": 1, "cq": "l", "fg": false}
-(1 row)
-
-select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aq":"l"}';
- ?column?
----------------------------------------
- {"b": 2, "aa": 1, "aq": "l", "cq": 3}
-(1 row)
-
-select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aa":"l"}';
- ?column?
-------------------------------
- {"b": 2, "aa": "l", "cq": 3}
-(1 row)
-
-select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{}';
- ?column?
-----------------------------
- {"b": 2, "aa": 1, "cq": 3}
-(1 row)
-
-select '["a", "b"]'::jsonb || '["c"]';
- ?column?
------------------
- ["a", "b", "c"]
-(1 row)
-
-select '["a", "b"]'::jsonb || '["c", "d"]';
- ?column?
-----------------------
- ["a", "b", "c", "d"]
-(1 row)
-
-select '["c"]' || '["a", "b"]'::jsonb;
- ?column?
------------------
- ["c", "a", "b"]
-(1 row)
-
-select '["a", "b"]'::jsonb || '"c"';
- ?column?
------------------
- ["a", "b", "c"]
-(1 row)
-
-select '"c"' || '["a", "b"]'::jsonb;
- ?column?
------------------
- ["c", "a", "b"]
-(1 row)
-
-select '[]'::jsonb || '["a"]'::jsonb;
- ?column?
-----------
- ["a"]
-(1 row)
-
-select '[]'::jsonb || '"a"'::jsonb;
- ?column?
-----------
- ["a"]
-(1 row)
-
-select '"b"'::jsonb || '"a"'::jsonb;
- ?column?
-------------
- ["b", "a"]
-(1 row)
-
-select '{}'::jsonb || '{"a":"b"}'::jsonb;
- ?column?
-------------
- {"a": "b"}
-(1 row)
-
-select '[]'::jsonb || '{"a":"b"}'::jsonb;
- ?column?
---------------
- [{"a": "b"}]
-(1 row)
-
-select '{"a":"b"}'::jsonb || '[]'::jsonb;
- ?column?
---------------
- [{"a": "b"}]
-(1 row)
-
-select '"a"'::jsonb || '{"a":1}';
- ?column?
------------------
- ["a", {"a": 1}]
-(1 row)
-
-select '{"a":1}' || '"a"'::jsonb;
- ?column?
------------------
- [{"a": 1}, "a"]
-(1 row)
-
-select '[3]'::jsonb || '{}'::jsonb;
- ?column?
-----------
- [3, {}]
-(1 row)
-
-select '3'::jsonb || '[]'::jsonb;
- ?column?
-----------
- [3]
-(1 row)
-
-select '3'::jsonb || '4'::jsonb;
- ?column?
-----------
- [3, 4]
-(1 row)
-
-select '3'::jsonb || '{}'::jsonb;
- ?column?
-----------
- [3, {}]
-(1 row)
-
-select '["a", "b"]'::jsonb || '{"c":1}';
- ?column?
-----------------------
- ["a", "b", {"c": 1}]
-(1 row)
-
-select '{"c": 1}'::jsonb || '["a", "b"]';
- ?column?
-----------------------
- [{"c": 1}, "a", "b"]
-(1 row)
-
-select '{}'::jsonb || '{"cq":"l", "b":"g", "fg":false}';
- ?column?
-------------------------------------
- {"b": "g", "cq": "l", "fg": false}
-(1 row)
-
-select pg_column_size('{}'::jsonb || '{}'::jsonb) = pg_column_size('{}'::jsonb);
- ?column?
-----------
- t
-(1 row)
-
-select pg_column_size('{"aa":1}'::jsonb || '{"b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb);
- ?column?
-----------
- t
-(1 row)
-
-select pg_column_size('{"aa":1, "b":2}'::jsonb || '{}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb);
- ?column?
-----------
- t
-(1 row)
-
-select pg_column_size('{}'::jsonb || '{"aa":1, "b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb);
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'a');
- jsonb_delete
-------------------
- {"b": 2, "c": 3}
-(1 row)
-
-select jsonb_delete('{"a":null , "b":2, "c":3}'::jsonb, 'a');
- jsonb_delete
-------------------
- {"b": 2, "c": 3}
-(1 row)
-
-select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'b');
- jsonb_delete
-------------------
- {"a": 1, "c": 3}
-(1 row)
-
-select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'c');
- jsonb_delete
-------------------
- {"a": 1, "b": 2}
-(1 row)
-
-select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'd');
- jsonb_delete
---------------------------
- {"a": 1, "b": 2, "c": 3}
-(1 row)
-
-select '{"a":1 , "b":2, "c":3}'::jsonb - 'a';
- ?column?
-------------------
- {"b": 2, "c": 3}
-(1 row)
-
-select '{"a":null , "b":2, "c":3}'::jsonb - 'a';
- ?column?
-------------------
- {"b": 2, "c": 3}
-(1 row)
-
-select '{"a":1 , "b":2, "c":3}'::jsonb - 'b';
- ?column?
-------------------
- {"a": 1, "c": 3}
-(1 row)
-
-select '{"a":1 , "b":2, "c":3}'::jsonb - 'c';
- ?column?
-------------------
- {"a": 1, "b": 2}
-(1 row)
-
-select '{"a":1 , "b":2, "c":3}'::jsonb - 'd';
- ?column?
---------------------------
- {"a": 1, "b": 2, "c": 3}
-(1 row)
-
-select pg_column_size('{"a":1 , "b":2, "c":3}'::jsonb - 'b') = pg_column_size('{"a":1, "b":2}'::jsonb);
- ?column?
-----------
- t
-(1 row)
-
-select '["a","b","c"]'::jsonb - 3;
- ?column?
------------------
- ["a", "b", "c"]
-(1 row)
-
-select '["a","b","c"]'::jsonb - 2;
- ?column?
-------------
- ["a", "b"]
-(1 row)
-
-select '["a","b","c"]'::jsonb - 1;
- ?column?
-------------
- ["a", "c"]
-(1 row)
-
-select '["a","b","c"]'::jsonb - 0;
- ?column?
-------------
- ["b", "c"]
-(1 row)
-
-select '["a","b","c"]'::jsonb - -1;
- ?column?
-------------
- ["a", "b"]
-(1 row)
-
-select '["a","b","c"]'::jsonb - -2;
- ?column?
-------------
- ["a", "c"]
-(1 row)
-
-select '["a","b","c"]'::jsonb - -3;
- ?column?
-------------
- ["b", "c"]
-(1 row)
-
-select '["a","b","c"]'::jsonb - -4;
- ?column?
------------------
- ["a", "b", "c"]
-(1 row)
-
-select '{"a":1 , "b":2, "c":3}'::jsonb - '{b}'::text[];
- ?column?
-------------------
- {"a": 1, "c": 3}
-(1 row)
-
-select '{"a":1 , "b":2, "c":3}'::jsonb - '{c,b}'::text[];
- ?column?
-----------
- {"a": 1}
-(1 row)
-
-select '{"a":1 , "b":2, "c":3}'::jsonb - '{}'::text[];
- ?column?
---------------------------
- {"a": 1, "b": 2, "c": 3}
-(1 row)
-
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '[1,2,3]');
- jsonb_set
---------------------------------------------------------------------------
- {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": [1, 2, 3]}
-(1 row)
-
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '[1,2,3]');
- jsonb_set
------------------------------------------------------------------------------
- {"a": 1, "b": [1, [1, 2, 3]], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
-(1 row)
-
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '[1,2,3]');
- jsonb_set
------------------------------------------------------------------------------
- {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [[1, 2, 3], 3]}, "n": null}
-(1 row)
-
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '[1,2,3]');
-ERROR: path element at position 2 is null
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '{"1": 2}');
- jsonb_set
--------------------------------------------------------------------------
- {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": {"1": 2}}
-(1 row)
-
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"1": 2}');
- jsonb_set
-----------------------------------------------------------------------------
- {"a": 1, "b": [1, {"1": 2}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
-(1 row)
-
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '{"1": 2}');
- jsonb_set
-----------------------------------------------------------------------------
- {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [{"1": 2}, 3]}, "n": null}
-(1 row)
-
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '{"1": 2}');
-ERROR: path element at position 2 is null
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '"test"');
- jsonb_set
---------------------------------------------------------------------------
- {"a": 1, "b": [1, "test"], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
-(1 row)
-
-select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"f": "test"}');
- jsonb_set
----------------------------------------------------------------------------------
- {"a": 1, "b": [1, {"f": "test"}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
-(1 row)
-
-select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{n}');
- jsonb_delete_path
-----------------------------------------------------------
- {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}}
-(1 row)
-
-select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{b,-1}');
- jsonb_delete_path
-------------------------------------------------------------------
- {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
-(1 row)
-
-select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{d,1,0}');
- jsonb_delete_path
-------------------------------------------------------------------
- {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null}
-(1 row)
-
-select jsonb_delete_path('{"a":[]}', '{"a",-2147483648}');
- jsonb_delete_path
--------------------
- {"a": []}
-(1 row)
-
-select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{n}';
- ?column?
-----------------------------------------------------------
- {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}}
-(1 row)
-
-select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1}';
- ?column?
-------------------------------------------------------------------
- {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
-(1 row)
-
-select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1e}'; -- invalid array subscript
-ERROR: path element at position 2 is not an integer: "-1e"
-select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{d,1,0}';
- ?column?
-------------------------------------------------------------------
- {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null}
-(1 row)
-
--- empty structure and error conditions for delete and replace
-select '"a"'::jsonb - 'a'; -- error
-ERROR: cannot delete from scalar
-select '{}'::jsonb - 'a';
- ?column?
-----------
- {}
-(1 row)
-
-select '[]'::jsonb - 'a';
- ?column?
-----------
- []
-(1 row)
-
-select '"a"'::jsonb - 1; -- error
-ERROR: cannot delete from scalar
-select '{}'::jsonb - 1; -- error
-ERROR: cannot delete from object using integer index
-select '[]'::jsonb - 1;
- ?column?
-----------
- []
-(1 row)
-
-select '"a"'::jsonb #- '{a}'; -- error
-ERROR: cannot delete path in scalar
-select '{}'::jsonb #- '{a}';
- ?column?
-----------
- {}
-(1 row)
-
-select '[]'::jsonb #- '{a}';
- ?column?
-----------
- []
-(1 row)
-
-select jsonb_set('"a"','{a}','"b"'); --error
-ERROR: cannot set path in scalar
-select jsonb_set('{}','{a}','"b"', false);
- jsonb_set
------------
- {}
-(1 row)
-
-select jsonb_set('[]','{1}','"b"', false);
- jsonb_set
------------
- []
-(1 row)
-
-select jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0}','[2,3,4]', false);
- jsonb_set
--------------------------
- [[2, 3, 4], 2, null, 3]
-(1 row)
-
--- jsonb_set adding instead of replacing
--- prepend to array
-select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,-33}','{"foo":123}');
- jsonb_set
--------------------------------------------------------
- {"a": 1, "b": [{"foo": 123}, 0, 1, 2], "c": {"d": 4}}
-(1 row)
-
--- append to array
-select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,33}','{"foo":123}');
- jsonb_set
--------------------------------------------------------
- {"a": 1, "b": [0, 1, 2, {"foo": 123}], "c": {"d": 4}}
-(1 row)
-
--- check nesting levels addition
-select jsonb_set('{"a":1,"b":[4,5,[0,1,2],6,7],"c":{"d":4}}','{b,2,33}','{"foo":123}');
- jsonb_set
----------------------------------------------------------------------
- {"a": 1, "b": [4, 5, [0, 1, 2, {"foo": 123}], 6, 7], "c": {"d": 4}}
-(1 row)
-
--- add new key
-select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{c,e}','{"foo":123}');
- jsonb_set
-------------------------------------------------------------
- {"a": 1, "b": [0, 1, 2], "c": {"d": 4, "e": {"foo": 123}}}
-(1 row)
-
--- adding doesn't do anything if elements before last aren't present
-select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,-33}','{"foo":123}');
- jsonb_set
------------------------------------------
- {"a": 1, "b": [0, 1, 2], "c": {"d": 4}}
-(1 row)
-
-select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,y}','{"foo":123}');
- jsonb_set
------------------------------------------
- {"a": 1, "b": [0, 1, 2], "c": {"d": 4}}
-(1 row)
-
--- add to empty object
-select jsonb_set('{}','{x}','{"foo":123}');
- jsonb_set
----------------------
- {"x": {"foo": 123}}
-(1 row)
-
---add to empty array
-select jsonb_set('[]','{0}','{"foo":123}');
- jsonb_set
-----------------
- [{"foo": 123}]
-(1 row)
-
-select jsonb_set('[]','{99}','{"foo":123}');
- jsonb_set
-----------------
- [{"foo": 123}]
-(1 row)
-
-select jsonb_set('[]','{-99}','{"foo":123}');
- jsonb_set
-----------------
- [{"foo": 123}]
-(1 row)
-
-select jsonb_set('{"a": [1, 2, 3]}', '{a, non_integer}', '"new_value"');
-ERROR: path element at position 2 is not an integer: "non_integer"
-select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, non_integer}', '"new_value"');
-ERROR: path element at position 3 is not an integer: "non_integer"
-select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, NULL}', '"new_value"');
-ERROR: path element at position 3 is null
--- jsonb_set_lax
-\pset null NULL
--- pass though non nulls to jsonb_set
-select jsonb_set_lax('{"a":1,"b":2}','{b}','5') ;
- jsonb_set_lax
-------------------
- {"a": 1, "b": 5}
-(1 row)
-
-select jsonb_set_lax('{"a":1,"b":2}','{d}','6', true) ;
- jsonb_set_lax
---------------------------
- {"a": 1, "b": 2, "d": 6}
-(1 row)
-
--- using the default treatment
-select jsonb_set_lax('{"a":1,"b":2}','{b}',null);
- jsonb_set_lax
----------------------
- {"a": 1, "b": null}
-(1 row)
-
-select jsonb_set_lax('{"a":1,"b":2}','{d}',null,true);
- jsonb_set_lax
------------------------------
- {"a": 1, "b": 2, "d": null}
-(1 row)
-
--- errors
-select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, null);
-ERROR: null_value_treatment must be "delete_key", "return_target", "use_json_null", or "raise_exception"
-select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, 'no_such_treatment');
-ERROR: null_value_treatment must be "delete_key", "return_target", "use_json_null", or "raise_exception"
--- explicit treatments
-select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'raise_exception') as raise_exception;
-ERROR: JSON value must not be null
-DETAIL: Exception was raised because null_value_treatment is "raise_exception".
-HINT: To avoid, either change the null_value_treatment argument or ensure that an SQL NULL is not passed.
-select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'return_target') as return_target;
- return_target
-------------------
- {"a": 1, "b": 2}
-(1 row)
-
-select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'delete_key') as delete_key;
- delete_key
-------------
- {"a": 1}
-(1 row)
-
-select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'use_json_null') as use_json_null;
- use_json_null
----------------------
- {"a": 1, "b": null}
-(1 row)
-
-\pset null ''
--- jsonb_insert
-select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"');
- jsonb_insert
--------------------------------
- {"a": [0, "new_value", 1, 2]}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"', true);
- jsonb_insert
--------------------------------
- {"a": [0, 1, "new_value", 2]}
-(1 row)
-
-select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"');
- jsonb_insert
-------------------------------------------------------------
- {"a": {"b": {"c": [0, 1, "new_value", "test1", "test2"]}}}
-(1 row)
-
-select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"', true);
- jsonb_insert
-------------------------------------------------------------
- {"a": {"b": {"c": [0, 1, "test1", "new_value", "test2"]}}}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '{"b": "value"}');
- jsonb_insert
-----------------------------------
- {"a": [0, {"b": "value"}, 1, 2]}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '["value1", "value2"]');
- jsonb_insert
-----------------------------------------
- {"a": [0, ["value1", "value2"], 1, 2]}
-(1 row)
-
--- edge cases
-select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"');
- jsonb_insert
--------------------------------
- {"a": ["new_value", 0, 1, 2]}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"', true);
- jsonb_insert
--------------------------------
- {"a": [0, "new_value", 1, 2]}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"');
- jsonb_insert
--------------------------------
- {"a": [0, 1, "new_value", 2]}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"', true);
- jsonb_insert
--------------------------------
- {"a": [0, 1, 2, "new_value"]}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"');
- jsonb_insert
--------------------------------
- {"a": [0, 1, "new_value", 2]}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"', true);
- jsonb_insert
--------------------------------
- {"a": [0, 1, 2, "new_value"]}
-(1 row)
-
-select jsonb_insert('[]', '{1}', '"new_value"');
- jsonb_insert
----------------
- ["new_value"]
-(1 row)
-
-select jsonb_insert('[]', '{1}', '"new_value"', true);
- jsonb_insert
----------------
- ["new_value"]
-(1 row)
-
-select jsonb_insert('{"a": []}', '{a, 1}', '"new_value"');
- jsonb_insert
-----------------------
- {"a": ["new_value"]}
-(1 row)
-
-select jsonb_insert('{"a": []}', '{a, 1}', '"new_value"', true);
- jsonb_insert
-----------------------
- {"a": ["new_value"]}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, 10}', '"new_value"');
- jsonb_insert
--------------------------------
- {"a": [0, 1, 2, "new_value"]}
-(1 row)
-
-select jsonb_insert('{"a": [0,1,2]}', '{a, -10}', '"new_value"');
- jsonb_insert
--------------------------------
- {"a": ["new_value", 0, 1, 2]}
-(1 row)
-
--- jsonb_insert should be able to insert new value for objects, but not to replace
-select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"');
- jsonb_insert
------------------------------------------
- {"a": {"b": "value", "c": "new_value"}}
-(1 row)
-
-select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"', true);
- jsonb_insert
------------------------------------------
- {"a": {"b": "value", "c": "new_value"}}
-(1 row)
-
-select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"');
-ERROR: cannot replace existing key
-HINT: Try using the function jsonb_set to replace key value.
-select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"', true);
-ERROR: cannot replace existing key
-HINT: Try using the function jsonb_set to replace key value.
--- jsonb subscript
-select ('123'::jsonb)['a'];
- jsonb
--------
-
-(1 row)
-
-select ('123'::jsonb)[0];
- jsonb
--------
-
-(1 row)
-
-select ('123'::jsonb)[NULL];
- jsonb
--------
-
-(1 row)
-
-select ('{"a": 1}'::jsonb)['a'];
- jsonb
--------
- 1
-(1 row)
-
-select ('{"a": 1}'::jsonb)[0];
- jsonb
--------
-
-(1 row)
-
-select ('{"a": 1}'::jsonb)['not_exist'];
- jsonb
--------
-
-(1 row)
-
-select ('{"a": 1}'::jsonb)[NULL];
- jsonb
--------
-
-(1 row)
-
-select ('[1, "2", null]'::jsonb)['a'];
- jsonb
--------
-
-(1 row)
-
-select ('[1, "2", null]'::jsonb)[0];
- jsonb
--------
- 1
-(1 row)
-
-select ('[1, "2", null]'::jsonb)['1'];
- jsonb
--------
- "2"
-(1 row)
-
-select ('[1, "2", null]'::jsonb)[1.0];
-ERROR: subscript type numeric is not supported
-LINE 1: select ('[1, "2", null]'::jsonb)[1.0];
- ^
-HINT: jsonb subscript must be coercible to either integer or text.
-select ('[1, "2", null]'::jsonb)[2];
- jsonb
--------
- null
-(1 row)
-
-select ('[1, "2", null]'::jsonb)[3];
- jsonb
--------
-
-(1 row)
-
-select ('[1, "2", null]'::jsonb)[-2];
- jsonb
--------
- "2"
-(1 row)
-
-select ('[1, "2", null]'::jsonb)[1]['a'];
- jsonb
--------
-
-(1 row)
-
-select ('[1, "2", null]'::jsonb)[1][0];
- jsonb
--------
-
-(1 row)
-
-select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['b'];
- jsonb
--------
- "c"
-(1 row)
-
-select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'];
- jsonb
------------
- [1, 2, 3]
-(1 row)
-
-select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'][1];
- jsonb
--------
- 2
-(1 row)
-
-select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']['a'];
- jsonb
--------
-
-(1 row)
-
-select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1'];
- jsonb
----------------
- {"a2": "aaa"}
-(1 row)
-
-select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2'];
- jsonb
--------
- "aaa"
-(1 row)
-
-select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']['a3'];
- jsonb
--------
-
-(1 row)
-
-select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'];
- jsonb
------------------------
- ["aaa", "bbb", "ccc"]
-(1 row)
-
-select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'][2];
- jsonb
--------
- "ccc"
-(1 row)
-
--- slices are not supported
-select ('{"a": 1}'::jsonb)['a':'b'];
-ERROR: jsonb subscript does not support slices
-LINE 1: select ('{"a": 1}'::jsonb)['a':'b'];
- ^
-select ('[1, "2", null]'::jsonb)[1:2];
-ERROR: jsonb subscript does not support slices
-LINE 1: select ('[1, "2", null]'::jsonb)[1:2];
- ^
-select ('[1, "2", null]'::jsonb)[:2];
-ERROR: jsonb subscript does not support slices
-LINE 1: select ('[1, "2", null]'::jsonb)[:2];
- ^
-select ('[1, "2", null]'::jsonb)[1:];
-ERROR: jsonb subscript does not support slices
-LINE 1: select ('[1, "2", null]'::jsonb)[1:];
- ^
-select ('[1, "2", null]'::jsonb)[:];
-ERROR: jsonb subscript does not support slices
-create TEMP TABLE test_jsonb_subscript (
- id int,
- test_json jsonb
-);
-insert into test_jsonb_subscript values
-(1, '{}'), -- empty jsonb
-(2, '{"key": "value"}'); -- jsonb with data
--- update empty jsonb
-update test_jsonb_subscript set test_json['a'] = '1' where id = 1;
-select * from test_jsonb_subscript;
- id | test_json
-----+------------------
- 2 | {"key": "value"}
- 1 | {"a": 1}
-(2 rows)
-
--- update jsonb with some data
-update test_jsonb_subscript set test_json['a'] = '1' where id = 2;
-select * from test_jsonb_subscript;
- id | test_json
-----+--------------------------
- 1 | {"a": 1}
- 2 | {"a": 1, "key": "value"}
-(2 rows)
-
--- replace jsonb
-update test_jsonb_subscript set test_json['a'] = '"test"';
-select * from test_jsonb_subscript;
- id | test_json
-----+-------------------------------
- 1 | {"a": "test"}
- 2 | {"a": "test", "key": "value"}
-(2 rows)
-
--- replace by object
-update test_jsonb_subscript set test_json['a'] = '{"b": 1}'::jsonb;
-select * from test_jsonb_subscript;
- id | test_json
-----+---------------------------------
- 1 | {"a": {"b": 1}}
- 2 | {"a": {"b": 1}, "key": "value"}
-(2 rows)
-
--- replace by array
-update test_jsonb_subscript set test_json['a'] = '[1, 2, 3]'::jsonb;
-select * from test_jsonb_subscript;
- id | test_json
-----+----------------------------------
- 1 | {"a": [1, 2, 3]}
- 2 | {"a": [1, 2, 3], "key": "value"}
-(2 rows)
-
--- use jsonb subscription in where clause
-select * from test_jsonb_subscript where test_json['key'] = '"value"';
- id | test_json
-----+----------------------------------
- 2 | {"a": [1, 2, 3], "key": "value"}
-(1 row)
-
-select * from test_jsonb_subscript where test_json['key_doesnt_exists'] = '"value"';
- id | test_json
-----+-----------
-(0 rows)
-
-select * from test_jsonb_subscript where test_json['key'] = '"wrong_value"';
- id | test_json
-----+-----------
-(0 rows)
-
--- NULL
-update test_jsonb_subscript set test_json[NULL] = '1';
-ERROR: jsonb subscript in assignment must not be null
-update test_jsonb_subscript set test_json['another_key'] = NULL;
-select * from test_jsonb_subscript;
- id | test_json
-----+-------------------------------------------------------
- 1 | {"a": [1, 2, 3], "another_key": null}
- 2 | {"a": [1, 2, 3], "key": "value", "another_key": null}
-(2 rows)
-
--- NULL as jsonb source
-insert into test_jsonb_subscript values (3, NULL);
-update test_jsonb_subscript set test_json['a'] = '1' where id = 3;
-select * from test_jsonb_subscript;
- id | test_json
-----+-------------------------------------------------------
- 1 | {"a": [1, 2, 3], "another_key": null}
- 2 | {"a": [1, 2, 3], "key": "value", "another_key": null}
- 3 | {"a": 1}
-(3 rows)
-
-update test_jsonb_subscript set test_json = NULL where id = 3;
-update test_jsonb_subscript set test_json[0] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+---------------------------------------------------------------
- 1 | {"0": 1, "a": [1, 2, 3], "another_key": null}
- 2 | {"0": 1, "a": [1, 2, 3], "key": "value", "another_key": null}
- 3 | [1]
-(3 rows)
-
--- Fill the gaps logic
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '[0]');
-update test_jsonb_subscript set test_json[5] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+--------------------------------
- 1 | [0, null, null, null, null, 1]
-(1 row)
-
-update test_jsonb_subscript set test_json[-4] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+-----------------------------
- 1 | [0, null, 1, null, null, 1]
-(1 row)
-
-update test_jsonb_subscript set test_json[-8] = '1';
-ERROR: path element at position 1 is out of range: -8
-select * from test_jsonb_subscript;
- id | test_json
-----+-----------------------------
- 1 | [0, null, 1, null, null, 1]
-(1 row)
-
--- keep consistent values position
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '[]');
-update test_jsonb_subscript set test_json[5] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+-----------------------------------
- 1 | [null, null, null, null, null, 1]
-(1 row)
-
--- create the whole path
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '{}');
-update test_jsonb_subscript set test_json['a'][0]['b'][0]['c'] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+----------------------------
- 1 | {"a": [{"b": [{"c": 1}]}]}
-(1 row)
-
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '{}');
-update test_jsonb_subscript set test_json['a'][2]['b'][2]['c'][2] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+------------------------------------------------------------------
- 1 | {"a": [null, null, {"b": [null, null, {"c": [null, null, 1]}]}]}
-(1 row)
-
--- create the whole path with already existing keys
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '{"b": 1}');
-update test_jsonb_subscript set test_json['a'][0] = '2';
-select * from test_jsonb_subscript;
- id | test_json
-----+--------------------
- 1 | {"a": [2], "b": 1}
-(1 row)
-
--- the start jsonb is an object, first subscript is treated as a key
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '{}');
-update test_jsonb_subscript set test_json[0]['a'] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+-----------------
- 1 | {"0": {"a": 1}}
-(1 row)
-
--- the start jsonb is an array
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '[]');
-update test_jsonb_subscript set test_json[0]['a'] = '1';
-update test_jsonb_subscript set test_json[2]['b'] = '2';
-select * from test_jsonb_subscript;
- id | test_json
-----+----------------------------
- 1 | [{"a": 1}, null, {"b": 2}]
-(1 row)
-
--- overwriting an existing path
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '{}');
-update test_jsonb_subscript set test_json['a']['b'][1] = '1';
-update test_jsonb_subscript set test_json['a']['b'][10] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+----------------------------------------------------------------------------
- 1 | {"a": {"b": [null, 1, null, null, null, null, null, null, null, null, 1]}}
-(1 row)
-
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '[]');
-update test_jsonb_subscript set test_json[0][0][0] = '1';
-update test_jsonb_subscript set test_json[0][0][1] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+------------
- 1 | [[[1, 1]]]
-(1 row)
-
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '{}');
-update test_jsonb_subscript set test_json['a']['b'][10] = '1';
-update test_jsonb_subscript set test_json['a'][10][10] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+------------------------------------------------------------------------------------------------------------------------------------------------------
- 1 | {"a": {"b": [null, null, null, null, null, null, null, null, null, null, 1], "10": [null, null, null, null, null, null, null, null, null, null, 1]}}
-(1 row)
-
--- an empty sub element
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '{"a": {}}');
-update test_jsonb_subscript set test_json['a']['b']['c'][2] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+--------------------------------------
- 1 | {"a": {"b": {"c": [null, null, 1]}}}
-(1 row)
-
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '{"a": []}');
-update test_jsonb_subscript set test_json['a'][1]['c'][2] = '1';
-select * from test_jsonb_subscript;
- id | test_json
-----+---------------------------------------
- 1 | {"a": [null, {"c": [null, null, 1]}]}
-(1 row)
-
--- trying replace assuming a composite object, but it's an element or a value
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, '{"a": 1}');
-update test_jsonb_subscript set test_json['a']['b'] = '1';
-ERROR: cannot replace existing key
-DETAIL: The path assumes key is a composite object, but it is a scalar value.
-update test_jsonb_subscript set test_json['a']['b']['c'] = '1';
-ERROR: cannot replace existing key
-DETAIL: The path assumes key is a composite object, but it is a scalar value.
-update test_jsonb_subscript set test_json['a'][0] = '1';
-ERROR: cannot replace existing key
-DETAIL: The path assumes key is a composite object, but it is a scalar value.
-update test_jsonb_subscript set test_json['a'][0]['c'] = '1';
-ERROR: cannot replace existing key
-DETAIL: The path assumes key is a composite object, but it is a scalar value.
-update test_jsonb_subscript set test_json['a'][0][0] = '1';
-ERROR: cannot replace existing key
-DETAIL: The path assumes key is a composite object, but it is a scalar value.
--- trying replace assuming a composite object, but it's a raw scalar
-delete from test_jsonb_subscript;
-insert into test_jsonb_subscript values (1, 'null');
-update test_jsonb_subscript set test_json[0] = '1';
-ERROR: cannot replace existing key
-DETAIL: The path assumes key is a composite object, but it is a scalar value.
-update test_jsonb_subscript set test_json[0][0] = '1';
-ERROR: cannot replace existing key
-DETAIL: The path assumes key is a composite object, but it is a scalar value.
--- try some things with short-header and toasted subscript values
-drop table test_jsonb_subscript;
-create temp table test_jsonb_subscript (
- id text,
- test_json jsonb
-);
-insert into test_jsonb_subscript values('foo', '{"foo": "bar"}');
-insert into test_jsonb_subscript
- select s, ('{"' || s || '": "bar"}')::jsonb from repeat('xyzzy', 500) s;
-select length(id), test_json[id] from test_jsonb_subscript;
- length | test_json
---------+-----------
- 3 | "bar"
- 2500 | "bar"
-(2 rows)
-
-update test_jsonb_subscript set test_json[id] = '"baz"';
-select length(id), test_json[id] from test_jsonb_subscript;
- length | test_json
---------+-----------
- 3 | "baz"
- 2500 | "baz"
-(2 rows)
-
-\x
-table test_jsonb_subscript;
--[ RECORD 1 ]------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-id | foo
-test_json | {"foo": "baz"}
--[ RECORD 2 ]------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-id | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzz
yxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy
-test_json | {"xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy
xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy": "baz"}
-
-\x
--- jsonb to tsvector
-select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb);
- to_tsvector
----------------------------------------------------------------------------
- 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11
-(1 row)
-
--- jsonb to tsvector with config
-select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb);
- to_tsvector
----------------------------------------------------------------------------
- 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11
-(1 row)
-
--- jsonb to tsvector with stop words
-select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. iii"}}'::jsonb);
- to_tsvector
-----------------------------------------------------------------------------
- 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13
-(1 row)
-
--- jsonb to tsvector with numeric values
-select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::jsonb);
- to_tsvector
----------------------------------
- 'aaa':1 'bbb':3 'ccc':5 'ddd':4
-(1 row)
-
--- jsonb_to_tsvector
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"');
- jsonb_to_tsvector
-----------------------------------------------------------------------------------------
- '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"');
- jsonb_to_tsvector
---------------------------------
- 'b':2 'c':4 'd':6 'f':8 'g':10
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"');
- jsonb_to_tsvector
--------------------
- 'aaa':1 'bbb':3
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"');
- jsonb_to_tsvector
--------------------
- '123':1 '456':3
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"');
- jsonb_to_tsvector
--------------------
- 'fals':3 'true':1
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]');
- jsonb_to_tsvector
----------------------------------
- '123':5 '456':7 'aaa':1 'bbb':3
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"');
- jsonb_to_tsvector
-----------------------------------------------------------------------------------------
- '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"');
- jsonb_to_tsvector
---------------------------------
- 'b':2 'c':4 'd':6 'f':8 'g':10
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"');
- jsonb_to_tsvector
--------------------
- 'aaa':1 'bbb':3
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"');
- jsonb_to_tsvector
--------------------
- '123':1 '456':3
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"');
- jsonb_to_tsvector
--------------------
- 'fals':3 'true':1
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]');
- jsonb_to_tsvector
----------------------------------
- '123':5 '456':7 'aaa':1 'bbb':3
-(1 row)
-
--- to_tsvector corner cases
-select to_tsvector('""'::jsonb);
- to_tsvector
--------------
-
-(1 row)
-
-select to_tsvector('{}'::jsonb);
- to_tsvector
--------------
-
-(1 row)
-
-select to_tsvector('[]'::jsonb);
- to_tsvector
--------------
-
-(1 row)
-
-select to_tsvector('null'::jsonb);
- to_tsvector
--------------
-
-(1 row)
-
--- jsonb_to_tsvector corner cases
-select jsonb_to_tsvector('""'::jsonb, '"all"');
- jsonb_to_tsvector
--------------------
-
-(1 row)
-
-select jsonb_to_tsvector('{}'::jsonb, '"all"');
- jsonb_to_tsvector
--------------------
-
-(1 row)
-
-select jsonb_to_tsvector('[]'::jsonb, '"all"');
- jsonb_to_tsvector
--------------------
-
-(1 row)
-
-select jsonb_to_tsvector('null'::jsonb, '"all"');
- jsonb_to_tsvector
--------------------
-
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '""');
-ERROR: wrong flag in flag array: ""
-HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '{}');
-ERROR: wrong flag type, only arrays and scalars are allowed
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '[]');
- jsonb_to_tsvector
--------------------
-
-(1 row)
-
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, 'null');
-ERROR: flag array element is not a string
-HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
-select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["all", null]');
-ERROR: flag array element is not a string
-HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
--- ts_headline for jsonb
-select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'));
- ts_headline
-------------------------------------------------------------------------------------------------------------------
- {"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}
-(1 row)
-
-select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'));
- ts_headline
------------------------------------------------------------------------------------------------
- {"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}
-(1 row)
-
-select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
- ts_headline
----------------------------------------------------------------------------------------------------
- {"a": "aaa ", "b": {"c": "ccc fff", "c1": "ccc1 ddd1"}, "d": ["ggg ", "iii jjj"]}
-(1 row)
-
-select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
- ts_headline
----------------------------------------------------------------------------------------------------
- {"a": "aaa ", "b": {"c": "ccc fff", "c1": "ccc1 ddd1"}, "d": ["ggg ", "iii jjj"]}
-(1 row)
-
--- corner cases for ts_headline with jsonb
-select ts_headline('null'::jsonb, tsquery('aaa & bbb'));
- ts_headline
--------------
- null
-(1 row)
-
-select ts_headline('{}'::jsonb, tsquery('aaa & bbb'));
- ts_headline
--------------
- {}
-(1 row)
-
-select ts_headline('[]'::jsonb, tsquery('aaa & bbb'));
- ts_headline
--------------
- []
-(1 row)
-
--- casts
-select 'true'::jsonb::bool;
- bool
-------
- t
-(1 row)
-
-select '[]'::jsonb::bool;
-ERROR: cannot cast jsonb array to type boolean
-select '1.0'::jsonb::float;
- float8
---------
- 1
-(1 row)
-
-select '[1.0]'::jsonb::float;
-ERROR: cannot cast jsonb array to type double precision
-select '12345'::jsonb::int4;
- int4
--------
- 12345
-(1 row)
-
-select '"hello"'::jsonb::int4;
-ERROR: cannot cast jsonb string to type integer
-select '12345'::jsonb::numeric;
- numeric
----------
- 12345
-(1 row)
-
-select '{}'::jsonb::numeric;
-ERROR: cannot cast jsonb object to type numeric
-select '12345.05'::jsonb::numeric;
- numeric
-----------
- 12345.05
-(1 row)
-
-select '12345.05'::jsonb::float4;
- float4
-----------
- 12345.05
-(1 row)
-
-select '12345.05'::jsonb::float8;
- float8
-----------
- 12345.05
-(1 row)
-
-select '12345.05'::jsonb::int2;
- int2
--------
- 12345
-(1 row)
-
-select '12345.05'::jsonb::int4;
- int4
--------
- 12345
-(1 row)
-
-select '12345.05'::jsonb::int8;
- int8
--------
- 12345
-(1 row)
-
-select '12345.0000000000000000000000000000000000000000000005'::jsonb::numeric;
- numeric
-------------------------------------------------------
- 12345.0000000000000000000000000000000000000000000005
-(1 row)
-
-select '12345.0000000000000000000000000000000000000000000005'::jsonb::float4;
- float4
---------
- 12345
-(1 row)
-
-select '12345.0000000000000000000000000000000000000000000005'::jsonb::float8;
- float8
---------
- 12345
-(1 row)
-
-select '12345.0000000000000000000000000000000000000000000005'::jsonb::int2;
- int2
--------
- 12345
-(1 row)
-
-select '12345.0000000000000000000000000000000000000000000005'::jsonb::int4;
- int4
--------
- 12345
-(1 row)
-
-select '12345.0000000000000000000000000000000000000000000005'::jsonb::int8;
- int8
--------
- 12345
-(1 row)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/json_encoding_2.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/json_encoding.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/json_encoding_2.out 2024-11-15 02:50:52.458101712 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/json_encoding.out 2024-11-15 02:59:17.833116508 +0000
@@ -1,9 +1,2 @@
---
--- encoding-sensitive tests for json and jsonb
---
--- We provide expected-results files for UTF8 (json_encoding.out)
--- and for SQL_ASCII (json_encoding_1.out). Skip otherwise.
-SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
- AS skip_test \gset
-\if :skip_test
-\quit
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/jsonpath.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/jsonpath.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/jsonpath.out 2024-11-15 02:50:52.462095130 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/jsonpath.out 2024-11-15 02:59:17.853116535 +0000
@@ -1,1296 +1,2 @@
---jsonpath io
-select ''::jsonpath;
-ERROR: invalid input syntax for type jsonpath: ""
-LINE 1: select ''::jsonpath;
- ^
-select '$'::jsonpath;
- jsonpath
-----------
- $
-(1 row)
-
-select 'strict $'::jsonpath;
- jsonpath
-----------
- strict $
-(1 row)
-
-select 'lax $'::jsonpath;
- jsonpath
-----------
- $
-(1 row)
-
-select '$.a'::jsonpath;
- jsonpath
-----------
- $."a"
-(1 row)
-
-select '$.a.v'::jsonpath;
- jsonpath
------------
- $."a"."v"
-(1 row)
-
-select '$.a.*'::jsonpath;
- jsonpath
-----------
- $."a".*
-(1 row)
-
-select '$.*[*]'::jsonpath;
- jsonpath
-----------
- $.*[*]
-(1 row)
-
-select '$.a[*]'::jsonpath;
- jsonpath
-----------
- $."a"[*]
-(1 row)
-
-select '$.a[*][*]'::jsonpath;
- jsonpath
--------------
- $."a"[*][*]
-(1 row)
-
-select '$[*]'::jsonpath;
- jsonpath
-----------
- $[*]
-(1 row)
-
-select '$[0]'::jsonpath;
- jsonpath
-----------
- $[0]
-(1 row)
-
-select '$[*][0]'::jsonpath;
- jsonpath
-----------
- $[*][0]
-(1 row)
-
-select '$[*].a'::jsonpath;
- jsonpath
-----------
- $[*]."a"
-(1 row)
-
-select '$[*][0].a.b'::jsonpath;
- jsonpath
------------------
- $[*][0]."a"."b"
-(1 row)
-
-select '$.a.**.b'::jsonpath;
- jsonpath
---------------
- $."a".**."b"
-(1 row)
-
-select '$.a.**{2}.b'::jsonpath;
- jsonpath
------------------
- $."a".**{2}."b"
-(1 row)
-
-select '$.a.**{2 to 2}.b'::jsonpath;
- jsonpath
------------------
- $."a".**{2}."b"
-(1 row)
-
-select '$.a.**{2 to 5}.b'::jsonpath;
- jsonpath
-----------------------
- $."a".**{2 to 5}."b"
-(1 row)
-
-select '$.a.**{0 to 5}.b'::jsonpath;
- jsonpath
-----------------------
- $."a".**{0 to 5}."b"
-(1 row)
-
-select '$.a.**{5 to last}.b'::jsonpath;
- jsonpath
--------------------------
- $."a".**{5 to last}."b"
-(1 row)
-
-select '$.a.**{last}.b'::jsonpath;
- jsonpath
---------------------
- $."a".**{last}."b"
-(1 row)
-
-select '$.a.**{last to 5}.b'::jsonpath;
- jsonpath
--------------------------
- $."a".**{last to 5}."b"
-(1 row)
-
-select '$+1'::jsonpath;
- jsonpath
-----------
- ($ + 1)
-(1 row)
-
-select '$-1'::jsonpath;
- jsonpath
-----------
- ($ - 1)
-(1 row)
-
-select '$--+1'::jsonpath;
- jsonpath
-----------
- ($ - -1)
-(1 row)
-
-select '$.a/+-1'::jsonpath;
- jsonpath
---------------
- ($."a" / -1)
-(1 row)
-
-select '1 * 2 + 4 % -3 != false'::jsonpath;
- jsonpath
----------------------------
- (1 * 2 + 4 % -3 != false)
-(1 row)
-
-select '"\b\f\r\n\t\v\"\''\\"'::jsonpath;
- jsonpath
--------------------------
- "\b\f\r\n\t\u000b\"'\\"
-(1 row)
-
-select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath;
- jsonpath
-----------
- "PgSQL"
-(1 row)
-
-select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath;
- jsonpath
----------------------
- $."fooPgSQL\t\"bar"
-(1 row)
-
-select '"\z"'::jsonpath; -- unrecognized escape is just the literal char
- jsonpath
-----------
- "z"
-(1 row)
-
-select '$.g ? ($.a == 1)'::jsonpath;
- jsonpath
---------------------
- $."g"?($."a" == 1)
-(1 row)
-
-select '$.g ? (@ == 1)'::jsonpath;
- jsonpath
-----------------
- $."g"?(@ == 1)
-(1 row)
-
-select '$.g ? (@.a == 1)'::jsonpath;
- jsonpath
---------------------
- $."g"?(@."a" == 1)
-(1 row)
-
-select '$.g ? (@.a == 1 || @.a == 4)'::jsonpath;
- jsonpath
-----------------------------------
- $."g"?(@."a" == 1 || @."a" == 4)
-(1 row)
-
-select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath;
- jsonpath
-----------------------------------
- $."g"?(@."a" == 1 && @."a" == 4)
-(1 row)
-
-select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath;
- jsonpath
-------------------------------------------------
- $."g"?(@."a" == 1 || @."a" == 4 && @."b" == 7)
-(1 row)
-
-select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath;
- jsonpath
----------------------------------------------------
- $."g"?(@."a" == 1 || !(@."a" == 4) && @."b" == 7)
-(1 row)
-
-select '$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath;
- jsonpath
--------------------------------------------------------------------
- $."g"?(@."a" == 1 || !(@."x" >= 123 || @."a" == 4) && @."b" == 7)
-(1 row)
-
-select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath;
- jsonpath
----------------------------------------
- $."g"?(@."x" >= @[*]?(@."a" > "abc"))
-(1 row)
-
-select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath;
- jsonpath
--------------------------------------------------
- $."g"?((@."x" >= 123 || @."a" == 4) is unknown)
-(1 row)
-
-select '$.g ? (exists (@.x))'::jsonpath;
- jsonpath
-------------------------
- $."g"?(exists (@."x"))
-(1 row)
-
-select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath;
- jsonpath
-----------------------------------
- $."g"?(exists (@."x"?(@ == 14)))
-(1 row)
-
-select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath;
- jsonpath
-------------------------------------------------------------------
- $."g"?((@."x" >= 123 || @."a" == 4) && exists (@."x"?(@ == 14)))
-(1 row)
-
-select '$.g ? (+@.x >= +-(+@.a + 2))'::jsonpath;
- jsonpath
-------------------------------------
- $."g"?(+@."x" >= +(-(+@."a" + 2)))
-(1 row)
-
-select '$a'::jsonpath;
- jsonpath
-----------
- $"a"
-(1 row)
-
-select '$a.b'::jsonpath;
- jsonpath
-----------
- $"a"."b"
-(1 row)
-
-select '$a[*]'::jsonpath;
- jsonpath
-----------
- $"a"[*]
-(1 row)
-
-select '$.g ? (@.zip == $zip)'::jsonpath;
- jsonpath
----------------------------
- $."g"?(@."zip" == $"zip")
-(1 row)
-
-select '$.a[1,2, 3 to 16]'::jsonpath;
- jsonpath
---------------------
- $."a"[1,2,3 to 16]
-(1 row)
-
-select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath;
- jsonpath
-----------------------------------------
- $."a"[$"a" + 1,$"b"[*] to -($[0] * 2)]
-(1 row)
-
-select '$.a[$.a.size() - 3]'::jsonpath;
- jsonpath
--------------------------
- $."a"[$."a".size() - 3]
-(1 row)
-
-select 'last'::jsonpath;
-ERROR: LAST is allowed only in array subscripts
-LINE 1: select 'last'::jsonpath;
- ^
-select '"last"'::jsonpath;
- jsonpath
-----------
- "last"
-(1 row)
-
-select '$.last'::jsonpath;
- jsonpath
-----------
- $."last"
-(1 row)
-
-select '$ ? (last > 0)'::jsonpath;
-ERROR: LAST is allowed only in array subscripts
-LINE 1: select '$ ? (last > 0)'::jsonpath;
- ^
-select '$[last]'::jsonpath;
- jsonpath
-----------
- $[last]
-(1 row)
-
-select '$[$[0] ? (last > 0)]'::jsonpath;
- jsonpath
---------------------
- $[$[0]?(last > 0)]
-(1 row)
-
-select 'null.type()'::jsonpath;
- jsonpath
--------------
- null.type()
-(1 row)
-
-select '1.type()'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1.t" of jsonpath input
-LINE 1: select '1.type()'::jsonpath;
- ^
-select '(1).type()'::jsonpath;
- jsonpath
-------------
- (1).type()
-(1 row)
-
-select '1.2.type()'::jsonpath;
- jsonpath
---------------
- (1.2).type()
-(1 row)
-
-select '"aaa".type()'::jsonpath;
- jsonpath
---------------
- "aaa".type()
-(1 row)
-
-select 'true.type()'::jsonpath;
- jsonpath
--------------
- true.type()
-(1 row)
-
-select '$.double().floor().ceiling().abs()'::jsonpath;
- jsonpath
-------------------------------------
- $.double().floor().ceiling().abs()
-(1 row)
-
-select '$.keyvalue().key'::jsonpath;
- jsonpath
---------------------
- $.keyvalue()."key"
-(1 row)
-
-select '$.datetime()'::jsonpath;
- jsonpath
---------------
- $.datetime()
-(1 row)
-
-select '$.datetime("datetime template")'::jsonpath;
- jsonpath
----------------------------------
- $.datetime("datetime template")
-(1 row)
-
-select '$.bigint().integer().number().decimal()'::jsonpath;
- jsonpath
------------------------------------------
- $.bigint().integer().number().decimal()
-(1 row)
-
-select '$.boolean()'::jsonpath;
- jsonpath
--------------
- $.boolean()
-(1 row)
-
-select '$.date()'::jsonpath;
- jsonpath
-----------
- $.date()
-(1 row)
-
-select '$.decimal(4,2)'::jsonpath;
- jsonpath
-----------------
- $.decimal(4,2)
-(1 row)
-
-select '$.string()'::jsonpath;
- jsonpath
-------------
- $.string()
-(1 row)
-
-select '$.time()'::jsonpath;
- jsonpath
-----------
- $.time()
-(1 row)
-
-select '$.time(6)'::jsonpath;
- jsonpath
------------
- $.time(6)
-(1 row)
-
-select '$.time_tz()'::jsonpath;
- jsonpath
--------------
- $.time_tz()
-(1 row)
-
-select '$.time_tz(4)'::jsonpath;
- jsonpath
---------------
- $.time_tz(4)
-(1 row)
-
-select '$.timestamp()'::jsonpath;
- jsonpath
----------------
- $.timestamp()
-(1 row)
-
-select '$.timestamp(2)'::jsonpath;
- jsonpath
-----------------
- $.timestamp(2)
-(1 row)
-
-select '$.timestamp_tz()'::jsonpath;
- jsonpath
-------------------
- $.timestamp_tz()
-(1 row)
-
-select '$.timestamp_tz(0)'::jsonpath;
- jsonpath
--------------------
- $.timestamp_tz(0)
-(1 row)
-
-select '$ ? (@ starts with "abc")'::jsonpath;
- jsonpath
--------------------------
- $?(@ starts with "abc")
-(1 row)
-
-select '$ ? (@ starts with $var)'::jsonpath;
- jsonpath
---------------------------
- $?(@ starts with $"var")
-(1 row)
-
-select '$ ? (@ like_regex "(invalid pattern")'::jsonpath;
-ERROR: invalid regular expression: parentheses () not balanced
-LINE 1: select '$ ? (@ like_regex "(invalid pattern")'::jsonpath;
- ^
-select '$ ? (@ like_regex "pattern")'::jsonpath;
- jsonpath
-----------------------------
- $?(@ like_regex "pattern")
-(1 row)
-
-select '$ ? (@ like_regex "pattern" flag "")'::jsonpath;
- jsonpath
-----------------------------
- $?(@ like_regex "pattern")
-(1 row)
-
-select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath;
- jsonpath
--------------------------------------
- $?(@ like_regex "pattern" flag "i")
-(1 row)
-
-select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath;
- jsonpath
---------------------------------------
- $?(@ like_regex "pattern" flag "is")
-(1 row)
-
-select '$ ? (@ like_regex "pattern" flag "isim")'::jsonpath;
- jsonpath
----------------------------------------
- $?(@ like_regex "pattern" flag "ism")
-(1 row)
-
-select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath;
-ERROR: XQuery "x" flag (expanded regular expressions) is not implemented
-LINE 1: select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath;
- ^
-select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath;
- jsonpath
--------------------------------------
- $?(@ like_regex "pattern" flag "q")
-(1 row)
-
-select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath;
- jsonpath
---------------------------------------
- $?(@ like_regex "pattern" flag "iq")
-(1 row)
-
-select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath;
- jsonpath
------------------------------------------
- $?(@ like_regex "pattern" flag "ismxq")
-(1 row)
-
-select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath;
-ERROR: invalid input syntax for type jsonpath
-LINE 1: select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath;
- ^
-DETAIL: Unrecognized flag character "a" in LIKE_REGEX predicate.
-select '$ < 1'::jsonpath;
- jsonpath
-----------
- ($ < 1)
-(1 row)
-
-select '($ < 1) || $.a.b <= $x'::jsonpath;
- jsonpath
-------------------------------
- ($ < 1 || $."a"."b" <= $"x")
-(1 row)
-
-select '@ + 1'::jsonpath;
-ERROR: @ is not allowed in root expressions
-LINE 1: select '@ + 1'::jsonpath;
- ^
-select '($).a.b'::jsonpath;
- jsonpath
------------
- $."a"."b"
-(1 row)
-
-select '($.a.b).c.d'::jsonpath;
- jsonpath
--------------------
- $."a"."b"."c"."d"
-(1 row)
-
-select '($.a.b + -$.x.y).c.d'::jsonpath;
- jsonpath
-----------------------------------
- ($."a"."b" + -$."x"."y")."c"."d"
-(1 row)
-
-select '(-+$.a.b).c.d'::jsonpath;
- jsonpath
--------------------------
- (-(+$."a"."b"))."c"."d"
-(1 row)
-
-select '1 + ($.a.b + 2).c.d'::jsonpath;
- jsonpath
--------------------------------
- (1 + ($."a"."b" + 2)."c"."d")
-(1 row)
-
-select '1 + ($.a.b > 2).c.d'::jsonpath;
- jsonpath
--------------------------------
- (1 + ($."a"."b" > 2)."c"."d")
-(1 row)
-
-select '($)'::jsonpath;
- jsonpath
-----------
- $
-(1 row)
-
-select '(($))'::jsonpath;
- jsonpath
-----------
- $
-(1 row)
-
-select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath;
- jsonpath
----------------------------------------------------
- (($ + 1)."a" + (2)."b"?(@ > 1 || exists (@."c")))
-(1 row)
-
-select '$ ? (@.a < 1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < -1)'::jsonpath;
- jsonpath
-----------------
- $?(@."a" < -1)
-(1 row)
-
-select '$ ? (@.a < +1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < .1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 0.1)
-(1 row)
-
-select '$ ? (@.a < -.1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < -0.1)
-(1 row)
-
-select '$ ? (@.a < +.1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 0.1)
-(1 row)
-
-select '$ ? (@.a < 0.1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 0.1)
-(1 row)
-
-select '$ ? (@.a < -0.1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < -0.1)
-(1 row)
-
-select '$ ? (@.a < +0.1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 0.1)
-(1 row)
-
-select '$ ? (@.a < 10.1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < 10.1)
-(1 row)
-
-select '$ ? (@.a < -10.1)'::jsonpath;
- jsonpath
--------------------
- $?(@."a" < -10.1)
-(1 row)
-
-select '$ ? (@.a < +10.1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < 10.1)
-(1 row)
-
-select '$ ? (@.a < 1e1)'::jsonpath;
- jsonpath
-----------------
- $?(@."a" < 10)
-(1 row)
-
-select '$ ? (@.a < -1e1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < -10)
-(1 row)
-
-select '$ ? (@.a < +1e1)'::jsonpath;
- jsonpath
-----------------
- $?(@."a" < 10)
-(1 row)
-
-select '$ ? (@.a < .1e1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < -.1e1)'::jsonpath;
- jsonpath
-----------------
- $?(@."a" < -1)
-(1 row)
-
-select '$ ? (@.a < +.1e1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < 0.1e1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < -0.1e1)'::jsonpath;
- jsonpath
-----------------
- $?(@."a" < -1)
-(1 row)
-
-select '$ ? (@.a < +0.1e1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < 10.1e1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 101)
-(1 row)
-
-select '$ ? (@.a < -10.1e1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < -101)
-(1 row)
-
-select '$ ? (@.a < +10.1e1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 101)
-(1 row)
-
-select '$ ? (@.a < 1e-1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 0.1)
-(1 row)
-
-select '$ ? (@.a < -1e-1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < -0.1)
-(1 row)
-
-select '$ ? (@.a < +1e-1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 0.1)
-(1 row)
-
-select '$ ? (@.a < .1e-1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < 0.01)
-(1 row)
-
-select '$ ? (@.a < -.1e-1)'::jsonpath;
- jsonpath
--------------------
- $?(@."a" < -0.01)
-(1 row)
-
-select '$ ? (@.a < +.1e-1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < 0.01)
-(1 row)
-
-select '$ ? (@.a < 0.1e-1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < 0.01)
-(1 row)
-
-select '$ ? (@.a < -0.1e-1)'::jsonpath;
- jsonpath
--------------------
- $?(@."a" < -0.01)
-(1 row)
-
-select '$ ? (@.a < +0.1e-1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < 0.01)
-(1 row)
-
-select '$ ? (@.a < 10.1e-1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < 1.01)
-(1 row)
-
-select '$ ? (@.a < -10.1e-1)'::jsonpath;
- jsonpath
--------------------
- $?(@."a" < -1.01)
-(1 row)
-
-select '$ ? (@.a < +10.1e-1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < 1.01)
-(1 row)
-
-select '$ ? (@.a < 1e+1)'::jsonpath;
- jsonpath
-----------------
- $?(@."a" < 10)
-(1 row)
-
-select '$ ? (@.a < -1e+1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < -10)
-(1 row)
-
-select '$ ? (@.a < +1e+1)'::jsonpath;
- jsonpath
-----------------
- $?(@."a" < 10)
-(1 row)
-
-select '$ ? (@.a < .1e+1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < -.1e+1)'::jsonpath;
- jsonpath
-----------------
- $?(@."a" < -1)
-(1 row)
-
-select '$ ? (@.a < +.1e+1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < 0.1e+1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < -0.1e+1)'::jsonpath;
- jsonpath
-----------------
- $?(@."a" < -1)
-(1 row)
-
-select '$ ? (@.a < +0.1e+1)'::jsonpath;
- jsonpath
----------------
- $?(@."a" < 1)
-(1 row)
-
-select '$ ? (@.a < 10.1e+1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 101)
-(1 row)
-
-select '$ ? (@.a < -10.1e+1)'::jsonpath;
- jsonpath
-------------------
- $?(@."a" < -101)
-(1 row)
-
-select '$ ? (@.a < +10.1e+1)'::jsonpath;
- jsonpath
------------------
- $?(@."a" < 101)
-(1 row)
-
--- numeric literals
-select '0'::jsonpath;
- jsonpath
-----------
- 0
-(1 row)
-
-select '00'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "00" of jsonpath input
-LINE 1: select '00'::jsonpath;
- ^
-select '0755'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '0755'::jsonpath;
- ^
-select '0.0'::jsonpath;
- jsonpath
-----------
- 0.0
-(1 row)
-
-select '0.000'::jsonpath;
- jsonpath
-----------
- 0.000
-(1 row)
-
-select '0.000e1'::jsonpath;
- jsonpath
-----------
- 0.00
-(1 row)
-
-select '0.000e2'::jsonpath;
- jsonpath
-----------
- 0.0
-(1 row)
-
-select '0.000e3'::jsonpath;
- jsonpath
-----------
- 0
-(1 row)
-
-select '0.0010'::jsonpath;
- jsonpath
-----------
- 0.0010
-(1 row)
-
-select '0.0010e-1'::jsonpath;
- jsonpath
-----------
- 0.00010
-(1 row)
-
-select '0.0010e+1'::jsonpath;
- jsonpath
-----------
- 0.010
-(1 row)
-
-select '0.0010e+2'::jsonpath;
- jsonpath
-----------
- 0.10
-(1 row)
-
-select '.001'::jsonpath;
- jsonpath
-----------
- 0.001
-(1 row)
-
-select '.001e1'::jsonpath;
- jsonpath
-----------
- 0.01
-(1 row)
-
-select '1.'::jsonpath;
- jsonpath
-----------
- 1
-(1 row)
-
-select '1.e1'::jsonpath;
- jsonpath
-----------
- 10
-(1 row)
-
-select '1a'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1a" of jsonpath input
-LINE 1: select '1a'::jsonpath;
- ^
-select '1e'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1e" of jsonpath input
-LINE 1: select '1e'::jsonpath;
- ^
-select '1.e'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1.e" of jsonpath input
-LINE 1: select '1.e'::jsonpath;
- ^
-select '1.2a'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1.2a" of jsonpath input
-LINE 1: select '1.2a'::jsonpath;
- ^
-select '1.2e'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1.2e" of jsonpath input
-LINE 1: select '1.2e'::jsonpath;
- ^
-select '1.2.e'::jsonpath;
- jsonpath
------------
- (1.2)."e"
-(1 row)
-
-select '(1.2).e'::jsonpath;
- jsonpath
------------
- (1.2)."e"
-(1 row)
-
-select '1e3'::jsonpath;
- jsonpath
-----------
- 1000
-(1 row)
-
-select '1.e3'::jsonpath;
- jsonpath
-----------
- 1000
-(1 row)
-
-select '1.e3.e'::jsonpath;
- jsonpath
-------------
- (1000)."e"
-(1 row)
-
-select '1.e3.e4'::jsonpath;
- jsonpath
--------------
- (1000)."e4"
-(1 row)
-
-select '1.2e3'::jsonpath;
- jsonpath
-----------
- 1200
-(1 row)
-
-select '1.2e3a'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1.2e3a" of jsonpath input
-LINE 1: select '1.2e3a'::jsonpath;
- ^
-select '1.2.e3'::jsonpath;
- jsonpath
-------------
- (1.2)."e3"
-(1 row)
-
-select '(1.2).e3'::jsonpath;
- jsonpath
-------------
- (1.2)."e3"
-(1 row)
-
-select '1..e'::jsonpath;
- jsonpath
-----------
- (1)."e"
-(1 row)
-
-select '1..e3'::jsonpath;
- jsonpath
-----------
- (1)."e3"
-(1 row)
-
-select '(1.).e'::jsonpath;
- jsonpath
-----------
- (1)."e"
-(1 row)
-
-select '(1.).e3'::jsonpath;
- jsonpath
-----------
- (1)."e3"
-(1 row)
-
-select '1?(2>3)'::jsonpath;
- jsonpath
--------------
- (1)?(2 > 3)
-(1 row)
-
--- nondecimal
-select '0b100101'::jsonpath;
- jsonpath
-----------
- 37
-(1 row)
-
-select '0o273'::jsonpath;
- jsonpath
-----------
- 187
-(1 row)
-
-select '0x42F'::jsonpath;
- jsonpath
-----------
- 1071
-(1 row)
-
--- error cases
-select '0b'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "0b" of jsonpath input
-LINE 1: select '0b'::jsonpath;
- ^
-select '1b'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1b" of jsonpath input
-LINE 1: select '1b'::jsonpath;
- ^
-select '0b0x'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '0b0x'::jsonpath;
- ^
-select '0o'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "0o" of jsonpath input
-LINE 1: select '0o'::jsonpath;
- ^
-select '1o'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1o" of jsonpath input
-LINE 1: select '1o'::jsonpath;
- ^
-select '0o0x'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '0o0x'::jsonpath;
- ^
-select '0x'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "0x" of jsonpath input
-LINE 1: select '0x'::jsonpath;
- ^
-select '1x'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1x" of jsonpath input
-LINE 1: select '1x'::jsonpath;
- ^
-select '0x0y'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '0x0y'::jsonpath;
- ^
--- underscores
-select '1_000_000'::jsonpath;
- jsonpath
-----------
- 1000000
-(1 row)
-
-select '1_2_3'::jsonpath;
- jsonpath
-----------
- 123
-(1 row)
-
-select '0x1EEE_FFFF'::jsonpath;
- jsonpath
------------
- 518979583
-(1 row)
-
-select '0o2_73'::jsonpath;
- jsonpath
-----------
- 187
-(1 row)
-
-select '0b10_0101'::jsonpath;
- jsonpath
-----------
- 37
-(1 row)
-
-select '1_000.000_005'::jsonpath;
- jsonpath
--------------
- 1000.000005
-(1 row)
-
-select '1_000.'::jsonpath;
- jsonpath
-----------
- 1000
-(1 row)
-
-select '.000_005'::jsonpath;
- jsonpath
-----------
- 0.000005
-(1 row)
-
-select '1_000.5e0_1'::jsonpath;
- jsonpath
-----------
- 10005
-(1 row)
-
--- error cases
-select '_100'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '_100'::jsonpath;
- ^
-select '100_'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "100_" of jsonpath input
-LINE 1: select '100_'::jsonpath;
- ^
-select '100__000'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '100__000'::jsonpath;
- ^
-select '_1_000.5'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '_1_000.5'::jsonpath;
- ^
-select '1_000_.5'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1_000_" of jsonpath input
-LINE 1: select '1_000_.5'::jsonpath;
- ^
-select '1_000._5'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1_000._" of jsonpath input
-LINE 1: select '1_000._5'::jsonpath;
- ^
-select '1_000.5_'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1_000.5_" of jsonpath input
-LINE 1: select '1_000.5_'::jsonpath;
- ^
-select '1_000.5e_1'::jsonpath;
-ERROR: trailing junk after numeric literal at or near "1_000.5e" of jsonpath input
-LINE 1: select '1_000.5e_1'::jsonpath;
- ^
--- underscore after prefix not allowed in JavaScript (but allowed in SQL)
-select '0b_10_0101'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '0b_10_0101'::jsonpath;
- ^
-select '0o_273'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '0o_273'::jsonpath;
- ^
-select '0x_42F'::jsonpath;
-ERROR: syntax error at end of jsonpath input
-LINE 1: select '0x_42F'::jsonpath;
- ^
--- test non-error-throwing API
-SELECT str as jsonpath,
- pg_input_is_valid(str,'jsonpath') as ok,
- errinfo.sql_error_code,
- errinfo.message,
- errinfo.detail,
- errinfo.hint
-FROM unnest(ARRAY['$ ? (@ like_regex "pattern" flag "smixq")'::text,
- '$ ? (@ like_regex "pattern" flag "a")',
- '@ + 1',
- '00',
- '1a']) str,
- LATERAL pg_input_error_info(str, 'jsonpath') as errinfo;
- jsonpath | ok | sql_error_code | message | detail | hint
--------------------------------------------+----+----------------+-----------------------------------------------------------------------+----------------------------------------------------------+------
- $ ? (@ like_regex "pattern" flag "smixq") | t | | | |
- $ ? (@ like_regex "pattern" flag "a") | f | 42601 | invalid input syntax for type jsonpath | Unrecognized flag character "a" in LIKE_REGEX predicate. |
- @ + 1 | f | 42601 | @ is not allowed in root expressions | |
- 00 | f | 42601 | trailing junk after numeric literal at or near "00" of jsonpath input | |
- 1a | f | 42601 | trailing junk after numeric literal at or near "1a" of jsonpath input | |
-(5 rows)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/jsonpath_encoding_2.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/jsonpath_encoding.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/jsonpath_encoding_2.out 2024-11-15 02:50:52.462095130 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/jsonpath_encoding.out 2024-11-15 02:59:17.845116524 +0000
@@ -1,9 +1,2 @@
---
--- encoding-sensitive tests for jsonpath
---
--- We provide expected-results files for UTF8 (jsonpath_encoding.out)
--- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise.
-SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
- AS skip_test \gset
-\if :skip_test
-\quit
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/jsonb_jsonpath.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/jsonb_jsonpath.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/jsonb_jsonpath.out 2024-11-15 02:50:52.462095130 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/jsonb_jsonpath.out 2024-11-15 02:59:17.853116535 +0000
@@ -1,4512 +1,2 @@
-select jsonb '{"a": 12}' @? '$';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": 12}' @? '1';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": 12}' @? '$.a.b';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"a": 12}' @? '$.b';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"a": 12}' @? '$.a + 2';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": 12}' @? '$.b + 2';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '{"a": {"a": 12}}' @? '$.a.a';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"a": 12}}' @? '$.*.a';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"b": {"a": 12}}' @? '$.*.a';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"b": {"a": 12}}' @? '$.*.b';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"b": {"a": 12}}' @? 'strict $.*.b';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '{}' @? '$.*';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"a": 1}' @? '$.*';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"b": 1}}' @? 'lax $.**{1}';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"b": 1}}' @? 'lax $.**{2}';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"b": 1}}' @? 'lax $.**{3}';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '[]' @? '$[*]';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '[1]' @? '$[*]';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1]' @? '$[1]';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '[1]' @? 'strict $[1]';
- ?column?
-----------
-
-(1 row)
-
-select jsonb_path_query('[1]', 'strict $[1]');
-ERROR: jsonpath array subscript is out of bounds
-select jsonb_path_query('[1]', 'strict $[1]', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb '[1]' @? 'lax $[10000000000000000]';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '[1]' @? 'strict $[10000000000000000]';
- ?column?
-----------
-
-(1 row)
-
-select jsonb_path_query('[1]', 'lax $[10000000000000000]');
-ERROR: jsonpath array subscript is out of integer range
-select jsonb_path_query('[1]', 'strict $[10000000000000000]');
-ERROR: jsonpath array subscript is out of integer range
-select jsonb '[1]' @? '$[0]';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1]' @? '$[0.3]';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1]' @? '$[0.5]';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1]' @? '$[0.9]';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1]' @? '$[1.2]';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '[1]' @? 'strict $[1.2]';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] > @.b[*])';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] >= @.b[*])';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? '$ ? (@.a[*] >= @.b[*])';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? 'strict $ ? (@.a[*] >= @.b[*])';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"a": [1,2,3], "b": [3,4,null]}' @? '$ ? (@.a[*] >= @.b[*])';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '1' @? '$ ? ((@ == "1") is unknown)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '1' @? '$ ? ((@ == 1) is unknown)';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '[{"a": 1}, {"a": 2}]' @? '$[0 to 1] ? (@.a > 1)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => false);
- jsonb_path_exists
--------------------
- t
-(1 row)
-
-select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => true);
- jsonb_path_exists
--------------------
- t
-(1 row)
-
-select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => false);
-ERROR: jsonpath member accessor can only be applied to an object
-select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => true);
- jsonb_path_exists
--------------------
-
-(1 row)
-
-select jsonb_path_query('1', 'lax $.a');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1', 'strict $.a');
-ERROR: jsonpath member accessor can only be applied to an object
-select jsonb_path_query('1', 'strict $.*');
-ERROR: jsonpath wildcard member accessor can only be applied to an object
-select jsonb_path_query('1', 'strict $.a', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1', 'strict $.*', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'lax $.a');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.a');
-ERROR: jsonpath member accessor can only be applied to an object
-select jsonb_path_query('[]', 'strict $.a', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', 'lax $.a');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', 'strict $.a');
-ERROR: JSON object does not contain key "a"
-select jsonb_path_query('{}', 'strict $.a', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1', 'strict $[1]');
-ERROR: jsonpath array accessor can only be applied to an array
-select jsonb_path_query('1', 'strict $[*]');
-ERROR: jsonpath wildcard array accessor can only be applied to an array
-select jsonb_path_query('[]', 'strict $[1]');
-ERROR: jsonpath array subscript is out of bounds
-select jsonb_path_query('[]', 'strict $["a"]');
-ERROR: jsonpath array subscript is not a single numeric value
-select jsonb_path_query('1', 'strict $[1]', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1', 'strict $[*]', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $[1]', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $["a"]', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.a');
- jsonb_path_query
-------------------
- 12
-(1 row)
-
-select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.b');
- jsonb_path_query
-------------------
- {"a": 13}
-(1 row)
-
-select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.*');
- jsonb_path_query
-------------------
- 12
- {"a": 13}
-(2 rows)
-
-select jsonb_path_query('{"a": 12, "b": {"a": 13}}', 'lax $.*.a');
- jsonb_path_query
-------------------
- 13
-(1 row)
-
-select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].a');
- jsonb_path_query
-------------------
- 13
-(1 row)
-
-select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].*');
- jsonb_path_query
-------------------
- 13
- 14
-(2 rows)
-
-select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0].a');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[1].a');
- jsonb_path_query
-------------------
- 13
-(1 row)
-
-select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[2].a');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0,1].a');
- jsonb_path_query
-------------------
- 13
-(1 row)
-
-select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10].a');
- jsonb_path_query
-------------------
- 13
-(1 row)
-
-select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10 / 0].a');
-ERROR: division by zero
-select jsonb_path_query('[12, {"a": 13}, {"b": 14}, "ccc", true]', '$[2.5 - 1 to $.size() - 2]');
- jsonb_path_query
-------------------
- {"a": 13}
- {"b": 14}
- "ccc"
-(3 rows)
-
-select jsonb_path_query('1', 'lax $[0]');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('1', 'lax $[*]');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('[1]', 'lax $[0]');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('[1]', 'lax $[*]');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('[1,2,3]', 'lax $[*]');
- jsonb_path_query
-------------------
- 1
- 2
- 3
-(3 rows)
-
-select jsonb_path_query('[1,2,3]', 'strict $[*].a');
-ERROR: jsonpath member accessor can only be applied to an object
-select jsonb_path_query('[1,2,3]', 'strict $[*].a', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$[last]');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$[last ? (exists(last))]');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $[last]');
-ERROR: jsonpath array subscript is out of bounds
-select jsonb_path_query('[]', 'strict $[last]', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[1]', '$[last]');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('[1,2,3]', '$[last]');
- jsonb_path_query
-------------------
- 3
-(1 row)
-
-select jsonb_path_query('[1,2,3]', '$[last - 1]');
- jsonb_path_query
-------------------
- 2
-(1 row)
-
-select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "number")]');
- jsonb_path_query
-------------------
- 3
-(1 row)
-
-select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]');
-ERROR: jsonpath array subscript is not a single numeric value
-select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select * from jsonb_path_query('{"a": 10}', '$');
- jsonb_path_query
-------------------
- {"a": 10}
-(1 row)
-
-select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)');
-ERROR: could not find jsonpath variable "value"
-select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '1');
-ERROR: "vars" argument is not an object
-DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object.
-select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '[{"value" : 13}]');
-ERROR: "vars" argument is not an object
-DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object.
-select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 13}');
- jsonb_path_query
-------------------
- {"a": 10}
-(1 row)
-
-select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 8}');
- jsonb_path_query
-------------------
-(0 rows)
-
-select * from jsonb_path_query('{"a": 10}', '$.a ? (@ < $value)', '{"value" : 13}');
- jsonb_path_query
-------------------
- 10
-(1 row)
-
-select * from jsonb_path_query('[10,11,12,13,14,15]', '$[*] ? (@ < $value)', '{"value" : 13}');
- jsonb_path_query
-------------------
- 10
- 11
- 12
-(3 rows)
-
-select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0,1] ? (@ < $x.value)', '{"x": {"value" : 13}}');
- jsonb_path_query
-------------------
- 10
- 11
-(2 rows)
-
-select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0 to 2] ? (@ < $value)', '{"value" : 15}');
- jsonb_path_query
-------------------
- 10
- 11
- 12
-(3 rows)
-
-select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == "1")');
- jsonb_path_query
-------------------
- "1"
-(1 row)
-
-select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : "1"}');
- jsonb_path_query
-------------------
- "1"
-(1 row)
-
-select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : null}');
- jsonb_path_query
-------------------
- null
-(1 row)
-
-select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ != null)');
- jsonb_path_query
-------------------
- 1
- "2"
-(2 rows)
-
-select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ == null)');
- jsonb_path_query
-------------------
- null
-(1 row)
-
-select * from jsonb_path_query('{}', '$ ? (@ == @)');
- jsonb_path_query
-------------------
-(0 rows)
-
-select * from jsonb_path_query('[]', 'strict $ ? (@ == @)');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**');
- jsonb_path_query
-------------------
- {"a": {"b": 1}}
- {"b": 1}
- 1
-(3 rows)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}');
- jsonb_path_query
-------------------
- {"a": {"b": 1}}
-(1 row)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}');
- jsonb_path_query
-------------------
- {"a": {"b": 1}}
- {"b": 1}
- 1
-(3 rows)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}');
- jsonb_path_query
-------------------
- {"b": 1}
-(1 row)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}');
- jsonb_path_query
-------------------
- {"b": 1}
- 1
-(2 rows)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2}');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2 to last}');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{3 to last}');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{last}');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}.b ? (@ > 0)');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to 2}.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0}.b ? (@ > 0)');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1}.b ? (@ > 0)');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0 to last}.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to last}.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to 2}.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{2 to 3}.b ? (@ > 0)');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb '{"a": {"b": 1}}' @? '$.**.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"b": 1}}' @? '$.**{0}.b ? ( @ > 0)';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"a": {"b": 1}}' @? '$.**{1}.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"b": 1}}' @? '$.**{0 to last}.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"b": 1}}' @? '$.**{1 to last}.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"b": 1}}' @? '$.**{1 to 2}.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0}.b ? ( @ > 0)';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1}.b ? ( @ > 0)';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0 to last}.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to last}.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to 2}.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{2 to 3}.b ? ( @ > 0)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x))');
- jsonb_path_query
-------------------
- {"x": 2}
-(1 row)
-
-select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.y))');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x ? (@ >= 2) ))');
- jsonb_path_query
-------------------
- {"x": 2}
-(1 row)
-
-select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x))');
- jsonb_path_query
-------------------
- {"x": 2}
-(1 row)
-
-select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x + "3"))');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? ((exists (@.x + "3")) is unknown)');
- jsonb_path_query
-------------------
- {"x": 2}
- {"y": 3}
-(2 rows)
-
-select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? (exists (@.x))');
- jsonb_path_query
-------------------
- {"x": 2}
-(1 row)
-
-select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? ((exists (@.x)) is unknown)');
- jsonb_path_query
-------------------
- {"y": 3}
-(1 row)
-
-select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? (exists (@[*].x))');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? ((exists (@[*].x)) is unknown)');
- jsonb_path_query
-----------------------
- [{"x": 2}, {"y": 3}]
-(1 row)
-
---test ternary logic
-select
- x, y,
- jsonb_path_query(
- '[true, false, null]',
- '$[*] ? (@ == true && ($x == true && $y == true) ||
- @ == false && !($x == true && $y == true) ||
- @ == null && ($x == true && $y == true) is unknown)',
- jsonb_build_object('x', x, 'y', y)
- ) as "x && y"
-from
- (values (jsonb 'true'), ('false'), ('"null"')) x(x),
- (values (jsonb 'true'), ('false'), ('"null"')) y(y);
- x | y | x && y
---------+--------+--------
- true | true | true
- true | false | false
- true | "null" | null
- false | true | false
- false | false | false
- false | "null" | false
- "null" | true | null
- "null" | false | false
- "null" | "null" | null
-(9 rows)
-
-select
- x, y,
- jsonb_path_query(
- '[true, false, null]',
- '$[*] ? (@ == true && ($x == true || $y == true) ||
- @ == false && !($x == true || $y == true) ||
- @ == null && ($x == true || $y == true) is unknown)',
- jsonb_build_object('x', x, 'y', y)
- ) as "x || y"
-from
- (values (jsonb 'true'), ('false'), ('"null"')) x(x),
- (values (jsonb 'true'), ('false'), ('"null"')) y(y);
- x | y | x || y
---------+--------+--------
- true | true | true
- true | false | true
- true | "null" | true
- false | true | true
- false | false | false
- false | "null" | null
- "null" | true | true
- "null" | false | null
- "null" | "null" | null
-(9 rows)
-
-select jsonb '{"a": 1, "b":1}' @? '$ ? (@.a == @.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": 1, "b":1}}' @? '$ ? (@.a == @.b)';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? (@.a == @.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? ($.c.a == @.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": 1, "b":1}}' @? '$.* ? (@.a == @.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": 1, "b":1}' @? '$.** ? (@.a == @.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": 1, "b":1}}' @? '$.** ? (@.a == @.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == 1 + 1)');
- jsonb_path_query
-------------------
- {"a": 2, "b": 1}
-(1 row)
-
-select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (1 + 1))');
- jsonb_path_query
-------------------
- {"a": 2, "b": 1}
-(1 row)
-
-select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == @.b + 1)');
- jsonb_path_query
-------------------
- {"a": 2, "b": 1}
-(1 row)
-
-select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (@.b + 1))');
- jsonb_path_query
-------------------
- {"a": 2, "b": 1}
-(1 row)
-
-select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - 1)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -1)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -@.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - @.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - @.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": 2, "b":1}}' @? '$.** ? (@.a == 1 - - @.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - +@.b)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1,2,3]' @? '$ ? (+@[*] > +2)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1,2,3]' @? '$ ? (+@[*] > +3)';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '[1,2,3]' @? '$ ? (-@[*] < -2)';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1,2,3]' @? '$ ? (-@[*] < -3)';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '1' @? '$ ? ($ > 0)';
- ?column?
-----------
- t
-(1 row)
-
--- arithmetic errors
-select jsonb_path_query('[1,2,0,3]', '$[*] ? (2 / @ > 0)');
- jsonb_path_query
-------------------
- 1
- 2
- 3
-(3 rows)
-
-select jsonb_path_query('[1,2,0,3]', '$[*] ? ((2 / @ > 0) is unknown)');
- jsonb_path_query
-------------------
- 0
-(1 row)
-
-select jsonb_path_query('0', '1 / $');
-ERROR: division by zero
-select jsonb_path_query('0', '1 / $ + 2');
-ERROR: division by zero
-select jsonb_path_query('0', '-(3 + 1 % $)');
-ERROR: division by zero
-select jsonb_path_query('1', '$ + "2"');
-ERROR: right operand of jsonpath operator + is not a single numeric value
-select jsonb_path_query('[1, 2]', '3 * $');
-ERROR: right operand of jsonpath operator * is not a single numeric value
-select jsonb_path_query('"a"', '-$');
-ERROR: operand of unary jsonpath operator - is not a numeric value
-select jsonb_path_query('[1,"2",3]', '+$');
-ERROR: operand of unary jsonpath operator + is not a numeric value
-select jsonb_path_query('1', '$ + "2"', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[1, 2]', '3 * $', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"a"', '-$', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[1,"2",3]', '+$', silent => true);
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb '["1",2,0,3]' @? '-$[*]';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1,"2",0,3]' @? '-$[*]';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '["1",2,0,3]' @? 'strict -$[*]';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '[1,"2",0,3]' @? 'strict -$[*]';
- ?column?
-----------
-
-(1 row)
-
--- unwrapping of operator arguments in lax mode
-select jsonb_path_query('{"a": [2]}', 'lax $.a * 3');
- jsonb_path_query
-------------------
- 6
-(1 row)
-
-select jsonb_path_query('{"a": [2]}', 'lax $.a + 3');
- jsonb_path_query
-------------------
- 5
-(1 row)
-
-select jsonb_path_query('{"a": [2, 3, 4]}', 'lax -$.a');
- jsonb_path_query
-------------------
- -2
- -3
- -4
-(3 rows)
-
--- should fail
-select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3');
-ERROR: left operand of jsonpath operator * is not a single numeric value
-select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
--- any key on arrays with and without unwrapping.
-select jsonb_path_query('{"a": [1,2,3], "b": [3,4,5]}', '$.*');
- jsonb_path_query
-------------------
- [1, 2, 3]
- [3, 4, 5]
-(2 rows)
-
-select jsonb_path_query('[1,2,3]', '$.*');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[1,2,3,{"b": [3,4,5]}]', 'lax $.*');
- jsonb_path_query
-------------------
- [3, 4, 5]
-(1 row)
-
-select jsonb_path_query('[1,2,3,{"b": [3,4,5]}]', 'strict $.*');
-ERROR: jsonpath wildcard member accessor can only be applied to an object
-select jsonb_path_query('[1,2,3,{"b": [3,4,5]}]', 'strict $.*', NULL, true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$.*';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1,2,3]' @? '$.*';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '[1,2,3,{"b": [3,4,5]}]' @? 'lax $.*';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '[1,2,3,{"b": [3,4,5]}]' @? 'strict $.*';
- ?column?
-----------
-
-(1 row)
-
--- extension: boolean expressions
-select jsonb_path_query('2', '$ > 1');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('2', '$ <= 1');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('2', '$ == "2"');
- jsonb_path_query
-------------------
- null
-(1 row)
-
-select jsonb '2' @? '$ == "2"';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '2' @@ '$ > 1';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '2' @@ '$ <= 1';
- ?column?
-----------
- f
-(1 row)
-
-select jsonb '2' @@ '$ == "2"';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '2' @@ '1';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '{}' @@ '$';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '[]' @@ '$';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '[1,2,3]' @@ '$[*]';
- ?column?
-----------
-
-(1 row)
-
-select jsonb '[]' @@ '$[*]';
- ?column?
-----------
-
-(1 row)
-
-select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] > $x) [1]', '{"x": 1}');
- jsonb_path_match
-------------------
- f
-(1 row)
-
-select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] < $x) [1]', '{"x": 2}');
- jsonb_path_match
-------------------
- t
-(1 row)
-
-select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => false);
- jsonb_path_match
-------------------
- t
-(1 row)
-
-select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => true);
- jsonb_path_match
-------------------
- t
-(1 row)
-
-select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => false);
- jsonb_path_match
-------------------
-
-(1 row)
-
-select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => true);
- jsonb_path_match
-------------------
-
-(1 row)
-
-select jsonb_path_query('[null,1,true,"a",[],{}]', '$.type()');
- jsonb_path_query
-------------------
- "array"
-(1 row)
-
-select jsonb_path_query('[null,1,true,"a",[],{}]', 'lax $.type()');
- jsonb_path_query
-------------------
- "array"
-(1 row)
-
-select jsonb_path_query('[null,1,true,"a",[],{}]', '$[*].type()');
- jsonb_path_query
-------------------
- "null"
- "number"
- "boolean"
- "string"
- "array"
- "object"
-(6 rows)
-
-select jsonb_path_query('null', 'null.type()');
- jsonb_path_query
-------------------
- "null"
-(1 row)
-
-select jsonb_path_query('null', 'true.type()');
- jsonb_path_query
-------------------
- "boolean"
-(1 row)
-
-select jsonb_path_query('null', '(123).type()');
- jsonb_path_query
-------------------
- "number"
-(1 row)
-
-select jsonb_path_query('null', '"123".type()');
- jsonb_path_query
-------------------
- "string"
-(1 row)
-
-select jsonb_path_query('{"a": 2}', '($.a - 5).abs() + 10');
- jsonb_path_query
-------------------
- 13
-(1 row)
-
-select jsonb_path_query('{"a": 2.5}', '-($.a * $.a).floor() % 4.3');
- jsonb_path_query
-------------------
- -1.7
-(1 row)
-
-select jsonb_path_query('[1, 2, 3]', '($[*] > 2) ? (@ == true)');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('[1, 2, 3]', '($[*] > 3).type()');
- jsonb_path_query
-------------------
- "boolean"
-(1 row)
-
-select jsonb_path_query('[1, 2, 3]', '($[*].a > 3).type()');
- jsonb_path_query
-------------------
- "boolean"
-(1 row)
-
-select jsonb_path_query('[1, 2, 3]', 'strict ($[*].a > 3).type()');
- jsonb_path_query
-------------------
- "null"
-(1 row)
-
-select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()');
-ERROR: jsonpath item method .size() can only be applied to an array
-select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'lax $[*].size()');
- jsonb_path_query
-------------------
- 1
- 1
- 1
- 1
- 0
- 1
- 3
- 1
- 1
-(9 rows)
-
-select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].abs()');
- jsonb_path_query
-------------------
- 0
- 1
- 2
- 3.4
- 5.6
-(5 rows)
-
-select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].floor()');
- jsonb_path_query
-------------------
- 0
- 1
- -2
- -4
- 5
-(5 rows)
-
-select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling()');
- jsonb_path_query
-------------------
- 0
- 1
- -2
- -3
- 6
-(5 rows)
-
-select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs()');
- jsonb_path_query
-------------------
- 0
- 1
- 2
- 3
- 6
-(5 rows)
-
-select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs().type()');
- jsonb_path_query
-------------------
- "number"
- "number"
- "number"
- "number"
- "number"
-(5 rows)
-
-select jsonb_path_query('[{},1]', '$[*].keyvalue()');
-ERROR: jsonpath item method .keyvalue() can only be applied to an object
-select jsonb_path_query('[{},1]', '$[*].keyvalue()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.keyvalue()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{"a": 1, "b": [1, 2], "c": {"a": "bbb"}}', '$.keyvalue()');
- jsonb_path_query
-----------------------------------------------
- {"id": 0, "key": "a", "value": 1}
- {"id": 0, "key": "b", "value": [1, 2]}
- {"id": 0, "key": "c", "value": {"a": "bbb"}}
-(3 rows)
-
-select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', '$[*].keyvalue()');
- jsonb_path_query
------------------------------------------------
- {"id": 12, "key": "a", "value": 1}
- {"id": 12, "key": "b", "value": [1, 2]}
- {"id": 72, "key": "c", "value": {"a": "bbb"}}
-(3 rows)
-
-select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue()');
-ERROR: jsonpath item method .keyvalue() can only be applied to an object
-select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'lax $.keyvalue()');
- jsonb_path_query
------------------------------------------------
- {"id": 12, "key": "a", "value": 1}
- {"id": 12, "key": "b", "value": [1, 2]}
- {"id": 72, "key": "c", "value": {"a": "bbb"}}
-(3 rows)
-
-select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue().a');
-ERROR: jsonpath item method .keyvalue() can only be applied to an object
-select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue().key';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('null', '$.double()');
-ERROR: jsonpath item method .double() can only be applied to a string or numeric value
-select jsonb_path_query('true', '$.double()');
-ERROR: jsonpath item method .double() can only be applied to a string or numeric value
-select jsonb_path_query('null', '$.double()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.double()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.double()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.double()');
-ERROR: jsonpath item method .double() can only be applied to a string or numeric value
-select jsonb_path_query('{}', '$.double()');
-ERROR: jsonpath item method .double() can only be applied to a string or numeric value
-select jsonb_path_query('[]', 'strict $.double()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.double()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1.23', '$.double()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23"', '$.double()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23aaa"', '$.double()');
-ERROR: argument "1.23aaa" of jsonpath item method .double() is invalid for type double precision
-select jsonb_path_query('1e1000', '$.double()');
-ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .double() is invalid for type double precision
-select jsonb_path_query('"nan"', '$.double()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .double()
-select jsonb_path_query('"NaN"', '$.double()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .double()
-select jsonb_path_query('"inf"', '$.double()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .double()
-select jsonb_path_query('"-inf"', '$.double()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .double()
-select jsonb_path_query('"inf"', '$.double()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.double()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.abs()');
-ERROR: jsonpath item method .abs() can only be applied to a numeric value
-select jsonb_path_query('true', '$.floor()');
-ERROR: jsonpath item method .floor() can only be applied to a numeric value
-select jsonb_path_query('"1.2"', '$.ceiling()');
-ERROR: jsonpath item method .ceiling() can only be applied to a numeric value
-select jsonb_path_query('{}', '$.abs()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.floor()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"1.2"', '$.ceiling()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('["", "a", "abc", "abcabc"]', '$[*] ? (@ starts with "abc")');
- jsonb_path_query
-------------------
- "abc"
- "abcabc"
-(2 rows)
-
-select jsonb_path_query('["", "a", "abc", "abcabc"]', 'strict $ ? (@[*] starts with "abc")');
- jsonb_path_query
-----------------------------
- ["", "a", "abc", "abcabc"]
-(1 row)
-
-select jsonb_path_query('["", "a", "abd", "abdabc"]', 'strict $ ? (@[*] starts with "abc")');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? (@[*] starts with "abc")');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? ((@[*] starts with "abc") is unknown)');
- jsonb_path_query
-----------------------------
- ["abc", "abcabc", null, 1]
-(1 row)
-
-select jsonb_path_query('[[null, 1, "abc", "abcabc"]]', 'lax $ ? (@[*] starts with "abc")');
- jsonb_path_query
-----------------------------
- [null, 1, "abc", "abcabc"]
-(1 row)
-
-select jsonb_path_query('[[null, 1, "abd", "abdabc"]]', 'lax $ ? ((@[*] starts with "abc") is unknown)');
- jsonb_path_query
-----------------------------
- [null, 1, "abd", "abdabc"]
-(1 row)
-
-select jsonb_path_query('[null, 1, "abd", "abdabc"]', 'lax $[*] ? ((@ starts with "abc") is unknown)');
- jsonb_path_query
-------------------
- null
- 1
-(2 rows)
-
-select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c")');
- jsonb_path_query
-------------------
- "abc"
- "abdacb"
-(2 rows)
-
-select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "i")');
- jsonb_path_query
-------------------
- "abc"
- "aBdC"
- "abdacb"
-(3 rows)
-
-select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "m")');
- jsonb_path_query
-------------------
- "abc"
- "abdacb"
- "adc\nabc"
-(3 rows)
-
-select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "s")');
- jsonb_path_query
-------------------
- "abc"
- "abdacb"
- "ab\nadc"
-(3 rows)
-
-select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "q")');
- jsonb_path_query
-------------------
- "a\\b"
- "^a\\b$"
-(2 rows)
-
-select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "")');
- jsonb_path_query
-------------------
- "a\b"
-(1 row)
-
-select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "q")');
- jsonb_path_query
-------------------
- "^a\\b$"
-(1 row)
-
-select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "q")');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "iq")');
- jsonb_path_query
-------------------
- "^a\\b$"
-(1 row)
-
-select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "")');
- jsonb_path_query
-------------------
- "a\b"
-(1 row)
-
-select jsonb_path_query('null', '$.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('true', '$.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('1', '$.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('[]', '$.datetime()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('{}', '$.datetime()');
-ERROR: jsonpath item method .datetime() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.datetime()');
-ERROR: datetime format is not recognized: "bogus"
-HINT: Use a datetime template argument to specify the input data format.
-select jsonb_path_query('"12:34"', '$.datetime("aaa")');
-ERROR: invalid datetime format separator: "a"
-select jsonb_path_query('"aaaa"', '$.datetime("HH24")');
-ERROR: invalid value "aa" for "HH24"
-DETAIL: Value must be an integer.
-select jsonb '"10-03-2017"' @? '$.datetime("dd-mm-yyyy")';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy")');
- jsonb_path_query
-------------------
- "2017-03-10"
-(1 row)
-
-select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy").type()');
- jsonb_path_query
-------------------
- "date"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy")');
-ERROR: trailing characters remain in input string after datetime format
-select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy").type()');
-ERROR: trailing characters remain in input string after datetime format
-select jsonb_path_query('"10-03-2017 12:34"', ' $.datetime("dd-mm-yyyy HH24:MI").type()');
- jsonb_path_query
--------------------------------
- "timestamp without time zone"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM").type()');
- jsonb_path_query
-----------------------------
- "timestamp with time zone"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.datetime("HH24:MI:SS").type()');
- jsonb_path_query
---------------------------
- "time without time zone"
-(1 row)
-
-select jsonb_path_query('"12:34:56 +05:20"', '$.datetime("HH24:MI:SS TZH:TZM").type()');
- jsonb_path_query
------------------------
- "time with time zone"
-(1 row)
-
-select jsonb_path_query('"10-03-2017T12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
- jsonb_path_query
------------------------
- "2017-03-10T12:34:56"
-(1 row)
-
-select jsonb_path_query('"10-03-2017t12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
-ERROR: unmatched format character "T"
-select jsonb_path_query('"10-03-2017 12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
-ERROR: unmatched format character "T"
--- Test .bigint()
-select jsonb_path_query('null', '$.bigint()');
-ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value
-select jsonb_path_query('true', '$.bigint()');
-ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value
-select jsonb_path_query('null', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.bigint()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.bigint()');
-ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value
-select jsonb_path_query('{}', '$.bigint()');
-ERROR: jsonpath item method .bigint() can only be applied to a string or numeric value
-select jsonb_path_query('[]', 'strict $.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"1.23"', '$.bigint()');
-ERROR: argument "1.23" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"1.23aaa"', '$.bigint()');
-ERROR: argument "1.23aaa" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('1e1000', '$.bigint()');
-ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"nan"', '$.bigint()');
-ERROR: argument "nan" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"NaN"', '$.bigint()');
-ERROR: argument "NaN" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"inf"', '$.bigint()');
-ERROR: argument "inf" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"-inf"', '$.bigint()');
-ERROR: argument "-inf" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"inf"', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.bigint()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('123', '$.bigint()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('"123"', '$.bigint()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('1.23', '$.bigint()');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('1.83', '$.bigint()');
- jsonb_path_query
-------------------
- 2
-(1 row)
-
-select jsonb_path_query('1234567890123', '$.bigint()');
- jsonb_path_query
-------------------
- 1234567890123
-(1 row)
-
-select jsonb_path_query('"1234567890123"', '$.bigint()');
- jsonb_path_query
-------------------
- 1234567890123
-(1 row)
-
-select jsonb_path_query('12345678901234567890', '$.bigint()');
-ERROR: argument "12345678901234567890" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"12345678901234567890"', '$.bigint()');
-ERROR: argument "12345678901234567890" of jsonpath item method .bigint() is invalid for type bigint
-select jsonb_path_query('"+123"', '$.bigint()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('-123', '$.bigint()');
- jsonb_path_query
-------------------
- -123
-(1 row)
-
-select jsonb_path_query('"-123"', '$.bigint()');
- jsonb_path_query
-------------------
- -123
-(1 row)
-
-select jsonb_path_query('123', '$.bigint() * 2');
- jsonb_path_query
-------------------
- 246
-(1 row)
-
--- Test .boolean()
-select jsonb_path_query('null', '$.boolean()');
-ERROR: jsonpath item method .boolean() can only be applied to a boolean, string, or numeric value
-select jsonb_path_query('null', '$.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.boolean()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.boolean()');
-ERROR: jsonpath item method .boolean() can only be applied to a boolean, string, or numeric value
-select jsonb_path_query('{}', '$.boolean()');
-ERROR: jsonpath item method .boolean() can only be applied to a boolean, string, or numeric value
-select jsonb_path_query('[]', 'strict $.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1.23', '$.boolean()');
-ERROR: argument "1.23" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"1.23"', '$.boolean()');
-ERROR: argument "1.23" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"1.23aaa"', '$.boolean()');
-ERROR: argument "1.23aaa" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('1e1000', '$.boolean()');
-ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"nan"', '$.boolean()');
-ERROR: argument "nan" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"NaN"', '$.boolean()');
-ERROR: argument "NaN" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"inf"', '$.boolean()');
-ERROR: argument "inf" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"-inf"', '$.boolean()');
-ERROR: argument "-inf" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('"inf"', '$.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.boolean()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"100"', '$.boolean()');
-ERROR: argument "100" of jsonpath item method .boolean() is invalid for type boolean
-select jsonb_path_query('true', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('false', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('1', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('0', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('-1', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('100', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"1"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"0"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"true"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"false"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"TRUE"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"FALSE"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"yes"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"NO"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"T"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"f"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('"y"', '$.boolean()');
- jsonb_path_query
-------------------
- true
-(1 row)
-
-select jsonb_path_query('"N"', '$.boolean()');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-select jsonb_path_query('true', '$.boolean().type()');
- jsonb_path_query
-------------------
- "boolean"
-(1 row)
-
-select jsonb_path_query('123', '$.boolean().type()');
- jsonb_path_query
-------------------
- "boolean"
-(1 row)
-
-select jsonb_path_query('"Yes"', '$.boolean().type()');
- jsonb_path_query
-------------------
- "boolean"
-(1 row)
-
-select jsonb_path_query_array('[1, "yes", false]', '$[*].boolean()');
- jsonb_path_query_array
-------------------------
- [true, true, false]
-(1 row)
-
--- Test .date()
-select jsonb_path_query('null', '$.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('true', '$.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('1', '$.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('[]', '$.date()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('{}', '$.date()');
-ERROR: jsonpath item method .date() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.date()');
-ERROR: date format is not recognized: "bogus"
-select jsonb '"2023-08-15"' @? '$.date()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.date()');
- jsonb_path_query
-------------------
- "2023-08-15"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.date().type()');
- jsonb_path_query
-------------------
- "date"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.date()');
-ERROR: date format is not recognized: "12:34:56"
-select jsonb_path_query('"12:34:56 +05:30"', '$.date()');
-ERROR: date format is not recognized: "12:34:56 +05:30"
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.date()');
- jsonb_path_query
-------------------
- "2023-08-15"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.date()');
-ERROR: cannot convert value from timestamptz to date without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.date()'); -- should work
- jsonb_path_query_tz
----------------------
- "2023-08-15"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.date(2)');
-ERROR: syntax error at or near "2" of jsonpath input
-LINE 1: select jsonb_path_query('"2023-08-15"', '$.date(2)');
- ^
--- Test .decimal()
-select jsonb_path_query('null', '$.decimal()');
-ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value
-select jsonb_path_query('true', '$.decimal()');
-ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value
-select jsonb_path_query('null', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.decimal()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.decimal()');
-ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value
-select jsonb_path_query('{}', '$.decimal()');
-ERROR: jsonpath item method .decimal() can only be applied to a string or numeric value
-select jsonb_path_query('[]', 'strict $.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1.23', '$.decimal()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23"', '$.decimal()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23aaa"', '$.decimal()');
-ERROR: argument "1.23aaa" of jsonpath item method .decimal() is invalid for type numeric
-select jsonb_path_query('1e1000', '$.decimal()');
- jsonb_path_query
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-(1 row)
-
-select jsonb_path_query('"nan"', '$.decimal()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal()
-select jsonb_path_query('"NaN"', '$.decimal()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal()
-select jsonb_path_query('"inf"', '$.decimal()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal()
-select jsonb_path_query('"-inf"', '$.decimal()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .decimal()
-select jsonb_path_query('"inf"', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.decimal()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('123', '$.decimal()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('"123"', '$.decimal()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('12345678901234567890', '$.decimal()');
- jsonb_path_query
-----------------------
- 12345678901234567890
-(1 row)
-
-select jsonb_path_query('"12345678901234567890"', '$.decimal()');
- jsonb_path_query
-----------------------
- 12345678901234567890
-(1 row)
-
-select jsonb_path_query('"+12.3"', '$.decimal()');
- jsonb_path_query
-------------------
- 12.3
-(1 row)
-
-select jsonb_path_query('-12.3', '$.decimal()');
- jsonb_path_query
-------------------
- -12.3
-(1 row)
-
-select jsonb_path_query('"-12.3"', '$.decimal()');
- jsonb_path_query
-------------------
- -12.3
-(1 row)
-
-select jsonb_path_query('12.3', '$.decimal() * 2');
- jsonb_path_query
-------------------
- 24.6
-(1 row)
-
-select jsonb_path_query('12345.678', '$.decimal(6, 1)');
- jsonb_path_query
-------------------
- 12345.7
-(1 row)
-
-select jsonb_path_query('12345.678', '$.decimal(6, 2)');
-ERROR: argument "12345.678" of jsonpath item method .decimal() is invalid for type numeric
-select jsonb_path_query('1234.5678', '$.decimal(6, 2)');
- jsonb_path_query
-------------------
- 1234.57
-(1 row)
-
-select jsonb_path_query('12345.678', '$.decimal(4, 6)');
-ERROR: argument "12345.678" of jsonpath item method .decimal() is invalid for type numeric
-select jsonb_path_query('12345.678', '$.decimal(0, 6)');
-ERROR: NUMERIC precision 0 must be between 1 and 1000
-select jsonb_path_query('12345.678', '$.decimal(1001, 6)');
-ERROR: NUMERIC precision 1001 must be between 1 and 1000
-select jsonb_path_query('1234.5678', '$.decimal(+6, +2)');
- jsonb_path_query
-------------------
- 1234.57
-(1 row)
-
-select jsonb_path_query('1234.5678', '$.decimal(+6, -2)');
- jsonb_path_query
-------------------
- 1200
-(1 row)
-
-select jsonb_path_query('1234.5678', '$.decimal(-6, +2)');
-ERROR: NUMERIC precision -6 must be between 1 and 1000
-select jsonb_path_query('1234.5678', '$.decimal(6, -1001)');
-ERROR: NUMERIC scale -1001 must be between -1000 and 1000
-select jsonb_path_query('1234.5678', '$.decimal(6, 1001)');
-ERROR: NUMERIC scale 1001 must be between -1000 and 1000
-select jsonb_path_query('-1234.5678', '$.decimal(+6, -2)');
- jsonb_path_query
-------------------
- -1200
-(1 row)
-
-select jsonb_path_query('0.0123456', '$.decimal(1,2)');
- jsonb_path_query
-------------------
- 0.01
-(1 row)
-
-select jsonb_path_query('0.0012345', '$.decimal(2,4)');
- jsonb_path_query
-------------------
- 0.0012
-(1 row)
-
-select jsonb_path_query('-0.00123456', '$.decimal(2,-4)');
- jsonb_path_query
-------------------
- 0
-(1 row)
-
-select jsonb_path_query('12.3', '$.decimal(12345678901,1)');
-ERROR: precision of jsonpath item method .decimal() is out of range for type integer
-select jsonb_path_query('12.3', '$.decimal(1,12345678901)');
-ERROR: scale of jsonpath item method .decimal() is out of range for type integer
--- Test .integer()
-select jsonb_path_query('null', '$.integer()');
-ERROR: jsonpath item method .integer() can only be applied to a string or numeric value
-select jsonb_path_query('true', '$.integer()');
-ERROR: jsonpath item method .integer() can only be applied to a string or numeric value
-select jsonb_path_query('null', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.integer()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.integer()');
-ERROR: jsonpath item method .integer() can only be applied to a string or numeric value
-select jsonb_path_query('{}', '$.integer()');
-ERROR: jsonpath item method .integer() can only be applied to a string or numeric value
-select jsonb_path_query('[]', 'strict $.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"1.23"', '$.integer()');
-ERROR: argument "1.23" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"1.23aaa"', '$.integer()');
-ERROR: argument "1.23aaa" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('1e1000', '$.integer()');
-ERROR: argument "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"nan"', '$.integer()');
-ERROR: argument "nan" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"NaN"', '$.integer()');
-ERROR: argument "NaN" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"inf"', '$.integer()');
-ERROR: argument "inf" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"-inf"', '$.integer()');
-ERROR: argument "-inf" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"inf"', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.integer()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('123', '$.integer()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('"123"', '$.integer()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('1.23', '$.integer()');
- jsonb_path_query
-------------------
- 1
-(1 row)
-
-select jsonb_path_query('1.83', '$.integer()');
- jsonb_path_query
-------------------
- 2
-(1 row)
-
-select jsonb_path_query('12345678901', '$.integer()');
-ERROR: argument "12345678901" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"12345678901"', '$.integer()');
-ERROR: argument "12345678901" of jsonpath item method .integer() is invalid for type integer
-select jsonb_path_query('"+123"', '$.integer()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('-123', '$.integer()');
- jsonb_path_query
-------------------
- -123
-(1 row)
-
-select jsonb_path_query('"-123"', '$.integer()');
- jsonb_path_query
-------------------
- -123
-(1 row)
-
-select jsonb_path_query('123', '$.integer() * 2');
- jsonb_path_query
-------------------
- 246
-(1 row)
-
--- Test .number()
-select jsonb_path_query('null', '$.number()');
-ERROR: jsonpath item method .number() can only be applied to a string or numeric value
-select jsonb_path_query('true', '$.number()');
-ERROR: jsonpath item method .number() can only be applied to a string or numeric value
-select jsonb_path_query('null', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('true', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.number()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.number()');
-ERROR: jsonpath item method .number() can only be applied to a string or numeric value
-select jsonb_path_query('{}', '$.number()');
-ERROR: jsonpath item method .number() can only be applied to a string or numeric value
-select jsonb_path_query('[]', 'strict $.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1.23', '$.number()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23"', '$.number()');
- jsonb_path_query
-------------------
- 1.23
-(1 row)
-
-select jsonb_path_query('"1.23aaa"', '$.number()');
-ERROR: argument "1.23aaa" of jsonpath item method .number() is invalid for type numeric
-select jsonb_path_query('1e1000', '$.number()');
- jsonb_path_query
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-(1 row)
-
-select jsonb_path_query('"nan"', '$.number()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .number()
-select jsonb_path_query('"NaN"', '$.number()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .number()
-select jsonb_path_query('"inf"', '$.number()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .number()
-select jsonb_path_query('"-inf"', '$.number()');
-ERROR: NaN or Infinity is not allowed for jsonpath item method .number()
-select jsonb_path_query('"inf"', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('"-inf"', '$.number()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('123', '$.number()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('"123"', '$.number()');
- jsonb_path_query
-------------------
- 123
-(1 row)
-
-select jsonb_path_query('12345678901234567890', '$.number()');
- jsonb_path_query
-----------------------
- 12345678901234567890
-(1 row)
-
-select jsonb_path_query('"12345678901234567890"', '$.number()');
- jsonb_path_query
-----------------------
- 12345678901234567890
-(1 row)
-
-select jsonb_path_query('"+12.3"', '$.number()');
- jsonb_path_query
-------------------
- 12.3
-(1 row)
-
-select jsonb_path_query('-12.3', '$.number()');
- jsonb_path_query
-------------------
- -12.3
-(1 row)
-
-select jsonb_path_query('"-12.3"', '$.number()');
- jsonb_path_query
-------------------
- -12.3
-(1 row)
-
-select jsonb_path_query('12.3', '$.number() * 2');
- jsonb_path_query
-------------------
- 24.6
-(1 row)
-
--- Test .string()
-select jsonb_path_query('null', '$.string()');
-ERROR: jsonpath item method .string() can only be applied to a boolean, string, numeric, or datetime value
-select jsonb_path_query('null', '$.string()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', '$.string()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.string()');
-ERROR: jsonpath item method .string() can only be applied to a boolean, string, numeric, or datetime value
-select jsonb_path_query('{}', '$.string()');
-ERROR: jsonpath item method .string() can only be applied to a boolean, string, numeric, or datetime value
-select jsonb_path_query('[]', 'strict $.string()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('{}', '$.string()', silent => true);
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('1.23', '$.string()');
- jsonb_path_query
-------------------
- "1.23"
-(1 row)
-
-select jsonb_path_query('"1.23"', '$.string()');
- jsonb_path_query
-------------------
- "1.23"
-(1 row)
-
-select jsonb_path_query('"1.23aaa"', '$.string()');
- jsonb_path_query
-------------------
- "1.23aaa"
-(1 row)
-
-select jsonb_path_query('1234', '$.string()');
- jsonb_path_query
-------------------
- "1234"
-(1 row)
-
-select jsonb_path_query('true', '$.string()');
- jsonb_path_query
-------------------
- "true"
-(1 row)
-
-select jsonb_path_query('1234', '$.string().type()');
- jsonb_path_query
-------------------
- "string"
-(1 row)
-
-select jsonb_path_query('[2, true]', '$.string()');
- jsonb_path_query
-------------------
- "2"
- "true"
-(2 rows)
-
-select jsonb_path_query_array('[1.23, "yes", false]', '$[*].string()');
- jsonb_path_query_array
---------------------------
- ["1.23", "yes", "false"]
-(1 row)
-
-select jsonb_path_query_array('[1.23, "yes", false]', '$[*].string().type()');
- jsonb_path_query_array
---------------------------------
- ["string", "string", "string"]
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +5:30"', '$.timestamp().string()');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +5:30"', '$.timestamp().string()'); -- should work
- jsonb_path_query_tz
------------------------
- "2023-08-15T00:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp_tz().string()');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz().string()'); -- should work
- jsonb_path_query_tz
------------------------------
- "2023-08-15T12:34:56-07:00"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +5:30"', '$.timestamp_tz().string()');
- jsonb_path_query
------------------------------
- "2023-08-15T12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp().string()');
- jsonb_path_query
------------------------
- "2023-08-15T12:34:56"
-(1 row)
-
-select jsonb_path_query('"12:34:56 +5:30"', '$.time_tz().string()');
- jsonb_path_query
-------------------
- "12:34:56+05:30"
-(1 row)
-
--- this timetz usage will absorb the UTC offset of the current timezone setting
-begin;
-set local timezone = 'UTC-10';
-select jsonb_path_query_tz('"12:34:56"', '$.time_tz().string()');
- jsonb_path_query_tz
----------------------
- "12:34:56+10:00"
-(1 row)
-
-rollback;
-select jsonb_path_query('"12:34:56"', '$.time().string()');
- jsonb_path_query
-------------------
- "12:34:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.date().string()');
- jsonb_path_query
-------------------
- "2023-08-15"
-(1 row)
-
--- .string() does not react to timezone or datestyle
-begin;
-set local timezone = 'UTC';
-set local datestyle = 'German';
-select jsonb_path_query('"2023-08-15 12:34:56 +5:30"', '$.timestamp_tz().string()');
- jsonb_path_query
------------------------------
- "2023-08-15T12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp().string()');
- jsonb_path_query
------------------------
- "2023-08-15T12:34:56"
-(1 row)
-
-rollback;
--- Test .time()
-select jsonb_path_query('null', '$.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('true', '$.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('1', '$.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('[]', '$.time()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('{}', '$.time()');
-ERROR: jsonpath item method .time() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.time()');
-ERROR: time format is not recognized: "bogus"
-select jsonb '"12:34:56"' @? '$.time()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.time()');
- jsonb_path_query
-------------------
- "12:34:56"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.time().type()');
- jsonb_path_query
---------------------------
- "time without time zone"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.time()');
-ERROR: time format is not recognized: "2023-08-15"
-select jsonb_path_query('"12:34:56 +05:30"', '$.time()');
-ERROR: cannot convert value from timetz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"12:34:56 +05:30"', '$.time()'); -- should work
- jsonb_path_query_tz
----------------------
- "12:34:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.time()');
- jsonb_path_query
-------------------
- "12:34:56"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789"', '$.time(-1)');
-ERROR: syntax error at or near "-" of jsonpath input
-LINE 1: select jsonb_path_query('"12:34:56.789"', '$.time(-1)');
- ^
-select jsonb_path_query('"12:34:56.789"', '$.time(2.0)');
-ERROR: syntax error at or near "2.0" of jsonpath input
-LINE 1: select jsonb_path_query('"12:34:56.789"', '$.time(2.0)');
- ^
-select jsonb_path_query('"12:34:56.789"', '$.time(12345678901)');
-ERROR: time precision of jsonpath item method .time() is out of range for type integer
-select jsonb_path_query('"12:34:56.789"', '$.time(0)');
- jsonb_path_query
-------------------
- "12:34:57"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789"', '$.time(2)');
- jsonb_path_query
-------------------
- "12:34:56.79"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789"', '$.time(5)');
- jsonb_path_query
-------------------
- "12:34:56.789"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789"', '$.time(10)');
-WARNING: TIME(10) precision reduced to maximum allowed, 6
- jsonb_path_query
-------------------
- "12:34:56.789"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789012"', '$.time(8)');
-WARNING: TIME(8) precision reduced to maximum allowed, 6
- jsonb_path_query
--------------------
- "12:34:56.789012"
-(1 row)
-
--- Test .time_tz()
-select jsonb_path_query('null', '$.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('true', '$.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('1', '$.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('[]', '$.time_tz()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('{}', '$.time_tz()');
-ERROR: jsonpath item method .time_tz() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.time_tz()');
-ERROR: time_tz format is not recognized: "bogus"
-select jsonb '"12:34:56 +05:30"' @? '$.time_tz()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"12:34:56 +05:30"', '$.time_tz()');
- jsonb_path_query
-------------------
- "12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56 +05:30"', '$.time_tz().type()');
- jsonb_path_query
------------------------
- "time with time zone"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.time_tz()');
-ERROR: time_tz format is not recognized: "2023-08-15"
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.time_tz()');
-ERROR: time_tz format is not recognized: "2023-08-15 12:34:56"
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(-1)');
-ERROR: syntax error at or near "-" of jsonpath input
-LINE 1: select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(...
- ^
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(2.0)');
-ERROR: syntax error at or near "2.0" of jsonpath input
-LINE 1: select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(...
- ^
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(12345678901)');
-ERROR: time precision of jsonpath item method .time_tz() is out of range for type integer
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(0)');
- jsonb_path_query
-------------------
- "12:34:57+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(2)');
- jsonb_path_query
----------------------
- "12:34:56.79+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(5)');
- jsonb_path_query
-----------------------
- "12:34:56.789+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789 +05:30"', '$.time_tz(10)');
-WARNING: TIME(10) WITH TIME ZONE precision reduced to maximum allowed, 6
- jsonb_path_query
-----------------------
- "12:34:56.789+05:30"
-(1 row)
-
-select jsonb_path_query('"12:34:56.789012 +05:30"', '$.time_tz(8)');
-WARNING: TIME(8) WITH TIME ZONE precision reduced to maximum allowed, 6
- jsonb_path_query
--------------------------
- "12:34:56.789012+05:30"
-(1 row)
-
--- Test .timestamp()
-select jsonb_path_query('null', '$.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('true', '$.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('1', '$.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('[]', '$.timestamp()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('{}', '$.timestamp()');
-ERROR: jsonpath item method .timestamp() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.timestamp()');
-ERROR: timestamp format is not recognized: "bogus"
-select jsonb '"2023-08-15 12:34:56"' @? '$.timestamp()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp()');
- jsonb_path_query
------------------------
- "2023-08-15T12:34:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp().type()');
- jsonb_path_query
--------------------------------
- "timestamp without time zone"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.timestamp()');
- jsonb_path_query
------------------------
- "2023-08-15T00:00:00"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.timestamp()');
-ERROR: timestamp format is not recognized: "12:34:56"
-select jsonb_path_query('"12:34:56 +05:30"', '$.timestamp()');
-ERROR: timestamp format is not recognized: "12:34:56 +05:30"
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(-1)');
-ERROR: syntax error at or near "-" of jsonpath input
-LINE 1: ...ect jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timesta...
- ^
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(2.0)');
-ERROR: syntax error at or near "2.0" of jsonpath input
-LINE 1: ...ect jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timesta...
- ^
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(12345678901)');
-ERROR: time precision of jsonpath item method .timestamp() is out of range for type integer
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(0)');
- jsonb_path_query
------------------------
- "2023-08-15T12:34:57"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(2)');
- jsonb_path_query
---------------------------
- "2023-08-15T12:34:56.79"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(5)');
- jsonb_path_query
----------------------------
- "2023-08-15T12:34:56.789"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789"', '$.timestamp(10)');
-WARNING: TIMESTAMP(10) precision reduced to maximum allowed, 6
- jsonb_path_query
----------------------------
- "2023-08-15T12:34:56.789"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789012"', '$.timestamp(8)');
-WARNING: TIMESTAMP(8) precision reduced to maximum allowed, 6
- jsonb_path_query
-------------------------------
- "2023-08-15T12:34:56.789012"
-(1 row)
-
--- Test .timestamp_tz()
-select jsonb_path_query('null', '$.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('true', '$.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('1', '$.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('[]', '$.timestamp_tz()');
- jsonb_path_query
-------------------
-(0 rows)
-
-select jsonb_path_query('[]', 'strict $.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('{}', '$.timestamp_tz()');
-ERROR: jsonpath item method .timestamp_tz() can only be applied to a string
-select jsonb_path_query('"bogus"', '$.timestamp_tz()');
-ERROR: timestamp_tz format is not recognized: "bogus"
-select jsonb '"2023-08-15 12:34:56 +05:30"' @? '$.timestamp_tz()';
- ?column?
-----------
- t
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()');
- jsonb_path_query
------------------------------
- "2023-08-15T12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz().type()');
- jsonb_path_query
-----------------------------
- "timestamp with time zone"
-(1 row)
-
-select jsonb_path_query('"2023-08-15"', '$.timestamp_tz()');
-ERROR: cannot convert value from date to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15"', '$.timestamp_tz()'); -- should work
- jsonb_path_query_tz
------------------------------
- "2023-08-15T00:00:00-07:00"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.timestamp_tz()');
-ERROR: timestamp_tz format is not recognized: "12:34:56"
-select jsonb_path_query('"12:34:56 +05:30"', '$.timestamp_tz()');
-ERROR: timestamp_tz format is not recognized: "12:34:56 +05:30"
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(-1)');
-ERROR: syntax error at or near "-" of jsonpath input
-LINE 1: ...nb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timesta...
- ^
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(2.0)');
-ERROR: syntax error at or near "2.0" of jsonpath input
-LINE 1: ...nb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timesta...
- ^
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(12345678901)');
-ERROR: time precision of jsonpath item method .timestamp_tz() is out of range for type integer
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(0)');
- jsonb_path_query
------------------------------
- "2023-08-15T12:34:57+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(2)');
- jsonb_path_query
---------------------------------
- "2023-08-15T12:34:56.79+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(5)');
- jsonb_path_query
----------------------------------
- "2023-08-15T12:34:56.789+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789 +05:30"', '$.timestamp_tz(10)');
-WARNING: TIMESTAMP(10) WITH TIME ZONE precision reduced to maximum allowed, 6
- jsonb_path_query
----------------------------------
- "2023-08-15T12:34:56.789+05:30"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56.789012 +05:30"', '$.timestamp_tz(8)');
-WARNING: TIMESTAMP(8) WITH TIME ZONE precision reduced to maximum allowed, 6
- jsonb_path_query
-------------------------------------
- "2023-08-15T12:34:56.789012+05:30"
-(1 row)
-
-set time zone '+00';
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time()');
-ERROR: cannot convert value from timestamptz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.time()'); -- should work
- jsonb_path_query_tz
----------------------
- "07:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time_tz()');
- jsonb_path_query
-------------------
- "07:04:56+00:00"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.time_tz()');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"12:34:56"', '$.time_tz()'); -- should work
- jsonb_path_query_tz
----------------------
- "12:34:56+00:00"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp()');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -- should work
- jsonb_path_query_tz
------------------------
- "2023-08-15T07:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp_tz()');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz()'); -- should work
- jsonb_path_query_tz
------------------------------
- "2023-08-15T12:34:56+00:00"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")');
- jsonb_path_query
------------------------
- "2017-03-10T12:34:00"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
-ERROR: input string is too short for datetime format
-select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:00+05:00"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:00-05:00"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:00+05:20"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:00-05:20"
-(1 row)
-
-select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")');
- jsonb_path_query
-------------------
- "12:34:00"
-(1 row)
-
-select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")');
-ERROR: input string is too short for datetime format
-select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")');
- jsonb_path_query
-------------------
- "12:34:00+05:00"
-(1 row)
-
-select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")');
- jsonb_path_query
-------------------
- "12:34:00-05:00"
-(1 row)
-
-select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")');
- jsonb_path_query
-------------------
- "12:34:00+05:20"
-(1 row)
-
-select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")');
- jsonb_path_query
-------------------
- "12:34:00-05:20"
-(1 row)
-
-set time zone '+10';
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time()');
-ERROR: cannot convert value from timestamptz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.time()'); -- should work
- jsonb_path_query_tz
----------------------
- "17:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time_tz()');
- jsonb_path_query
-------------------
- "17:04:56+10:00"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp()');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -- should work
- jsonb_path_query_tz
------------------------
- "2023-08-15T17:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56"', '$.timestamp_tz()');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56"', '$.timestamp_tz()'); -- should work
- jsonb_path_query_tz
------------------------------
- "2023-08-15T12:34:56+10:00"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()');
- jsonb_path_query
------------------------------
- "2023-08-15T12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")');
- jsonb_path_query
------------------------
- "2017-03-10T12:34:00"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
-ERROR: input string is too short for datetime format
-select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:00+05:00"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:00-05:00"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:00+05:20"
-(1 row)
-
-select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:00-05:20"
-(1 row)
-
-select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")');
- jsonb_path_query
-------------------
- "12:34:00"
-(1 row)
-
-select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")');
-ERROR: input string is too short for datetime format
-select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")');
- jsonb_path_query
-------------------
- "12:34:00+05:00"
-(1 row)
-
-select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")');
- jsonb_path_query
-------------------
- "12:34:00-05:00"
-(1 row)
-
-select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")');
- jsonb_path_query
-------------------
- "12:34:00+05:20"
-(1 row)
-
-select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")');
- jsonb_path_query
-------------------
- "12:34:00-05:20"
-(1 row)
-
-set time zone default;
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time()');
-ERROR: cannot convert value from timestamptz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.time()'); -- should work
- jsonb_path_query_tz
----------------------
- "00:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.time_tz()');
- jsonb_path_query
-------------------
- "00:04:56-07:00"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp()');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz('"2023-08-15 12:34:56 +05:30"', '$.timestamp()'); -- should work
- jsonb_path_query_tz
------------------------
- "2023-08-15T00:04:56"
-(1 row)
-
-select jsonb_path_query('"2023-08-15 12:34:56 +05:30"', '$.timestamp_tz()');
- jsonb_path_query
------------------------------
- "2023-08-15T12:34:56+05:30"
-(1 row)
-
-select jsonb_path_query('"2017-03-10"', '$.datetime().type()');
- jsonb_path_query
-------------------
- "date"
-(1 row)
-
-select jsonb_path_query('"2017-03-10"', '$.datetime()');
- jsonb_path_query
-------------------
- "2017-03-10"
-(1 row)
-
-select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime().type()');
- jsonb_path_query
--------------------------------
- "timestamp without time zone"
-(1 row)
-
-select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime()');
- jsonb_path_query
------------------------
- "2017-03-10T12:34:56"
-(1 row)
-
-select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime().type()');
- jsonb_path_query
-----------------------------
- "timestamp with time zone"
-(1 row)
-
-select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime()');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:56+03:00"
-(1 row)
-
-select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime().type()');
- jsonb_path_query
-----------------------------
- "timestamp with time zone"
-(1 row)
-
-select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime()');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:56+03:10"
-(1 row)
-
-select jsonb_path_query('"2017-03-10T12:34:56+3:10"', '$.datetime()');
- jsonb_path_query
------------------------------
- "2017-03-10T12:34:56+03:10"
-(1 row)
-
-select jsonb_path_query('"2017-03-10t12:34:56+3:10"', '$.datetime()');
-ERROR: datetime format is not recognized: "2017-03-10t12:34:56+3:10"
-HINT: Use a datetime template argument to specify the input data format.
-select jsonb_path_query('"2017-03-10 12:34:56.789+3:10"', '$.datetime()');
- jsonb_path_query
----------------------------------
- "2017-03-10T12:34:56.789+03:10"
-(1 row)
-
-select jsonb_path_query('"2017-03-10T12:34:56.789+3:10"', '$.datetime()');
- jsonb_path_query
----------------------------------
- "2017-03-10T12:34:56.789+03:10"
-(1 row)
-
-select jsonb_path_query('"2017-03-10t12:34:56.789+3:10"', '$.datetime()');
-ERROR: datetime format is not recognized: "2017-03-10t12:34:56.789+3:10"
-HINT: Use a datetime template argument to specify the input data format.
-select jsonb_path_query('"2017-03-10T12:34:56.789EST"', '$.datetime()');
- jsonb_path_query
----------------------------------
- "2017-03-10T12:34:56.789-05:00"
-(1 row)
-
-select jsonb_path_query('"2017-03-10T12:34:56.789Z"', '$.datetime()');
- jsonb_path_query
----------------------------------
- "2017-03-10T12:34:56.789+00:00"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.datetime().type()');
- jsonb_path_query
---------------------------
- "time without time zone"
-(1 row)
-
-select jsonb_path_query('"12:34:56"', '$.datetime()');
- jsonb_path_query
-------------------
- "12:34:56"
-(1 row)
-
-select jsonb_path_query('"12:34:56+3"', '$.datetime().type()');
- jsonb_path_query
------------------------
- "time with time zone"
-(1 row)
-
-select jsonb_path_query('"12:34:56+3"', '$.datetime()');
- jsonb_path_query
-------------------
- "12:34:56+03:00"
-(1 row)
-
-select jsonb_path_query('"12:34:56+3:10"', '$.datetime().type()');
- jsonb_path_query
------------------------
- "time with time zone"
-(1 row)
-
-select jsonb_path_query('"12:34:56+3:10"', '$.datetime()');
- jsonb_path_query
-------------------
- "12:34:56+03:10"
-(1 row)
-
-set time zone '+00';
--- date comparison
-select jsonb_path_query(
- '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))');
-ERROR: cannot convert value from date to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))');
-ERROR: cannot convert value from date to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))');
-ERROR: cannot convert value from date to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))');
- jsonb_path_query_tz
------------------------------
- "2017-03-10"
- "2017-03-10T00:00:00"
- "2017-03-10T03:00:00+03:00"
-(3 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))');
- jsonb_path_query_tz
------------------------------
- "2017-03-10"
- "2017-03-11"
- "2017-03-10T00:00:00"
- "2017-03-10T12:34:56"
- "2017-03-10T03:00:00+03:00"
-(5 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))');
- jsonb_path_query_tz
------------------------------
- "2017-03-09"
- "2017-03-10T01:02:03+04:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].datetime() ? (@ == "2017-03-10".date())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10"
- "2017-03-10T00:00:00"
- "2017-03-10T03:00:00+03:00"
-(3 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].datetime() ? (@ >= "2017-03-10".date())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10"
- "2017-03-11"
- "2017-03-10T00:00:00"
- "2017-03-10T12:34:56"
- "2017-03-10T03:00:00+03:00"
-(5 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].datetime() ? (@ < "2017-03-10".date())');
- jsonb_path_query_tz
------------------------------
- "2017-03-09"
- "2017-03-10T01:02:03+04:00"
-(2 rows)
-
-select jsonb_path_query(
- '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].date() ? (@ == "2017-03-10".date())');
-ERROR: cannot convert value from timestamptz to date without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].date() ? (@ >= "2017-03-10".date())');
-ERROR: cannot convert value from timestamptz to date without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].date() ? (@ < "2017-03-10".date())');
-ERROR: cannot convert value from timestamptz to date without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].date() ? (@ == "2017-03-10".date())');
- jsonb_path_query_tz
----------------------
- "2017-03-10"
- "2017-03-10"
- "2017-03-10"
- "2017-03-10"
-(4 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].date() ? (@ >= "2017-03-10".date())');
- jsonb_path_query_tz
----------------------
- "2017-03-10"
- "2017-03-11"
- "2017-03-10"
- "2017-03-10"
- "2017-03-10"
-(5 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10", "2017-03-11", "2017-03-09", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
- '$[*].date() ? (@ < "2017-03-10".date())');
- jsonb_path_query_tz
----------------------
- "2017-03-09"
- "2017-03-09"
-(2 rows)
-
--- time comparison
-select jsonb_path_query(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))');
- jsonb_path_query_tz
----------------------
- "12:35:00"
- "12:35:00+00:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))');
- jsonb_path_query_tz
----------------------
- "12:35:00"
- "12:36:00"
- "12:35:00+00:00"
-(3 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))');
- jsonb_path_query_tz
----------------------
- "12:34:00"
- "12:35:00+01:00"
- "13:35:00+01:00"
-(3 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].datetime() ? (@ == "12:35:00".time())');
- jsonb_path_query_tz
----------------------
- "12:35:00"
- "12:35:00+00:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].datetime() ? (@ >= "12:35:00".time())');
- jsonb_path_query_tz
----------------------
- "12:35:00"
- "12:36:00"
- "12:35:00+00:00"
-(3 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].datetime() ? (@ < "12:35:00".time())');
- jsonb_path_query_tz
----------------------
- "12:34:00"
- "12:35:00+01:00"
- "13:35:00+01:00"
-(3 rows)
-
-select jsonb_path_query(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].time() ? (@ == "12:35:00".time())');
-ERROR: cannot convert value from timetz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].time() ? (@ >= "12:35:00".time())');
-ERROR: cannot convert value from timetz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].time() ? (@ < "12:35:00".time())');
-ERROR: cannot convert value from timetz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00.123", "12:35:00.123", "12:36:00.1123", "12:35:00.1123+00", "12:35:00.123+01", "13:35:00.123+01", "2017-03-10 12:35:00.1", "2017-03-10 12:35:00.123+01"]',
- '$[*].time(2) ? (@ >= "12:35:00.123".time(2))');
-ERROR: cannot convert value from timetz to time without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].time() ? (@ == "12:35:00".time())');
- jsonb_path_query_tz
----------------------
- "12:35:00"
- "12:35:00"
- "12:35:00"
- "12:35:00"
-(4 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].time() ? (@ >= "12:35:00".time())');
- jsonb_path_query_tz
----------------------
- "12:35:00"
- "12:36:00"
- "12:35:00"
- "12:35:00"
- "13:35:00"
- "12:35:00"
-(6 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
- '$[*].time() ? (@ < "12:35:00".time())');
- jsonb_path_query_tz
----------------------
- "12:34:00"
- "11:35:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00.123", "12:35:00.123", "12:36:00.1123", "12:35:00.1123+00", "12:35:00.123+01", "13:35:00.123+01", "2017-03-10 12:35:00.1", "2017-03-10 12:35:00.123+01"]',
- '$[*].time(2) ? (@ >= "12:35:00.123".time(2))');
- jsonb_path_query_tz
----------------------
- "12:35:00.12"
- "12:36:00.11"
- "12:35:00.12"
- "13:35:00.12"
-(4 rows)
-
--- timetz comparison
-select jsonb_path_query(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))');
- jsonb_path_query_tz
----------------------
- "12:35:00+01:00"
-(1 row)
-
-select jsonb_path_query_tz(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))');
- jsonb_path_query_tz
----------------------
- "12:35:00+01:00"
- "12:36:00+01:00"
- "12:35:00-02:00"
- "11:35:00"
- "12:35:00"
-(5 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))');
- jsonb_path_query_tz
----------------------
- "12:34:00+01:00"
- "12:35:00+02:00"
- "10:35:00"
-(3 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].datetime() ? (@ == "12:35:00 +1".time_tz())');
- jsonb_path_query_tz
----------------------
- "12:35:00+01:00"
-(1 row)
-
-select jsonb_path_query_tz(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].datetime() ? (@ >= "12:35:00 +1".time_tz())');
- jsonb_path_query_tz
----------------------
- "12:35:00+01:00"
- "12:36:00+01:00"
- "12:35:00-02:00"
- "11:35:00"
- "12:35:00"
-(5 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].datetime() ? (@ < "12:35:00 +1".time_tz())');
- jsonb_path_query_tz
----------------------
- "12:34:00+01:00"
- "12:35:00+02:00"
- "10:35:00"
-(3 rows)
-
-select jsonb_path_query(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].time_tz() ? (@ == "12:35:00 +1".time_tz())');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].time_tz() ? (@ >= "12:35:00 +1".time_tz())');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].time_tz() ? (@ < "12:35:00 +1".time_tz())');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["12:34:00.123+01", "12:35:00.123+01", "12:36:00.1123+01", "12:35:00.1123+02", "12:35:00.123-02", "10:35:00.123", "11:35:00.1", "12:35:00.123", "2017-03-10 12:35:00.123 +1"]',
- '$[*].time_tz(2) ? (@ >= "12:35:00.123 +1".time_tz(2))');
-ERROR: cannot convert value from time to timetz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].time_tz() ? (@ == "12:35:00 +1".time_tz())');
- jsonb_path_query_tz
----------------------
- "12:35:00+01:00"
-(1 row)
-
-select jsonb_path_query_tz(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].time_tz() ? (@ >= "12:35:00 +1".time_tz())');
- jsonb_path_query_tz
----------------------
- "12:35:00+01:00"
- "12:36:00+01:00"
- "12:35:00-02:00"
- "11:35:00+00:00"
- "12:35:00+00:00"
- "11:35:00+00:00"
-(6 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10 12:35:00 +1"]',
- '$[*].time_tz() ? (@ < "12:35:00 +1".time_tz())');
- jsonb_path_query_tz
----------------------
- "12:34:00+01:00"
- "12:35:00+02:00"
- "10:35:00+00:00"
-(3 rows)
-
-select jsonb_path_query_tz(
- '["12:34:00.123+01", "12:35:00.123+01", "12:36:00.1123+01", "12:35:00.1123+02", "12:35:00.123-02", "10:35:00.123", "11:35:00.1", "12:35:00.123", "2017-03-10 12:35:00.123 +1"]',
- '$[*].time_tz(2) ? (@ >= "12:35:00.123 +1".time_tz(2))');
- jsonb_path_query_tz
----------------------
- "12:35:00.12+01:00"
- "12:36:00.11+01:00"
- "12:35:00.12-02:00"
- "12:35:00.12+00:00"
- "11:35:00.12+00:00"
-(5 rows)
-
--- timestamp comparison
-select jsonb_path_query(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00"
- "2017-03-10T13:35:00+01:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00"
- "2017-03-10T12:36:00"
- "2017-03-10T13:35:00+01:00"
- "2017-03-10T12:35:00-01:00"
- "2017-03-11"
-(5 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:34:00"
- "2017-03-10T12:35:00+01:00"
- "2017-03-10"
-(3 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]',
- '$[*].datetime() ? (@ == "2017-03-10 12:35:00".timestamp())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00"
- "2017-03-10T13:35:00+01:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]',
- '$[*].datetime() ? (@ >= "2017-03-10 12:35:00".timestamp())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00"
- "2017-03-10T12:36:00"
- "2017-03-10T13:35:00+01:00"
- "2017-03-10T12:35:00-01:00"
- "2017-03-11"
-(5 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]',
- '$[*].datetime() ? (@ < "2017-03-10 12:35:00".timestamp())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:34:00"
- "2017-03-10T12:35:00+01:00"
- "2017-03-10"
-(3 rows)
-
-select jsonb_path_query(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp() ? (@ == "2017-03-10 12:35:00".timestamp())');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp() ? (@ >= "2017-03-10 12:35:00".timestamp())');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp() ? (@ < "2017-03-10 12:35:00".timestamp())');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00.123", "2017-03-10 12:35:00.123", "2017-03-10 12:36:00.1123", "2017-03-10 12:35:00.1123+01", "2017-03-10 13:35:00.123+01", "2017-03-10 12:35:00.1-01", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp(2) ? (@ >= "2017-03-10 12:35:00.123".timestamp(2))');
-ERROR: cannot convert value from timestamptz to timestamp without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp() ? (@ == "2017-03-10 12:35:00".timestamp())');
- jsonb_path_query_tz
------------------------
- "2017-03-10T12:35:00"
- "2017-03-10T12:35:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp() ? (@ >= "2017-03-10 12:35:00".timestamp())');
- jsonb_path_query_tz
------------------------
- "2017-03-10T12:35:00"
- "2017-03-10T12:36:00"
- "2017-03-10T12:35:00"
- "2017-03-10T13:35:00"
- "2017-03-11T00:00:00"
-(5 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp() ? (@ < "2017-03-10 12:35:00".timestamp())');
- jsonb_path_query_tz
------------------------
- "2017-03-10T12:34:00"
- "2017-03-10T11:35:00"
- "2017-03-10T00:00:00"
-(3 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00.123", "2017-03-10 12:35:00.123", "2017-03-10 12:36:00.1123", "2017-03-10 12:35:00.1123+01", "2017-03-10 13:35:00.123+01", "2017-03-10 12:35:00.1-01", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp(2) ? (@ >= "2017-03-10 12:35:00.123".timestamp(2))');
- jsonb_path_query_tz
---------------------------
- "2017-03-10T12:35:00.12"
- "2017-03-10T12:36:00.11"
- "2017-03-10T12:35:00.12"
- "2017-03-10T13:35:00.1"
- "2017-03-11T00:00:00"
-(5 rows)
-
--- timestamptz comparison
-select jsonb_path_query(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00+01:00"
- "2017-03-10T11:35:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00+01:00"
- "2017-03-10T12:36:00+01:00"
- "2017-03-10T12:35:00-02:00"
- "2017-03-10T11:35:00"
- "2017-03-10T12:35:00"
- "2017-03-11"
-(6 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
- '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:34:00+01:00"
- "2017-03-10T12:35:00+02:00"
- "2017-03-10T10:35:00"
- "2017-03-10"
-(4 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]',
- '$[*].datetime() ? (@ == "2017-03-10 12:35:00 +1".timestamp_tz())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00+01:00"
- "2017-03-10T11:35:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]',
- '$[*].datetime() ? (@ >= "2017-03-10 12:35:00 +1".timestamp_tz())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00+01:00"
- "2017-03-10T12:36:00+01:00"
- "2017-03-10T12:35:00-02:00"
- "2017-03-10T11:35:00"
- "2017-03-10T12:35:00"
- "2017-03-11"
-(6 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]',
- '$[*].datetime() ? (@ < "2017-03-10 12:35:00 +1".timestamp_tz())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:34:00+01:00"
- "2017-03-10T12:35:00+02:00"
- "2017-03-10T10:35:00"
- "2017-03-10"
-(4 rows)
-
-select jsonb_path_query(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp_tz() ? (@ == "2017-03-10 12:35:00 +1".timestamp_tz())');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp_tz() ? (@ >= "2017-03-10 12:35:00 +1".timestamp_tz())');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp_tz() ? (@ < "2017-03-10 12:35:00 +1".timestamp_tz())');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query(
- '["2017-03-10 12:34:00.123+01", "2017-03-10 12:35:00.123+01", "2017-03-10 12:36:00.1123+01", "2017-03-10 12:35:00.1123+02", "2017-03-10 12:35:00.123-02", "2017-03-10 10:35:00.123", "2017-03-10 11:35:00.1", "2017-03-10 12:35:00.123", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp_tz(2) ? (@ >= "2017-03-10 12:35:00.123 +1".timestamp_tz(2))');
-ERROR: cannot convert value from timestamp to timestamptz without time zone usage
-HINT: Use *_tz() function for time zone support.
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp_tz() ? (@ == "2017-03-10 12:35:00 +1".timestamp_tz())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00+01:00"
- "2017-03-10T11:35:00+00:00"
-(2 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp_tz() ? (@ >= "2017-03-10 12:35:00 +1".timestamp_tz())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:35:00+01:00"
- "2017-03-10T12:36:00+01:00"
- "2017-03-10T12:35:00-02:00"
- "2017-03-10T11:35:00+00:00"
- "2017-03-10T12:35:00+00:00"
- "2017-03-11T00:00:00+00:00"
-(6 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp_tz() ? (@ < "2017-03-10 12:35:00 +1".timestamp_tz())');
- jsonb_path_query_tz
------------------------------
- "2017-03-10T12:34:00+01:00"
- "2017-03-10T12:35:00+02:00"
- "2017-03-10T10:35:00+00:00"
- "2017-03-10T00:00:00+00:00"
-(4 rows)
-
-select jsonb_path_query_tz(
- '["2017-03-10 12:34:00.123+01", "2017-03-10 12:35:00.123+01", "2017-03-10 12:36:00.1123+01", "2017-03-10 12:35:00.1123+02", "2017-03-10 12:35:00.123-02", "2017-03-10 10:35:00.123", "2017-03-10 11:35:00.1", "2017-03-10 12:35:00.123", "2017-03-10", "2017-03-11"]',
- '$[*].timestamp_tz(2) ? (@ >= "2017-03-10 12:35:00.123 +1".timestamp_tz(2))');
- jsonb_path_query_tz
---------------------------------
- "2017-03-10T12:35:00.12+01:00"
- "2017-03-10T12:36:00.11+01:00"
- "2017-03-10T12:35:00.12-02:00"
- "2017-03-10T12:35:00.12+00:00"
- "2017-03-11T00:00:00+00:00"
-(5 rows)
-
--- overflow during comparison
-select jsonb_path_query('"1000000-01-01"', '$.datetime() > "2020-01-01 12:00:00".datetime()'::jsonpath);
- jsonb_path_query
-------------------
- true
-(1 row)
-
-set time zone default;
--- jsonpath operators
-SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*]');
- jsonb_path_query
-------------------
- {"a": 1}
- {"a": 2}
-(2 rows)
-
-SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*] ? (@.a > 10)');
- jsonb_path_query
-------------------
-(0 rows)
-
-SELECT jsonb_path_query('[{"a": 1}]', '$undefined_var');
-ERROR: could not find jsonpath variable "undefined_var"
-SELECT jsonb_path_query('[{"a": 1}]', 'false');
- jsonb_path_query
-------------------
- false
-(1 row)
-
-SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a');
-ERROR: JSON object does not contain key "a"
-SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a');
- jsonb_path_query_array
-------------------------
- [1, 2]
-(1 row)
-
-SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)');
- jsonb_path_query_array
-------------------------
- [1]
-(1 row)
-
-SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)');
- jsonb_path_query_array
-------------------------
- []
-(1 row)
-
-SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}');
- jsonb_path_query_array
-------------------------
- [2, 3]
-(1 row)
-
-SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}');
- jsonb_path_query_array
-------------------------
- []
-(1 row)
-
-SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a');
-ERROR: JSON object does not contain key "a"
-SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a', silent => true);
- jsonb_path_query_first
-------------------------
- 1
-(1 row)
-
-SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a');
- jsonb_path_query_first
-------------------------
- 1
-(1 row)
-
-SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)');
- jsonb_path_query_first
-------------------------
- 1
-(1 row)
-
-SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)');
- jsonb_path_query_first
-------------------------
-
-(1 row)
-
-SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}');
- jsonb_path_query_first
-------------------------
- 2
-(1 row)
-
-SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}');
- jsonb_path_query_first
-------------------------
-
-(1 row)
-
-SELECT jsonb_path_query_first('[{"a": 1}]', '$undefined_var');
-ERROR: could not find jsonpath variable "undefined_var"
-SELECT jsonb_path_query_first('[{"a": 1}]', 'false');
- jsonb_path_query_first
-------------------------
- false
-(1 row)
-
-SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*].a ? (@ > 1)';
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*] ? (@.a > 2)';
- ?column?
-----------
- f
-(1 row)
-
-SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 1)');
- jsonb_path_exists
--------------------
- t
-(1 row)
-
-SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 1, "max": 4}');
- jsonb_path_exists
--------------------
- t
-(1 row)
-
-SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 3, "max": 4}');
- jsonb_path_exists
--------------------
- f
-(1 row)
-
-SELECT jsonb_path_exists('[{"a": 1}]', '$undefined_var');
-ERROR: could not find jsonpath variable "undefined_var"
-SELECT jsonb_path_exists('[{"a": 1}]', 'false');
- jsonb_path_exists
--------------------
- t
-(1 row)
-
-SELECT jsonb_path_match('true', '$', silent => false);
- jsonb_path_match
-------------------
- t
-(1 row)
-
-SELECT jsonb_path_match('false', '$', silent => false);
- jsonb_path_match
-------------------
- f
-(1 row)
-
-SELECT jsonb_path_match('null', '$', silent => false);
- jsonb_path_match
-------------------
-
-(1 row)
-
-SELECT jsonb_path_match('1', '$', silent => true);
- jsonb_path_match
-------------------
-
-(1 row)
-
-SELECT jsonb_path_match('1', '$', silent => false);
-ERROR: single boolean result is expected
-SELECT jsonb_path_match('"a"', '$', silent => false);
-ERROR: single boolean result is expected
-SELECT jsonb_path_match('{}', '$', silent => false);
-ERROR: single boolean result is expected
-SELECT jsonb_path_match('[true]', '$', silent => false);
-ERROR: single boolean result is expected
-SELECT jsonb_path_match('{}', 'lax $.a', silent => false);
-ERROR: single boolean result is expected
-SELECT jsonb_path_match('{}', 'strict $.a', silent => false);
-ERROR: JSON object does not contain key "a"
-SELECT jsonb_path_match('{}', 'strict $.a', silent => true);
- jsonb_path_match
-------------------
-
-(1 row)
-
-SELECT jsonb_path_match('[true, true]', '$[*]', silent => false);
-ERROR: single boolean result is expected
-SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 1';
- ?column?
-----------
- t
-(1 row)
-
-SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 2';
- ?column?
-----------
- f
-(1 row)
-
-SELECT jsonb_path_match('[{"a": 1}, {"a": 2}]', '$[*].a > 1');
- jsonb_path_match
-------------------
- t
-(1 row)
-
-SELECT jsonb_path_match('[{"a": 1}]', '$undefined_var');
-ERROR: could not find jsonpath variable "undefined_var"
-SELECT jsonb_path_match('[{"a": 1}]', 'false');
- jsonb_path_match
-------------------
- f
-(1 row)
-
--- test string comparison (Unicode codepoint collation)
-WITH str(j, num) AS
-(
- SELECT jsonb_build_object('s', s), num
- FROM unnest('{"", "a", "ab", "abc", "abcd", "b", "A", "AB", "ABC", "ABc", "ABcD", "B"}'::text[]) WITH ORDINALITY AS a(s, num)
-)
-SELECT
- s1.j, s2.j,
- jsonb_path_query_first(s1.j, '$.s < $s', vars => s2.j) lt,
- jsonb_path_query_first(s1.j, '$.s <= $s', vars => s2.j) le,
- jsonb_path_query_first(s1.j, '$.s == $s', vars => s2.j) eq,
- jsonb_path_query_first(s1.j, '$.s >= $s', vars => s2.j) ge,
- jsonb_path_query_first(s1.j, '$.s > $s', vars => s2.j) gt
-FROM str s1, str s2
-ORDER BY s1.num, s2.num;
- j | j | lt | le | eq | ge | gt
----------------+---------------+-------+-------+-------+-------+-------
- {"s": ""} | {"s": ""} | false | true | true | true | false
- {"s": ""} | {"s": "a"} | true | true | false | false | false
- {"s": ""} | {"s": "ab"} | true | true | false | false | false
- {"s": ""} | {"s": "abc"} | true | true | false | false | false
- {"s": ""} | {"s": "abcd"} | true | true | false | false | false
- {"s": ""} | {"s": "b"} | true | true | false | false | false
- {"s": ""} | {"s": "A"} | true | true | false | false | false
- {"s": ""} | {"s": "AB"} | true | true | false | false | false
- {"s": ""} | {"s": "ABC"} | true | true | false | false | false
- {"s": ""} | {"s": "ABc"} | true | true | false | false | false
- {"s": ""} | {"s": "ABcD"} | true | true | false | false | false
- {"s": ""} | {"s": "B"} | true | true | false | false | false
- {"s": "a"} | {"s": ""} | false | false | false | true | true
- {"s": "a"} | {"s": "a"} | false | true | true | true | false
- {"s": "a"} | {"s": "ab"} | true | true | false | false | false
- {"s": "a"} | {"s": "abc"} | true | true | false | false | false
- {"s": "a"} | {"s": "abcd"} | true | true | false | false | false
- {"s": "a"} | {"s": "b"} | true | true | false | false | false
- {"s": "a"} | {"s": "A"} | false | false | false | true | true
- {"s": "a"} | {"s": "AB"} | false | false | false | true | true
- {"s": "a"} | {"s": "ABC"} | false | false | false | true | true
- {"s": "a"} | {"s": "ABc"} | false | false | false | true | true
- {"s": "a"} | {"s": "ABcD"} | false | false | false | true | true
- {"s": "a"} | {"s": "B"} | false | false | false | true | true
- {"s": "ab"} | {"s": ""} | false | false | false | true | true
- {"s": "ab"} | {"s": "a"} | false | false | false | true | true
- {"s": "ab"} | {"s": "ab"} | false | true | true | true | false
- {"s": "ab"} | {"s": "abc"} | true | true | false | false | false
- {"s": "ab"} | {"s": "abcd"} | true | true | false | false | false
- {"s": "ab"} | {"s": "b"} | true | true | false | false | false
- {"s": "ab"} | {"s": "A"} | false | false | false | true | true
- {"s": "ab"} | {"s": "AB"} | false | false | false | true | true
- {"s": "ab"} | {"s": "ABC"} | false | false | false | true | true
- {"s": "ab"} | {"s": "ABc"} | false | false | false | true | true
- {"s": "ab"} | {"s": "ABcD"} | false | false | false | true | true
- {"s": "ab"} | {"s": "B"} | false | false | false | true | true
- {"s": "abc"} | {"s": ""} | false | false | false | true | true
- {"s": "abc"} | {"s": "a"} | false | false | false | true | true
- {"s": "abc"} | {"s": "ab"} | false | false | false | true | true
- {"s": "abc"} | {"s": "abc"} | false | true | true | true | false
- {"s": "abc"} | {"s": "abcd"} | true | true | false | false | false
- {"s": "abc"} | {"s": "b"} | true | true | false | false | false
- {"s": "abc"} | {"s": "A"} | false | false | false | true | true
- {"s": "abc"} | {"s": "AB"} | false | false | false | true | true
- {"s": "abc"} | {"s": "ABC"} | false | false | false | true | true
- {"s": "abc"} | {"s": "ABc"} | false | false | false | true | true
- {"s": "abc"} | {"s": "ABcD"} | false | false | false | true | true
- {"s": "abc"} | {"s": "B"} | false | false | false | true | true
- {"s": "abcd"} | {"s": ""} | false | false | false | true | true
- {"s": "abcd"} | {"s": "a"} | false | false | false | true | true
- {"s": "abcd"} | {"s": "ab"} | false | false | false | true | true
- {"s": "abcd"} | {"s": "abc"} | false | false | false | true | true
- {"s": "abcd"} | {"s": "abcd"} | false | true | true | true | false
- {"s": "abcd"} | {"s": "b"} | true | true | false | false | false
- {"s": "abcd"} | {"s": "A"} | false | false | false | true | true
- {"s": "abcd"} | {"s": "AB"} | false | false | false | true | true
- {"s": "abcd"} | {"s": "ABC"} | false | false | false | true | true
- {"s": "abcd"} | {"s": "ABc"} | false | false | false | true | true
- {"s": "abcd"} | {"s": "ABcD"} | false | false | false | true | true
- {"s": "abcd"} | {"s": "B"} | false | false | false | true | true
- {"s": "b"} | {"s": ""} | false | false | false | true | true
- {"s": "b"} | {"s": "a"} | false | false | false | true | true
- {"s": "b"} | {"s": "ab"} | false | false | false | true | true
- {"s": "b"} | {"s": "abc"} | false | false | false | true | true
- {"s": "b"} | {"s": "abcd"} | false | false | false | true | true
- {"s": "b"} | {"s": "b"} | false | true | true | true | false
- {"s": "b"} | {"s": "A"} | false | false | false | true | true
- {"s": "b"} | {"s": "AB"} | false | false | false | true | true
- {"s": "b"} | {"s": "ABC"} | false | false | false | true | true
- {"s": "b"} | {"s": "ABc"} | false | false | false | true | true
- {"s": "b"} | {"s": "ABcD"} | false | false | false | true | true
- {"s": "b"} | {"s": "B"} | false | false | false | true | true
- {"s": "A"} | {"s": ""} | false | false | false | true | true
- {"s": "A"} | {"s": "a"} | true | true | false | false | false
- {"s": "A"} | {"s": "ab"} | true | true | false | false | false
- {"s": "A"} | {"s": "abc"} | true | true | false | false | false
- {"s": "A"} | {"s": "abcd"} | true | true | false | false | false
- {"s": "A"} | {"s": "b"} | true | true | false | false | false
- {"s": "A"} | {"s": "A"} | false | true | true | true | false
- {"s": "A"} | {"s": "AB"} | true | true | false | false | false
- {"s": "A"} | {"s": "ABC"} | true | true | false | false | false
- {"s": "A"} | {"s": "ABc"} | true | true | false | false | false
- {"s": "A"} | {"s": "ABcD"} | true | true | false | false | false
- {"s": "A"} | {"s": "B"} | true | true | false | false | false
- {"s": "AB"} | {"s": ""} | false | false | false | true | true
- {"s": "AB"} | {"s": "a"} | true | true | false | false | false
- {"s": "AB"} | {"s": "ab"} | true | true | false | false | false
- {"s": "AB"} | {"s": "abc"} | true | true | false | false | false
- {"s": "AB"} | {"s": "abcd"} | true | true | false | false | false
- {"s": "AB"} | {"s": "b"} | true | true | false | false | false
- {"s": "AB"} | {"s": "A"} | false | false | false | true | true
- {"s": "AB"} | {"s": "AB"} | false | true | true | true | false
- {"s": "AB"} | {"s": "ABC"} | true | true | false | false | false
- {"s": "AB"} | {"s": "ABc"} | true | true | false | false | false
- {"s": "AB"} | {"s": "ABcD"} | true | true | false | false | false
- {"s": "AB"} | {"s": "B"} | true | true | false | false | false
- {"s": "ABC"} | {"s": ""} | false | false | false | true | true
- {"s": "ABC"} | {"s": "a"} | true | true | false | false | false
- {"s": "ABC"} | {"s": "ab"} | true | true | false | false | false
- {"s": "ABC"} | {"s": "abc"} | true | true | false | false | false
- {"s": "ABC"} | {"s": "abcd"} | true | true | false | false | false
- {"s": "ABC"} | {"s": "b"} | true | true | false | false | false
- {"s": "ABC"} | {"s": "A"} | false | false | false | true | true
- {"s": "ABC"} | {"s": "AB"} | false | false | false | true | true
- {"s": "ABC"} | {"s": "ABC"} | false | true | true | true | false
- {"s": "ABC"} | {"s": "ABc"} | true | true | false | false | false
- {"s": "ABC"} | {"s": "ABcD"} | true | true | false | false | false
- {"s": "ABC"} | {"s": "B"} | true | true | false | false | false
- {"s": "ABc"} | {"s": ""} | false | false | false | true | true
- {"s": "ABc"} | {"s": "a"} | true | true | false | false | false
- {"s": "ABc"} | {"s": "ab"} | true | true | false | false | false
- {"s": "ABc"} | {"s": "abc"} | true | true | false | false | false
- {"s": "ABc"} | {"s": "abcd"} | true | true | false | false | false
- {"s": "ABc"} | {"s": "b"} | true | true | false | false | false
- {"s": "ABc"} | {"s": "A"} | false | false | false | true | true
- {"s": "ABc"} | {"s": "AB"} | false | false | false | true | true
- {"s": "ABc"} | {"s": "ABC"} | false | false | false | true | true
- {"s": "ABc"} | {"s": "ABc"} | false | true | true | true | false
- {"s": "ABc"} | {"s": "ABcD"} | true | true | false | false | false
- {"s": "ABc"} | {"s": "B"} | true | true | false | false | false
- {"s": "ABcD"} | {"s": ""} | false | false | false | true | true
- {"s": "ABcD"} | {"s": "a"} | true | true | false | false | false
- {"s": "ABcD"} | {"s": "ab"} | true | true | false | false | false
- {"s": "ABcD"} | {"s": "abc"} | true | true | false | false | false
- {"s": "ABcD"} | {"s": "abcd"} | true | true | false | false | false
- {"s": "ABcD"} | {"s": "b"} | true | true | false | false | false
- {"s": "ABcD"} | {"s": "A"} | false | false | false | true | true
- {"s": "ABcD"} | {"s": "AB"} | false | false | false | true | true
- {"s": "ABcD"} | {"s": "ABC"} | false | false | false | true | true
- {"s": "ABcD"} | {"s": "ABc"} | false | false | false | true | true
- {"s": "ABcD"} | {"s": "ABcD"} | false | true | true | true | false
- {"s": "ABcD"} | {"s": "B"} | true | true | false | false | false
- {"s": "B"} | {"s": ""} | false | false | false | true | true
- {"s": "B"} | {"s": "a"} | true | true | false | false | false
- {"s": "B"} | {"s": "ab"} | true | true | false | false | false
- {"s": "B"} | {"s": "abc"} | true | true | false | false | false
- {"s": "B"} | {"s": "abcd"} | true | true | false | false | false
- {"s": "B"} | {"s": "b"} | true | true | false | false | false
- {"s": "B"} | {"s": "A"} | false | false | false | true | true
- {"s": "B"} | {"s": "AB"} | false | false | false | true | true
- {"s": "B"} | {"s": "ABC"} | false | false | false | true | true
- {"s": "B"} | {"s": "ABc"} | false | false | false | true | true
- {"s": "B"} | {"s": "ABcD"} | false | false | false | true | true
- {"s": "B"} | {"s": "B"} | false | true | true | true | false
-(144 rows)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/sqljson.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/sqljson.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/sqljson.out 2024-11-15 02:50:52.502029300 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/sqljson.out 2024-11-15 02:59:17.853116535 +0000
@@ -1,1355 +1,2 @@
--- JSON()
-SELECT JSON();
-ERROR: syntax error at or near ")"
-LINE 1: SELECT JSON();
- ^
-SELECT JSON(NULL);
- json
-------
-
-(1 row)
-
-SELECT JSON('{ "a" : 1 } ');
- json
---------------
- { "a" : 1 }
-(1 row)
-
-SELECT JSON('{ "a" : 1 } ' FORMAT JSON);
- json
---------------
- { "a" : 1 }
-(1 row)
-
-SELECT JSON('{ "a" : 1 } ' FORMAT JSON ENCODING UTF8);
-ERROR: JSON ENCODING clause is only allowed for bytea input type
-LINE 1: SELECT JSON('{ "a" : 1 } ' FORMAT JSON ENCODING UTF8);
- ^
-SELECT JSON('{ "a" : 1 } '::bytea FORMAT JSON ENCODING UTF8);
- json
---------------
- { "a" : 1 }
-(1 row)
-
-SELECT pg_typeof(JSON('{ "a" : 1 } '));
- pg_typeof
------------
- json
-(1 row)
-
-SELECT JSON(' 1 '::json);
- json
----------
- 1
-(1 row)
-
-SELECT JSON(' 1 '::jsonb);
- json
-------
- 1
-(1 row)
-
-SELECT JSON(' 1 '::json WITH UNIQUE KEYS);
-ERROR: cannot use non-string types with WITH UNIQUE KEYS clause
-LINE 1: SELECT JSON(' 1 '::json WITH UNIQUE KEYS);
- ^
-SELECT JSON(123);
-ERROR: cannot cast type integer to json
-LINE 1: SELECT JSON(123);
- ^
-SELECT JSON('{"a": 1, "a": 2}');
- json
-------------------
- {"a": 1, "a": 2}
-(1 row)
-
-SELECT JSON('{"a": 1, "a": 2}' WITH UNIQUE KEYS);
-ERROR: duplicate JSON object key value
-SELECT JSON('{"a": 1, "a": 2}' WITHOUT UNIQUE KEYS);
- json
-------------------
- {"a": 1, "a": 2}
-(1 row)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123');
- QUERY PLAN
------------------------------
- Result
- Output: JSON('123'::json)
-(2 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123' FORMAT JSON);
- QUERY PLAN
------------------------------
- Result
- Output: JSON('123'::json)
-(2 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'::bytea FORMAT JSON);
- QUERY PLAN
------------------------------------------------
- Result
- Output: JSON('\x313233'::bytea FORMAT JSON)
-(2 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123'::bytea FORMAT JSON ENCODING UTF8);
- QUERY PLAN
--------------------------------------------------------------
- Result
- Output: JSON('\x313233'::bytea FORMAT JSON ENCODING UTF8)
-(2 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123' WITH UNIQUE KEYS);
- QUERY PLAN
-----------------------------------------------
- Result
- Output: JSON('123'::text WITH UNIQUE KEYS)
-(2 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123' WITHOUT UNIQUE KEYS);
- QUERY PLAN
------------------------------
- Result
- Output: JSON('123'::json)
-(2 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON('123');
- QUERY PLAN
------------------------------
- Result
- Output: JSON('123'::json)
-(2 rows)
-
-SELECT pg_typeof(JSON('123'));
- pg_typeof
------------
- json
-(1 row)
-
--- JSON_SCALAR()
-SELECT JSON_SCALAR();
-ERROR: syntax error at or near ")"
-LINE 1: SELECT JSON_SCALAR();
- ^
-SELECT JSON_SCALAR(NULL);
- json_scalar
--------------
-
-(1 row)
-
-SELECT JSON_SCALAR(NULL::int);
- json_scalar
--------------
-
-(1 row)
-
-SELECT JSON_SCALAR(123);
- json_scalar
--------------
- 123
-(1 row)
-
-SELECT JSON_SCALAR(123.45);
- json_scalar
--------------
- 123.45
-(1 row)
-
-SELECT JSON_SCALAR(123.45::numeric);
- json_scalar
--------------
- 123.45
-(1 row)
-
-SELECT JSON_SCALAR(true);
- json_scalar
--------------
- true
-(1 row)
-
-SELECT JSON_SCALAR(false);
- json_scalar
--------------
- false
-(1 row)
-
-SELECT JSON_SCALAR(' 123.45');
- json_scalar
--------------
- " 123.45"
-(1 row)
-
-SELECT JSON_SCALAR('2020-06-07'::date);
- json_scalar
---------------
- "2020-06-07"
-(1 row)
-
-SELECT JSON_SCALAR('2020-06-07 01:02:03'::timestamp);
- json_scalar
------------------------
- "2020-06-07T01:02:03"
-(1 row)
-
-SELECT JSON_SCALAR('{}'::json);
- json_scalar
--------------
- {}
-(1 row)
-
-SELECT JSON_SCALAR('{}'::jsonb);
- json_scalar
--------------
- {}
-(1 row)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SCALAR(123);
- QUERY PLAN
-----------------------------
- Result
- Output: JSON_SCALAR(123)
-(2 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SCALAR('123');
- QUERY PLAN
-------------------------------------
- Result
- Output: JSON_SCALAR('123'::text)
-(2 rows)
-
--- JSON_SERIALIZE()
-SELECT JSON_SERIALIZE();
-ERROR: syntax error at or near ")"
-LINE 1: SELECT JSON_SERIALIZE();
- ^
-SELECT JSON_SERIALIZE(NULL);
- json_serialize
-----------------
-
-(1 row)
-
-SELECT JSON_SERIALIZE(JSON('{ "a" : 1 } '));
- json_serialize
-----------------
- { "a" : 1 }
-(1 row)
-
-SELECT JSON_SERIALIZE('{ "a" : 1 } ');
- json_serialize
-----------------
- { "a" : 1 }
-(1 row)
-
-SELECT JSON_SERIALIZE('1');
- json_serialize
-----------------
- 1
-(1 row)
-
-SELECT JSON_SERIALIZE('1' FORMAT JSON);
- json_serialize
-----------------
- 1
-(1 row)
-
-SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING bytea);
- json_serialize
-----------------------------
- \x7b20226122203a2031207d20
-(1 row)
-
-SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING varchar);
- json_serialize
-----------------
- { "a" : 1 }
-(1 row)
-
-SELECT pg_typeof(JSON_SERIALIZE(NULL));
- pg_typeof
------------
- text
-(1 row)
-
--- only string types or bytea allowed
-SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING jsonb);
-ERROR: cannot use RETURNING type jsonb in JSON_SERIALIZE()
-HINT: Try returning a string type or bytea.
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SERIALIZE('{}');
- QUERY PLAN
------------------------------------------------------
- Result
- Output: JSON_SERIALIZE('{}'::json RETURNING text)
-(2 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_SERIALIZE('{}' RETURNING bytea);
- QUERY PLAN
-------------------------------------------------------
- Result
- Output: JSON_SERIALIZE('{}'::json RETURNING bytea)
-(2 rows)
-
--- JSON_OBJECT()
-SELECT JSON_OBJECT();
- json_object
--------------
- {}
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING json);
- json_object
--------------
- {}
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING json FORMAT JSON);
- json_object
--------------
- {}
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING jsonb);
- json_object
--------------
- {}
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING jsonb FORMAT JSON);
- json_object
--------------
- {}
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING text);
- json_object
--------------
- {}
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING text FORMAT JSON);
- json_object
--------------
- {}
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING UTF8);
-ERROR: cannot set JSON encoding for non-bytea output types
-LINE 1: SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING UTF8)...
- ^
-SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING INVALID_ENCODING);
-ERROR: unrecognized JSON encoding: invalid_encoding
-LINE 1: ...T JSON_OBJECT(RETURNING text FORMAT JSON ENCODING INVALID_EN...
- ^
-SELECT JSON_OBJECT(RETURNING bytea);
- json_object
--------------
- \x7b7d
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON);
- json_object
--------------
- \x7b7d
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF8);
- json_object
--------------
- \x7b7d
-(1 row)
-
-SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF16);
-ERROR: unsupported JSON encoding
-LINE 1: SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF1...
- ^
-HINT: Only UTF8 JSON encoding is supported.
-SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF32);
-ERROR: unsupported JSON encoding
-LINE 1: SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF3...
- ^
-HINT: Only UTF8 JSON encoding is supported.
-SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON);
-ERROR: cannot use non-string types with explicit FORMAT JSON clause
-LINE 1: SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON);
- ^
-SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON ENCODING UTF8);
-ERROR: JSON ENCODING clause is only allowed for bytea input type
-LINE 1: SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON ENCODING UTF...
- ^
-SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON);
- json_object
-----------------
- {"foo" : null}
-(1 row)
-
-SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON ENCODING UTF8);
-ERROR: JSON ENCODING clause is only allowed for bytea input type
-LINE 1: SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON ENCODING UT...
- ^
-SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON);
- json_object
----------------
- {"foo": null}
-(1 row)
-
-SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON ENCODING UTF8);
-ERROR: JSON ENCODING clause is only allowed for bytea input type
-LINE 1: SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON ENCODING U...
- ^
-SELECT JSON_OBJECT(NULL: 1);
-ERROR: null value not allowed for object key
-SELECT JSON_OBJECT('a': 2 + 3);
- json_object
--------------
- {"a" : 5}
-(1 row)
-
-SELECT JSON_OBJECT('a' VALUE 2 + 3);
- json_object
--------------
- {"a" : 5}
-(1 row)
-
---SELECT JSON_OBJECT(KEY 'a' VALUE 2 + 3);
-SELECT JSON_OBJECT('a' || 2: 1);
- json_object
--------------
- {"a2" : 1}
-(1 row)
-
-SELECT JSON_OBJECT(('a' || 2) VALUE 1);
- json_object
--------------
- {"a2" : 1}
-(1 row)
-
---SELECT JSON_OBJECT('a' || 2 VALUE 1);
---SELECT JSON_OBJECT(KEY 'a' || 2 VALUE 1);
-SELECT JSON_OBJECT('a': 2::text);
- json_object
--------------
- {"a" : "2"}
-(1 row)
-
-SELECT JSON_OBJECT('a' VALUE 2::text);
- json_object
--------------
- {"a" : "2"}
-(1 row)
-
---SELECT JSON_OBJECT(KEY 'a' VALUE 2::text);
-SELECT JSON_OBJECT(1::text: 2);
- json_object
--------------
- {"1" : 2}
-(1 row)
-
-SELECT JSON_OBJECT((1::text) VALUE 2);
- json_object
--------------
- {"1" : 2}
-(1 row)
-
---SELECT JSON_OBJECT(1::text VALUE 2);
---SELECT JSON_OBJECT(KEY 1::text VALUE 2);
-SELECT JSON_OBJECT(json '[1]': 123);
-ERROR: key value must be scalar, not array, composite, or json
-SELECT JSON_OBJECT(ARRAY[1,2,3]: 'aaa');
-ERROR: key value must be scalar, not array, composite, or json
-SELECT JSON_OBJECT(
- 'a': '123',
- 1.23: 123,
- 'c': json '[ 1,true,{ } ]',
- 'd': jsonb '{ "x" : 123.45 }'
-);
- json_object
--------------------------------------------------------------------
- {"a": "123", "c": [1, true, {}], "d": {"x": 123.45}, "1.23": 123}
-(1 row)
-
-SELECT JSON_OBJECT(
- 'a': '123',
- 1.23: 123,
- 'c': json '[ 1,true,{ } ]',
- 'd': jsonb '{ "x" : 123.45 }'
- RETURNING jsonb
-);
- json_object
--------------------------------------------------------------------
- {"a": "123", "c": [1, true, {}], "d": {"x": 123.45}, "1.23": 123}
-(1 row)
-
-/*
-SELECT JSON_OBJECT(
- 'a': '123',
- KEY 1.23 VALUE 123,
- 'c' VALUE json '[1, true, {}]'
-);
-*/
-SELECT JSON_OBJECT('a': '123', 'b': JSON_OBJECT('a': 111, 'b': 'aaa'));
- json_object
------------------------------------------------
- {"a" : "123", "b" : {"a" : 111, "b" : "aaa"}}
-(1 row)
-
-SELECT JSON_OBJECT('a': '123', 'b': JSON_OBJECT('a': 111, 'b': 'aaa' RETURNING jsonb));
- json_object
--------------------------------------------
- {"a": "123", "b": {"a": 111, "b": "aaa"}}
-(1 row)
-
-SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING text));
- json_object
------------------------
- {"a" : "{\"b\" : 1}"}
-(1 row)
-
-SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING text) FORMAT JSON);
- json_object
--------------------
- {"a" : {"b" : 1}}
-(1 row)
-
-SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING bytea));
- json_object
----------------------------------
- {"a" : "\\x7b226222203a20317d"}
-(1 row)
-
-SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING bytea) FORMAT JSON);
- json_object
--------------------
- {"a" : {"b" : 1}}
-(1 row)
-
-SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2);
- json_object
-----------------------------------
- {"a" : "1", "b" : null, "c" : 2}
-(1 row)
-
-SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2 NULL ON NULL);
- json_object
-----------------------------------
- {"a" : "1", "b" : null, "c" : 2}
-(1 row)
-
-SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2 ABSENT ON NULL);
- json_object
-----------------------
- {"a" : "1", "c" : 2}
-(1 row)
-
-SELECT JSON_OBJECT(1: 1, '2': NULL, '3': 1, repeat('x', 1000): 1, 2: repeat('a', 100) WITH UNIQUE);
-ERROR: duplicate JSON object key value: "2"
-SELECT JSON_OBJECT(1: 1, '1': NULL WITH UNIQUE);
-ERROR: duplicate JSON object key value: "1"
-SELECT JSON_OBJECT(1: 1, '1': NULL ABSENT ON NULL WITH UNIQUE);
-ERROR: duplicate JSON object key value: "1"
-SELECT JSON_OBJECT(1: 1, '1': NULL NULL ON NULL WITH UNIQUE RETURNING jsonb);
-ERROR: duplicate JSON object key value
-SELECT JSON_OBJECT(1: 1, '1': NULL ABSENT ON NULL WITH UNIQUE RETURNING jsonb);
-ERROR: duplicate JSON object key value
-SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 NULL ON NULL WITH UNIQUE);
-ERROR: duplicate JSON object key value: "1"
-SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITH UNIQUE);
-ERROR: duplicate JSON object key value: "1"
-SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITHOUT UNIQUE);
- json_object
---------------------
- {"1" : 1, "1" : 1}
-(1 row)
-
-SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITH UNIQUE RETURNING jsonb);
-ERROR: duplicate JSON object key value
-SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITHOUT UNIQUE RETURNING jsonb);
- json_object
--------------
- {"1": 1}
-(1 row)
-
-SELECT JSON_OBJECT(1: 1, '2': NULL, '3': 1, 4: NULL, '5': 'a' ABSENT ON NULL WITH UNIQUE RETURNING jsonb);
- json_object
-----------------------------
- {"1": 1, "3": 1, "5": "a"}
-(1 row)
-
--- JSON_ARRAY()
-SELECT JSON_ARRAY();
- json_array
-------------
- []
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING json);
- json_array
-------------
- []
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING json FORMAT JSON);
- json_array
-------------
- []
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING jsonb);
- json_array
-------------
- []
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING jsonb FORMAT JSON);
- json_array
-------------
- []
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING text);
- json_array
-------------
- []
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING text FORMAT JSON);
- json_array
-------------
- []
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING UTF8);
-ERROR: cannot set JSON encoding for non-bytea output types
-LINE 1: SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING UTF8);
- ^
-SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING INVALID_ENCODING);
-ERROR: unrecognized JSON encoding: invalid_encoding
-LINE 1: ...CT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING INVALID_EN...
- ^
-SELECT JSON_ARRAY(RETURNING bytea);
- json_array
-------------
- \x5b5d
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON);
- json_array
-------------
- \x5b5d
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF8);
- json_array
-------------
- \x5b5d
-(1 row)
-
-SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF16);
-ERROR: unsupported JSON encoding
-LINE 1: SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF16...
- ^
-HINT: Only UTF8 JSON encoding is supported.
-SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF32);
-ERROR: unsupported JSON encoding
-LINE 1: SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF32...
- ^
-HINT: Only UTF8 JSON encoding is supported.
-SELECT JSON_ARRAY('aaa', 111, true, array[1,2,3], NULL, json '{"a": [1]}', jsonb '["a",3]');
- json_array
------------------------------------------------------
- ["aaa", 111, true, [1, 2, 3], {"a": [1]}, ["a", 3]]
-(1 row)
-
-SELECT JSON_ARRAY('a', NULL, 'b' NULL ON NULL);
- json_array
-------------------
- ["a", null, "b"]
-(1 row)
-
-SELECT JSON_ARRAY('a', NULL, 'b' ABSENT ON NULL);
- json_array
-------------
- ["a", "b"]
-(1 row)
-
-SELECT JSON_ARRAY(NULL, NULL, 'b' ABSENT ON NULL);
- json_array
-------------
- ["b"]
-(1 row)
-
-SELECT JSON_ARRAY('a', NULL, 'b' NULL ON NULL RETURNING jsonb);
- json_array
-------------------
- ["a", null, "b"]
-(1 row)
-
-SELECT JSON_ARRAY('a', NULL, 'b' ABSENT ON NULL RETURNING jsonb);
- json_array
-------------
- ["a", "b"]
-(1 row)
-
-SELECT JSON_ARRAY(NULL, NULL, 'b' ABSENT ON NULL RETURNING jsonb);
- json_array
-------------
- ["b"]
-(1 row)
-
-SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' RETURNING text));
- json_array
--------------------------------
- ["[\"{ \\\"a\\\" : 123 }\"]"]
-(1 row)
-
-SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' FORMAT JSON RETURNING text));
- json_array
------------------------
- ["[{ \"a\" : 123 }]"]
-(1 row)
-
-SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' FORMAT JSON RETURNING text) FORMAT JSON);
- json_array
--------------------
- [[{ "a" : 123 }]]
-(1 row)
-
-SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i));
- json_array
-------------
- [1, 2, 4]
-(1 row)
-
-SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i));
- json_array
-------------
- [[1,2], +
- [3,4]]
-(1 row)
-
-SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) RETURNING jsonb);
- json_array
-------------------
- [[1, 2], [3, 4]]
-(1 row)
-
---SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) NULL ON NULL);
---SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) NULL ON NULL RETURNING jsonb);
-SELECT JSON_ARRAY(SELECT i FROM (VALUES (3), (1), (NULL), (2)) foo(i) ORDER BY i);
- json_array
-------------
- [1, 2, 3]
-(1 row)
-
--- Should fail
-SELECT JSON_ARRAY(SELECT FROM (VALUES (1)) foo(i));
-ERROR: subquery must return only one column
-LINE 1: SELECT JSON_ARRAY(SELECT FROM (VALUES (1)) foo(i));
- ^
-SELECT JSON_ARRAY(SELECT i, i FROM (VALUES (1)) foo(i));
-ERROR: subquery must return only one column
-LINE 1: SELECT JSON_ARRAY(SELECT i, i FROM (VALUES (1)) foo(i));
- ^
-SELECT JSON_ARRAY(SELECT * FROM (VALUES (1, 2)) foo(i, j));
-ERROR: subquery must return only one column
-LINE 1: SELECT JSON_ARRAY(SELECT * FROM (VALUES (1, 2)) foo(i, j));
- ^
--- JSON_ARRAYAGG()
-SELECT JSON_ARRAYAGG(i) IS NULL,
- JSON_ARRAYAGG(i RETURNING jsonb) IS NULL
-FROM generate_series(1, 0) i;
- ?column? | ?column?
-----------+----------
- t | t
-(1 row)
-
-SELECT JSON_ARRAYAGG(i),
- JSON_ARRAYAGG(i RETURNING jsonb)
-FROM generate_series(1, 5) i;
- json_arrayagg | json_arrayagg
------------------+-----------------
- [1, 2, 3, 4, 5] | [1, 2, 3, 4, 5]
-(1 row)
-
-SELECT JSON_ARRAYAGG(i ORDER BY i DESC)
-FROM generate_series(1, 5) i;
- json_arrayagg
------------------
- [5, 4, 3, 2, 1]
-(1 row)
-
-SELECT JSON_ARRAYAGG(i::text::json)
-FROM generate_series(1, 5) i;
- json_arrayagg
------------------
- [1, 2, 3, 4, 5]
-(1 row)
-
-SELECT JSON_ARRAYAGG(JSON_ARRAY(i, i + 1 RETURNING text) FORMAT JSON)
-FROM generate_series(1, 5) i;
- json_arrayagg
-------------------------------------------
- [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]
-(1 row)
-
-SELECT JSON_ARRAYAGG(NULL),
- JSON_ARRAYAGG(NULL RETURNING jsonb)
-FROM generate_series(1, 5);
- json_arrayagg | json_arrayagg
----------------+---------------
- [] | []
-(1 row)
-
-SELECT JSON_ARRAYAGG(NULL NULL ON NULL),
- JSON_ARRAYAGG(NULL NULL ON NULL RETURNING jsonb)
-FROM generate_series(1, 5);
- json_arrayagg | json_arrayagg
---------------------------------+--------------------------------
- [null, null, null, null, null] | [null, null, null, null, null]
-(1 row)
-
-\x
-SELECT
- JSON_ARRAYAGG(bar) as no_options,
- JSON_ARRAYAGG(bar RETURNING jsonb) as returning_jsonb,
- JSON_ARRAYAGG(bar ABSENT ON NULL) as absent_on_null,
- JSON_ARRAYAGG(bar ABSENT ON NULL RETURNING jsonb) as absentonnull_returning_jsonb,
- JSON_ARRAYAGG(bar NULL ON NULL) as null_on_null,
- JSON_ARRAYAGG(bar NULL ON NULL RETURNING jsonb) as nullonnull_returning_jsonb,
- JSON_ARRAYAGG(foo) as row_no_options,
- JSON_ARRAYAGG(foo RETURNING jsonb) as row_returning_jsonb,
- JSON_ARRAYAGG(foo ORDER BY bar) FILTER (WHERE bar > 2) as row_filtered_agg,
- JSON_ARRAYAGG(foo ORDER BY bar RETURNING jsonb) FILTER (WHERE bar > 2) as row_filtered_agg_returning_jsonb
-FROM
- (VALUES (NULL), (3), (1), (NULL), (NULL), (5), (2), (4), (NULL)) foo(bar);
--[ RECORD 1 ]--------------------+-------------------------------------------------------------------------------------------------------------------------
-no_options | [1, 2, 3, 4, 5]
-returning_jsonb | [1, 2, 3, 4, 5]
-absent_on_null | [1, 2, 3, 4, 5]
-absentonnull_returning_jsonb | [1, 2, 3, 4, 5]
-null_on_null | [1, 2, 3, 4, 5, null, null, null, null]
-nullonnull_returning_jsonb | [1, 2, 3, 4, 5, null, null, null, null]
-row_no_options | [{"bar":1}, +
- | {"bar":2}, +
- | {"bar":3}, +
- | {"bar":4}, +
- | {"bar":5}, +
- | {"bar":null}, +
- | {"bar":null}, +
- | {"bar":null}, +
- | {"bar":null}]
-row_returning_jsonb | [{"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 5}, {"bar": null}, {"bar": null}, {"bar": null}, {"bar": null}]
-row_filtered_agg | [{"bar":3}, +
- | {"bar":4}, +
- | {"bar":5}]
-row_filtered_agg_returning_jsonb | [{"bar": 3}, {"bar": 4}, {"bar": 5}]
-
-\x
-SELECT
- bar, JSON_ARRAYAGG(bar) FILTER (WHERE bar > 2) OVER (PARTITION BY foo.bar % 2)
-FROM
- (VALUES (NULL), (3), (1), (NULL), (NULL), (5), (2), (4), (NULL), (5), (4)) foo(bar);
- bar | json_arrayagg
------+---------------
- 4 | [4, 4]
- 4 | [4, 4]
- 2 | [4, 4]
- 5 | [5, 3, 5]
- 3 | [5, 3, 5]
- 1 | [5, 3, 5]
- 5 | [5, 3, 5]
- |
- |
- |
- |
-(11 rows)
-
--- JSON_OBJECTAGG()
-SELECT JSON_OBJECTAGG('key': 1) IS NULL,
- JSON_OBJECTAGG('key': 1 RETURNING jsonb) IS NULL
-WHERE FALSE;
- ?column? | ?column?
-----------+----------
- t | t
-(1 row)
-
-SELECT JSON_OBJECTAGG(NULL: 1);
-ERROR: null value not allowed for object key
-SELECT JSON_OBJECTAGG(NULL: 1 RETURNING jsonb);
-ERROR: field name must not be null
-SELECT
- JSON_OBJECTAGG(i: i),
--- JSON_OBJECTAGG(i VALUE i),
--- JSON_OBJECTAGG(KEY i VALUE i),
- JSON_OBJECTAGG(i: i RETURNING jsonb)
-FROM
- generate_series(1, 5) i;
- json_objectagg | json_objectagg
--------------------------------------------------+------------------------------------------
- { "1" : 1, "2" : 2, "3" : 3, "4" : 4, "5" : 5 } | {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5}
-(1 row)
-
-SELECT
- JSON_OBJECTAGG(k: v),
- JSON_OBJECTAGG(k: v NULL ON NULL),
- JSON_OBJECTAGG(k: v ABSENT ON NULL),
- JSON_OBJECTAGG(k: v RETURNING jsonb),
- JSON_OBJECTAGG(k: v NULL ON NULL RETURNING jsonb),
- JSON_OBJECTAGG(k: v ABSENT ON NULL RETURNING jsonb)
-FROM
- (VALUES (1, 1), (1, NULL), (2, NULL), (3, 3)) foo(k, v);
- json_objectagg | json_objectagg | json_objectagg | json_objectagg | json_objectagg | json_objectagg
-----------------------------------------------+----------------------------------------------+----------------------+--------------------------------+--------------------------------+------------------
- { "1" : 1, "1" : null, "2" : null, "3" : 3 } | { "1" : 1, "1" : null, "2" : null, "3" : 3 } | { "1" : 1, "3" : 3 } | {"1": null, "2": null, "3": 3} | {"1": null, "2": null, "3": 3} | {"1": 1, "3": 3}
-(1 row)
-
-SELECT JSON_OBJECTAGG(k: v WITH UNIQUE KEYS)
-FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v);
-ERROR: duplicate JSON object key value: "1"
-SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS)
-FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v);
-ERROR: duplicate JSON object key value: "1"
-SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS)
-FROM (VALUES (1, 1), (0, NULL), (3, NULL), (2, 2), (4, NULL)) foo(k, v);
- json_objectagg
-----------------------
- { "1" : 1, "2" : 2 }
-(1 row)
-
-SELECT JSON_OBJECTAGG(k: v WITH UNIQUE KEYS RETURNING jsonb)
-FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v);
-ERROR: duplicate JSON object key value
-SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS RETURNING jsonb)
-FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v);
-ERROR: duplicate JSON object key value
-SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS RETURNING jsonb)
-FROM (VALUES (1, 1), (0, NULL),(4, null), (5, null),(6, null),(2, 2)) foo(k, v);
- json_objectagg
-------------------
- {"1": 1, "2": 2}
-(1 row)
-
-SELECT JSON_OBJECTAGG(mod(i,100): (i)::text FORMAT JSON WITH UNIQUE)
-FROM generate_series(0, 199) i;
-ERROR: duplicate JSON object key value: "0"
--- Test JSON_OBJECT deparsing
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json);
- QUERY PLAN
-------------------------------------------------------------------------------
- Result
- Output: JSON_OBJECT('foo' : '1'::json, 'bar' : 'baz'::text RETURNING json)
-(2 rows)
-
-CREATE VIEW json_object_view AS
-SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json);
-\sv json_object_view
-CREATE OR REPLACE VIEW public.json_object_view AS
- SELECT JSON_OBJECT('foo' : '1'::text FORMAT JSON, 'bar' : 'baz'::text RETURNING json) AS "json_object"
-DROP VIEW json_object_view;
-SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v WITH UNIQUE KEYS) OVER (ORDER BY k)
-FROM (VALUES (1,1), (2,2)) a(k,v);
- a | json_objectagg
----------------+----------------------
- {"k":1,"v":1} | { "1" : 1 }
- {"k":2,"v":2} | { "1" : 1, "2" : 2 }
-(2 rows)
-
-SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v WITH UNIQUE KEYS) OVER (ORDER BY k)
-FROM (VALUES (1,1), (1,2), (2,2)) a(k,v);
-ERROR: duplicate JSON object key value: "1"
-SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v ABSENT ON NULL WITH UNIQUE KEYS)
- OVER (ORDER BY k)
-FROM (VALUES (1,1), (1,null), (2,2)) a(k,v);
-ERROR: duplicate JSON object key value: "1"
-SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v ABSENT ON NULL)
-OVER (ORDER BY k)
-FROM (VALUES (1,1), (1,null), (2,2)) a(k,v);
- a | json_objectagg
-------------------+----------------------
- {"k":1,"v":1} | { "1" : 1 }
- {"k":1,"v":null} | { "1" : 1 }
- {"k":2,"v":2} | { "1" : 1, "2" : 2 }
-(3 rows)
-
-SELECT to_json(a) AS a, JSON_OBJECTAGG(k : v ABSENT ON NULL)
-OVER (ORDER BY k RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
-FROM (VALUES (1,1), (1,null), (2,2)) a(k,v);
- a | json_objectagg
-------------------+----------------------
- {"k":1,"v":1} | { "1" : 1, "2" : 2 }
- {"k":1,"v":null} | { "1" : 1, "2" : 2 }
- {"k":2,"v":2} | { "1" : 1, "2" : 2 }
-(3 rows)
-
--- Test JSON_ARRAY deparsing
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json);
- QUERY PLAN
----------------------------------------------------
- Result
- Output: JSON_ARRAY('1'::json, 2 RETURNING json)
-(2 rows)
-
-CREATE VIEW json_array_view AS
-SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json);
-\sv json_array_view
-CREATE OR REPLACE VIEW public.json_array_view AS
- SELECT JSON_ARRAY('1'::text FORMAT JSON, 2 RETURNING json) AS "json_array"
-DROP VIEW json_array_view;
--- Test JSON_OBJECTAGG deparsing
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) FILTER (WHERE i > 3)
-FROM generate_series(1,5) i;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------
- Aggregate
- Output: JSON_OBJECTAGG(i : (('111'::text || (i)::text))::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) FILTER (WHERE (i > 3))
- -> Function Scan on pg_catalog.generate_series i
- Output: i
- Function Call: generate_series(1, 5)
-(5 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) OVER (PARTITION BY i % 2)
-FROM generate_series(1,5) i;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------
- WindowAgg
- Output: JSON_OBJECTAGG(i : (('111'::text || (i)::text))::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) OVER (?), ((i % 2))
- -> Sort
- Output: ((i % 2)), i
- Sort Key: ((i.i % 2))
- -> Function Scan on pg_catalog.generate_series i
- Output: (i % 2), i
- Function Call: generate_series(1, 5)
-(8 rows)
-
-CREATE VIEW json_objectagg_view AS
-SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) FILTER (WHERE i > 3)
-FROM generate_series(1,5) i;
-\sv json_objectagg_view
-CREATE OR REPLACE VIEW public.json_objectagg_view AS
- SELECT JSON_OBJECTAGG(i : ('111'::text || i)::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) FILTER (WHERE i > 3) AS "json_objectagg"
- FROM generate_series(1, 5) i(i)
-DROP VIEW json_objectagg_view;
--- Test JSON_ARRAYAGG deparsing
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3)
-FROM generate_series(1,5) i;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------
- Aggregate
- Output: JSON_ARRAYAGG((('111'::text || (i)::text))::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE (i > 3))
- -> Function Scan on pg_catalog.generate_series i
- Output: i
- Function Call: generate_series(1, 5)
-(5 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) OVER (PARTITION BY i % 2)
-FROM generate_series(1,5) i;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------
- WindowAgg
- Output: JSON_ARRAYAGG((('111'::text || (i)::text))::bytea FORMAT JSON NULL ON NULL RETURNING text) OVER (?), ((i % 2))
- -> Sort
- Output: ((i % 2)), i
- Sort Key: ((i.i % 2))
- -> Function Scan on pg_catalog.generate_series i
- Output: (i % 2), i
- Function Call: generate_series(1, 5)
-(8 rows)
-
-CREATE VIEW json_arrayagg_view AS
-SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3)
-FROM generate_series(1,5) i;
-\sv json_arrayagg_view
-CREATE OR REPLACE VIEW public.json_arrayagg_view AS
- SELECT JSON_ARRAYAGG(('111'::text || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) AS "json_arrayagg"
- FROM generate_series(1, 5) i(i)
-DROP VIEW json_arrayagg_view;
--- Test JSON_ARRAY(subquery) deparsing
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING jsonb);
- QUERY PLAN
----------------------------------------------------------------------
- Result
- Output: (InitPlan 1).col1
- InitPlan 1
- -> Aggregate
- Output: JSON_ARRAYAGG("*VALUES*".column1 RETURNING jsonb)
- -> Values Scan on "*VALUES*"
- Output: "*VALUES*".column1
-(7 rows)
-
-CREATE VIEW json_array_subquery_view AS
-SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING jsonb);
-\sv json_array_subquery_view
-CREATE OR REPLACE VIEW public.json_array_subquery_view AS
- SELECT ( SELECT JSON_ARRAYAGG(q.a RETURNING jsonb) AS "json_arrayagg"
- FROM ( SELECT foo.i
- FROM ( VALUES (1), (2), (NULL::integer), (4)) foo(i)) q(a)) AS "json_array"
-DROP VIEW json_array_subquery_view;
--- IS JSON predicate
-SELECT NULL IS JSON;
- ?column?
-----------
-
-(1 row)
-
-SELECT NULL IS NOT JSON;
- ?column?
-----------
-
-(1 row)
-
-SELECT NULL::json IS JSON;
- ?column?
-----------
-
-(1 row)
-
-SELECT NULL::jsonb IS JSON;
- ?column?
-----------
-
-(1 row)
-
-SELECT NULL::text IS JSON;
- ?column?
-----------
-
-(1 row)
-
-SELECT NULL::bytea IS JSON;
- ?column?
-----------
-
-(1 row)
-
-SELECT NULL::int IS JSON;
-ERROR: cannot use type integer in IS JSON predicate
-SELECT '' IS JSON;
- ?column?
-----------
- f
-(1 row)
-
-SELECT bytea '\x00' IS JSON;
-ERROR: invalid byte sequence for encoding "UTF8": 0x00
-CREATE TABLE test_is_json (js text);
-INSERT INTO test_is_json VALUES
- (NULL),
- (''),
- ('123'),
- ('"aaa "'),
- ('true'),
- ('null'),
- ('[]'),
- ('[1, "2", {}]'),
- ('{}'),
- ('{ "a": 1, "b": null }'),
- ('{ "a": 1, "a": null }'),
- ('{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] }'),
- ('{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] }'),
- ('aaa'),
- ('{a:1}'),
- ('["a",]');
-SELECT
- js,
- js IS JSON "IS JSON",
- js IS NOT JSON "IS NOT JSON",
- js IS JSON VALUE "IS VALUE",
- js IS JSON OBJECT "IS OBJECT",
- js IS JSON ARRAY "IS ARRAY",
- js IS JSON SCALAR "IS SCALAR",
- js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
- js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
-FROM
- test_is_json;
- js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
------------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+-------------
- | | | | | | | |
- | f | t | f | f | f | f | f | f
- 123 | t | f | t | f | f | t | t | t
- "aaa " | t | f | t | f | f | t | t | t
- true | t | f | t | f | f | t | t | t
- null | t | f | t | f | f | t | t | t
- [] | t | f | t | f | t | f | t | t
- [1, "2", {}] | t | f | t | f | t | f | t | t
- {} | t | f | t | t | f | f | t | t
- { "a": 1, "b": null } | t | f | t | t | f | f | t | t
- { "a": 1, "a": null } | t | f | t | t | f | f | t | f
- { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
- { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
- aaa | f | t | f | f | f | f | f | f
- {a:1} | f | t | f | f | f | f | f | f
- ["a",] | f | t | f | f | f | f | f | f
-(16 rows)
-
-SELECT
- js,
- js IS JSON "IS JSON",
- js IS NOT JSON "IS NOT JSON",
- js IS JSON VALUE "IS VALUE",
- js IS JSON OBJECT "IS OBJECT",
- js IS JSON ARRAY "IS ARRAY",
- js IS JSON SCALAR "IS SCALAR",
- js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
- js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
-FROM
- (SELECT js::json FROM test_is_json WHERE js IS JSON) foo(js);
- js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
------------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+-------------
- 123 | t | f | t | f | f | t | t | t
- "aaa " | t | f | t | f | f | t | t | t
- true | t | f | t | f | f | t | t | t
- null | t | f | t | f | f | t | t | t
- [] | t | f | t | f | t | f | t | t
- [1, "2", {}] | t | f | t | f | t | f | t | t
- {} | t | f | t | t | f | f | t | t
- { "a": 1, "b": null } | t | f | t | t | f | f | t | t
- { "a": 1, "a": null } | t | f | t | t | f | f | t | f
- { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
- { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
-(11 rows)
-
-SELECT
- js0,
- js IS JSON "IS JSON",
- js IS NOT JSON "IS NOT JSON",
- js IS JSON VALUE "IS VALUE",
- js IS JSON OBJECT "IS OBJECT",
- js IS JSON ARRAY "IS ARRAY",
- js IS JSON SCALAR "IS SCALAR",
- js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
- js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
-FROM
- (SELECT js, js::bytea FROM test_is_json WHERE js IS JSON) foo(js0, js);
- js0 | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
------------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+-------------
- 123 | t | f | t | f | f | t | t | t
- "aaa " | t | f | t | f | f | t | t | t
- true | t | f | t | f | f | t | t | t
- null | t | f | t | f | f | t | t | t
- [] | t | f | t | f | t | f | t | t
- [1, "2", {}] | t | f | t | f | t | f | t | t
- {} | t | f | t | t | f | f | t | t
- { "a": 1, "b": null } | t | f | t | t | f | f | t | t
- { "a": 1, "a": null } | t | f | t | t | f | f | t | f
- { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
- { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
-(11 rows)
-
-SELECT
- js,
- js IS JSON "IS JSON",
- js IS NOT JSON "IS NOT JSON",
- js IS JSON VALUE "IS VALUE",
- js IS JSON OBJECT "IS OBJECT",
- js IS JSON ARRAY "IS ARRAY",
- js IS JSON SCALAR "IS SCALAR",
- js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
- js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
-FROM
- (SELECT js::jsonb FROM test_is_json WHERE js IS JSON) foo(js);
- js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
--------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+-------------
- 123 | t | f | t | f | f | t | t | t
- "aaa " | t | f | t | f | f | t | t | t
- true | t | f | t | f | f | t | t | t
- null | t | f | t | f | f | t | t | t
- [] | t | f | t | f | t | f | t | t
- [1, "2", {}] | t | f | t | f | t | f | t | t
- {} | t | f | t | t | f | f | t | t
- {"a": 1, "b": null} | t | f | t | t | f | f | t | t
- {"a": null} | t | f | t | t | f | f | t | t
- {"a": 1, "b": [{"a": 1}, {"a": 2}]} | t | f | t | t | f | f | t | t
- {"a": 1, "b": [{"a": 2, "b": 0}]} | t | f | t | t | f | f | t | t
-(11 rows)
-
--- Test IS JSON deparsing
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS "scalar", '[]' IS NOT JSON ARRAY AS "array", '{}' IS JSON OBJECT WITH UNIQUE AS "object" FROM generate_series(1, 3) i;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------
- Function Scan on pg_catalog.generate_series i
- Output: ('1'::text IS JSON), (('1'::text || (i)::text) IS JSON SCALAR), (NOT ('[]'::text IS JSON ARRAY)), ('{}'::text IS JSON OBJECT WITH UNIQUE KEYS)
- Function Call: generate_series(1, 3)
-(3 rows)
-
-CREATE VIEW is_json_view AS
-SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS "scalar", '[]' IS NOT JSON ARRAY AS "array", '{}' IS JSON OBJECT WITH UNIQUE AS "object" FROM generate_series(1, 3) i;
-\sv is_json_view
-CREATE OR REPLACE VIEW public.is_json_view AS
- SELECT '1'::text IS JSON AS "any",
- ('1'::text || i) IS JSON SCALAR AS scalar,
- NOT '[]'::text IS JSON ARRAY AS "array",
- '{}'::text IS JSON OBJECT WITH UNIQUE KEYS AS object
- FROM generate_series(1, 3) i(i)
-DROP VIEW is_json_view;
--- Test implicit coercion to a fixed-length type specified in RETURNING
-SELECT JSON_SERIALIZE('{ "a" : 1 } ' RETURNING varchar(2));
-ERROR: value too long for type character varying(2)
-SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING varchar(2)));
-ERROR: value too long for type character varying(2)
-SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' RETURNING varchar(2)));
-ERROR: value too long for type character varying(2)
-SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING varchar(2)) FROM generate_series(1,1) i;
-ERROR: value too long for type character varying(2)
-SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING varchar(2)) FROM generate_series(1, 1) i;
-ERROR: value too long for type character varying(2)
--- Now try domain over fixed-length type
-CREATE DOMAIN sqljson_char2 AS char(2) CHECK (VALUE NOT IN ('12'));
-SELECT JSON_SERIALIZE('123' RETURNING sqljson_char2);
-ERROR: value too long for type character(2)
-SELECT JSON_SERIALIZE('12' RETURNING sqljson_char2);
-ERROR: value for domain sqljson_char2 violates check constraint "sqljson_char2_check"
--- Bug #18657: JsonValueExpr.raw_expr was not initialized in ExecInitExprRec()
--- causing the Aggrefs contained in it to also not be initialized, which led
--- to a crash in ExecBuildAggTrans() as mentioned in the bug report:
--- https://postgr.es/m/18657-1b90ccce2b16bdb8@postgresql.org
-CREATE FUNCTION volatile_one() RETURNS int AS $$ BEGIN RETURN 1; END; $$ LANGUAGE plpgsql VOLATILE;
-CREATE FUNCTION stable_one() RETURNS int AS $$ BEGIN RETURN 1; END; $$ LANGUAGE plpgsql STABLE;
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': volatile_one() RETURNING text) FORMAT JSON);
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------
- Aggregate
- Output: JSON_OBJECT('a' : JSON_OBJECTAGG('b' : volatile_one() RETURNING text) FORMAT JSON RETURNING json)
- -> Result
-(3 rows)
-
-SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': volatile_one() RETURNING text) FORMAT JSON);
- json_object
----------------------
- {"a" : { "b" : 1 }}
-(1 row)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': stable_one() RETURNING text) FORMAT JSON);
- QUERY PLAN
------------------------------------------------------------------------------------------------------------
- Aggregate
- Output: JSON_OBJECT('a' : JSON_OBJECTAGG('b' : stable_one() RETURNING text) FORMAT JSON RETURNING json)
- -> Result
-(3 rows)
-
-SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': stable_one() RETURNING text) FORMAT JSON);
- json_object
----------------------
- {"a" : { "b" : 1 }}
-(1 row)
-
-EXPLAIN (VERBOSE, COSTS OFF) SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': 1 RETURNING text) FORMAT JSON);
- QUERY PLAN
-------------------------------------------------------------------------------------------------
- Aggregate
- Output: JSON_OBJECT('a' : JSON_OBJECTAGG('b' : 1 RETURNING text) FORMAT JSON RETURNING json)
- -> Result
-(3 rows)
-
-SELECT JSON_OBJECT('a': JSON_OBJECTAGG('b': 1 RETURNING text) FORMAT JSON);
- json_object
----------------------
- {"a" : { "b" : 1 }}
-(1 row)
-
-DROP FUNCTION volatile_one, stable_one;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/sqljson_queryfuncs.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/sqljson_queryfuncs.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/sqljson_queryfuncs.out 2024-11-15 02:50:52.502029300 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/sqljson_queryfuncs.out 2024-11-15 02:59:17.849116529 +0000
@@ -1,1452 +1,2 @@
--- JSON_EXISTS
-SELECT JSON_EXISTS(NULL::jsonb, '$');
- json_exists
--------------
-
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '[]', '$');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(JSON_OBJECT(RETURNING jsonb), '$');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '1', '$');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb 'null', '$');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '[]', '$');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '1', '$.a');
- json_exists
--------------
- f
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '1', 'strict $.a'); -- FALSE on error
- json_exists
--------------
- f
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '1', 'strict $.a' ERROR ON ERROR);
-ERROR: jsonpath member accessor can only be applied to an object
-SELECT JSON_EXISTS(jsonb 'null', '$.a');
- json_exists
--------------
- f
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '[]', '$.a');
- json_exists
--------------
- f
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '[1, "aaa", {"a": 1}]', 'strict $.a'); -- FALSE on error
- json_exists
--------------
- f
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '[1, "aaa", {"a": 1}]', 'lax $.a');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '{}', '$.a');
- json_exists
--------------
- f
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '{"b": 1, "a": 2}', '$.a');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '1', '$.a.b');
- json_exists
--------------
- f
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '{"a": {"b": 1}}', '$.a.b');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.a.b');
- json_exists
--------------
- f
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.* ? (@ > $x)' PASSING 1 AS x);
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.* ? (@ > $x)' PASSING '1' AS x);
- json_exists
--------------
- f
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.* ? (@ > $x && @ < $y)' PASSING 0 AS x, 2 AS y);
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '{"a": 1, "b": 2}', '$.* ? (@ > $x && @ < $y)' PASSING 0 AS x, 1 AS y);
- json_exists
--------------
- f
-(1 row)
-
--- extension: boolean expressions
-SELECT JSON_EXISTS(jsonb '1', '$ > 2');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_EXISTS(jsonb '1', '$.a > 2' ERROR ON ERROR);
- json_exists
--------------
- t
-(1 row)
-
--- JSON_VALUE
-SELECT JSON_VALUE(NULL::jsonb, '$');
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$');
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$' RETURNING int);
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'true', '$');
- json_value
-------------
- t
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'true', '$' RETURNING bool);
- json_value
-------------
- t
-(1 row)
-
-SELECT JSON_VALUE(jsonb '123', '$');
- json_value
-------------
- 123
-(1 row)
-
-SELECT JSON_VALUE(jsonb '123', '$' RETURNING int) + 234;
- ?column?
-----------
- 357
-(1 row)
-
-SELECT JSON_VALUE(jsonb '123', '$' RETURNING text);
- json_value
-------------
- 123
-(1 row)
-
-/* jsonb bytea ??? */
-SELECT JSON_VALUE(jsonb '123', '$' RETURNING bytea ERROR ON ERROR);
- json_value
-------------
- \x313233
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1.23', '$');
- json_value
-------------
- 1.23
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1.23', '$' RETURNING int);
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"1.23"', '$' RETURNING numeric);
- json_value
-------------
- 1.23
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"1.23"', '$' RETURNING int ERROR ON ERROR);
-ERROR: invalid input syntax for type integer: "1.23"
-SELECT JSON_VALUE(jsonb '"aaa"', '$');
- json_value
-------------
- aaa
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING text);
- json_value
-------------
- aaa
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING char(5));
- json_value
-------------
- aaa
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING char(2) ERROR ON ERROR);
-ERROR: value too long for type character(2)
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING char(2));
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING char(3) ERROR ON ERROR);
- json_value
-------------
- aaa
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING json);
- json_value
-------------
- "aaa"
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING jsonb);
- json_value
-------------
- "aaa"
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING json ERROR ON ERROR);
- json_value
-------------
- "aaa"
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING jsonb ERROR ON ERROR);
- json_value
-------------
- "aaa"
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"\"aaa\""', '$' RETURNING json);
- json_value
-------------
- "\"aaa\""
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"\"aaa\""', '$' RETURNING jsonb);
- json_value
-------------
- "\"aaa\""
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING int);
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING int ERROR ON ERROR);
-ERROR: invalid input syntax for type integer: "aaa"
-SELECT JSON_VALUE(jsonb '"aaa"', '$' RETURNING int DEFAULT 111 ON ERROR);
- json_value
-------------
- 111
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"123"', '$' RETURNING int) + 234;
- ?column?
-----------
- 357
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"2017-02-20"', '$' RETURNING date) + 9;
- ?column?
-------------
- 03-01-2017
-(1 row)
-
--- Test NULL checks execution in domain types
-CREATE DOMAIN sqljsonb_int_not_null AS int NOT NULL;
-SELECT JSON_VALUE(jsonb 'null', '$' RETURNING sqljsonb_int_not_null);
-ERROR: could not coerce ON ERROR expression (NULL) to the RETURNING type
-DETAIL: domain sqljsonb_int_not_null does not allow null values
-SELECT JSON_VALUE(jsonb 'null', '$' RETURNING sqljsonb_int_not_null ERROR ON ERROR);
-ERROR: domain sqljsonb_int_not_null does not allow null values
-SELECT JSON_VALUE(jsonb 'null', '$' RETURNING sqljsonb_int_not_null DEFAULT 2 ON EMPTY ERROR ON ERROR);
-ERROR: domain sqljsonb_int_not_null does not allow null values
-SELECT JSON_VALUE(jsonb '1', '$.a' RETURNING sqljsonb_int_not_null DEFAULT 2 ON EMPTY ERROR ON ERROR);
- json_value
-------------
- 2
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1', '$.a' RETURNING sqljsonb_int_not_null DEFAULT NULL ON EMPTY ERROR ON ERROR);
-ERROR: domain sqljsonb_int_not_null does not allow null values
-CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple');
-CREATE DOMAIN rgb AS rainbow CHECK (VALUE IN ('red', 'green', 'blue'));
-SELECT JSON_VALUE('"purple"'::jsonb, 'lax $[*]' RETURNING rgb);
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE('"purple"'::jsonb, 'lax $[*]' RETURNING rgb ERROR ON ERROR);
-ERROR: value for domain rgb violates check constraint "rgb_check"
-SELECT JSON_VALUE(jsonb '[]', '$');
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb '[]', '$' ERROR ON ERROR);
-ERROR: JSON path expression in JSON_VALUE should return single scalar item
-SELECT JSON_VALUE(jsonb '{}', '$');
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb '{}', '$' ERROR ON ERROR);
-ERROR: JSON path expression in JSON_VALUE should return single scalar item
-SELECT JSON_VALUE(jsonb '1', '$.a');
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1', 'strict $.a' ERROR ON ERROR);
-ERROR: jsonpath member accessor can only be applied to an object
-SELECT JSON_VALUE(jsonb '1', 'strict $.a' DEFAULT 'error' ON ERROR);
- json_value
-------------
- error
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1', 'lax $.a' ERROR ON ERROR); -- NULL ON EMPTY
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1', 'lax $.a' ERROR ON EMPTY ERROR ON ERROR);
-ERROR: no SQL/JSON item found for specified path
-SELECT JSON_VALUE(jsonb '1', 'strict $.*' DEFAULT 2 ON ERROR);
- json_value
-------------
- 2
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1', 'lax $.a' DEFAULT 2 ON ERROR);
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1', 'lax $.a' DEFAULT '2' ON EMPTY);
- json_value
-------------
- 2
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1', 'lax $.a' NULL ON EMPTY DEFAULT '2' ON ERROR);
- json_value
-------------
-
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1', 'lax $.a' DEFAULT '2' ON EMPTY DEFAULT '3' ON ERROR);
- json_value
-------------
- 2
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1', 'lax $.a' ERROR ON EMPTY DEFAULT '3' ON ERROR);
-ERROR: no SQL/JSON item found for specified path
-SELECT JSON_VALUE(jsonb '[1,2]', '$[*]' ERROR ON ERROR);
-ERROR: JSON path expression in JSON_VALUE should return single scalar item
-SELECT JSON_VALUE(jsonb '[1,2]', '$[*]' DEFAULT '0' ON ERROR);
- json_value
-------------
- 0
-(1 row)
-
-SELECT JSON_VALUE(jsonb '[" "]', '$[*]' RETURNING int ERROR ON ERROR);
-ERROR: invalid input syntax for type integer: " "
-SELECT JSON_VALUE(jsonb '[" "]', '$[*]' RETURNING int DEFAULT 2 + 3 ON ERROR);
- json_value
-------------
- 5
-(1 row)
-
-SELECT JSON_VALUE(jsonb '["1"]', '$[*]' RETURNING int DEFAULT 2 + 3 ON ERROR);
- json_value
-------------
- 1
-(1 row)
-
-SELECT JSON_VALUE(jsonb '["1"]', '$[*]' RETURNING int FORMAT JSON); -- RETURNING FORMAT not allowed
-ERROR: cannot specify FORMAT JSON in RETURNING clause of JSON_VALUE()
-LINE 1: ...CT JSON_VALUE(jsonb '["1"]', '$[*]' RETURNING int FORMAT JSO...
- ^
--- RETUGNING pseudo-types not allowed
-SELECT JSON_VALUE(jsonb '["1"]', '$[*]' RETURNING record);
-ERROR: returning pseudo-types is not supported in SQL/JSON functions
-SELECT
- x,
- JSON_VALUE(
- jsonb '{"a": 1, "b": 2}',
- '$.* ? (@ > $x)' PASSING x AS x
- RETURNING int
- DEFAULT -1 ON EMPTY
- DEFAULT -2 ON ERROR
- ) y
-FROM
- generate_series(0, 2) x;
- x | y
----+----
- 0 | -2
- 1 | 2
- 2 | -1
-(3 rows)
-
-SELECT JSON_VALUE(jsonb 'null', '$a' PASSING point ' (1, 2 )' AS a);
- json_value
-------------
- (1,2)
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$a' PASSING point ' (1, 2 )' AS a RETURNING point);
- json_value
-------------
- (1,2)
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$a' PASSING point ' (1, 2 )' AS a RETURNING point ERROR ON ERROR);
- json_value
-------------
- (1,2)
-(1 row)
-
--- Test PASSING and RETURNING date/time types
-SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts);
- json_value
-------------------------------
- Tue Feb 20 18:34:56 2018 PST
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING timestamptz);
- json_value
-------------------------------
- Tue Feb 20 18:34:56 2018 PST
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING timestamp);
- json_value
---------------------------
- Tue Feb 20 18:34:56 2018
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING date '2018-02-21 12:34:56 +10' AS ts RETURNING date);
- json_value
-------------
- 02-21-2018
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING time '2018-02-21 12:34:56 +10' AS ts RETURNING time);
- json_value
-------------
- 12:34:56
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timetz '2018-02-21 12:34:56 +10' AS ts RETURNING timetz);
- json_value
--------------
- 12:34:56+10
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamp '2018-02-21 12:34:56 +10' AS ts RETURNING timestamp);
- json_value
---------------------------
- Wed Feb 21 12:34:56 2018
-(1 row)
-
--- Also test RETURNING json[b]
-SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING json);
- json_value
------------------------------
- "2018-02-21T02:34:56+00:00"
-(1 row)
-
-SELECT JSON_VALUE(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING jsonb);
- json_value
------------------------------
- "2018-02-21T02:34:56+00:00"
-(1 row)
-
--- Test that numeric JSON values are coerced uniformly
-select json_value('{"a": 1.234}', '$.a' returning int error on error);
-ERROR: invalid input syntax for type integer: "1.234"
-select json_value('{"a": "1.234"}', '$.a' returning int error on error);
-ERROR: invalid input syntax for type integer: "1.234"
--- JSON_QUERY
-SELECT JSON_VALUE(NULL::jsonb, '$');
- json_value
-------------
-
-(1 row)
-
-SELECT
- JSON_QUERY(js, '$') AS "unspec",
- JSON_QUERY(js, '$' WITHOUT WRAPPER) AS "without",
- JSON_QUERY(js, '$' WITH CONDITIONAL WRAPPER) AS "with cond",
- JSON_QUERY(js, '$' WITH UNCONDITIONAL ARRAY WRAPPER) AS "with uncond",
- JSON_QUERY(js, '$' WITH ARRAY WRAPPER) AS "with"
-FROM
- (VALUES
- (jsonb 'null'),
- ('12.3'),
- ('true'),
- ('"aaa"'),
- ('[1, null, "2"]'),
- ('{"a": 1, "b": [2]}')
- ) foo(js);
- unspec | without | with cond | with uncond | with
---------------------+--------------------+--------------------+----------------------+----------------------
- null | null | null | [null] | [null]
- 12.3 | 12.3 | 12.3 | [12.3] | [12.3]
- true | true | true | [true] | [true]
- "aaa" | "aaa" | "aaa" | ["aaa"] | ["aaa"]
- [1, null, "2"] | [1, null, "2"] | [1, null, "2"] | [[1, null, "2"]] | [[1, null, "2"]]
- {"a": 1, "b": [2]} | {"a": 1, "b": [2]} | {"a": 1, "b": [2]} | [{"a": 1, "b": [2]}] | [{"a": 1, "b": [2]}]
-(6 rows)
-
-SELECT
- JSON_QUERY(js, 'strict $[*]') AS "unspec",
- JSON_QUERY(js, 'strict $[*]' WITHOUT WRAPPER) AS "without",
- JSON_QUERY(js, 'strict $[*]' WITH CONDITIONAL WRAPPER) AS "with cond",
- JSON_QUERY(js, 'strict $[*]' WITH UNCONDITIONAL ARRAY WRAPPER) AS "with uncond",
- JSON_QUERY(js, 'strict $[*]' WITH ARRAY WRAPPER) AS "with"
-FROM
- (VALUES
- (jsonb '1'),
- ('[]'),
- ('[null]'),
- ('[12.3]'),
- ('[true]'),
- ('["aaa"]'),
- ('[[1, 2, 3]]'),
- ('[{"a": 1, "b": [2]}]'),
- ('[1, "2", null, [3]]')
- ) foo(js);
- unspec | without | with cond | with uncond | with
---------------------+--------------------+---------------------+----------------------+----------------------
- | | | |
- | | | |
- null | null | null | [null] | [null]
- 12.3 | 12.3 | 12.3 | [12.3] | [12.3]
- true | true | true | [true] | [true]
- "aaa" | "aaa" | "aaa" | ["aaa"] | ["aaa"]
- [1, 2, 3] | [1, 2, 3] | [1, 2, 3] | [[1, 2, 3]] | [[1, 2, 3]]
- {"a": 1, "b": [2]} | {"a": 1, "b": [2]} | {"a": 1, "b": [2]} | [{"a": 1, "b": [2]}] | [{"a": 1, "b": [2]}]
- | | [1, "2", null, [3]] | [1, "2", null, [3]] | [1, "2", null, [3]]
-(9 rows)
-
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text);
- json_query
-------------
- "aaa"
-(1 row)
-
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text KEEP QUOTES);
- json_query
-------------
- "aaa"
-(1 row)
-
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text KEEP QUOTES ON SCALAR STRING);
- json_query
-------------
- "aaa"
-(1 row)
-
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text OMIT QUOTES);
- json_query
-------------
- aaa
-(1 row)
-
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING text OMIT QUOTES ON SCALAR STRING);
- json_query
-------------
- aaa
-(1 row)
-
-SELECT JSON_QUERY(jsonb '"aaa"', '$' OMIT QUOTES ERROR ON ERROR);
-ERROR: invalid input syntax for type json
-DETAIL: Token "aaa" is invalid.
-CONTEXT: JSON data, line 1: aaa
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING json OMIT QUOTES ERROR ON ERROR);
-ERROR: invalid input syntax for type json
-DETAIL: Token "aaa" is invalid.
-CONTEXT: JSON data, line 1: aaa
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING bytea FORMAT JSON OMIT QUOTES ERROR ON ERROR);
- json_query
-------------
- \x616161
-(1 row)
-
--- Behavior when a RETURNING type has typmod != -1
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING char(3) ERROR ON ERROR);
-ERROR: value too long for type character(3)
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING char(3));
- json_query
-------------
-
-(1 row)
-
-SELECT JSON_QUERY(jsonb '"aaa"', '$' RETURNING char(3) OMIT QUOTES ERROR ON ERROR);
- json_query
-------------
- aaa
-(1 row)
-
-SELECT JSON_QUERY(jsonb '"aaa"', '$.a' RETURNING char(2) OMIT QUOTES DEFAULT 'bb' ON EMPTY);
- json_query
-------------
- bb
-(1 row)
-
-SELECT JSON_QUERY(jsonb '"aaa"', '$.a' RETURNING char(2) OMIT QUOTES DEFAULT '"bb"'::jsonb ON EMPTY);
- json_query
-------------
- bb
-(1 row)
-
--- OMIT QUOTES behavior should not be specified when WITH WRAPPER used:
--- Should fail
-SELECT JSON_QUERY(jsonb '[1]', '$' WITH WRAPPER OMIT QUOTES);
-ERROR: SQL/JSON QUOTES behavior must not be specified when WITH WRAPPER is used
-LINE 1: SELECT JSON_QUERY(jsonb '[1]', '$' WITH WRAPPER OMIT QUOTES)...
- ^
-SELECT JSON_QUERY(jsonb '[1]', '$' WITH CONDITIONAL WRAPPER OMIT QUOTES);
-ERROR: SQL/JSON QUOTES behavior must not be specified when WITH WRAPPER is used
-LINE 1: SELECT JSON_QUERY(jsonb '[1]', '$' WITH CONDITIONAL WRAPPER ...
- ^
--- Should succeed
-SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITH CONDITIONAL WRAPPER KEEP QUOTES);
- json_query
-------------
- "1"
-(1 row)
-
-SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITH UNCONDITIONAL WRAPPER KEEP QUOTES);
- json_query
-------------
- ["1"]
-(1 row)
-
-SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITH WRAPPER KEEP QUOTES);
- json_query
-------------
- ["1"]
-(1 row)
-
-SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITHOUT WRAPPER OMIT QUOTES);
- json_query
-------------
- 1
-(1 row)
-
-SELECT JSON_QUERY(jsonb '["1"]', '$[*]' WITHOUT WRAPPER KEEP QUOTES);
- json_query
-------------
- "1"
-(1 row)
-
--- test QUOTES behavior.
-SELECT JSON_QUERY(jsonb'{"rec": "{1,2,3}"}', '$.rec' returning int[] omit quotes);
- json_query
-------------
- {1,2,3}
-(1 row)
-
-SELECT JSON_QUERY(jsonb'{"rec": "{1,2,3}"}', '$.rec' returning int[] keep quotes);
- json_query
-------------
-
-(1 row)
-
-SELECT JSON_QUERY(jsonb'{"rec": "{1,2,3}"}', '$.rec' returning int[] keep quotes error on error);
-ERROR: expected JSON array
-SELECT JSON_QUERY(jsonb'{"rec": "[1,2]"}', '$.rec' returning int4range omit quotes);
- json_query
-------------
- [1,3)
-(1 row)
-
-SELECT JSON_QUERY(jsonb'{"rec": "[1,2]"}', '$.rec' returning int4range keep quotes);
- json_query
-------------
-
-(1 row)
-
-SELECT JSON_QUERY(jsonb'{"rec": "[1,2]"}', '$.rec' returning int4range keep quotes error on error);
-ERROR: malformed range literal: ""[1,2]""
-DETAIL: Missing left parenthesis or bracket.
-CREATE DOMAIN qf_char_domain AS char(1);
-CREATE DOMAIN qf_jsonb_domain AS jsonb;
-SELECT JSON_QUERY(jsonb '"1"', '$' RETURNING qf_char_domain OMIT QUOTES ERROR ON ERROR);
- json_query
-------------
- 1
-(1 row)
-
-SELECT JSON_QUERY(jsonb '"1"', '$' RETURNING qf_jsonb_domain OMIT QUOTES ERROR ON ERROR);
- json_query
-------------
- 1
-(1 row)
-
-DROP DOMAIN qf_char_domain, qf_jsonb_domain;
-SELECT JSON_QUERY(jsonb '[]', '$[*]');
- json_query
-------------
-
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[]', '$[*]' NULL ON EMPTY);
- json_query
-------------
-
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[]', '$[*]' EMPTY ON EMPTY);
- json_query
-------------
- []
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[]', '$[*]' EMPTY ARRAY ON EMPTY);
- json_query
-------------
- []
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[]', '$[*]' EMPTY OBJECT ON EMPTY);
- json_query
-------------
- {}
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY);
-ERROR: no SQL/JSON item found for specified path
-SELECT JSON_QUERY(jsonb '[]', '$[*]' DEFAULT '"empty"' ON EMPTY);
- json_query
-------------
- "empty"
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY NULL ON ERROR);
-ERROR: no SQL/JSON item found for specified path
-SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY EMPTY ARRAY ON ERROR);
-ERROR: no SQL/JSON item found for specified path
-SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY EMPTY OBJECT ON ERROR);
-ERROR: no SQL/JSON item found for specified path
-SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON EMPTY ERROR ON ERROR);
-ERROR: no SQL/JSON item found for specified path
-SELECT JSON_QUERY(jsonb '[]', '$[*]' ERROR ON ERROR); -- NULL ON EMPTY
- json_query
-------------
-
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' ERROR ON ERROR);
-ERROR: JSON path expression in JSON_QUERY should return single item without wrapper
-HINT: Use the WITH WRAPPER clause to wrap SQL/JSON items into an array.
-SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' DEFAULT '"empty"' ON ERROR);
- json_query
-------------
- "empty"
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING json);
- json_query
-------------
- [1, 2]
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING json FORMAT JSON);
- json_query
-------------
- [1, 2]
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING jsonb);
- json_query
-------------
- [1, 2]
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING jsonb FORMAT JSON);
- json_query
-------------
- [1, 2]
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING text);
- json_query
-------------
- [1, 2]
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING char(10));
- json_query
-------------
- [1, 2]
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING text FORMAT JSON);
- json_query
-------------
- [1, 2]
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING bytea);
- json_query
-----------------
- \x5b312c20325d
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$' RETURNING bytea FORMAT JSON);
- json_query
-----------------
- \x5b312c20325d
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' RETURNING bytea EMPTY OBJECT ON ERROR);
- json_query
-------------
- \x7b7d
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' RETURNING bytea FORMAT JSON EMPTY OBJECT ON ERROR);
- json_query
-------------
- \x7b7d
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' RETURNING json EMPTY OBJECT ON ERROR);
- json_query
-------------
- {}
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2]', '$[*]' RETURNING jsonb EMPTY OBJECT ON ERROR);
- json_query
-------------
- {}
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[3,4]', '$[*]' RETURNING bigint[] EMPTY OBJECT ON ERROR);
-ERROR: could not coerce ON ERROR expression (EMPTY OBJECT) to the RETURNING type
-DETAIL: expected JSON array
-SELECT JSON_QUERY(jsonb '"[3,4]"', '$[*]' RETURNING bigint[] EMPTY OBJECT ON ERROR);
-ERROR: could not coerce ON ERROR expression (EMPTY OBJECT) to the RETURNING type
-DETAIL: expected JSON array
--- Coercion fails with quotes on
-SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING int2 error on error);
-ERROR: invalid input syntax for type smallint: ""123.1""
-SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING int4 error on error);
-ERROR: invalid input syntax for type integer: ""123.1""
-SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING int8 error on error);
-ERROR: invalid input syntax for type bigint: ""123.1""
-SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING bool error on error);
-ERROR: invalid input syntax for type boolean: ""123.1""
-SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING numeric error on error);
-ERROR: invalid input syntax for type numeric: ""123.1""
-SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING real error on error);
-ERROR: invalid input syntax for type real: ""123.1""
-SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING float8 error on error);
-ERROR: invalid input syntax for type double precision: ""123.1""
--- Fine with OMIT QUOTES
-SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING int2 omit quotes error on error);
-ERROR: invalid input syntax for type smallint: "123.1"
-SELECT JSON_QUERY(jsonb '"123.1"', '$' RETURNING float8 omit quotes error on error);
- json_query
-------------
- 123.1
-(1 row)
-
--- RETUGNING pseudo-types not allowed
-SELECT JSON_QUERY(jsonb '[3,4]', '$[*]' RETURNING anyarray EMPTY OBJECT ON ERROR);
-ERROR: returning pseudo-types is not supported in SQL/JSON functions
-SELECT
- x, y,
- JSON_QUERY(
- jsonb '[1,2,3,4,5,null]',
- '$[*] ? (@ >= $x && @ <= $y)'
- PASSING x AS x, y AS y
- WITH CONDITIONAL WRAPPER
- EMPTY ARRAY ON EMPTY
- ) list
-FROM
- generate_series(0, 4) x,
- generate_series(0, 4) y;
- x | y | list
----+---+--------------
- 0 | 0 | []
- 0 | 1 | 1
- 0 | 2 | [1, 2]
- 0 | 3 | [1, 2, 3]
- 0 | 4 | [1, 2, 3, 4]
- 1 | 0 | []
- 1 | 1 | 1
- 1 | 2 | [1, 2]
- 1 | 3 | [1, 2, 3]
- 1 | 4 | [1, 2, 3, 4]
- 2 | 0 | []
- 2 | 1 | []
- 2 | 2 | 2
- 2 | 3 | [2, 3]
- 2 | 4 | [2, 3, 4]
- 3 | 0 | []
- 3 | 1 | []
- 3 | 2 | []
- 3 | 3 | 3
- 3 | 4 | [3, 4]
- 4 | 0 | []
- 4 | 1 | []
- 4 | 2 | []
- 4 | 3 | []
- 4 | 4 | 4
-(25 rows)
-
--- record type returning with quotes behavior.
-CREATE TYPE comp_abc AS (a text, b int, c timestamp);
-SELECT JSON_QUERY(jsonb'{"rec": "(abc,42,01.02.2003)"}', '$.rec' returning comp_abc omit quotes);
- json_query
--------------------------------------
- (abc,42,"Thu Jan 02 00:00:00 2003")
-(1 row)
-
-SELECT JSON_QUERY(jsonb'{"rec": "(abc,42,01.02.2003)"}', '$.rec' returning comp_abc keep quotes);
- json_query
-------------
-
-(1 row)
-
-SELECT JSON_QUERY(jsonb'{"rec": "(abc,42,01.02.2003)"}', '$.rec' returning comp_abc keep quotes error on error);
-ERROR: cannot call populate_composite on a scalar
-DROP TYPE comp_abc;
--- Extension: record types returning
-CREATE TYPE sqljsonb_rec AS (a int, t text, js json, jb jsonb, jsa json[]);
-CREATE TYPE sqljsonb_reca AS (reca sqljsonb_rec[]);
-SELECT JSON_QUERY(jsonb '[{"a": 1, "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING sqljsonb_rec);
- json_query
------------------------------------------------------
- (1,aaa,"[1, ""2"", {}]","{""x"": [1, ""2"", {}]}",)
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[{"a": "a", "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING sqljsonb_rec ERROR ON ERROR);
-ERROR: invalid input syntax for type integer: "a"
-SELECT JSON_QUERY(jsonb '[{"a": "a", "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING sqljsonb_rec);
- json_query
-------------
-
-(1 row)
-
-SELECT * FROM unnest((JSON_QUERY(jsonb '{"jsa": [{"a": 1, "b": ["foo"]}, {"a": 2, "c": {}}, 123]}', '$' RETURNING sqljsonb_rec)).jsa);
- unnest
-------------------------
- {"a": 1, "b": ["foo"]}
- {"a": 2, "c": {}}
- 123
-(3 rows)
-
-SELECT * FROM unnest((JSON_QUERY(jsonb '{"reca": [{"a": 1, "t": ["foo", []]}, {"a": 2, "jb": [{}, true]}]}', '$' RETURNING sqljsonb_reca)).reca);
- a | t | js | jb | jsa
----+-------------+----+------------+-----
- 1 | ["foo", []] | | |
- 2 | | | [{}, true] |
-(2 rows)
-
-SELECT JSON_QUERY(jsonb '[{"a": 1, "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING jsonpath);
- json_query
-------------
-
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[{"a": 1, "b": "foo", "t": "aaa", "js": [1, "2", {}], "jb": {"x": [1, "2", {}]}}, {"a": 2}]', '$[0]' RETURNING jsonpath ERROR ON ERROR);
-ERROR: syntax error at or near "{" of jsonpath input
--- Extension: array types returning
-SELECT JSON_QUERY(jsonb '[1,2,null,"3"]', '$[*]' RETURNING int[] WITH WRAPPER);
- json_query
---------------
- {1,2,NULL,3}
-(1 row)
-
-SELECT JSON_QUERY(jsonb '[1,2,null,"a"]', '$[*]' RETURNING int[] WITH WRAPPER ERROR ON ERROR);
-ERROR: invalid input syntax for type integer: "a"
-SELECT JSON_QUERY(jsonb '[1,2,null,"a"]', '$[*]' RETURNING int[] WITH WRAPPER);
- json_query
-------------
-
-(1 row)
-
-SELECT * FROM unnest(JSON_QUERY(jsonb '[{"a": 1, "t": ["foo", []]}, {"a": 2, "jb": [{}, true]}]', '$' RETURNING sqljsonb_rec[]));
- a | t | js | jb | jsa
----+-------------+----+------------+-----
- 1 | ["foo", []] | | |
- 2 | | | [{}, true] |
-(2 rows)
-
--- Extension: domain types returning
-SELECT JSON_QUERY(jsonb '{"a": 1}', '$.a' RETURNING sqljsonb_int_not_null);
- json_query
-------------
- 1
-(1 row)
-
-SELECT JSON_QUERY(jsonb '{"a": 1}', '$.b' RETURNING sqljsonb_int_not_null);
-ERROR: could not coerce ON EMPTY expression (NULL) to the RETURNING type
-DETAIL: domain sqljsonb_int_not_null does not allow null values
-SELECT JSON_QUERY(jsonb '{"a": 1}', '$.b' RETURNING sqljsonb_int_not_null ERROR ON EMPTY ERROR ON ERROR);
-ERROR: no SQL/JSON item found for specified path
--- Test timestamptz passing and output
-SELECT JSON_QUERY(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts);
- json_query
------------------------------
- "2018-02-21T02:34:56+00:00"
-(1 row)
-
-SELECT JSON_QUERY(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING json);
- json_query
------------------------------
- "2018-02-21T02:34:56+00:00"
-(1 row)
-
-SELECT JSON_QUERY(jsonb 'null', '$ts' PASSING timestamptz '2018-02-21 12:34:56 +10' AS ts RETURNING jsonb);
- json_query
------------------------------
- "2018-02-21T02:34:56+00:00"
-(1 row)
-
--- Test constraints
-CREATE TABLE test_jsonb_constraints (
- js text,
- i int,
- x jsonb DEFAULT JSON_QUERY(jsonb '[1,2]', '$[*]' WITH WRAPPER)
- CONSTRAINT test_jsonb_constraint1
- CHECK (js IS JSON)
- CONSTRAINT test_jsonb_constraint2
- CHECK (JSON_EXISTS(js::jsonb, '$.a' PASSING i + 5 AS int, i::text AS txt, array[1,2,3] as arr))
- CONSTRAINT test_jsonb_constraint3
- CHECK (JSON_VALUE(js::jsonb, '$.a' RETURNING int DEFAULT '12' ON EMPTY ERROR ON ERROR) > i)
- CONSTRAINT test_jsonb_constraint4
- CHECK (JSON_QUERY(js::jsonb, '$.a' WITH CONDITIONAL WRAPPER EMPTY OBJECT ON ERROR) = jsonb '[10]')
- CONSTRAINT test_jsonb_constraint5
- CHECK (JSON_QUERY(js::jsonb, '$.a' RETURNING char(5) OMIT QUOTES EMPTY ARRAY ON EMPTY) > 'a' COLLATE "C")
-);
-\d test_jsonb_constraints
- Table "public.test_jsonb_constraints"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+--------------------------------------------------------------------------------------------
- js | text | | |
- i | integer | | |
- x | jsonb | | | JSON_QUERY('[1, 2]'::jsonb, '$[*]' RETURNING jsonb WITH UNCONDITIONAL WRAPPER KEEP QUOTES)
-Check constraints:
- "test_jsonb_constraint1" CHECK (js IS JSON)
- "test_jsonb_constraint2" CHECK (JSON_EXISTS(js::jsonb, '$."a"' PASSING i + 5 AS int, i::text AS txt, ARRAY[1, 2, 3] AS arr))
- "test_jsonb_constraint3" CHECK (JSON_VALUE(js::jsonb, '$."a"' RETURNING integer DEFAULT 12 ON EMPTY ERROR ON ERROR) > i)
- "test_jsonb_constraint4" CHECK (JSON_QUERY(js::jsonb, '$."a"' RETURNING jsonb WITH CONDITIONAL WRAPPER KEEP QUOTES EMPTY OBJECT ON ERROR) = '[10]'::jsonb)
- "test_jsonb_constraint5" CHECK (JSON_QUERY(js::jsonb, '$."a"' RETURNING character(5) WITHOUT WRAPPER OMIT QUOTES EMPTY ARRAY ON EMPTY) > ('a'::bpchar COLLATE "C"))
-
-SELECT check_clause
-FROM information_schema.check_constraints
-WHERE constraint_name LIKE 'test_jsonb_constraint%'
-ORDER BY 1;
- check_clause
-----------------------------------------------------------------------------------------------------------------------------------------
- (JSON_QUERY((js)::jsonb, '$."a"' RETURNING character(5) WITHOUT WRAPPER OMIT QUOTES EMPTY ARRAY ON EMPTY) > ('a'::bpchar COLLATE "C"))
- (JSON_QUERY((js)::jsonb, '$."a"' RETURNING jsonb WITH CONDITIONAL WRAPPER KEEP QUOTES EMPTY OBJECT ON ERROR) = '[10]'::jsonb)
- (JSON_VALUE((js)::jsonb, '$."a"' RETURNING integer DEFAULT 12 ON EMPTY ERROR ON ERROR) > i)
- (js IS JSON)
- JSON_EXISTS((js)::jsonb, '$."a"' PASSING (i + 5) AS int, (i)::text AS txt, ARRAY[1, 2, 3] AS arr)
-(5 rows)
-
-SELECT pg_get_expr(adbin, adrelid)
-FROM pg_attrdef
-WHERE adrelid = 'test_jsonb_constraints'::regclass
-ORDER BY 1;
- pg_get_expr
---------------------------------------------------------------------------------------------
- JSON_QUERY('[1, 2]'::jsonb, '$[*]' RETURNING jsonb WITH UNCONDITIONAL WRAPPER KEEP QUOTES)
-(1 row)
-
-INSERT INTO test_jsonb_constraints VALUES ('', 1);
-ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint1"
-DETAIL: Failing row contains (, 1, [1, 2]).
-INSERT INTO test_jsonb_constraints VALUES ('1', 1);
-ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint2"
-DETAIL: Failing row contains (1, 1, [1, 2]).
-INSERT INTO test_jsonb_constraints VALUES ('[]');
-ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint2"
-DETAIL: Failing row contains ([], null, [1, 2]).
-INSERT INTO test_jsonb_constraints VALUES ('{"b": 1}', 1);
-ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint2"
-DETAIL: Failing row contains ({"b": 1}, 1, [1, 2]).
-INSERT INTO test_jsonb_constraints VALUES ('{"a": 1}', 1);
-ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint3"
-DETAIL: Failing row contains ({"a": 1}, 1, [1, 2]).
-INSERT INTO test_jsonb_constraints VALUES ('{"a": 10}', 1);
-ERROR: new row for relation "test_jsonb_constraints" violates check constraint "test_jsonb_constraint4"
-DETAIL: Failing row contains ({"a": 10}, 1, [1, 2]).
-DROP TABLE test_jsonb_constraints;
--- Test mutabilily of query functions
-CREATE TABLE test_jsonb_mutability(js jsonb, b int);
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$'));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a[0]'));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.time()'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.date()'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.time_tz()'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.timestamp()'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.timestamp_tz()'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.date() < $.time_tz())'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.date() < $.time())'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.time() < $.time())'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.time() < $.time_tz())'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp() < $.timestamp_tz())'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp_tz() < $.timestamp_tz())'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.time() < $.datetime("HH:MI TZH"))'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.date() < $.datetime("HH:MI TZH"))'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp() < $.datetime("HH:MI TZH"))'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp() < $.datetime("HH:MI"))'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp_tz() < $.datetime("HH:MI TZH"))'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp_tz() < $.datetime("HH:MI"))'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.date() < $x' PASSING '12:34'::timetz AS x));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.date() < $x' PASSING '1234'::int AS x));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.timestamp(2) < $.timestamp(3))'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime()'));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@ < $.datetime())'));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime() < $.datetime())'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime() < $.datetime("HH:MI TZH"))'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime("HH:MI TZH") < $.datetime("HH:MI TZH"))'));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime("HH:MI") < $.datetime("YY-MM-DD HH:MI"))'));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.a ? (@.datetime("HH:MI TZH") < $.datetime("YY-MM-DD HH:MI"))'));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime("HH:MI TZH") < $x' PASSING '12:34'::timetz AS x));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime("HH:MI TZH") < $y' PASSING '12:34'::timetz AS x));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime() < $x' PASSING '12:34'::timetz AS x));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime() < $x' PASSING '1234'::int AS x));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime() ? (@ == $x)' PASSING '12:34'::time AS x));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$.datetime("YY-MM-DD") ? (@ == $x)' PASSING '2020-07-14'::date AS x));
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$[1, $.a ? (@.datetime() == $x)]' PASSING '12:34'::time AS x));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$[1, 0 to $.a ? (@.datetime() == $x)]' PASSING '12:34'::time AS x));
-ERROR: functions in index expression must be marked IMMUTABLE
-CREATE INDEX ON test_jsonb_mutability (JSON_QUERY(js, '$[1, $.a ? (@.datetime("HH:MI") == $x)]' PASSING '12:34'::time AS x));
-CREATE INDEX ON test_jsonb_mutability (JSON_VALUE(js, '$' DEFAULT random()::int ON ERROR));
-ERROR: functions in index expression must be marked IMMUTABLE
--- DEFAULT expression
-CREATE OR REPLACE FUNCTION ret_setint() RETURNS SETOF integer AS
-$$
-BEGIN
- RETURN QUERY EXECUTE 'select 1 union all select 1';
-END;
-$$
-LANGUAGE plpgsql IMMUTABLE;
-SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT ret_setint() ON ERROR) FROM test_jsonb_mutability;
-ERROR: DEFAULT expression must not return a set
-LINE 1: SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT ret_setint(...
- ^
-SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT b + 1 ON ERROR) FROM test_jsonb_mutability;
-ERROR: DEFAULT expression must not contain column references
-LINE 1: SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT b + 1 ON ER...
- ^
-SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT sum(1) over() ON ERROR) FROM test_jsonb_mutability;
-ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT
-LINE 1: SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT sum(1) over...
- ^
-SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT (SELECT 1) ON ERROR) FROM test_jsonb_mutability;
-ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT
-LINE 1: SELECT JSON_QUERY(js, '$' RETURNING int DEFAULT (SELECT 1) ...
- ^
-DROP TABLE test_jsonb_mutability;
-DROP FUNCTION ret_setint;
-CREATE DOMAIN queryfuncs_test_domain AS text CHECK (value <> 'foo');
-SELECT JSON_VALUE(jsonb '{"d1": "H"}', '$.a2' RETURNING queryfuncs_test_domain DEFAULT 'foo'::queryfuncs_test_domain ON EMPTY);
-ERROR: could not coerce ON EMPTY expression (DEFAULT) to the RETURNING type
-DETAIL: value for domain queryfuncs_test_domain violates check constraint "queryfuncs_test_domain_check"
-SELECT JSON_VALUE(jsonb '{"d1": "H"}', '$.a2' RETURNING queryfuncs_test_domain DEFAULT 'foo1'::queryfuncs_test_domain ON EMPTY);
- json_value
-------------
- foo1
-(1 row)
-
-SELECT JSON_VALUE(jsonb '{"d1": "H"}', '$.a2' RETURNING queryfuncs_test_domain DEFAULT '"foo1"'::jsonb::text ON EMPTY);
- json_value
-------------
- "foo1"
-(1 row)
-
-SELECT JSON_VALUE(jsonb '{"d1": "foo"}', '$.a2' RETURNING queryfuncs_test_domain DEFAULT 'foo1'::queryfuncs_test_domain ON EMPTY);
- json_value
-------------
- foo1
-(1 row)
-
--- Check the cases where a coercion-related expression is masking an
--- unsupported expressions
--- CoerceViaIO
-SELECT JSON_QUERY('"a"', '$.a' RETURNING int DEFAULT (SELECT '"1"')::jsonb ON ERROR);
-ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT
-LINE 1: ...CT JSON_QUERY('"a"', '$.a' RETURNING int DEFAULT (SELECT '"...
- ^
--- CoerceToDomain
-SELECT JSON_QUERY('"a"', '$.a' RETURNING queryfuncs_test_domain DEFAULT (select '"1"')::queryfuncs_test_domain ON ERROR);
-ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT
-LINE 1: ...', '$.a' RETURNING queryfuncs_test_domain DEFAULT (select '"...
- ^
--- RelabelType
-SELECT JSON_QUERY('"a"', '$.a' RETURNING int DEFAULT (SELECT 1)::oid::int ON ERROR);
-ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT
-LINE 1: ...CT JSON_QUERY('"a"', '$.a' RETURNING int DEFAULT (SELECT 1)...
- ^
--- ArrayCoerceExpr
-SELECT JSON_QUERY('"a"', '$.a' RETURNING int[] DEFAULT (SELECT '{1}')::oid[]::int[] ON ERROR);
-ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT
-LINE 1: ... JSON_QUERY('"a"', '$.a' RETURNING int[] DEFAULT (SELECT '{...
- ^
--- CollateExpr
-SELECT JSON_QUERY('"a"', '$.a' RETURNING int[] DEFAULT (SELECT '{1}')::text COLLATE "C" ON ERROR);
-ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT
-LINE 1: ... JSON_QUERY('"a"', '$.a' RETURNING int[] DEFAULT (SELECT '{...
- ^
--- ConvertRowtypeExpr
-CREATE TABLE someparent (a int);
-CREATE TABLE somechild () INHERITS (someparent);
-SELECT JSON_QUERY('"a"', '$.a' RETURNING someparent DEFAULT (SELECT '(1)')::somechild::someparent ON ERROR);
-ERROR: can only specify a constant, non-aggregate function, or operator expression for DEFAULT
-LINE 1: ..._QUERY('"a"', '$.a' RETURNING someparent DEFAULT (SELECT '(...
- ^
-DROP DOMAIN queryfuncs_test_domain;
-DROP TABLE someparent, somechild;
--- Extension: non-constant JSON path
-SELECT JSON_EXISTS(jsonb '{"a": 123}', '$' || '.' || 'a');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_VALUE(jsonb '{"a": 123}', '$' || '.' || 'a');
- json_value
-------------
- 123
-(1 row)
-
-SELECT JSON_VALUE(jsonb '{"a": 123}', '$' || '.' || 'b' DEFAULT 'foo' ON EMPTY);
- json_value
-------------
- foo
-(1 row)
-
-SELECT JSON_QUERY(jsonb '{"a": 123}', '$' || '.' || 'a');
- json_query
-------------
- 123
-(1 row)
-
-SELECT JSON_QUERY(jsonb '{"a": 123}', '$' || '.' || 'a' WITH WRAPPER);
- json_query
-------------
- [123]
-(1 row)
-
--- Should fail (invalid path)
-SELECT JSON_QUERY(jsonb '{"a": 123}', 'error' || ' ' || 'error');
-ERROR: syntax error at or near " " of jsonpath input
--- Non-jsonb inputs automatically coerced to jsonb
-SELECT JSON_EXISTS(json '{"a": 123}', '$' || '.' || 'a');
- json_exists
--------------
- t
-(1 row)
-
-SELECT JSON_QUERY(NULL FORMAT JSON, '$');
- json_query
-------------
-
-(1 row)
-
--- Test non-const jsonpath
-CREATE TEMP TABLE jsonpaths (path) AS SELECT '$';
-SELECT json_value('"aaa"', path RETURNING json) FROM jsonpaths;
- json_value
-------------
- "aaa"
-(1 row)
-
--- Test PASSING argument parsing
-SELECT JSON_QUERY(jsonb 'null', '$xyz' PASSING 1 AS xy);
-ERROR: could not find jsonpath variable "xyz"
-SELECT JSON_QUERY(jsonb 'null', '$xy' PASSING 1 AS xyz);
-ERROR: could not find jsonpath variable "xy"
-SELECT JSON_QUERY(jsonb 'null', '$xyz' PASSING 1 AS xyz);
- json_query
-------------
- 1
-(1 row)
-
--- Test ON ERROR / EMPTY value validity for the function; all fail.
-SELECT JSON_EXISTS(jsonb '1', '$' DEFAULT 1 ON ERROR);
-ERROR: invalid ON ERROR behavior
-LINE 1: SELECT JSON_EXISTS(jsonb '1', '$' DEFAULT 1 ON ERROR);
- ^
-DETAIL: Only ERROR, TRUE, FALSE, or UNKNOWN is allowed in ON ERROR for JSON_EXISTS().
-SELECT JSON_VALUE(jsonb '1', '$' EMPTY ON ERROR);
-ERROR: invalid ON ERROR behavior
-LINE 1: SELECT JSON_VALUE(jsonb '1', '$' EMPTY ON ERROR);
- ^
-DETAIL: Only ERROR, NULL, or DEFAULT expression is allowed in ON ERROR for JSON_VALUE().
-SELECT JSON_QUERY(jsonb '1', '$' TRUE ON ERROR);
-ERROR: invalid ON ERROR behavior
-LINE 1: SELECT JSON_QUERY(jsonb '1', '$' TRUE ON ERROR);
- ^
-DETAIL: Only ERROR, NULL, EMPTY ARRAY, EMPTY OBJECT, or DEFAULT expression is allowed in ON ERROR for JSON_QUERY().
--- Test implicit coercion to a domain over fixed-length type specified in
--- RETURNING
-CREATE DOMAIN queryfuncs_char2 AS char(2);
-CREATE DOMAIN queryfuncs_char2_chk AS char(2) CHECK (VALUE NOT IN ('12'));
-SELECT JSON_QUERY(jsonb '123', '$' RETURNING queryfuncs_char2 ERROR ON ERROR);
-ERROR: value too long for type character(2)
-SELECT JSON_QUERY(jsonb '123', '$' RETURNING queryfuncs_char2 DEFAULT '1' ON ERROR);
- json_query
-------------
- 1
-(1 row)
-
-SELECT JSON_QUERY(jsonb '123', '$' RETURNING queryfuncs_char2_chk ERROR ON ERROR);
-ERROR: value too long for type character(2)
-SELECT JSON_QUERY(jsonb '123', '$' RETURNING queryfuncs_char2_chk DEFAULT '1' ON ERROR);
- json_query
-------------
- 1
-(1 row)
-
-SELECT JSON_VALUE(jsonb '123', '$' RETURNING queryfuncs_char2 ERROR ON ERROR);
-ERROR: value too long for type character(2)
-SELECT JSON_VALUE(jsonb '123', '$' RETURNING queryfuncs_char2 DEFAULT 1 ON ERROR);
- json_value
-------------
- 1
-(1 row)
-
-SELECT JSON_VALUE(jsonb '123', '$' RETURNING queryfuncs_char2_chk ERROR ON ERROR);
-ERROR: value too long for type character(2)
-SELECT JSON_VALUE(jsonb '123', '$' RETURNING queryfuncs_char2_chk DEFAULT 1 ON ERROR);
- json_value
-------------
- 1
-(1 row)
-
-DROP DOMAIN queryfuncs_char2, queryfuncs_char2_chk;
--- Test coercion to domain over another fixed-length type of the ON ERROR /
--- EMPTY expressions. Ask user to cast the DEFAULT expression explicitly if
--- automatic casting cannot be done, for example, from int to bit(2).
-CREATE DOMAIN queryfuncs_d_varbit3 AS varbit(3) CHECK (VALUE <> '01');
-SELECT JSON_VALUE(jsonb '1234', '$' RETURNING queryfuncs_d_varbit3 DEFAULT '111111' ON ERROR);
-ERROR: bit string too long for type bit varying(3)
-SELECT JSON_VALUE(jsonb '1234', '$' RETURNING queryfuncs_d_varbit3 DEFAULT '010' ON ERROR);
- json_value
-------------
- 010
-(1 row)
-
-SELECT JSON_VALUE(jsonb '1234', '$' RETURNING queryfuncs_d_varbit3 DEFAULT '01' ON ERROR);
-ERROR: could not coerce ON ERROR expression (DEFAULT) to the RETURNING type
-DETAIL: value for domain queryfuncs_d_varbit3 violates check constraint "queryfuncs_d_varbit3_check"
-SELECT JSON_VALUE(jsonb '"111"', '$' RETURNING bit(2) ERROR ON ERROR);
-ERROR: bit string length 3 does not match type bit(2)
-SELECT JSON_VALUE(jsonb '1234', '$' RETURNING bit(3) DEFAULT 1 ON ERROR);
-ERROR: cannot cast behavior expression of type integer to bit
-LINE 1: ...VALUE(jsonb '1234', '$' RETURNING bit(3) DEFAULT 1 ON ERROR...
- ^
-HINT: You will need to explicitly cast the expression to type bit.
-SELECT JSON_VALUE(jsonb '1234', '$' RETURNING bit(3) DEFAULT 1::bit(3) ON ERROR);
- json_value
-------------
- 001
-(1 row)
-
-SELECT JSON_VALUE(jsonb '"111"', '$.a' RETURNING bit(3) DEFAULT '1111' ON EMPTY);
-ERROR: bit string length 4 does not match type bit(3)
-DROP DOMAIN queryfuncs_d_varbit3;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/sqljson_jsontable.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/sqljson_jsontable.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/sqljson_jsontable.out 2024-11-15 02:50:52.502029300 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/sqljson_jsontable.out 2024-11-15 02:59:17.849116529 +0000
@@ -1,1180 +1,2 @@
--- JSON_TABLE
--- Should fail (JSON_TABLE can be used only in FROM clause)
-SELECT JSON_TABLE('[]', '$');
-ERROR: syntax error at or near "("
-LINE 1: SELECT JSON_TABLE('[]', '$');
- ^
--- Only allow EMPTY and ERROR for ON ERROR
-SELECT * FROM JSON_TABLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') DEFAULT 1 ON ERROR);
-ERROR: invalid ON ERROR behavior
-LINE 1: ...BLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') DEFAULT 1 ...
- ^
-DETAIL: Only EMPTY [ ARRAY ] or ERROR is allowed in the top-level ON ERROR clause.
-SELECT * FROM JSON_TABLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') NULL ON ERROR);
-ERROR: invalid ON ERROR behavior
-LINE 1: ...BLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') NULL ON ER...
- ^
-DETAIL: Only EMPTY [ ARRAY ] or ERROR is allowed in the top-level ON ERROR clause.
-SELECT * FROM JSON_TABLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') EMPTY ON ERROR);
- js2
------
-(0 rows)
-
-SELECT * FROM JSON_TABLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') ERROR ON ERROR);
-ERROR: jsonpath member accessor can only be applied to an object
--- Column and path names must be distinct
-SELECT * FROM JSON_TABLE(jsonb'"1.23"', '$.a' as js2 COLUMNS (js2 int path '$'));
-ERROR: duplicate JSON_TABLE column or path name: js2
-LINE 1: ...M JSON_TABLE(jsonb'"1.23"', '$.a' as js2 COLUMNS (js2 int pa...
- ^
--- Should fail (no columns)
-SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS ());
-ERROR: syntax error at or near ")"
-LINE 1: SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS ());
- ^
-SELECT * FROM JSON_TABLE (NULL::jsonb, '$' COLUMNS (v1 timestamp)) AS f (v1, v2);
-ERROR: JSON_TABLE function has 1 columns available but 2 columns specified
---duplicated column name
-SELECT * FROM JSON_TABLE(jsonb'"1.23"', '$.a' COLUMNS (js2 int path '$', js2 int path '$'));
-ERROR: duplicate JSON_TABLE column or path name: js2
-LINE 1: ...E(jsonb'"1.23"', '$.a' COLUMNS (js2 int path '$', js2 int pa...
- ^
---return composite data type.
-create type comp as (a int, b int);
-SELECT * FROM JSON_TABLE(jsonb '{"rec": "(1,2)"}', '$' COLUMNS (id FOR ORDINALITY, comp comp path '$.rec' omit quotes)) jt;
- id | comp
-----+-------
- 1 | (1,2)
-(1 row)
-
-drop type comp;
--- NULL => empty table
-SELECT * FROM JSON_TABLE(NULL::jsonb, '$' COLUMNS (foo int)) bar;
- foo
------
-(0 rows)
-
-SELECT * FROM JSON_TABLE(jsonb'"1.23"', 'strict $.a' COLUMNS (js2 int PATH '$'));
- js2
------
-(0 rows)
-
---
-SELECT * FROM JSON_TABLE(jsonb '123', '$'
- COLUMNS (item int PATH '$', foo int)) bar;
- item | foo
-------+-----
- 123 |
-(1 row)
-
--- JSON_TABLE: basic functionality
-CREATE DOMAIN jsonb_test_domain AS text CHECK (value <> 'foo');
-CREATE TEMP TABLE json_table_test (js) AS
- (VALUES
- ('1'),
- ('[]'),
- ('{}'),
- ('[1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""]')
- );
--- Regular "unformatted" columns
-SELECT *
-FROM json_table_test vals
- LEFT OUTER JOIN
- JSON_TABLE(
- vals.js::jsonb, 'lax $[*]'
- COLUMNS (
- id FOR ORDINALITY,
- "int" int PATH '$',
- "text" text PATH '$',
- "char(4)" char(4) PATH '$',
- "bool" bool PATH '$',
- "numeric" numeric PATH '$',
- "domain" jsonb_test_domain PATH '$',
- js json PATH '$',
- jb jsonb PATH '$'
- )
- ) jt
- ON true;
- js | id | int | text | char(4) | bool | numeric | domain | js | jb
----------------------------------------------------------------------------------------+----+-----+---------+---------+------+---------+---------+--------------+--------------
- 1 | 1 | 1 | 1 | 1 | t | 1 | 1 | 1 | 1
- [] | | | | | | | | |
- {} | 1 | | | | | | | {} | {}
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 1 | 1 | 1 | 1 | t | 1 | 1 | 1 | 1
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 2 | | 1.23 | 1.23 | | 1.23 | 1.23 | 1.23 | 1.23
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 3 | 2 | 2 | 2 | | 2 | 2 | "2" | "2"
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 4 | | aaaaaaa | | | | aaaaaaa | "aaaaaaa" | "aaaaaaa"
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 5 | | foo | foo | | | | "foo" | "foo"
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 6 | | | | | | | null | null
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 7 | | f | f | f | | false | false | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 8 | | t | t | t | | true | true | true
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 9 | | | | | | | {"aaa": 123} | {"aaa": 123}
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 10 | | [1,2] | | | | [1,2] | "[1,2]" | "[1,2]"
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 11 | | "str" | | | | "str" | "\"str\"" | "\"str\""
-(14 rows)
-
--- "formatted" columns
-SELECT *
-FROM json_table_test vals
- LEFT OUTER JOIN
- JSON_TABLE(
- vals.js::jsonb, 'lax $[*]'
- COLUMNS (
- id FOR ORDINALITY,
- jst text FORMAT JSON PATH '$',
- jsc char(4) FORMAT JSON PATH '$',
- jsv varchar(4) FORMAT JSON PATH '$',
- jsb jsonb FORMAT JSON PATH '$',
- jsbq jsonb FORMAT JSON PATH '$' OMIT QUOTES
- )
- ) jt
- ON true;
- js | id | jst | jsc | jsv | jsb | jsbq
----------------------------------------------------------------------------------------+----+--------------+------+------+--------------+--------------
- 1 | 1 | 1 | 1 | 1 | 1 | 1
- [] | | | | | |
- {} | 1 | {} | {} | {} | {} | {}
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 1 | 1 | 1 | 1 | 1 | 1
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 2 | 1.23 | 1.23 | 1.23 | 1.23 | 1.23
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 3 | "2" | "2" | "2" | "2" | 2
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 4 | "aaaaaaa" | | | "aaaaaaa" |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 5 | "foo" | | | "foo" |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 6 | null | null | null | null | null
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 7 | false | | | false | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 8 | true | true | true | true | true
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 9 | {"aaa": 123} | | | {"aaa": 123} | {"aaa": 123}
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 10 | "[1,2]" | | | "[1,2]" | [1, 2]
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 11 | "\"str\"" | | | "\"str\"" | "str"
-(14 rows)
-
--- EXISTS columns
-SELECT *
-FROM json_table_test vals
- LEFT OUTER JOIN
- JSON_TABLE(
- vals.js::jsonb, 'lax $[*]'
- COLUMNS (
- id FOR ORDINALITY,
- exists1 bool EXISTS PATH '$.aaa',
- exists2 int EXISTS PATH '$.aaa',
- exists3 int EXISTS PATH 'strict $.aaa' UNKNOWN ON ERROR,
- exists4 text EXISTS PATH 'strict $.aaa' FALSE ON ERROR
- )
- ) jt
- ON true;
- js | id | exists1 | exists2 | exists3 | exists4
----------------------------------------------------------------------------------------+----+---------+---------+---------+---------
- 1 | 1 | f | 0 | | false
- [] | | | | |
- {} | 1 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 1 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 2 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 3 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 4 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 5 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 6 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 7 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 8 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 9 | t | 1 | 1 | true
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 10 | f | 0 | | false
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 11 | f | 0 | | false
-(14 rows)
-
--- Other miscellaneous checks
-SELECT *
-FROM json_table_test vals
- LEFT OUTER JOIN
- JSON_TABLE(
- vals.js::jsonb, 'lax $[*]'
- COLUMNS (
- id FOR ORDINALITY,
- aaa int, -- "aaa" has implicit path '$."aaa"'
- aaa1 int PATH '$.aaa',
- js2 json PATH '$',
- jsb2w jsonb PATH '$' WITH WRAPPER,
- jsb2q jsonb PATH '$' OMIT QUOTES,
- ia int[] PATH '$',
- ta text[] PATH '$',
- jba jsonb[] PATH '$'
- )
- ) jt
- ON true;
- js | id | aaa | aaa1 | js2 | jsb2w | jsb2q | ia | ta | jba
----------------------------------------------------------------------------------------+----+-----+------+--------------+----------------+--------------+----+----+-----
- 1 | 1 | | | 1 | [1] | 1 | | |
- [] | | | | | | | | |
- {} | 1 | | | {} | [{}] | {} | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 1 | | | 1 | [1] | 1 | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 2 | | | 1.23 | [1.23] | 1.23 | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 3 | | | "2" | ["2"] | 2 | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 4 | | | "aaaaaaa" | ["aaaaaaa"] | | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 5 | | | "foo" | ["foo"] | | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 6 | | | null | [null] | null | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 7 | | | false | [false] | false | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 8 | | | true | [true] | true | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 9 | 123 | 123 | {"aaa": 123} | [{"aaa": 123}] | {"aaa": 123} | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 10 | | | "[1,2]" | ["[1,2]"] | [1, 2] | | |
- [1, 1.23, "2", "aaaaaaa", "foo", null, false, true, {"aaa": 123}, "[1,2]", "\"str\""] | 11 | | | "\"str\"" | ["\"str\""] | "str" | | |
-(14 rows)
-
--- Test using casts in DEFAULT .. ON ERROR expression
-SELECT * FROM JSON_TABLE(jsonb '{"d1": "H"}', '$'
- COLUMNS (js1 jsonb_test_domain PATH '$.a2' DEFAULT '"foo1"'::jsonb::text ON EMPTY));
- js1
---------
- "foo1"
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '{"d1": "H"}', '$'
- COLUMNS (js1 jsonb_test_domain PATH '$.a2' DEFAULT 'foo'::jsonb_test_domain ON EMPTY));
-ERROR: could not coerce ON EMPTY expression (DEFAULT) to the RETURNING type
-DETAIL: value for domain jsonb_test_domain violates check constraint "jsonb_test_domain_check"
-SELECT * FROM JSON_TABLE(jsonb '{"d1": "H"}', '$'
- COLUMNS (js1 jsonb_test_domain PATH '$.a2' DEFAULT 'foo1'::jsonb_test_domain ON EMPTY));
- js1
-------
- foo1
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '{"d1": "foo"}', '$'
- COLUMNS (js1 jsonb_test_domain PATH '$.d1' DEFAULT 'foo2'::jsonb_test_domain ON ERROR));
- js1
-------
- foo2
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '{"d1": "foo"}', '$'
- COLUMNS (js1 oid[] PATH '$.d2' DEFAULT '{1}'::int[]::oid[] ON EMPTY));
- js1
------
- {1}
-(1 row)
-
--- JSON_TABLE: Test backward parsing
-CREATE VIEW jsonb_table_view2 AS
-SELECT * FROM
- JSON_TABLE(
- jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c"
- COLUMNS (
- "int" int PATH '$',
- "text" text PATH '$',
- "char(4)" char(4) PATH '$',
- "bool" bool PATH '$',
- "numeric" numeric PATH '$',
- "domain" jsonb_test_domain PATH '$'));
-CREATE VIEW jsonb_table_view3 AS
-SELECT * FROM
- JSON_TABLE(
- jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c"
- COLUMNS (
- js json PATH '$',
- jb jsonb PATH '$',
- jst text FORMAT JSON PATH '$',
- jsc char(4) FORMAT JSON PATH '$',
- jsv varchar(4) FORMAT JSON PATH '$'));
-CREATE VIEW jsonb_table_view4 AS
-SELECT * FROM
- JSON_TABLE(
- jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c"
- COLUMNS (
- jsb jsonb FORMAT JSON PATH '$',
- jsbq jsonb FORMAT JSON PATH '$' OMIT QUOTES,
- aaa int, -- implicit path '$."aaa"',
- aaa1 int PATH '$.aaa'));
-CREATE VIEW jsonb_table_view5 AS
-SELECT * FROM
- JSON_TABLE(
- jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c"
- COLUMNS (
- exists1 bool EXISTS PATH '$.aaa',
- exists2 int EXISTS PATH '$.aaa' TRUE ON ERROR,
- exists3 text EXISTS PATH 'strict $.aaa' UNKNOWN ON ERROR));
-CREATE VIEW jsonb_table_view6 AS
-SELECT * FROM
- JSON_TABLE(
- jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c"
- COLUMNS (
- js2 json PATH '$',
- jsb2w jsonb PATH '$' WITH WRAPPER,
- jsb2q jsonb PATH '$' OMIT QUOTES,
- ia int[] PATH '$',
- ta text[] PATH '$',
- jba jsonb[] PATH '$'));
-\sv jsonb_table_view2
-CREATE OR REPLACE VIEW public.jsonb_table_view2 AS
- SELECT "int",
- text,
- "char(4)",
- bool,
- "numeric",
- domain
- FROM JSON_TABLE(
- 'null'::jsonb, '$[*]' AS json_table_path_0
- PASSING
- 1 + 2 AS a,
- '"foo"'::json AS "b c"
- COLUMNS (
- "int" integer PATH '$',
- text text PATH '$',
- "char(4)" character(4) PATH '$',
- bool boolean PATH '$',
- "numeric" numeric PATH '$',
- domain jsonb_test_domain PATH '$'
- )
- )
-\sv jsonb_table_view3
-CREATE OR REPLACE VIEW public.jsonb_table_view3 AS
- SELECT js,
- jb,
- jst,
- jsc,
- jsv
- FROM JSON_TABLE(
- 'null'::jsonb, '$[*]' AS json_table_path_0
- PASSING
- 1 + 2 AS a,
- '"foo"'::json AS "b c"
- COLUMNS (
- js json PATH '$' WITHOUT WRAPPER KEEP QUOTES,
- jb jsonb PATH '$' WITHOUT WRAPPER KEEP QUOTES,
- jst text FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES,
- jsc character(4) FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES,
- jsv character varying(4) FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES
- )
- )
-\sv jsonb_table_view4
-CREATE OR REPLACE VIEW public.jsonb_table_view4 AS
- SELECT jsb,
- jsbq,
- aaa,
- aaa1
- FROM JSON_TABLE(
- 'null'::jsonb, '$[*]' AS json_table_path_0
- PASSING
- 1 + 2 AS a,
- '"foo"'::json AS "b c"
- COLUMNS (
- jsb jsonb PATH '$' WITHOUT WRAPPER KEEP QUOTES,
- jsbq jsonb PATH '$' WITHOUT WRAPPER OMIT QUOTES,
- aaa integer PATH '$."aaa"',
- aaa1 integer PATH '$."aaa"'
- )
- )
-\sv jsonb_table_view5
-CREATE OR REPLACE VIEW public.jsonb_table_view5 AS
- SELECT exists1,
- exists2,
- exists3
- FROM JSON_TABLE(
- 'null'::jsonb, '$[*]' AS json_table_path_0
- PASSING
- 1 + 2 AS a,
- '"foo"'::json AS "b c"
- COLUMNS (
- exists1 boolean EXISTS PATH '$."aaa"',
- exists2 integer EXISTS PATH '$."aaa"' TRUE ON ERROR,
- exists3 text EXISTS PATH 'strict $."aaa"' UNKNOWN ON ERROR
- )
- )
-\sv jsonb_table_view6
-CREATE OR REPLACE VIEW public.jsonb_table_view6 AS
- SELECT js2,
- jsb2w,
- jsb2q,
- ia,
- ta,
- jba
- FROM JSON_TABLE(
- 'null'::jsonb, '$[*]' AS json_table_path_0
- PASSING
- 1 + 2 AS a,
- '"foo"'::json AS "b c"
- COLUMNS (
- js2 json PATH '$' WITHOUT WRAPPER KEEP QUOTES,
- jsb2w jsonb PATH '$' WITH UNCONDITIONAL WRAPPER KEEP QUOTES,
- jsb2q jsonb PATH '$' WITHOUT WRAPPER OMIT QUOTES,
- ia integer[] PATH '$' WITHOUT WRAPPER KEEP QUOTES,
- ta text[] PATH '$' WITHOUT WRAPPER KEEP QUOTES,
- jba jsonb[] PATH '$' WITHOUT WRAPPER KEEP QUOTES
- )
- )
-EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view2;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Table Function Scan on "json_table"
- Output: "json_table"."int", "json_table".text, "json_table"."char(4)", "json_table".bool, "json_table"."numeric", "json_table".domain
- Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS ("int" integer PATH '$', text text PATH '$', "char(4)" character(4) PATH '$', bool boolean PATH '$', "numeric" numeric PATH '$', domain jsonb_test_domain PATH '$'))
-(3 rows)
-
-EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view3;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Table Function Scan on "json_table"
- Output: "json_table".js, "json_table".jb, "json_table".jst, "json_table".jsc, "json_table".jsv
- Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (js json PATH '$' WITHOUT WRAPPER KEEP QUOTES, jb jsonb PATH '$' WITHOUT WRAPPER KEEP QUOTES, jst text FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES, jsc character(4) FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES, jsv character varying(4) FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES))
-(3 rows)
-
-EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view4;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Table Function Scan on "json_table"
- Output: "json_table".jsb, "json_table".jsbq, "json_table".aaa, "json_table".aaa1
- Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (jsb jsonb PATH '$' WITHOUT WRAPPER KEEP QUOTES, jsbq jsonb PATH '$' WITHOUT WRAPPER OMIT QUOTES, aaa integer PATH '$."aaa"', aaa1 integer PATH '$."aaa"'))
-(3 rows)
-
-EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view5;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Table Function Scan on "json_table"
- Output: "json_table".exists1, "json_table".exists2, "json_table".exists3
- Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (exists1 boolean EXISTS PATH '$."aaa"', exists2 integer EXISTS PATH '$."aaa"' TRUE ON ERROR, exists3 text EXISTS PATH 'strict $."aaa"' UNKNOWN ON ERROR))
-(3 rows)
-
-EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view6;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Table Function Scan on "json_table"
- Output: "json_table".js2, "json_table".jsb2w, "json_table".jsb2q, "json_table".ia, "json_table".ta, "json_table".jba
- Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (js2 json PATH '$' WITHOUT WRAPPER KEEP QUOTES, jsb2w jsonb PATH '$' WITH UNCONDITIONAL WRAPPER KEEP QUOTES, jsb2q jsonb PATH '$' WITHOUT WRAPPER OMIT QUOTES, ia integer[] PATH '$' WITHOUT WRAPPER KEEP QUOTES, ta text[] PATH '$' WITHOUT WRAPPER KEEP QUOTES, jba jsonb[] PATH '$' WITHOUT WRAPPER KEEP QUOTES))
-(3 rows)
-
--- JSON_TABLE() with alias
-EXPLAIN (COSTS OFF, VERBOSE)
-SELECT * FROM
- JSON_TABLE(
- jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c"
- COLUMNS (
- id FOR ORDINALITY,
- "int" int PATH '$',
- "text" text PATH '$'
- )) json_table_func;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Table Function Scan on "json_table" json_table_func
- Output: id, "int", text
- Table Function Call: JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '"foo"'::jsonb AS "b c" COLUMNS (id FOR ORDINALITY, "int" integer PATH '$', text text PATH '$'))
-(3 rows)
-
-EXPLAIN (COSTS OFF, FORMAT JSON, VERBOSE)
-SELECT * FROM
- JSON_TABLE(
- jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c"
- COLUMNS (
- id FOR ORDINALITY,
- "int" int PATH '$',
- "text" text PATH '$'
- )) json_table_func;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- [ +
- { +
- "Plan": { +
- "Node Type": "Table Function Scan", +
- "Parallel Aware": false, +
- "Async Capable": false, +
- "Table Function Name": "json_table", +
- "Alias": "json_table_func", +
- "Disabled": false, +
- "Output": ["id", "\"int\"", "text"], +
- "Table Function Call": "JSON_TABLE('null'::jsonb, '$[*]' AS json_table_path_0 PASSING 3 AS a, '\"foo\"'::jsonb AS \"b c\" COLUMNS (id FOR ORDINALITY, \"int\" integer PATH '$', text text PATH '$'))"+
- } +
- } +
- ]
-(1 row)
-
-DROP VIEW jsonb_table_view2;
-DROP VIEW jsonb_table_view3;
-DROP VIEW jsonb_table_view4;
-DROP VIEW jsonb_table_view5;
-DROP VIEW jsonb_table_view6;
-DROP DOMAIN jsonb_test_domain;
--- JSON_TABLE: only one FOR ORDINALITY columns allowed
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (id FOR ORDINALITY, id2 FOR ORDINALITY, a int PATH '$.a' ERROR ON EMPTY)) jt;
-ERROR: only one FOR ORDINALITY column is allowed
-LINE 1: ..._TABLE(jsonb '1', '$' COLUMNS (id FOR ORDINALITY, id2 FOR OR...
- ^
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (id FOR ORDINALITY, a int PATH '$' ERROR ON EMPTY)) jt;
- id | a
-----+---
- 1 | 1
-(1 row)
-
--- JSON_TABLE: ON EMPTY/ON ERROR behavior
-SELECT *
-FROM
- (VALUES ('1'), ('"err"')) vals(js),
- JSON_TABLE(vals.js::jsonb, '$' COLUMNS (a int PATH '$')) jt;
- js | a
--------+---
- 1 | 1
- "err" |
-(2 rows)
-
-SELECT *
-FROM
- (VALUES ('1'), ('"err"')) vals(js)
- LEFT OUTER JOIN
- JSON_TABLE(vals.js::jsonb, '$' COLUMNS (a int PATH '$' ERROR ON ERROR)) jt
- ON true;
-ERROR: invalid input syntax for type integer: "err"
--- TABLE-level ERROR ON ERROR is not propagated to columns
-SELECT *
-FROM
- (VALUES ('1'), ('"err"')) vals(js)
- LEFT OUTER JOIN
- JSON_TABLE(vals.js::jsonb, '$' COLUMNS (a int PATH '$' ERROR ON ERROR)) jt
- ON true;
-ERROR: invalid input syntax for type integer: "err"
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int PATH '$.a' ERROR ON EMPTY)) jt;
-ERROR: no SQL/JSON item found for specified path of column "a"
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int PATH 'strict $.a' ERROR ON ERROR) ERROR ON ERROR) jt;
-ERROR: jsonpath member accessor can only be applied to an object
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int PATH 'lax $.a' ERROR ON EMPTY) ERROR ON ERROR) jt;
-ERROR: no SQL/JSON item found for specified path of column "a"
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int PATH '$' DEFAULT 1 ON EMPTY DEFAULT 2 ON ERROR)) jt;
- a
----
- 2
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int PATH 'strict $.a' DEFAULT 1 ON EMPTY DEFAULT 2 ON ERROR)) jt;
- a
----
- 2
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int PATH 'lax $.a' DEFAULT 1 ON EMPTY DEFAULT 2 ON ERROR)) jt;
- a
----
- 1
-(1 row)
-
--- JSON_TABLE: EXISTS PATH types
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int4 EXISTS PATH '$.a' ERROR ON ERROR)); -- ok; can cast to int4
- a
----
- 0
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int4 EXISTS PATH '$' ERROR ON ERROR)); -- ok; can cast to int4
- a
----
- 1
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int2 EXISTS PATH '$.a'));
-ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type
-DETAIL: invalid input syntax for type smallint: "false"
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a int8 EXISTS PATH '$.a'));
-ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type
-DETAIL: invalid input syntax for type bigint: "false"
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a float4 EXISTS PATH '$.a'));
-ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type
-DETAIL: invalid input syntax for type real: "false"
--- Default FALSE (ON ERROR) doesn't fit char(3)
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a char(3) EXISTS PATH '$.a'));
-ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type
-DETAIL: value too long for type character(3)
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a char(3) EXISTS PATH '$.a' ERROR ON ERROR));
-ERROR: value too long for type character(3)
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a char(5) EXISTS PATH '$.a' ERROR ON ERROR));
- a
--------
- false
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a json EXISTS PATH '$.a'));
- a
--------
- false
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a jsonb EXISTS PATH '$.a'));
- a
--------
- false
-(1 row)
-
--- EXISTS PATH domain over int
-CREATE DOMAIN dint4 AS int;
-CREATE DOMAIN dint4_0 AS int CHECK (VALUE <> 0 );
-SELECT a, a::bool FROM JSON_TABLE(jsonb '"a"', '$' COLUMNS (a dint4 EXISTS PATH '$.a' ));
- a | a
----+---
- 0 | f
-(1 row)
-
-SELECT a, a::bool FROM JSON_TABLE(jsonb '{"a":1}', '$' COLUMNS (a dint4_0 EXISTS PATH '$.b'));
-ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type
-DETAIL: value for domain dint4_0 violates check constraint "dint4_0_check"
-SELECT a, a::bool FROM JSON_TABLE(jsonb '{"a":1}', '$' COLUMNS (a dint4_0 EXISTS PATH '$.b' ERROR ON ERROR));
-ERROR: value for domain dint4_0 violates check constraint "dint4_0_check"
-SELECT a, a::bool FROM JSON_TABLE(jsonb '{"a":1}', '$' COLUMNS (a dint4_0 EXISTS PATH '$.b' FALSE ON ERROR));
-ERROR: could not coerce ON ERROR expression (FALSE) to the RETURNING type
-DETAIL: value for domain dint4_0 violates check constraint "dint4_0_check"
-SELECT a, a::bool FROM JSON_TABLE(jsonb '{"a":1}', '$' COLUMNS (a dint4_0 EXISTS PATH '$.b' TRUE ON ERROR));
- a | a
----+---
- 1 | t
-(1 row)
-
-DROP DOMAIN dint4, dint4_0;
--- JSON_TABLE: WRAPPER/QUOTES clauses on scalar columns
-SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text PATH '$' KEEP QUOTES ON SCALAR STRING));
- item
----------
- "world"
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text PATH '$' OMIT QUOTES ON SCALAR STRING));
- item
--------
- world
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' KEEP QUOTES));
- item
----------
- "world"
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' OMIT QUOTES));
- item
--------
- world
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' WITHOUT WRAPPER KEEP QUOTES));
- item
----------
- "world"
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text PATH '$' WITHOUT WRAPPER OMIT QUOTES));
- item
--------
- world
-(1 row)
-
-SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' WITH WRAPPER));
- item
------------
- ["world"]
-(1 row)
-
--- Error: OMIT QUOTES should not be specified when WITH WRAPPER is present
-SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text PATH '$' WITH WRAPPER OMIT QUOTES));
-ERROR: SQL/JSON QUOTES behavior must not be specified when WITH WRAPPER is used
-LINE 1: ...T * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text ...
- ^
--- But KEEP QUOTES (the default) is fine
-SELECT * FROM JSON_TABLE(jsonb '"world"', '$' COLUMNS (item text FORMAT JSON PATH '$' WITH WRAPPER KEEP QUOTES));
- item
------------
- ["world"]
-(1 row)
-
--- Test PASSING args
-SELECT *
-FROM JSON_TABLE(
- jsonb '[1,2,3]',
- '$[*] ? (@ < $x)'
- PASSING 3 AS x
- COLUMNS (y text FORMAT JSON PATH '$')
- ) jt;
- y
----
- 1
- 2
-(2 rows)
-
--- PASSING arguments are also passed to column paths
-SELECT *
-FROM JSON_TABLE(
- jsonb '[1,2,3]',
- '$[*] ? (@ < $x)'
- PASSING 10 AS x, 3 AS y
- COLUMNS (a text FORMAT JSON PATH '$ ? (@ < $y)')
- ) jt;
- a
----
- 1
- 2
-
-(3 rows)
-
--- Should fail (not supported)
-SELECT * FROM JSON_TABLE(jsonb '{"a": 123}', '$' || '.' || 'a' COLUMNS (foo int));
-ERROR: only string constants are supported in JSON_TABLE path specification
-LINE 1: SELECT * FROM JSON_TABLE(jsonb '{"a": 123}', '$' || '.' || '...
- ^
--- JsonPathQuery() error message mentioning column name
-SELECT * FROM JSON_TABLE('{"a": [{"b": "1"}, {"b": "2"}]}', '$' COLUMNS (b json path '$.a[*].b' ERROR ON ERROR));
-ERROR: JSON path expression for column "b" should return single item without wrapper
-HINT: Use the WITH WRAPPER clause to wrap SQL/JSON items into an array.
--- JSON_TABLE: nested paths
--- Duplicate path names
-SELECT * FROM JSON_TABLE(
- jsonb '[]', '$' AS a
- COLUMNS (
- b int,
- NESTED PATH '$' AS a
- COLUMNS (
- c int
- )
- )
-) jt;
-ERROR: duplicate JSON_TABLE column or path name: a
-LINE 5: NESTED PATH '$' AS a
- ^
-SELECT * FROM JSON_TABLE(
- jsonb '[]', '$' AS a
- COLUMNS (
- b int,
- NESTED PATH '$' AS n_a
- COLUMNS (
- c int
- )
- )
-) jt;
- b | c
----+---
- |
-(1 row)
-
-SELECT * FROM JSON_TABLE(
- jsonb '[]', '$'
- COLUMNS (
- b int,
- NESTED PATH '$' AS b
- COLUMNS (
- c int
- )
- )
-) jt;
-ERROR: duplicate JSON_TABLE column or path name: b
-LINE 5: NESTED PATH '$' AS b
- ^
-SELECT * FROM JSON_TABLE(
- jsonb '[]', '$'
- COLUMNS (
- NESTED PATH '$' AS a
- COLUMNS (
- b int
- ),
- NESTED PATH '$'
- COLUMNS (
- NESTED PATH '$' AS a
- COLUMNS (
- c int
- )
- )
- )
-) jt;
-ERROR: duplicate JSON_TABLE column or path name: a
-LINE 10: NESTED PATH '$' AS a
- ^
--- JSON_TABLE: plan execution
-CREATE TEMP TABLE jsonb_table_test (js jsonb);
-INSERT INTO jsonb_table_test
-VALUES (
- '[
- {"a": 1, "b": [], "c": []},
- {"a": 2, "b": [1, 2, 3], "c": [10, null, 20]},
- {"a": 3, "b": [1, 2], "c": []},
- {"x": "4", "b": [1, 2], "c": 123}
- ]'
-);
-select
- jt.*
-from
- jsonb_table_test jtt,
- json_table (
- jtt.js,'strict $[*]' as p
- columns (
- n for ordinality,
- a int path 'lax $.a' default -1 on empty,
- nested path 'strict $.b[*]' as pb columns (b_id for ordinality, b int path '$' ),
- nested path 'strict $.c[*]' as pc columns (c_id for ordinality, c int path '$' )
- )
- ) jt;
- n | a | b_id | b | c_id | c
----+----+------+---+------+----
- 1 | 1 | | | |
- 2 | 2 | 1 | 1 | |
- 2 | 2 | 2 | 2 | |
- 2 | 2 | 3 | 3 | |
- 2 | 2 | | | 1 | 10
- 2 | 2 | | | 2 |
- 2 | 2 | | | 3 | 20
- 3 | 3 | 1 | 1 | |
- 3 | 3 | 2 | 2 | |
- 4 | -1 | 1 | 1 | |
- 4 | -1 | 2 | 2 | |
-(11 rows)
-
--- PASSING arguments are passed to nested paths and their columns' paths
-SELECT *
-FROM
- generate_series(1, 3) x,
- generate_series(1, 3) y,
- JSON_TABLE(jsonb
- '[[1,2,3],[2,3,4,5],[3,4,5,6]]',
- 'strict $[*] ? (@[*] <= $x)'
- PASSING x AS x, y AS y
- COLUMNS (
- y text FORMAT JSON PATH '$',
- NESTED PATH 'strict $[*] ? (@ == $y)'
- COLUMNS (
- z int PATH '$'
- )
- )
- ) jt;
- x | y | y | z
----+---+--------------+---
- 1 | 1 | [1, 2, 3] | 1
- 2 | 1 | [1, 2, 3] | 1
- 2 | 1 | [2, 3, 4, 5] |
- 3 | 1 | [1, 2, 3] | 1
- 3 | 1 | [2, 3, 4, 5] |
- 3 | 1 | [3, 4, 5, 6] |
- 1 | 2 | [1, 2, 3] | 2
- 2 | 2 | [1, 2, 3] | 2
- 2 | 2 | [2, 3, 4, 5] | 2
- 3 | 2 | [1, 2, 3] | 2
- 3 | 2 | [2, 3, 4, 5] | 2
- 3 | 2 | [3, 4, 5, 6] |
- 1 | 3 | [1, 2, 3] | 3
- 2 | 3 | [1, 2, 3] | 3
- 2 | 3 | [2, 3, 4, 5] | 3
- 3 | 3 | [1, 2, 3] | 3
- 3 | 3 | [2, 3, 4, 5] | 3
- 3 | 3 | [3, 4, 5, 6] | 3
-(18 rows)
-
--- JSON_TABLE: Test backward parsing with nested paths
-CREATE VIEW jsonb_table_view_nested AS
-SELECT * FROM
- JSON_TABLE(
- jsonb 'null', 'lax $[*]' PASSING 1 + 2 AS a, json '"foo"' AS "b c"
- COLUMNS (
- id FOR ORDINALITY,
- NESTED PATH '$[1]' AS p1 COLUMNS (
- a1 int,
- NESTED PATH '$[*]' AS "p1 1" COLUMNS (
- a11 text
- ),
- b1 text
- ),
- NESTED PATH '$[2]' AS p2 COLUMNS (
- NESTED PATH '$[*]' AS "p2:1" COLUMNS (
- a21 text
- ),
- NESTED PATH '$[*]' AS p22 COLUMNS (
- a22 text
- )
- )
- )
- );
-\sv jsonb_table_view_nested
-CREATE OR REPLACE VIEW public.jsonb_table_view_nested AS
- SELECT id,
- a1,
- b1,
- a11,
- a21,
- a22
- FROM JSON_TABLE(
- 'null'::jsonb, '$[*]' AS json_table_path_0
- PASSING
- 1 + 2 AS a,
- '"foo"'::json AS "b c"
- COLUMNS (
- id FOR ORDINALITY,
- NESTED PATH '$[1]' AS p1
- COLUMNS (
- a1 integer PATH '$."a1"',
- b1 text PATH '$."b1"',
- NESTED PATH '$[*]' AS "p1 1"
- COLUMNS (
- a11 text PATH '$."a11"'
- )
- ),
- NESTED PATH '$[2]' AS p2
- COLUMNS (
- NESTED PATH '$[*]' AS "p2:1"
- COLUMNS (
- a21 text PATH '$."a21"'
- ),
- NESTED PATH '$[*]' AS p22
- COLUMNS (
- a22 text PATH '$."a22"'
- )
- )
- )
- )
-DROP VIEW jsonb_table_view_nested;
-CREATE TABLE s (js jsonb);
-INSERT INTO s VALUES
- ('{"a":{"za":[{"z1": [11,2222]},{"z21": [22, 234,2345]},{"z22": [32, 204,145]}]},"c": 3}'),
- ('{"a":{"za":[{"z1": [21,4222]},{"z21": [32, 134,1345]}]},"c": 10}');
--- error
-SELECT sub.* FROM s,
- JSON_TABLE(js, '$' PASSING 32 AS x, 13 AS y COLUMNS (
- xx int path '$.c',
- NESTED PATH '$.a.za[1]' columns (NESTED PATH '$.z21[*]' COLUMNS (z21 int path '$?(@ >= $"x")' ERROR ON ERROR))
- )) sub;
- xx | z21
-----+------
- 3 |
- 3 | 234
- 3 | 2345
- 10 | 32
- 10 | 134
- 10 | 1345
-(6 rows)
-
--- Parent columns xx1, xx appear before NESTED ones
-SELECT sub.* FROM s,
- (VALUES (23)) x(x), generate_series(13, 13) y,
- JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y COLUMNS (
- NESTED PATH '$.a.za[2]' COLUMNS (
- NESTED PATH '$.z22[*]' as z22 COLUMNS (c int PATH '$')),
- NESTED PATH '$.a.za[1]' columns (d int[] PATH '$.z21'),
- NESTED PATH '$.a.za[0]' columns (NESTED PATH '$.z1[*]' as z1 COLUMNS (a int PATH '$')),
- xx1 int PATH '$.c',
- NESTED PATH '$.a.za[1]' columns (NESTED PATH '$.z21[*]' as z21 COLUMNS (b int PATH '$')),
- xx int PATH '$.c'
- )) sub;
- xx1 | xx | c | d | a | b
------+----+-----+---------------+------+------
- 3 | 3 | 32 | | |
- 3 | 3 | 204 | | |
- 3 | 3 | 145 | | |
- 3 | 3 | | {22,234,2345} | |
- 3 | 3 | | | 11 |
- 3 | 3 | | | 2222 |
- 3 | 3 | | | | 22
- 3 | 3 | | | | 234
- 3 | 3 | | | | 2345
- 10 | 10 | | {32,134,1345} | |
- 10 | 10 | | | 21 |
- 10 | 10 | | | 4222 |
- 10 | 10 | | | | 32
- 10 | 10 | | | | 134
- 10 | 10 | | | | 1345
-(15 rows)
-
--- Test applying PASSING variables at different nesting levels
-SELECT sub.* FROM s,
- (VALUES (23)) x(x), generate_series(13, 13) y,
- JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y COLUMNS (
- xx1 int PATH '$.c',
- NESTED PATH '$.a.za[0].z1[*]' COLUMNS (NESTED PATH '$ ?(@ >= ($"x" -2))' COLUMNS (a int PATH '$')),
- NESTED PATH '$.a.za[0]' COLUMNS (NESTED PATH '$.z1[*] ? (@ >= ($"x" -2))' COLUMNS (b int PATH '$'))
- )) sub;
- xx1 | a | b
------+------+------
- 3 | |
- 3 | 2222 |
- 3 | | 2222
- 10 | 21 |
- 10 | 4222 |
- 10 | | 21
- 10 | | 4222
-(7 rows)
-
--- Test applying PASSING variable to paths all the levels
-SELECT sub.* FROM s,
- (VALUES (23)) x(x),
- generate_series(13, 13) y,
- JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y
- COLUMNS (
- xx1 int PATH '$.c',
- NESTED PATH '$.a.za[1]'
- COLUMNS (NESTED PATH '$.z21[*]' COLUMNS (b int PATH '$')),
- NESTED PATH '$.a.za[1] ? (@.z21[*] >= ($"x"-1))' COLUMNS
- (NESTED PATH '$.z21[*] ? (@ >= ($"y" + 3))' as z22 COLUMNS (a int PATH '$ ? (@ >= ($"y" + 12))')),
- NESTED PATH '$.a.za[1]' COLUMNS
- (NESTED PATH '$.z21[*] ? (@ >= ($"y" +121))' as z21 COLUMNS (c int PATH '$ ? (@ > ($"x" +111))'))
- )) sub;
- xx1 | b | a | c
------+------+------+------
- 3 | 22 | |
- 3 | 234 | |
- 3 | 2345 | |
- 3 | | |
- 3 | | 234 |
- 3 | | 2345 |
- 3 | | | 234
- 3 | | | 2345
- 10 | 32 | |
- 10 | 134 | |
- 10 | 1345 | |
- 10 | | 32 |
- 10 | | 134 |
- 10 | | 1345 |
- 10 | | |
- 10 | | | 1345
-(16 rows)
-
------ test on empty behavior
-SELECT sub.* FROM s,
- (values(23)) x(x),
- generate_series(13, 13) y,
- JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y
- COLUMNS (
- xx1 int PATH '$.c',
- NESTED PATH '$.a.za[2]' COLUMNS (NESTED PATH '$.z22[*]' as z22 COLUMNS (c int PATH '$')),
- NESTED PATH '$.a.za[1]' COLUMNS (d json PATH '$ ? (@.z21[*] == ($"x" -1))'),
- NESTED PATH '$.a.za[0]' COLUMNS (NESTED PATH '$.z1[*] ? (@ >= ($"x" -2))' as z1 COLUMNS (a int PATH '$')),
- NESTED PATH '$.a.za[1]' COLUMNS
- (NESTED PATH '$.z21[*] ? (@ >= ($"y" +121))' as z21 COLUMNS (b int PATH '$ ? (@ > ($"x" +111))' DEFAULT 0 ON EMPTY))
- )) sub;
- xx1 | c | d | a | b
------+-----+--------------------------+------+------
- 3 | 32 | | |
- 3 | 204 | | |
- 3 | 145 | | |
- 3 | | {"z21": [22, 234, 2345]} | |
- 3 | | | 2222 |
- 3 | | | | 234
- 3 | | | | 2345
- 10 | | | |
- 10 | | | 21 |
- 10 | | | 4222 |
- 10 | | | | 0
- 10 | | | | 1345
-(12 rows)
-
-CREATE OR REPLACE VIEW jsonb_table_view7 AS
-SELECT sub.* FROM s,
- (values(23)) x(x),
- generate_series(13, 13) y,
- JSON_TABLE(js, '$' AS c1 PASSING x AS x, y AS y
- COLUMNS (
- xx1 int PATH '$.c',
- NESTED PATH '$.a.za[2]' COLUMNS (NESTED PATH '$.z22[*]' as z22 COLUMNS (c int PATH '$' WITHOUT WRAPPER OMIT QUOTES)),
- NESTED PATH '$.a.za[1]' COLUMNS (d json PATH '$ ? (@.z21[*] == ($"x" -1))' WITH WRAPPER),
- NESTED PATH '$.a.za[0]' COLUMNS (NESTED PATH '$.z1[*] ? (@ >= ($"x" -2))' as z1 COLUMNS (a int PATH '$' KEEP QUOTES)),
- NESTED PATH '$.a.za[1]' COLUMNS
- (NESTED PATH '$.z21[*] ? (@ >= ($"y" +121))' as z21 COLUMNS (b int PATH '$ ? (@ > ($"x" +111))' DEFAULT 0 ON EMPTY))
- )) sub;
-\sv jsonb_table_view7
-CREATE OR REPLACE VIEW public.jsonb_table_view7 AS
- SELECT sub.xx1,
- sub.c,
- sub.d,
- sub.a,
- sub.b
- FROM s,
- ( VALUES (23)) x(x),
- generate_series(13, 13) y(y),
- LATERAL JSON_TABLE(
- s.js, '$' AS c1
- PASSING
- x.x AS x,
- y.y AS y
- COLUMNS (
- xx1 integer PATH '$."c"',
- NESTED PATH '$."a"."za"[2]' AS json_table_path_0
- COLUMNS (
- NESTED PATH '$."z22"[*]' AS z22
- COLUMNS (
- c integer PATH '$' WITHOUT WRAPPER OMIT QUOTES
- )
- ),
- NESTED PATH '$."a"."za"[1]' AS json_table_path_1
- COLUMNS (
- d json PATH '$?(@."z21"[*] == $"x" - 1)' WITH UNCONDITIONAL WRAPPER KEEP QUOTES
- ),
- NESTED PATH '$."a"."za"[0]' AS json_table_path_2
- COLUMNS (
- NESTED PATH '$."z1"[*]?(@ >= $"x" - 2)' AS z1
- COLUMNS (
- a integer PATH '$' WITHOUT WRAPPER KEEP QUOTES
- )
- ),
- NESTED PATH '$."a"."za"[1]' AS json_table_path_3
- COLUMNS (
- NESTED PATH '$."z21"[*]?(@ >= $"y" + 121)' AS z21
- COLUMNS (
- b integer PATH '$?(@ > $"x" + 111)' DEFAULT 0 ON EMPTY
- )
- )
- )
- ) sub
-DROP VIEW jsonb_table_view7;
-DROP TABLE s;
--- Prevent ON EMPTY specification on EXISTS columns
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int exists empty object on empty));
-ERROR: syntax error at or near "empty"
-LINE 1: ...sonb '1', '$' COLUMNS (a int exists empty object on empty));
- ^
--- Test ON ERROR / EMPTY value validity for the function and column types;
--- all fail
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int) NULL ON ERROR);
-ERROR: invalid ON ERROR behavior
-LINE 1: ... * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int) NULL ON ER...
- ^
-DETAIL: Only EMPTY [ ARRAY ] or ERROR is allowed in the top-level ON ERROR clause.
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int true on empty));
-ERROR: invalid ON EMPTY behavior for column "a"
-LINE 1: ...T * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int true on em...
- ^
-DETAIL: Only ERROR, NULL, or DEFAULT expression is allowed in ON EMPTY for scalar columns.
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int omit quotes true on error));
-ERROR: invalid ON ERROR behavior for column "a"
-LINE 1: ...N_TABLE(jsonb '1', '$' COLUMNS (a int omit quotes true on er...
- ^
-DETAIL: Only ERROR, NULL, EMPTY ARRAY, EMPTY OBJECT, or DEFAULT expression is allowed in ON ERROR for formatted columns.
-SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int exists empty object on error));
-ERROR: invalid ON ERROR behavior for column "a"
-LINE 1: ...M JSON_TABLE(jsonb '1', '$' COLUMNS (a int exists empty obje...
- ^
-DETAIL: Only ERROR, TRUE, FALSE, or UNKNOWN is allowed in ON ERROR for EXISTS columns.
--- Test JSON_TABLE() column deparsing -- don't emit default ON ERROR / EMPTY
--- behavior
-CREATE VIEW json_table_view8 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$'));
-\sv json_table_view8;
-CREATE OR REPLACE VIEW public.json_table_view8 AS
- SELECT a
- FROM JSON_TABLE(
- '"a"'::text, '$' AS json_table_path_0
- COLUMNS (
- a text PATH '$'
- )
- )
-CREATE VIEW json_table_view9 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$') ERROR ON ERROR);
-\sv json_table_view9;
-CREATE OR REPLACE VIEW public.json_table_view9 AS
- SELECT a
- FROM JSON_TABLE(
- '"a"'::text, '$' AS json_table_path_0
- COLUMNS (
- a text PATH '$'
- ) ERROR ON ERROR
- )
-DROP VIEW json_table_view8, json_table_view9;
--- Test JSON_TABLE() deparsing -- don't emit default ON ERROR behavior
-CREATE VIEW json_table_view8 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$') EMPTY ON ERROR);
-\sv json_table_view8;
-CREATE OR REPLACE VIEW public.json_table_view8 AS
- SELECT a
- FROM JSON_TABLE(
- '"a"'::text, '$' AS json_table_path_0
- COLUMNS (
- a text PATH '$'
- )
- )
-CREATE VIEW json_table_view9 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$') EMPTY ARRAY ON ERROR);
-\sv json_table_view9;
-CREATE OR REPLACE VIEW public.json_table_view9 AS
- SELECT a
- FROM JSON_TABLE(
- '"a"'::text, '$' AS json_table_path_0
- COLUMNS (
- a text PATH '$'
- )
- )
-DROP VIEW json_table_view8, json_table_view9;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/plancache.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/plancache.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/plancache.out 2024-11-15 02:50:52.482062215 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/plancache.out 2024-11-15 02:59:18.205116998 +0000
@@ -1,400 +1,2 @@
---
--- Tests to exercise the plan caching/invalidation mechanism
---
-CREATE TEMP TABLE pcachetest AS SELECT * FROM int8_tbl;
--- create and use a cached plan
-PREPARE prepstmt AS SELECT * FROM pcachetest;
-EXECUTE prepstmt;
- q1 | q2
-------------------+-------------------
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 123
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(5 rows)
-
--- and one with parameters
-PREPARE prepstmt2(bigint) AS SELECT * FROM pcachetest WHERE q1 = $1;
-EXECUTE prepstmt2(123);
- q1 | q2
------+------------------
- 123 | 456
- 123 | 4567890123456789
-(2 rows)
-
--- invalidate the plans and see what happens
-DROP TABLE pcachetest;
-EXECUTE prepstmt;
-ERROR: relation "pcachetest" does not exist
-EXECUTE prepstmt2(123);
-ERROR: relation "pcachetest" does not exist
--- recreate the temp table (this demonstrates that the raw plan is
--- purely textual and doesn't depend on OIDs, for instance)
-CREATE TEMP TABLE pcachetest AS SELECT * FROM int8_tbl ORDER BY 2;
-EXECUTE prepstmt;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | -4567890123456789
- 4567890123456789 | 123
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 4567890123456789
-(5 rows)
-
-EXECUTE prepstmt2(123);
- q1 | q2
------+------------------
- 123 | 456
- 123 | 4567890123456789
-(2 rows)
-
--- prepared statements should prevent change in output tupdesc,
--- since clients probably aren't expecting that to change on the fly
-ALTER TABLE pcachetest ADD COLUMN q3 bigint;
-EXECUTE prepstmt;
-ERROR: cached plan must not change result type
-EXECUTE prepstmt2(123);
-ERROR: cached plan must not change result type
--- but we're nice guys and will let you undo your mistake
-ALTER TABLE pcachetest DROP COLUMN q3;
-EXECUTE prepstmt;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | -4567890123456789
- 4567890123456789 | 123
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 4567890123456789
-(5 rows)
-
-EXECUTE prepstmt2(123);
- q1 | q2
------+------------------
- 123 | 456
- 123 | 4567890123456789
-(2 rows)
-
--- Try it with a view, which isn't directly used in the resulting plan
--- but should trigger invalidation anyway
-CREATE TEMP VIEW pcacheview AS
- SELECT * FROM pcachetest;
-PREPARE vprep AS SELECT * FROM pcacheview;
-EXECUTE vprep;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | -4567890123456789
- 4567890123456789 | 123
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 4567890123456789
-(5 rows)
-
-CREATE OR REPLACE TEMP VIEW pcacheview AS
- SELECT q1, q2/2 AS q2 FROM pcachetest;
-EXECUTE vprep;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | -2283945061728394
- 4567890123456789 | 61
- 123 | 228
- 123 | 2283945061728394
- 4567890123456789 | 2283945061728394
-(5 rows)
-
--- Check basic SPI plan invalidation
-create function cache_test(int) returns int as $$
-declare total int;
-begin
- create temp table t1(f1 int);
- insert into t1 values($1);
- insert into t1 values(11);
- insert into t1 values(12);
- insert into t1 values(13);
- select sum(f1) into total from t1;
- drop table t1;
- return total;
-end
-$$ language plpgsql;
-select cache_test(1);
- cache_test
-------------
- 37
-(1 row)
-
-select cache_test(2);
- cache_test
-------------
- 38
-(1 row)
-
-select cache_test(3);
- cache_test
-------------
- 39
-(1 row)
-
--- Check invalidation of plpgsql "simple expression"
-create temp view v1 as
- select 2+2 as f1;
-create function cache_test_2() returns int as $$
-begin
- return f1 from v1;
-end$$ language plpgsql;
-select cache_test_2();
- cache_test_2
---------------
- 4
-(1 row)
-
-create or replace temp view v1 as
- select 2+2+4 as f1;
-select cache_test_2();
- cache_test_2
---------------
- 8
-(1 row)
-
-create or replace temp view v1 as
- select 2+2+4+(select max(unique1) from tenk1) as f1;
-select cache_test_2();
- cache_test_2
---------------
- 10007
-(1 row)
-
---- Check that change of search_path is honored when re-using cached plan
-create schema s1
- create table abc (f1 int);
-create schema s2
- create table abc (f1 int);
-insert into s1.abc values(123);
-insert into s2.abc values(456);
-set search_path = s1;
-prepare p1 as select f1 from abc;
-execute p1;
- f1
------
- 123
-(1 row)
-
-set search_path = s2;
-select f1 from abc;
- f1
------
- 456
-(1 row)
-
-execute p1;
- f1
------
- 456
-(1 row)
-
-alter table s1.abc add column f2 float8; -- force replan
-execute p1;
- f1
------
- 456
-(1 row)
-
-drop schema s1 cascade;
-NOTICE: drop cascades to table s1.abc
-drop schema s2 cascade;
-NOTICE: drop cascades to table abc
-reset search_path;
--- Check that invalidation deals with regclass constants
-create temp sequence seq;
-prepare p2 as select nextval('seq');
-execute p2;
- nextval
----------
- 1
-(1 row)
-
-drop sequence seq;
-create temp sequence seq;
-execute p2;
- nextval
----------
- 1
-(1 row)
-
--- Check DDL via SPI, immediately followed by SPI plan re-use
--- (bug in original coding)
-create function cachebug() returns void as $$
-declare r int;
-begin
- drop table if exists temptable cascade;
- create temp table temptable as select * from generate_series(1,3) as f1;
- create temp view vv as select * from temptable;
- for r in select * from vv loop
- raise notice '%', r;
- end loop;
-end$$ language plpgsql;
-select cachebug();
-NOTICE: table "temptable" does not exist, skipping
-NOTICE: 1
-NOTICE: 2
-NOTICE: 3
- cachebug
-----------
-
-(1 row)
-
-select cachebug();
-NOTICE: drop cascades to view vv
-NOTICE: 1
-NOTICE: 2
-NOTICE: 3
- cachebug
-----------
-
-(1 row)
-
--- Check that addition or removal of any partition is correctly dealt with by
--- default partition table when it is being used in prepared statement.
-create table pc_list_parted (a int) partition by list(a);
-create table pc_list_part_null partition of pc_list_parted for values in (null);
-create table pc_list_part_1 partition of pc_list_parted for values in (1);
-create table pc_list_part_def partition of pc_list_parted default;
-prepare pstmt_def_insert (int) as insert into pc_list_part_def values($1);
--- should fail
-execute pstmt_def_insert(null);
-ERROR: new row for relation "pc_list_part_def" violates partition constraint
-DETAIL: Failing row contains (null).
-execute pstmt_def_insert(1);
-ERROR: new row for relation "pc_list_part_def" violates partition constraint
-DETAIL: Failing row contains (1).
-create table pc_list_part_2 partition of pc_list_parted for values in (2);
-execute pstmt_def_insert(2);
-ERROR: new row for relation "pc_list_part_def" violates partition constraint
-DETAIL: Failing row contains (2).
-alter table pc_list_parted detach partition pc_list_part_null;
--- should be ok
-execute pstmt_def_insert(null);
-drop table pc_list_part_1;
--- should be ok
-execute pstmt_def_insert(1);
-drop table pc_list_parted, pc_list_part_null;
-deallocate pstmt_def_insert;
--- Test plan_cache_mode
-create table test_mode (a int);
-insert into test_mode select 1 from generate_series(1,1000) union all select 2;
-create index on test_mode (a);
-analyze test_mode;
-prepare test_mode_pp (int) as select count(*) from test_mode where a = $1;
-select name, generic_plans, custom_plans from pg_prepared_statements
- where name = 'test_mode_pp';
- name | generic_plans | custom_plans
---------------+---------------+--------------
- test_mode_pp | 0 | 0
-(1 row)
-
--- up to 5 executions, custom plan is used
-set plan_cache_mode to auto;
-explain (costs off) execute test_mode_pp(2);
- QUERY PLAN
-----------------------------------------------------------
- Aggregate
- -> Index Only Scan using test_mode_a_idx on test_mode
- Index Cond: (a = 2)
-(3 rows)
-
-select name, generic_plans, custom_plans from pg_prepared_statements
- where name = 'test_mode_pp';
- name | generic_plans | custom_plans
---------------+---------------+--------------
- test_mode_pp | 0 | 1
-(1 row)
-
--- force generic plan
-set plan_cache_mode to force_generic_plan;
-explain (costs off) execute test_mode_pp(2);
- QUERY PLAN
------------------------------
- Aggregate
- -> Seq Scan on test_mode
- Filter: (a = $1)
-(3 rows)
-
-select name, generic_plans, custom_plans from pg_prepared_statements
- where name = 'test_mode_pp';
- name | generic_plans | custom_plans
---------------+---------------+--------------
- test_mode_pp | 1 | 1
-(1 row)
-
--- get to generic plan by 5 executions
-set plan_cache_mode to auto;
-execute test_mode_pp(1); -- 1x
- count
--------
- 1000
-(1 row)
-
-execute test_mode_pp(1); -- 2x
- count
--------
- 1000
-(1 row)
-
-execute test_mode_pp(1); -- 3x
- count
--------
- 1000
-(1 row)
-
-execute test_mode_pp(1); -- 4x
- count
--------
- 1000
-(1 row)
-
-select name, generic_plans, custom_plans from pg_prepared_statements
- where name = 'test_mode_pp';
- name | generic_plans | custom_plans
---------------+---------------+--------------
- test_mode_pp | 1 | 5
-(1 row)
-
-execute test_mode_pp(1); -- 5x
- count
--------
- 1000
-(1 row)
-
-select name, generic_plans, custom_plans from pg_prepared_statements
- where name = 'test_mode_pp';
- name | generic_plans | custom_plans
---------------+---------------+--------------
- test_mode_pp | 2 | 5
-(1 row)
-
--- we should now get a really bad plan
-explain (costs off) execute test_mode_pp(2);
- QUERY PLAN
------------------------------
- Aggregate
- -> Seq Scan on test_mode
- Filter: (a = $1)
-(3 rows)
-
--- but we can force a custom plan
-set plan_cache_mode to force_custom_plan;
-explain (costs off) execute test_mode_pp(2);
- QUERY PLAN
-----------------------------------------------------------
- Aggregate
- -> Index Only Scan using test_mode_a_idx on test_mode
- Index Cond: (a = 2)
-(3 rows)
-
-select name, generic_plans, custom_plans from pg_prepared_statements
- where name = 'test_mode_pp';
- name | generic_plans | custom_plans
---------------+---------------+--------------
- test_mode_pp | 3 | 6
-(1 row)
-
-drop table test_mode;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/limit.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/limit.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/limit.out 2024-11-15 02:50:52.462095130 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/limit.out 2024-11-15 02:59:18.193116981 +0000
@@ -1,698 +1,2 @@
---
--- LIMIT
--- Check the LIMIT/OFFSET feature of SELECT
---
-SELECT ''::text AS two, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 50
- ORDER BY unique1 LIMIT 2;
- two | unique1 | unique2 | stringu1
------+---------+---------+----------
- | 51 | 76 | ZBAAAA
- | 52 | 985 | ACAAAA
-(2 rows)
-
-SELECT ''::text AS five, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 60
- ORDER BY unique1 LIMIT 5;
- five | unique1 | unique2 | stringu1
-------+---------+---------+----------
- | 61 | 560 | JCAAAA
- | 62 | 633 | KCAAAA
- | 63 | 296 | LCAAAA
- | 64 | 479 | MCAAAA
- | 65 | 64 | NCAAAA
-(5 rows)
-
-SELECT ''::text AS two, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 60 AND unique1 < 63
- ORDER BY unique1 LIMIT 5;
- two | unique1 | unique2 | stringu1
------+---------+---------+----------
- | 61 | 560 | JCAAAA
- | 62 | 633 | KCAAAA
-(2 rows)
-
-SELECT ''::text AS three, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 100
- ORDER BY unique1 LIMIT 3 OFFSET 20;
- three | unique1 | unique2 | stringu1
--------+---------+---------+----------
- | 121 | 700 | REAAAA
- | 122 | 519 | SEAAAA
- | 123 | 777 | TEAAAA
-(3 rows)
-
-SELECT ''::text AS zero, unique1, unique2, stringu1
- FROM onek WHERE unique1 < 50
- ORDER BY unique1 DESC LIMIT 8 OFFSET 99;
- zero | unique1 | unique2 | stringu1
-------+---------+---------+----------
-(0 rows)
-
-SELECT ''::text AS eleven, unique1, unique2, stringu1
- FROM onek WHERE unique1 < 50
- ORDER BY unique1 DESC LIMIT 20 OFFSET 39;
- eleven | unique1 | unique2 | stringu1
---------+---------+---------+----------
- | 10 | 520 | KAAAAA
- | 9 | 49 | JAAAAA
- | 8 | 653 | IAAAAA
- | 7 | 647 | HAAAAA
- | 6 | 978 | GAAAAA
- | 5 | 541 | FAAAAA
- | 4 | 833 | EAAAAA
- | 3 | 431 | DAAAAA
- | 2 | 326 | CAAAAA
- | 1 | 214 | BAAAAA
- | 0 | 998 | AAAAAA
-(11 rows)
-
-SELECT ''::text AS ten, unique1, unique2, stringu1
- FROM onek
- ORDER BY unique1 OFFSET 990;
- ten | unique1 | unique2 | stringu1
------+---------+---------+----------
- | 990 | 369 | CMAAAA
- | 991 | 426 | DMAAAA
- | 992 | 363 | EMAAAA
- | 993 | 661 | FMAAAA
- | 994 | 695 | GMAAAA
- | 995 | 144 | HMAAAA
- | 996 | 258 | IMAAAA
- | 997 | 21 | JMAAAA
- | 998 | 549 | KMAAAA
- | 999 | 152 | LMAAAA
-(10 rows)
-
-SELECT ''::text AS five, unique1, unique2, stringu1
- FROM onek
- ORDER BY unique1 OFFSET 990 LIMIT 5;
- five | unique1 | unique2 | stringu1
-------+---------+---------+----------
- | 990 | 369 | CMAAAA
- | 991 | 426 | DMAAAA
- | 992 | 363 | EMAAAA
- | 993 | 661 | FMAAAA
- | 994 | 695 | GMAAAA
-(5 rows)
-
-SELECT ''::text AS five, unique1, unique2, stringu1
- FROM onek
- ORDER BY unique1 LIMIT 5 OFFSET 900;
- five | unique1 | unique2 | stringu1
-------+---------+---------+----------
- | 900 | 913 | QIAAAA
- | 901 | 931 | RIAAAA
- | 902 | 702 | SIAAAA
- | 903 | 641 | TIAAAA
- | 904 | 793 | UIAAAA
-(5 rows)
-
--- Test null limit and offset. The planner would discard a simple null
--- constant, so to ensure executor is exercised, do this:
-select * from int8_tbl limit (case when random() < 0.5 then null::bigint end);
- q1 | q2
-------------------+-------------------
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 123
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(5 rows)
-
-select * from int8_tbl offset (case when random() < 0.5 then null::bigint end);
- q1 | q2
-------------------+-------------------
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 123
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(5 rows)
-
--- Test assorted cases involving backwards fetch from a LIMIT plan node
-begin;
-declare c1 cursor for select * from int8_tbl limit 10;
-fetch all in c1;
- q1 | q2
-------------------+-------------------
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 123
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(5 rows)
-
-fetch 1 in c1;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c1;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | -4567890123456789
-(1 row)
-
-fetch backward all in c1;
- q1 | q2
-------------------+------------------
- 4567890123456789 | 4567890123456789
- 4567890123456789 | 123
- 123 | 4567890123456789
- 123 | 456
-(4 rows)
-
-fetch backward 1 in c1;
- q1 | q2
-----+----
-(0 rows)
-
-fetch all in c1;
- q1 | q2
-------------------+-------------------
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 123
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(5 rows)
-
-declare c2 cursor for select * from int8_tbl limit 3;
-fetch all in c2;
- q1 | q2
-------------------+------------------
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 123
-(3 rows)
-
-fetch 1 in c2;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c2;
- q1 | q2
-------------------+-----
- 4567890123456789 | 123
-(1 row)
-
-fetch backward all in c2;
- q1 | q2
------+------------------
- 123 | 4567890123456789
- 123 | 456
-(2 rows)
-
-fetch backward 1 in c2;
- q1 | q2
-----+----
-(0 rows)
-
-fetch all in c2;
- q1 | q2
-------------------+------------------
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 123
-(3 rows)
-
-declare c3 cursor for select * from int8_tbl offset 3;
-fetch all in c3;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(2 rows)
-
-fetch 1 in c3;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c3;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | -4567890123456789
-(1 row)
-
-fetch backward all in c3;
- q1 | q2
-------------------+------------------
- 4567890123456789 | 4567890123456789
-(1 row)
-
-fetch backward 1 in c3;
- q1 | q2
-----+----
-(0 rows)
-
-fetch all in c3;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(2 rows)
-
-declare c4 cursor for select * from int8_tbl offset 10;
-fetch all in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch 1 in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward all in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c4;
- q1 | q2
-----+----
-(0 rows)
-
-fetch all in c4;
- q1 | q2
-----+----
-(0 rows)
-
-declare c5 cursor for select * from int8_tbl order by q1 fetch first 2 rows with ties;
-fetch all in c5;
- q1 | q2
------+------------------
- 123 | 456
- 123 | 4567890123456789
-(2 rows)
-
-fetch 1 in c5;
- q1 | q2
-----+----
-(0 rows)
-
-fetch backward 1 in c5;
- q1 | q2
------+------------------
- 123 | 4567890123456789
-(1 row)
-
-fetch backward 1 in c5;
- q1 | q2
------+-----
- 123 | 456
-(1 row)
-
-fetch all in c5;
- q1 | q2
------+------------------
- 123 | 4567890123456789
-(1 row)
-
-fetch backward all in c5;
- q1 | q2
------+------------------
- 123 | 4567890123456789
- 123 | 456
-(2 rows)
-
-fetch all in c5;
- q1 | q2
------+------------------
- 123 | 456
- 123 | 4567890123456789
-(2 rows)
-
-fetch backward all in c5;
- q1 | q2
------+------------------
- 123 | 4567890123456789
- 123 | 456
-(2 rows)
-
-rollback;
--- Stress test for variable LIMIT in conjunction with bounded-heap sorting
-SELECT
- (SELECT n
- FROM (VALUES (1)) AS x,
- (SELECT n FROM generate_series(1,10) AS n
- ORDER BY n LIMIT 1 OFFSET s-1) AS y) AS z
- FROM generate_series(1,10) AS s;
- z
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
-(10 rows)
-
---
--- Test behavior of volatile and set-returning functions in conjunction
--- with ORDER BY and LIMIT.
---
-create temp sequence testseq;
-explain (verbose, costs off)
-select unique1, unique2, nextval('testseq')
- from tenk1 order by unique2 limit 10;
- QUERY PLAN
-----------------------------------------------------------------
- Limit
- Output: unique1, unique2, (nextval('testseq'::regclass))
- -> Index Scan using tenk1_unique2 on public.tenk1
- Output: unique1, unique2, nextval('testseq'::regclass)
-(4 rows)
-
-select unique1, unique2, nextval('testseq')
- from tenk1 order by unique2 limit 10;
- unique1 | unique2 | nextval
----------+---------+---------
- 8800 | 0 | 1
- 1891 | 1 | 2
- 3420 | 2 | 3
- 9850 | 3 | 4
- 7164 | 4 | 5
- 8009 | 5 | 6
- 5057 | 6 | 7
- 6701 | 7 | 8
- 4321 | 8 | 9
- 3043 | 9 | 10
-(10 rows)
-
-select currval('testseq');
- currval
----------
- 10
-(1 row)
-
-explain (verbose, costs off)
-select unique1, unique2, nextval('testseq')
- from tenk1 order by tenthous limit 10;
- QUERY PLAN
---------------------------------------------------------------------------
- Limit
- Output: unique1, unique2, (nextval('testseq'::regclass)), tenthous
- -> Result
- Output: unique1, unique2, nextval('testseq'::regclass), tenthous
- -> Sort
- Output: unique1, unique2, tenthous
- Sort Key: tenk1.tenthous
- -> Seq Scan on public.tenk1
- Output: unique1, unique2, tenthous
-(9 rows)
-
-select unique1, unique2, nextval('testseq')
- from tenk1 order by tenthous limit 10;
- unique1 | unique2 | nextval
----------+---------+---------
- 0 | 9998 | 11
- 1 | 2838 | 12
- 2 | 2716 | 13
- 3 | 5679 | 14
- 4 | 1621 | 15
- 5 | 5557 | 16
- 6 | 2855 | 17
- 7 | 8518 | 18
- 8 | 5435 | 19
- 9 | 4463 | 20
-(10 rows)
-
-select currval('testseq');
- currval
----------
- 20
-(1 row)
-
-explain (verbose, costs off)
-select unique1, unique2, generate_series(1,10)
- from tenk1 order by unique2 limit 7;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------
- Limit
- Output: unique1, unique2, (generate_series(1, 10))
- -> ProjectSet
- Output: unique1, unique2, generate_series(1, 10)
- -> Index Scan using tenk1_unique2 on public.tenk1
- Output: unique1, unique2, two, four, ten, twenty, hundred, thousand, twothousand, fivethous, tenthous, odd, even, stringu1, stringu2, string4
-(6 rows)
-
-select unique1, unique2, generate_series(1,10)
- from tenk1 order by unique2 limit 7;
- unique1 | unique2 | generate_series
----------+---------+-----------------
- 8800 | 0 | 1
- 8800 | 0 | 2
- 8800 | 0 | 3
- 8800 | 0 | 4
- 8800 | 0 | 5
- 8800 | 0 | 6
- 8800 | 0 | 7
-(7 rows)
-
-explain (verbose, costs off)
-select unique1, unique2, generate_series(1,10)
- from tenk1 order by tenthous limit 7;
- QUERY PLAN
---------------------------------------------------------------------
- Limit
- Output: unique1, unique2, (generate_series(1, 10)), tenthous
- -> ProjectSet
- Output: unique1, unique2, generate_series(1, 10), tenthous
- -> Sort
- Output: unique1, unique2, tenthous
- Sort Key: tenk1.tenthous
- -> Seq Scan on public.tenk1
- Output: unique1, unique2, tenthous
-(9 rows)
-
-select unique1, unique2, generate_series(1,10)
- from tenk1 order by tenthous limit 7;
- unique1 | unique2 | generate_series
----------+---------+-----------------
- 0 | 9998 | 1
- 0 | 9998 | 2
- 0 | 9998 | 3
- 0 | 9998 | 4
- 0 | 9998 | 5
- 0 | 9998 | 6
- 0 | 9998 | 7
-(7 rows)
-
--- use of random() is to keep planner from folding the expressions together
-explain (verbose, costs off)
-select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------
- ProjectSet
- Output: generate_series(0, 2), generate_series(((random() * '0.1'::double precision))::integer, 2)
- -> Result
-(3 rows)
-
-select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2;
- s1 | s2
-----+----
- 0 | 0
- 1 | 1
- 2 | 2
-(3 rows)
-
-explain (verbose, costs off)
-select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2
-order by s2 desc;
- QUERY PLAN
-------------------------------------------------------------------------------------------------------------
- Sort
- Output: (generate_series(0, 2)), (generate_series(((random() * '0.1'::double precision))::integer, 2))
- Sort Key: (generate_series(((random() * '0.1'::double precision))::integer, 2)) DESC
- -> ProjectSet
- Output: generate_series(0, 2), generate_series(((random() * '0.1'::double precision))::integer, 2)
- -> Result
-(6 rows)
-
-select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2
-order by s2 desc;
- s1 | s2
-----+----
- 2 | 2
- 1 | 1
- 0 | 0
-(3 rows)
-
--- test for failure to set all aggregates' aggtranstype
-explain (verbose, costs off)
-select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
- from tenk1 group by thousand order by thousand limit 3;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------
- Limit
- Output: (sum(tenthous)), (((sum(tenthous))::double precision + (random() * '0'::double precision))), thousand
- -> GroupAggregate
- Output: sum(tenthous), ((sum(tenthous))::double precision + (random() * '0'::double precision)), thousand
- Group Key: tenk1.thousand
- -> Index Only Scan using tenk1_thous_tenthous on public.tenk1
- Output: thousand, tenthous
-(7 rows)
-
-select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
- from tenk1 group by thousand order by thousand limit 3;
- s1 | s2
--------+-------
- 45000 | 45000
- 45010 | 45010
- 45020 | 45020
-(3 rows)
-
---
--- FETCH FIRST
--- Check the WITH TIES clause
---
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST 2 ROW WITH TIES;
- thousand
-----------
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
-(10 rows)
-
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST ROWS WITH TIES;
- thousand
-----------
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
-(10 rows)
-
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST 1 ROW WITH TIES;
- thousand
-----------
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
-(10 rows)
-
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST 2 ROW ONLY;
- thousand
-----------
- 0
- 0
-(2 rows)
-
--- SKIP LOCKED and WITH TIES are incompatible
-SELECT thousand
- FROM onek WHERE thousand < 5
- ORDER BY thousand FETCH FIRST 1 ROW WITH TIES FOR UPDATE SKIP LOCKED;
-ERROR: SKIP LOCKED and WITH TIES options cannot be used together
-LINE 3: ORDER BY thousand FETCH FIRST 1 ROW WITH TIES FOR UPDATE S...
- ^
--- should fail
-SELECT ''::text AS two, unique1, unique2, stringu1
- FROM onek WHERE unique1 > 50
- FETCH FIRST 2 ROW WITH TIES;
-ERROR: WITH TIES cannot be specified without ORDER BY clause
-LINE 3: FETCH FIRST 2 ROW WITH TIES;
- ^
--- test ruleutils
-CREATE VIEW limit_thousand_v_1 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST 5 ROWS WITH TIES OFFSET 10;
-\d+ limit_thousand_v_1
- View "public.limit_thousand_v_1"
- Column | Type | Collation | Nullable | Default | Storage | Description
-----------+---------+-----------+----------+---------+---------+-------------
- thousand | integer | | | | plain |
-View definition:
- SELECT thousand
- FROM onek
- WHERE thousand < 995
- ORDER BY thousand
- OFFSET 10
- FETCH FIRST 5 ROWS WITH TIES;
-
-CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand OFFSET 10 FETCH FIRST 5 ROWS ONLY;
-\d+ limit_thousand_v_2
- View "public.limit_thousand_v_2"
- Column | Type | Collation | Nullable | Default | Storage | Description
-----------+---------+-----------+----------+---------+---------+-------------
- thousand | integer | | | | plain |
-View definition:
- SELECT thousand
- FROM onek
- WHERE thousand < 995
- ORDER BY thousand
- OFFSET 10
- LIMIT 5;
-
-CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST NULL ROWS WITH TIES; -- fails
-ERROR: row count cannot be null in FETCH FIRST ... WITH TIES clause
-CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES;
-\d+ limit_thousand_v_3
- View "public.limit_thousand_v_3"
- Column | Type | Collation | Nullable | Default | Storage | Description
-----------+---------+-----------+----------+---------+---------+-------------
- thousand | integer | | | | plain |
-View definition:
- SELECT thousand
- FROM onek
- WHERE thousand < 995
- ORDER BY thousand
- FETCH FIRST (NULL::integer + 1) ROWS WITH TIES;
-
-CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
-\d+ limit_thousand_v_4
- View "public.limit_thousand_v_4"
- Column | Type | Collation | Nullable | Default | Storage | Description
-----------+---------+-----------+----------+---------+---------+-------------
- thousand | integer | | | | plain |
-View definition:
- SELECT thousand
- FROM onek
- WHERE thousand < 995
- ORDER BY thousand
- LIMIT ALL;
-
--- leave these views
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/plpgsql.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/plpgsql.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/plpgsql.out 2024-11-15 02:50:52.482062215 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/plpgsql.out 2024-11-15 02:59:18.209117002 +0000
@@ -1,5854 +1,2 @@
---
--- PLPGSQL
---
--- Scenario:
---
--- A building with a modern TP cable installation where any
--- of the wall connectors can be used to plug in phones,
--- ethernet interfaces or local office hubs. The backside
--- of the wall connectors is wired to one of several patch-
--- fields in the building.
---
--- In the patchfields, there are hubs and all the slots
--- representing the wall connectors. In addition there are
--- slots that can represent a phone line from the central
--- phone system.
---
--- Triggers ensure consistency of the patching information.
---
--- Functions are used to build up powerful views that let
--- you look behind the wall when looking at a patchfield
--- or into a room.
---
-create table Room (
- roomno char(8),
- comment text
-);
-create unique index Room_rno on Room using btree (roomno bpchar_ops);
-create table WSlot (
- slotname char(20),
- roomno char(8),
- slotlink char(20),
- backlink char(20)
-);
-create unique index WSlot_name on WSlot using btree (slotname bpchar_ops);
-create table PField (
- name text,
- comment text
-);
-create unique index PField_name on PField using btree (name text_ops);
-create table PSlot (
- slotname char(20),
- pfname text,
- slotlink char(20),
- backlink char(20)
-);
-create unique index PSlot_name on PSlot using btree (slotname bpchar_ops);
-create table PLine (
- slotname char(20),
- phonenumber char(20),
- comment text,
- backlink char(20)
-);
-create unique index PLine_name on PLine using btree (slotname bpchar_ops);
-create table Hub (
- name char(14),
- comment text,
- nslots integer
-);
-create unique index Hub_name on Hub using btree (name bpchar_ops);
-create table HSlot (
- slotname char(20),
- hubname char(14),
- slotno integer,
- slotlink char(20)
-);
-create unique index HSlot_name on HSlot using btree (slotname bpchar_ops);
-create index HSlot_hubname on HSlot using btree (hubname bpchar_ops);
-create table System (
- name text,
- comment text
-);
-create unique index System_name on System using btree (name text_ops);
-create table IFace (
- slotname char(20),
- sysname text,
- ifname text,
- slotlink char(20)
-);
-create unique index IFace_name on IFace using btree (slotname bpchar_ops);
-create table PHone (
- slotname char(20),
- comment text,
- slotlink char(20)
-);
-create unique index PHone_name on PHone using btree (slotname bpchar_ops);
--- ************************************************************
--- *
--- * Trigger procedures and functions for the patchfield
--- * test of PL/pgSQL
--- *
--- ************************************************************
--- ************************************************************
--- * AFTER UPDATE on Room
--- * - If room no changes let wall slots follow
--- ************************************************************
-create function tg_room_au() returns trigger as '
-begin
- if new.roomno != old.roomno then
- update WSlot set roomno = new.roomno where roomno = old.roomno;
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_room_au after update
- on Room for each row execute procedure tg_room_au();
--- ************************************************************
--- * AFTER DELETE on Room
--- * - delete wall slots in this room
--- ************************************************************
-create function tg_room_ad() returns trigger as '
-begin
- delete from WSlot where roomno = old.roomno;
- return old;
-end;
-' language plpgsql;
-create trigger tg_room_ad after delete
- on Room for each row execute procedure tg_room_ad();
--- ************************************************************
--- * BEFORE INSERT or UPDATE on WSlot
--- * - Check that room exists
--- ************************************************************
-create function tg_wslot_biu() returns trigger as $$
-begin
- if count(*) = 0 from Room where roomno = new.roomno then
- raise exception 'Room % does not exist', new.roomno;
- end if;
- return new;
-end;
-$$ language plpgsql;
-create trigger tg_wslot_biu before insert or update
- on WSlot for each row execute procedure tg_wslot_biu();
--- ************************************************************
--- * AFTER UPDATE on PField
--- * - Let PSlots of this field follow
--- ************************************************************
-create function tg_pfield_au() returns trigger as '
-begin
- if new.name != old.name then
- update PSlot set pfname = new.name where pfname = old.name;
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_pfield_au after update
- on PField for each row execute procedure tg_pfield_au();
--- ************************************************************
--- * AFTER DELETE on PField
--- * - Remove all slots of this patchfield
--- ************************************************************
-create function tg_pfield_ad() returns trigger as '
-begin
- delete from PSlot where pfname = old.name;
- return old;
-end;
-' language plpgsql;
-create trigger tg_pfield_ad after delete
- on PField for each row execute procedure tg_pfield_ad();
--- ************************************************************
--- * BEFORE INSERT or UPDATE on PSlot
--- * - Ensure that our patchfield does exist
--- ************************************************************
-create function tg_pslot_biu() returns trigger as $proc$
-declare
- pfrec record;
- ps alias for new;
-begin
- select into pfrec * from PField where name = ps.pfname;
- if not found then
- raise exception $$Patchfield "%" does not exist$$, ps.pfname;
- end if;
- return ps;
-end;
-$proc$ language plpgsql;
-create trigger tg_pslot_biu before insert or update
- on PSlot for each row execute procedure tg_pslot_biu();
--- ************************************************************
--- * AFTER UPDATE on System
--- * - If system name changes let interfaces follow
--- ************************************************************
-create function tg_system_au() returns trigger as '
-begin
- if new.name != old.name then
- update IFace set sysname = new.name where sysname = old.name;
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_system_au after update
- on System for each row execute procedure tg_system_au();
--- ************************************************************
--- * BEFORE INSERT or UPDATE on IFace
--- * - set the slotname to IF.sysname.ifname
--- ************************************************************
-create function tg_iface_biu() returns trigger as $$
-declare
- sname text;
- sysrec record;
-begin
- select into sysrec * from system where name = new.sysname;
- if not found then
- raise exception $q$system "%" does not exist$q$, new.sysname;
- end if;
- sname := 'IF.' || new.sysname;
- sname := sname || '.';
- sname := sname || new.ifname;
- if length(sname) > 20 then
- raise exception 'IFace slotname "%" too long (20 char max)', sname;
- end if;
- new.slotname := sname;
- return new;
-end;
-$$ language plpgsql;
-create trigger tg_iface_biu before insert or update
- on IFace for each row execute procedure tg_iface_biu();
--- ************************************************************
--- * AFTER INSERT or UPDATE or DELETE on Hub
--- * - insert/delete/rename slots as required
--- ************************************************************
-create function tg_hub_a() returns trigger as '
-declare
- hname text;
- dummy integer;
-begin
- if tg_op = ''INSERT'' then
- dummy := tg_hub_adjustslots(new.name, 0, new.nslots);
- return new;
- end if;
- if tg_op = ''UPDATE'' then
- if new.name != old.name then
- update HSlot set hubname = new.name where hubname = old.name;
- end if;
- dummy := tg_hub_adjustslots(new.name, old.nslots, new.nslots);
- return new;
- end if;
- if tg_op = ''DELETE'' then
- dummy := tg_hub_adjustslots(old.name, old.nslots, 0);
- return old;
- end if;
-end;
-' language plpgsql;
-create trigger tg_hub_a after insert or update or delete
- on Hub for each row execute procedure tg_hub_a();
--- ************************************************************
--- * Support function to add/remove slots of Hub
--- ************************************************************
-create function tg_hub_adjustslots(hname bpchar,
- oldnslots integer,
- newnslots integer)
-returns integer as '
-begin
- if newnslots = oldnslots then
- return 0;
- end if;
- if newnslots < oldnslots then
- delete from HSlot where hubname = hname and slotno > newnslots;
- return 0;
- end if;
- for i in oldnslots + 1 .. newnslots loop
- insert into HSlot (slotname, hubname, slotno, slotlink)
- values (''HS.dummy'', hname, i, '''');
- end loop;
- return 0;
-end
-' language plpgsql;
--- Test comments
-COMMENT ON FUNCTION tg_hub_adjustslots_wrong(bpchar, integer, integer) IS 'function with args';
-ERROR: function tg_hub_adjustslots_wrong(character, integer, integer) does not exist
-COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS 'function with args';
-COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS NULL;
--- ************************************************************
--- * BEFORE INSERT or UPDATE on HSlot
--- * - prevent from manual manipulation
--- * - set the slotname to HS.hubname.slotno
--- ************************************************************
-create function tg_hslot_biu() returns trigger as '
-declare
- sname text;
- xname HSlot.slotname%TYPE;
- hubrec record;
-begin
- select into hubrec * from Hub where name = new.hubname;
- if not found then
- raise exception ''no manual manipulation of HSlot'';
- end if;
- if new.slotno < 1 or new.slotno > hubrec.nslots then
- raise exception ''no manual manipulation of HSlot'';
- end if;
- if tg_op = ''UPDATE'' and new.hubname != old.hubname then
- if count(*) > 0 from Hub where name = old.hubname then
- raise exception ''no manual manipulation of HSlot'';
- end if;
- end if;
- sname := ''HS.'' || trim(new.hubname);
- sname := sname || ''.'';
- sname := sname || new.slotno::text;
- if length(sname) > 20 then
- raise exception ''HSlot slotname "%" too long (20 char max)'', sname;
- end if;
- new.slotname := sname;
- return new;
-end;
-' language plpgsql;
-create trigger tg_hslot_biu before insert or update
- on HSlot for each row execute procedure tg_hslot_biu();
--- ************************************************************
--- * BEFORE DELETE on HSlot
--- * - prevent from manual manipulation
--- ************************************************************
-create function tg_hslot_bd() returns trigger as '
-declare
- hubrec record;
-begin
- select into hubrec * from Hub where name = old.hubname;
- if not found then
- return old;
- end if;
- if old.slotno > hubrec.nslots then
- return old;
- end if;
- raise exception ''no manual manipulation of HSlot'';
-end;
-' language plpgsql;
-create trigger tg_hslot_bd before delete
- on HSlot for each row execute procedure tg_hslot_bd();
--- ************************************************************
--- * BEFORE INSERT on all slots
--- * - Check name prefix
--- ************************************************************
-create function tg_chkslotname() returns trigger as '
-begin
- if substr(new.slotname, 1, 2) != tg_argv[0] then
- raise exception ''slotname must begin with %'', tg_argv[0];
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_chkslotname before insert
- on PSlot for each row execute procedure tg_chkslotname('PS');
-create trigger tg_chkslotname before insert
- on WSlot for each row execute procedure tg_chkslotname('WS');
-create trigger tg_chkslotname before insert
- on PLine for each row execute procedure tg_chkslotname('PL');
-create trigger tg_chkslotname before insert
- on IFace for each row execute procedure tg_chkslotname('IF');
-create trigger tg_chkslotname before insert
- on PHone for each row execute procedure tg_chkslotname('PH');
--- ************************************************************
--- * BEFORE INSERT or UPDATE on all slots with slotlink
--- * - Set slotlink to empty string if NULL value given
--- ************************************************************
-create function tg_chkslotlink() returns trigger as '
-begin
- if new.slotlink isnull then
- new.slotlink := '''';
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_chkslotlink before insert or update
- on PSlot for each row execute procedure tg_chkslotlink();
-create trigger tg_chkslotlink before insert or update
- on WSlot for each row execute procedure tg_chkslotlink();
-create trigger tg_chkslotlink before insert or update
- on IFace for each row execute procedure tg_chkslotlink();
-create trigger tg_chkslotlink before insert or update
- on HSlot for each row execute procedure tg_chkslotlink();
-create trigger tg_chkslotlink before insert or update
- on PHone for each row execute procedure tg_chkslotlink();
--- ************************************************************
--- * BEFORE INSERT or UPDATE on all slots with backlink
--- * - Set backlink to empty string if NULL value given
--- ************************************************************
-create function tg_chkbacklink() returns trigger as '
-begin
- if new.backlink isnull then
- new.backlink := '''';
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_chkbacklink before insert or update
- on PSlot for each row execute procedure tg_chkbacklink();
-create trigger tg_chkbacklink before insert or update
- on WSlot for each row execute procedure tg_chkbacklink();
-create trigger tg_chkbacklink before insert or update
- on PLine for each row execute procedure tg_chkbacklink();
--- ************************************************************
--- * BEFORE UPDATE on PSlot
--- * - do delete/insert instead of update if name changes
--- ************************************************************
-create function tg_pslot_bu() returns trigger as '
-begin
- if new.slotname != old.slotname then
- delete from PSlot where slotname = old.slotname;
- insert into PSlot (
- slotname,
- pfname,
- slotlink,
- backlink
- ) values (
- new.slotname,
- new.pfname,
- new.slotlink,
- new.backlink
- );
- return null;
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_pslot_bu before update
- on PSlot for each row execute procedure tg_pslot_bu();
--- ************************************************************
--- * BEFORE UPDATE on WSlot
--- * - do delete/insert instead of update if name changes
--- ************************************************************
-create function tg_wslot_bu() returns trigger as '
-begin
- if new.slotname != old.slotname then
- delete from WSlot where slotname = old.slotname;
- insert into WSlot (
- slotname,
- roomno,
- slotlink,
- backlink
- ) values (
- new.slotname,
- new.roomno,
- new.slotlink,
- new.backlink
- );
- return null;
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_wslot_bu before update
- on WSlot for each row execute procedure tg_Wslot_bu();
--- ************************************************************
--- * BEFORE UPDATE on PLine
--- * - do delete/insert instead of update if name changes
--- ************************************************************
-create function tg_pline_bu() returns trigger as '
-begin
- if new.slotname != old.slotname then
- delete from PLine where slotname = old.slotname;
- insert into PLine (
- slotname,
- phonenumber,
- comment,
- backlink
- ) values (
- new.slotname,
- new.phonenumber,
- new.comment,
- new.backlink
- );
- return null;
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_pline_bu before update
- on PLine for each row execute procedure tg_pline_bu();
--- ************************************************************
--- * BEFORE UPDATE on IFace
--- * - do delete/insert instead of update if name changes
--- ************************************************************
-create function tg_iface_bu() returns trigger as '
-begin
- if new.slotname != old.slotname then
- delete from IFace where slotname = old.slotname;
- insert into IFace (
- slotname,
- sysname,
- ifname,
- slotlink
- ) values (
- new.slotname,
- new.sysname,
- new.ifname,
- new.slotlink
- );
- return null;
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_iface_bu before update
- on IFace for each row execute procedure tg_iface_bu();
--- ************************************************************
--- * BEFORE UPDATE on HSlot
--- * - do delete/insert instead of update if name changes
--- ************************************************************
-create function tg_hslot_bu() returns trigger as '
-begin
- if new.slotname != old.slotname or new.hubname != old.hubname then
- delete from HSlot where slotname = old.slotname;
- insert into HSlot (
- slotname,
- hubname,
- slotno,
- slotlink
- ) values (
- new.slotname,
- new.hubname,
- new.slotno,
- new.slotlink
- );
- return null;
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_hslot_bu before update
- on HSlot for each row execute procedure tg_hslot_bu();
--- ************************************************************
--- * BEFORE UPDATE on PHone
--- * - do delete/insert instead of update if name changes
--- ************************************************************
-create function tg_phone_bu() returns trigger as '
-begin
- if new.slotname != old.slotname then
- delete from PHone where slotname = old.slotname;
- insert into PHone (
- slotname,
- comment,
- slotlink
- ) values (
- new.slotname,
- new.comment,
- new.slotlink
- );
- return null;
- end if;
- return new;
-end;
-' language plpgsql;
-create trigger tg_phone_bu before update
- on PHone for each row execute procedure tg_phone_bu();
--- ************************************************************
--- * AFTER INSERT or UPDATE or DELETE on slot with backlink
--- * - Ensure that the opponent correctly points back to us
--- ************************************************************
-create function tg_backlink_a() returns trigger as '
-declare
- dummy integer;
-begin
- if tg_op = ''INSERT'' then
- if new.backlink != '''' then
- dummy := tg_backlink_set(new.backlink, new.slotname);
- end if;
- return new;
- end if;
- if tg_op = ''UPDATE'' then
- if new.backlink != old.backlink then
- if old.backlink != '''' then
- dummy := tg_backlink_unset(old.backlink, old.slotname);
- end if;
- if new.backlink != '''' then
- dummy := tg_backlink_set(new.backlink, new.slotname);
- end if;
- else
- if new.slotname != old.slotname and new.backlink != '''' then
- dummy := tg_slotlink_set(new.backlink, new.slotname);
- end if;
- end if;
- return new;
- end if;
- if tg_op = ''DELETE'' then
- if old.backlink != '''' then
- dummy := tg_backlink_unset(old.backlink, old.slotname);
- end if;
- return old;
- end if;
-end;
-' language plpgsql;
-create trigger tg_backlink_a after insert or update or delete
- on PSlot for each row execute procedure tg_backlink_a('PS');
-create trigger tg_backlink_a after insert or update or delete
- on WSlot for each row execute procedure tg_backlink_a('WS');
-create trigger tg_backlink_a after insert or update or delete
- on PLine for each row execute procedure tg_backlink_a('PL');
--- ************************************************************
--- * Support function to set the opponents backlink field
--- * if it does not already point to the requested slot
--- ************************************************************
-create function tg_backlink_set(myname bpchar, blname bpchar)
-returns integer as '
-declare
- mytype char(2);
- link char(4);
- rec record;
-begin
- mytype := substr(myname, 1, 2);
- link := mytype || substr(blname, 1, 2);
- if link = ''PLPL'' then
- raise exception
- ''backlink between two phone lines does not make sense'';
- end if;
- if link in (''PLWS'', ''WSPL'') then
- raise exception
- ''direct link of phone line to wall slot not permitted'';
- end if;
- if mytype = ''PS'' then
- select into rec * from PSlot where slotname = myname;
- if not found then
- raise exception ''% does not exist'', myname;
- end if;
- if rec.backlink != blname then
- update PSlot set backlink = blname where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''WS'' then
- select into rec * from WSlot where slotname = myname;
- if not found then
- raise exception ''% does not exist'', myname;
- end if;
- if rec.backlink != blname then
- update WSlot set backlink = blname where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''PL'' then
- select into rec * from PLine where slotname = myname;
- if not found then
- raise exception ''% does not exist'', myname;
- end if;
- if rec.backlink != blname then
- update PLine set backlink = blname where slotname = myname;
- end if;
- return 0;
- end if;
- raise exception ''illegal backlink beginning with %'', mytype;
-end;
-' language plpgsql;
--- ************************************************************
--- * Support function to clear out the backlink field if
--- * it still points to specific slot
--- ************************************************************
-create function tg_backlink_unset(bpchar, bpchar)
-returns integer as '
-declare
- myname alias for $1;
- blname alias for $2;
- mytype char(2);
- rec record;
-begin
- mytype := substr(myname, 1, 2);
- if mytype = ''PS'' then
- select into rec * from PSlot where slotname = myname;
- if not found then
- return 0;
- end if;
- if rec.backlink = blname then
- update PSlot set backlink = '''' where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''WS'' then
- select into rec * from WSlot where slotname = myname;
- if not found then
- return 0;
- end if;
- if rec.backlink = blname then
- update WSlot set backlink = '''' where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''PL'' then
- select into rec * from PLine where slotname = myname;
- if not found then
- return 0;
- end if;
- if rec.backlink = blname then
- update PLine set backlink = '''' where slotname = myname;
- end if;
- return 0;
- end if;
-end
-' language plpgsql;
--- ************************************************************
--- * AFTER INSERT or UPDATE or DELETE on slot with slotlink
--- * - Ensure that the opponent correctly points back to us
--- ************************************************************
-create function tg_slotlink_a() returns trigger as '
-declare
- dummy integer;
-begin
- if tg_op = ''INSERT'' then
- if new.slotlink != '''' then
- dummy := tg_slotlink_set(new.slotlink, new.slotname);
- end if;
- return new;
- end if;
- if tg_op = ''UPDATE'' then
- if new.slotlink != old.slotlink then
- if old.slotlink != '''' then
- dummy := tg_slotlink_unset(old.slotlink, old.slotname);
- end if;
- if new.slotlink != '''' then
- dummy := tg_slotlink_set(new.slotlink, new.slotname);
- end if;
- else
- if new.slotname != old.slotname and new.slotlink != '''' then
- dummy := tg_slotlink_set(new.slotlink, new.slotname);
- end if;
- end if;
- return new;
- end if;
- if tg_op = ''DELETE'' then
- if old.slotlink != '''' then
- dummy := tg_slotlink_unset(old.slotlink, old.slotname);
- end if;
- return old;
- end if;
-end;
-' language plpgsql;
-create trigger tg_slotlink_a after insert or update or delete
- on PSlot for each row execute procedure tg_slotlink_a('PS');
-create trigger tg_slotlink_a after insert or update or delete
- on WSlot for each row execute procedure tg_slotlink_a('WS');
-create trigger tg_slotlink_a after insert or update or delete
- on IFace for each row execute procedure tg_slotlink_a('IF');
-create trigger tg_slotlink_a after insert or update or delete
- on HSlot for each row execute procedure tg_slotlink_a('HS');
-create trigger tg_slotlink_a after insert or update or delete
- on PHone for each row execute procedure tg_slotlink_a('PH');
--- ************************************************************
--- * Support function to set the opponents slotlink field
--- * if it does not already point to the requested slot
--- ************************************************************
-create function tg_slotlink_set(bpchar, bpchar)
-returns integer as '
-declare
- myname alias for $1;
- blname alias for $2;
- mytype char(2);
- link char(4);
- rec record;
-begin
- mytype := substr(myname, 1, 2);
- link := mytype || substr(blname, 1, 2);
- if link = ''PHPH'' then
- raise exception
- ''slotlink between two phones does not make sense'';
- end if;
- if link in (''PHHS'', ''HSPH'') then
- raise exception
- ''link of phone to hub does not make sense'';
- end if;
- if link in (''PHIF'', ''IFPH'') then
- raise exception
- ''link of phone to hub does not make sense'';
- end if;
- if link in (''PSWS'', ''WSPS'') then
- raise exception
- ''slotlink from patchslot to wallslot not permitted'';
- end if;
- if mytype = ''PS'' then
- select into rec * from PSlot where slotname = myname;
- if not found then
- raise exception ''% does not exist'', myname;
- end if;
- if rec.slotlink != blname then
- update PSlot set slotlink = blname where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''WS'' then
- select into rec * from WSlot where slotname = myname;
- if not found then
- raise exception ''% does not exist'', myname;
- end if;
- if rec.slotlink != blname then
- update WSlot set slotlink = blname where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''IF'' then
- select into rec * from IFace where slotname = myname;
- if not found then
- raise exception ''% does not exist'', myname;
- end if;
- if rec.slotlink != blname then
- update IFace set slotlink = blname where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''HS'' then
- select into rec * from HSlot where slotname = myname;
- if not found then
- raise exception ''% does not exist'', myname;
- end if;
- if rec.slotlink != blname then
- update HSlot set slotlink = blname where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''PH'' then
- select into rec * from PHone where slotname = myname;
- if not found then
- raise exception ''% does not exist'', myname;
- end if;
- if rec.slotlink != blname then
- update PHone set slotlink = blname where slotname = myname;
- end if;
- return 0;
- end if;
- raise exception ''illegal slotlink beginning with %'', mytype;
-end;
-' language plpgsql;
--- ************************************************************
--- * Support function to clear out the slotlink field if
--- * it still points to specific slot
--- ************************************************************
-create function tg_slotlink_unset(bpchar, bpchar)
-returns integer as '
-declare
- myname alias for $1;
- blname alias for $2;
- mytype char(2);
- rec record;
-begin
- mytype := substr(myname, 1, 2);
- if mytype = ''PS'' then
- select into rec * from PSlot where slotname = myname;
- if not found then
- return 0;
- end if;
- if rec.slotlink = blname then
- update PSlot set slotlink = '''' where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''WS'' then
- select into rec * from WSlot where slotname = myname;
- if not found then
- return 0;
- end if;
- if rec.slotlink = blname then
- update WSlot set slotlink = '''' where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''IF'' then
- select into rec * from IFace where slotname = myname;
- if not found then
- return 0;
- end if;
- if rec.slotlink = blname then
- update IFace set slotlink = '''' where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''HS'' then
- select into rec * from HSlot where slotname = myname;
- if not found then
- return 0;
- end if;
- if rec.slotlink = blname then
- update HSlot set slotlink = '''' where slotname = myname;
- end if;
- return 0;
- end if;
- if mytype = ''PH'' then
- select into rec * from PHone where slotname = myname;
- if not found then
- return 0;
- end if;
- if rec.slotlink = blname then
- update PHone set slotlink = '''' where slotname = myname;
- end if;
- return 0;
- end if;
-end;
-' language plpgsql;
--- ************************************************************
--- * Describe the backside of a patchfield slot
--- ************************************************************
-create function pslot_backlink_view(bpchar)
-returns text as '
-<>
-declare
- rec record;
- bltype char(2);
- retval text;
-begin
- select into rec * from PSlot where slotname = $1;
- if not found then
- return '''';
- end if;
- if rec.backlink = '''' then
- return ''-'';
- end if;
- bltype := substr(rec.backlink, 1, 2);
- if bltype = ''PL'' then
- declare
- rec record;
- begin
- select into rec * from PLine where slotname = "outer".rec.backlink;
- retval := ''Phone line '' || trim(rec.phonenumber);
- if rec.comment != '''' then
- retval := retval || '' ('';
- retval := retval || rec.comment;
- retval := retval || '')'';
- end if;
- return retval;
- end;
- end if;
- if bltype = ''WS'' then
- select into rec * from WSlot where slotname = rec.backlink;
- retval := trim(rec.slotname) || '' in room '';
- retval := retval || trim(rec.roomno);
- retval := retval || '' -> '';
- return retval || wslot_slotlink_view(rec.slotname);
- end if;
- return rec.backlink;
-end;
-' language plpgsql;
--- ************************************************************
--- * Describe the front of a patchfield slot
--- ************************************************************
-create function pslot_slotlink_view(bpchar)
-returns text as '
-declare
- psrec record;
- sltype char(2);
- retval text;
-begin
- select into psrec * from PSlot where slotname = $1;
- if not found then
- return '''';
- end if;
- if psrec.slotlink = '''' then
- return ''-'';
- end if;
- sltype := substr(psrec.slotlink, 1, 2);
- if sltype = ''PS'' then
- retval := trim(psrec.slotlink) || '' -> '';
- return retval || pslot_backlink_view(psrec.slotlink);
- end if;
- if sltype = ''HS'' then
- retval := comment from Hub H, HSlot HS
- where HS.slotname = psrec.slotlink
- and H.name = HS.hubname;
- retval := retval || '' slot '';
- retval := retval || slotno::text from HSlot
- where slotname = psrec.slotlink;
- return retval;
- end if;
- return psrec.slotlink;
-end;
-' language plpgsql;
--- ************************************************************
--- * Describe the front of a wall connector slot
--- ************************************************************
-create function wslot_slotlink_view(bpchar)
-returns text as '
-declare
- rec record;
- sltype char(2);
- retval text;
-begin
- select into rec * from WSlot where slotname = $1;
- if not found then
- return '''';
- end if;
- if rec.slotlink = '''' then
- return ''-'';
- end if;
- sltype := substr(rec.slotlink, 1, 2);
- if sltype = ''PH'' then
- select into rec * from PHone where slotname = rec.slotlink;
- retval := ''Phone '' || trim(rec.slotname);
- if rec.comment != '''' then
- retval := retval || '' ('';
- retval := retval || rec.comment;
- retval := retval || '')'';
- end if;
- return retval;
- end if;
- if sltype = ''IF'' then
- declare
- syrow System%RowType;
- ifrow IFace%ROWTYPE;
- begin
- select into ifrow * from IFace where slotname = rec.slotlink;
- select into syrow * from System where name = ifrow.sysname;
- retval := syrow.name || '' IF '';
- retval := retval || ifrow.ifname;
- if syrow.comment != '''' then
- retval := retval || '' ('';
- retval := retval || syrow.comment;
- retval := retval || '')'';
- end if;
- return retval;
- end;
- end if;
- return rec.slotlink;
-end;
-' language plpgsql;
--- ************************************************************
--- * View of a patchfield describing backside and patches
--- ************************************************************
-create view Pfield_v1 as select PF.pfname, PF.slotname,
- pslot_backlink_view(PF.slotname) as backside,
- pslot_slotlink_view(PF.slotname) as patch
- from PSlot PF;
---
--- First we build the house - so we create the rooms
---
-insert into Room values ('001', 'Entrance');
-insert into Room values ('002', 'Office');
-insert into Room values ('003', 'Office');
-insert into Room values ('004', 'Technical');
-insert into Room values ('101', 'Office');
-insert into Room values ('102', 'Conference');
-insert into Room values ('103', 'Restroom');
-insert into Room values ('104', 'Technical');
-insert into Room values ('105', 'Office');
-insert into Room values ('106', 'Office');
---
--- Second we install the wall connectors
---
-insert into WSlot values ('WS.001.1a', '001', '', '');
-insert into WSlot values ('WS.001.1b', '001', '', '');
-insert into WSlot values ('WS.001.2a', '001', '', '');
-insert into WSlot values ('WS.001.2b', '001', '', '');
-insert into WSlot values ('WS.001.3a', '001', '', '');
-insert into WSlot values ('WS.001.3b', '001', '', '');
-insert into WSlot values ('WS.002.1a', '002', '', '');
-insert into WSlot values ('WS.002.1b', '002', '', '');
-insert into WSlot values ('WS.002.2a', '002', '', '');
-insert into WSlot values ('WS.002.2b', '002', '', '');
-insert into WSlot values ('WS.002.3a', '002', '', '');
-insert into WSlot values ('WS.002.3b', '002', '', '');
-insert into WSlot values ('WS.003.1a', '003', '', '');
-insert into WSlot values ('WS.003.1b', '003', '', '');
-insert into WSlot values ('WS.003.2a', '003', '', '');
-insert into WSlot values ('WS.003.2b', '003', '', '');
-insert into WSlot values ('WS.003.3a', '003', '', '');
-insert into WSlot values ('WS.003.3b', '003', '', '');
-insert into WSlot values ('WS.101.1a', '101', '', '');
-insert into WSlot values ('WS.101.1b', '101', '', '');
-insert into WSlot values ('WS.101.2a', '101', '', '');
-insert into WSlot values ('WS.101.2b', '101', '', '');
-insert into WSlot values ('WS.101.3a', '101', '', '');
-insert into WSlot values ('WS.101.3b', '101', '', '');
-insert into WSlot values ('WS.102.1a', '102', '', '');
-insert into WSlot values ('WS.102.1b', '102', '', '');
-insert into WSlot values ('WS.102.2a', '102', '', '');
-insert into WSlot values ('WS.102.2b', '102', '', '');
-insert into WSlot values ('WS.102.3a', '102', '', '');
-insert into WSlot values ('WS.102.3b', '102', '', '');
-insert into WSlot values ('WS.105.1a', '105', '', '');
-insert into WSlot values ('WS.105.1b', '105', '', '');
-insert into WSlot values ('WS.105.2a', '105', '', '');
-insert into WSlot values ('WS.105.2b', '105', '', '');
-insert into WSlot values ('WS.105.3a', '105', '', '');
-insert into WSlot values ('WS.105.3b', '105', '', '');
-insert into WSlot values ('WS.106.1a', '106', '', '');
-insert into WSlot values ('WS.106.1b', '106', '', '');
-insert into WSlot values ('WS.106.2a', '106', '', '');
-insert into WSlot values ('WS.106.2b', '106', '', '');
-insert into WSlot values ('WS.106.3a', '106', '', '');
-insert into WSlot values ('WS.106.3b', '106', '', '');
---
--- Now create the patch fields and their slots
---
-insert into PField values ('PF0_1', 'Wallslots basement');
---
--- The cables for these will be made later, so they are unconnected for now
---
-insert into PSlot values ('PS.base.a1', 'PF0_1', '', '');
-insert into PSlot values ('PS.base.a2', 'PF0_1', '', '');
-insert into PSlot values ('PS.base.a3', 'PF0_1', '', '');
-insert into PSlot values ('PS.base.a4', 'PF0_1', '', '');
-insert into PSlot values ('PS.base.a5', 'PF0_1', '', '');
-insert into PSlot values ('PS.base.a6', 'PF0_1', '', '');
---
--- These are already wired to the wall connectors
---
-insert into PSlot values ('PS.base.b1', 'PF0_1', '', 'WS.002.1a');
-insert into PSlot values ('PS.base.b2', 'PF0_1', '', 'WS.002.1b');
-insert into PSlot values ('PS.base.b3', 'PF0_1', '', 'WS.002.2a');
-insert into PSlot values ('PS.base.b4', 'PF0_1', '', 'WS.002.2b');
-insert into PSlot values ('PS.base.b5', 'PF0_1', '', 'WS.002.3a');
-insert into PSlot values ('PS.base.b6', 'PF0_1', '', 'WS.002.3b');
-insert into PSlot values ('PS.base.c1', 'PF0_1', '', 'WS.003.1a');
-insert into PSlot values ('PS.base.c2', 'PF0_1', '', 'WS.003.1b');
-insert into PSlot values ('PS.base.c3', 'PF0_1', '', 'WS.003.2a');
-insert into PSlot values ('PS.base.c4', 'PF0_1', '', 'WS.003.2b');
-insert into PSlot values ('PS.base.c5', 'PF0_1', '', 'WS.003.3a');
-insert into PSlot values ('PS.base.c6', 'PF0_1', '', 'WS.003.3b');
---
--- This patchfield will be renamed later into PF0_2 - so its
--- slots references in pfname should follow
---
-insert into PField values ('PF0_X', 'Phonelines basement');
-insert into PSlot values ('PS.base.ta1', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.ta2', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.ta3', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.ta4', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.ta5', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.ta6', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.tb1', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.tb2', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.tb3', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.tb4', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.tb5', 'PF0_X', '', '');
-insert into PSlot values ('PS.base.tb6', 'PF0_X', '', '');
-insert into PField values ('PF1_1', 'Wallslots first floor');
-insert into PSlot values ('PS.first.a1', 'PF1_1', '', 'WS.101.1a');
-insert into PSlot values ('PS.first.a2', 'PF1_1', '', 'WS.101.1b');
-insert into PSlot values ('PS.first.a3', 'PF1_1', '', 'WS.101.2a');
-insert into PSlot values ('PS.first.a4', 'PF1_1', '', 'WS.101.2b');
-insert into PSlot values ('PS.first.a5', 'PF1_1', '', 'WS.101.3a');
-insert into PSlot values ('PS.first.a6', 'PF1_1', '', 'WS.101.3b');
-insert into PSlot values ('PS.first.b1', 'PF1_1', '', 'WS.102.1a');
-insert into PSlot values ('PS.first.b2', 'PF1_1', '', 'WS.102.1b');
-insert into PSlot values ('PS.first.b3', 'PF1_1', '', 'WS.102.2a');
-insert into PSlot values ('PS.first.b4', 'PF1_1', '', 'WS.102.2b');
-insert into PSlot values ('PS.first.b5', 'PF1_1', '', 'WS.102.3a');
-insert into PSlot values ('PS.first.b6', 'PF1_1', '', 'WS.102.3b');
-insert into PSlot values ('PS.first.c1', 'PF1_1', '', 'WS.105.1a');
-insert into PSlot values ('PS.first.c2', 'PF1_1', '', 'WS.105.1b');
-insert into PSlot values ('PS.first.c3', 'PF1_1', '', 'WS.105.2a');
-insert into PSlot values ('PS.first.c4', 'PF1_1', '', 'WS.105.2b');
-insert into PSlot values ('PS.first.c5', 'PF1_1', '', 'WS.105.3a');
-insert into PSlot values ('PS.first.c6', 'PF1_1', '', 'WS.105.3b');
-insert into PSlot values ('PS.first.d1', 'PF1_1', '', 'WS.106.1a');
-insert into PSlot values ('PS.first.d2', 'PF1_1', '', 'WS.106.1b');
-insert into PSlot values ('PS.first.d3', 'PF1_1', '', 'WS.106.2a');
-insert into PSlot values ('PS.first.d4', 'PF1_1', '', 'WS.106.2b');
-insert into PSlot values ('PS.first.d5', 'PF1_1', '', 'WS.106.3a');
-insert into PSlot values ('PS.first.d6', 'PF1_1', '', 'WS.106.3b');
---
--- Now we wire the wall connectors 1a-2a in room 001 to the
--- patchfield. In the second update we make an error, and
--- correct it after
---
-update PSlot set backlink = 'WS.001.1a' where slotname = 'PS.base.a1';
-update PSlot set backlink = 'WS.001.1b' where slotname = 'PS.base.a3';
-select * from WSlot where roomno = '001' order by slotname;
- slotname | roomno | slotlink | backlink
-----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | | PS.base.a1
- WS.001.1b | 001 | | PS.base.a3
- WS.001.2a | 001 | |
- WS.001.2b | 001 | |
- WS.001.3a | 001 | |
- WS.001.3b | 001 | |
-(6 rows)
-
-select * from PSlot where slotname ~ 'PS.base.a' order by slotname;
- slotname | pfname | slotlink | backlink
-----------------------+--------+----------------------+----------------------
- PS.base.a1 | PF0_1 | | WS.001.1a
- PS.base.a2 | PF0_1 | |
- PS.base.a3 | PF0_1 | | WS.001.1b
- PS.base.a4 | PF0_1 | |
- PS.base.a5 | PF0_1 | |
- PS.base.a6 | PF0_1 | |
-(6 rows)
-
-update PSlot set backlink = 'WS.001.2a' where slotname = 'PS.base.a3';
-select * from WSlot where roomno = '001' order by slotname;
- slotname | roomno | slotlink | backlink
-----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | | PS.base.a1
- WS.001.1b | 001 | |
- WS.001.2a | 001 | | PS.base.a3
- WS.001.2b | 001 | |
- WS.001.3a | 001 | |
- WS.001.3b | 001 | |
-(6 rows)
-
-select * from PSlot where slotname ~ 'PS.base.a' order by slotname;
- slotname | pfname | slotlink | backlink
-----------------------+--------+----------------------+----------------------
- PS.base.a1 | PF0_1 | | WS.001.1a
- PS.base.a2 | PF0_1 | |
- PS.base.a3 | PF0_1 | | WS.001.2a
- PS.base.a4 | PF0_1 | |
- PS.base.a5 | PF0_1 | |
- PS.base.a6 | PF0_1 | |
-(6 rows)
-
-update PSlot set backlink = 'WS.001.1b' where slotname = 'PS.base.a2';
-select * from WSlot where roomno = '001' order by slotname;
- slotname | roomno | slotlink | backlink
-----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | | PS.base.a1
- WS.001.1b | 001 | | PS.base.a2
- WS.001.2a | 001 | | PS.base.a3
- WS.001.2b | 001 | |
- WS.001.3a | 001 | |
- WS.001.3b | 001 | |
-(6 rows)
-
-select * from PSlot where slotname ~ 'PS.base.a' order by slotname;
- slotname | pfname | slotlink | backlink
-----------------------+--------+----------------------+----------------------
- PS.base.a1 | PF0_1 | | WS.001.1a
- PS.base.a2 | PF0_1 | | WS.001.1b
- PS.base.a3 | PF0_1 | | WS.001.2a
- PS.base.a4 | PF0_1 | |
- PS.base.a5 | PF0_1 | |
- PS.base.a6 | PF0_1 | |
-(6 rows)
-
---
--- Same procedure for 2b-3b but this time updating the WSlot instead
--- of the PSlot. Due to the triggers the result is the same:
--- WSlot and corresponding PSlot point to each other.
---
-update WSlot set backlink = 'PS.base.a4' where slotname = 'WS.001.2b';
-update WSlot set backlink = 'PS.base.a6' where slotname = 'WS.001.3a';
-select * from WSlot where roomno = '001' order by slotname;
- slotname | roomno | slotlink | backlink
-----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | | PS.base.a1
- WS.001.1b | 001 | | PS.base.a2
- WS.001.2a | 001 | | PS.base.a3
- WS.001.2b | 001 | | PS.base.a4
- WS.001.3a | 001 | | PS.base.a6
- WS.001.3b | 001 | |
-(6 rows)
-
-select * from PSlot where slotname ~ 'PS.base.a' order by slotname;
- slotname | pfname | slotlink | backlink
-----------------------+--------+----------------------+----------------------
- PS.base.a1 | PF0_1 | | WS.001.1a
- PS.base.a2 | PF0_1 | | WS.001.1b
- PS.base.a3 | PF0_1 | | WS.001.2a
- PS.base.a4 | PF0_1 | | WS.001.2b
- PS.base.a5 | PF0_1 | |
- PS.base.a6 | PF0_1 | | WS.001.3a
-(6 rows)
-
-update WSlot set backlink = 'PS.base.a6' where slotname = 'WS.001.3b';
-select * from WSlot where roomno = '001' order by slotname;
- slotname | roomno | slotlink | backlink
-----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | | PS.base.a1
- WS.001.1b | 001 | | PS.base.a2
- WS.001.2a | 001 | | PS.base.a3
- WS.001.2b | 001 | | PS.base.a4
- WS.001.3a | 001 | |
- WS.001.3b | 001 | | PS.base.a6
-(6 rows)
-
-select * from PSlot where slotname ~ 'PS.base.a' order by slotname;
- slotname | pfname | slotlink | backlink
-----------------------+--------+----------------------+----------------------
- PS.base.a1 | PF0_1 | | WS.001.1a
- PS.base.a2 | PF0_1 | | WS.001.1b
- PS.base.a3 | PF0_1 | | WS.001.2a
- PS.base.a4 | PF0_1 | | WS.001.2b
- PS.base.a5 | PF0_1 | |
- PS.base.a6 | PF0_1 | | WS.001.3b
-(6 rows)
-
-update WSlot set backlink = 'PS.base.a5' where slotname = 'WS.001.3a';
-select * from WSlot where roomno = '001' order by slotname;
- slotname | roomno | slotlink | backlink
-----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | | PS.base.a1
- WS.001.1b | 001 | | PS.base.a2
- WS.001.2a | 001 | | PS.base.a3
- WS.001.2b | 001 | | PS.base.a4
- WS.001.3a | 001 | | PS.base.a5
- WS.001.3b | 001 | | PS.base.a6
-(6 rows)
-
-select * from PSlot where slotname ~ 'PS.base.a' order by slotname;
- slotname | pfname | slotlink | backlink
-----------------------+--------+----------------------+----------------------
- PS.base.a1 | PF0_1 | | WS.001.1a
- PS.base.a2 | PF0_1 | | WS.001.1b
- PS.base.a3 | PF0_1 | | WS.001.2a
- PS.base.a4 | PF0_1 | | WS.001.2b
- PS.base.a5 | PF0_1 | | WS.001.3a
- PS.base.a6 | PF0_1 | | WS.001.3b
-(6 rows)
-
-insert into PField values ('PF1_2', 'Phonelines first floor');
-insert into PSlot values ('PS.first.ta1', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.ta2', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.ta3', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.ta4', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.ta5', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.ta6', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.tb1', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.tb2', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.tb3', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.tb4', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.tb5', 'PF1_2', '', '');
-insert into PSlot values ('PS.first.tb6', 'PF1_2', '', '');
---
--- Fix the wrong name for patchfield PF0_2
---
-update PField set name = 'PF0_2' where name = 'PF0_X';
-select * from PSlot order by slotname;
- slotname | pfname | slotlink | backlink
-----------------------+--------+----------------------+----------------------
- PS.base.a1 | PF0_1 | | WS.001.1a
- PS.base.a2 | PF0_1 | | WS.001.1b
- PS.base.a3 | PF0_1 | | WS.001.2a
- PS.base.a4 | PF0_1 | | WS.001.2b
- PS.base.a5 | PF0_1 | | WS.001.3a
- PS.base.a6 | PF0_1 | | WS.001.3b
- PS.base.b1 | PF0_1 | | WS.002.1a
- PS.base.b2 | PF0_1 | | WS.002.1b
- PS.base.b3 | PF0_1 | | WS.002.2a
- PS.base.b4 | PF0_1 | | WS.002.2b
- PS.base.b5 | PF0_1 | | WS.002.3a
- PS.base.b6 | PF0_1 | | WS.002.3b
- PS.base.c1 | PF0_1 | | WS.003.1a
- PS.base.c2 | PF0_1 | | WS.003.1b
- PS.base.c3 | PF0_1 | | WS.003.2a
- PS.base.c4 | PF0_1 | | WS.003.2b
- PS.base.c5 | PF0_1 | | WS.003.3a
- PS.base.c6 | PF0_1 | | WS.003.3b
- PS.base.ta1 | PF0_2 | |
- PS.base.ta2 | PF0_2 | |
- PS.base.ta3 | PF0_2 | |
- PS.base.ta4 | PF0_2 | |
- PS.base.ta5 | PF0_2 | |
- PS.base.ta6 | PF0_2 | |
- PS.base.tb1 | PF0_2 | |
- PS.base.tb2 | PF0_2 | |
- PS.base.tb3 | PF0_2 | |
- PS.base.tb4 | PF0_2 | |
- PS.base.tb5 | PF0_2 | |
- PS.base.tb6 | PF0_2 | |
- PS.first.a1 | PF1_1 | | WS.101.1a
- PS.first.a2 | PF1_1 | | WS.101.1b
- PS.first.a3 | PF1_1 | | WS.101.2a
- PS.first.a4 | PF1_1 | | WS.101.2b
- PS.first.a5 | PF1_1 | | WS.101.3a
- PS.first.a6 | PF1_1 | | WS.101.3b
- PS.first.b1 | PF1_1 | | WS.102.1a
- PS.first.b2 | PF1_1 | | WS.102.1b
- PS.first.b3 | PF1_1 | | WS.102.2a
- PS.first.b4 | PF1_1 | | WS.102.2b
- PS.first.b5 | PF1_1 | | WS.102.3a
- PS.first.b6 | PF1_1 | | WS.102.3b
- PS.first.c1 | PF1_1 | | WS.105.1a
- PS.first.c2 | PF1_1 | | WS.105.1b
- PS.first.c3 | PF1_1 | | WS.105.2a
- PS.first.c4 | PF1_1 | | WS.105.2b
- PS.first.c5 | PF1_1 | | WS.105.3a
- PS.first.c6 | PF1_1 | | WS.105.3b
- PS.first.d1 | PF1_1 | | WS.106.1a
- PS.first.d2 | PF1_1 | | WS.106.1b
- PS.first.d3 | PF1_1 | | WS.106.2a
- PS.first.d4 | PF1_1 | | WS.106.2b
- PS.first.d5 | PF1_1 | | WS.106.3a
- PS.first.d6 | PF1_1 | | WS.106.3b
- PS.first.ta1 | PF1_2 | |
- PS.first.ta2 | PF1_2 | |
- PS.first.ta3 | PF1_2 | |
- PS.first.ta4 | PF1_2 | |
- PS.first.ta5 | PF1_2 | |
- PS.first.ta6 | PF1_2 | |
- PS.first.tb1 | PF1_2 | |
- PS.first.tb2 | PF1_2 | |
- PS.first.tb3 | PF1_2 | |
- PS.first.tb4 | PF1_2 | |
- PS.first.tb5 | PF1_2 | |
- PS.first.tb6 | PF1_2 | |
-(66 rows)
-
-select * from WSlot order by slotname;
- slotname | roomno | slotlink | backlink
-----------------------+----------+----------------------+----------------------
- WS.001.1a | 001 | | PS.base.a1
- WS.001.1b | 001 | | PS.base.a2
- WS.001.2a | 001 | | PS.base.a3
- WS.001.2b | 001 | | PS.base.a4
- WS.001.3a | 001 | | PS.base.a5
- WS.001.3b | 001 | | PS.base.a6
- WS.002.1a | 002 | | PS.base.b1
- WS.002.1b | 002 | | PS.base.b2
- WS.002.2a | 002 | | PS.base.b3
- WS.002.2b | 002 | | PS.base.b4
- WS.002.3a | 002 | | PS.base.b5
- WS.002.3b | 002 | | PS.base.b6
- WS.003.1a | 003 | | PS.base.c1
- WS.003.1b | 003 | | PS.base.c2
- WS.003.2a | 003 | | PS.base.c3
- WS.003.2b | 003 | | PS.base.c4
- WS.003.3a | 003 | | PS.base.c5
- WS.003.3b | 003 | | PS.base.c6
- WS.101.1a | 101 | | PS.first.a1
- WS.101.1b | 101 | | PS.first.a2
- WS.101.2a | 101 | | PS.first.a3
- WS.101.2b | 101 | | PS.first.a4
- WS.101.3a | 101 | | PS.first.a5
- WS.101.3b | 101 | | PS.first.a6
- WS.102.1a | 102 | | PS.first.b1
- WS.102.1b | 102 | | PS.first.b2
- WS.102.2a | 102 | | PS.first.b3
- WS.102.2b | 102 | | PS.first.b4
- WS.102.3a | 102 | | PS.first.b5
- WS.102.3b | 102 | | PS.first.b6
- WS.105.1a | 105 | | PS.first.c1
- WS.105.1b | 105 | | PS.first.c2
- WS.105.2a | 105 | | PS.first.c3
- WS.105.2b | 105 | | PS.first.c4
- WS.105.3a | 105 | | PS.first.c5
- WS.105.3b | 105 | | PS.first.c6
- WS.106.1a | 106 | | PS.first.d1
- WS.106.1b | 106 | | PS.first.d2
- WS.106.2a | 106 | | PS.first.d3
- WS.106.2b | 106 | | PS.first.d4
- WS.106.3a | 106 | | PS.first.d5
- WS.106.3b | 106 | | PS.first.d6
-(42 rows)
-
---
--- Install the central phone system and create the phone numbers.
--- They are wired on insert to the patchfields. Again the
--- triggers automatically tell the PSlots to update their
--- backlink field.
---
-insert into PLine values ('PL.001', '-0', 'Central call', 'PS.base.ta1');
-insert into PLine values ('PL.002', '-101', '', 'PS.base.ta2');
-insert into PLine values ('PL.003', '-102', '', 'PS.base.ta3');
-insert into PLine values ('PL.004', '-103', '', 'PS.base.ta5');
-insert into PLine values ('PL.005', '-104', '', 'PS.base.ta6');
-insert into PLine values ('PL.006', '-106', '', 'PS.base.tb2');
-insert into PLine values ('PL.007', '-108', '', 'PS.base.tb3');
-insert into PLine values ('PL.008', '-109', '', 'PS.base.tb4');
-insert into PLine values ('PL.009', '-121', '', 'PS.base.tb5');
-insert into PLine values ('PL.010', '-122', '', 'PS.base.tb6');
-insert into PLine values ('PL.015', '-134', '', 'PS.first.ta1');
-insert into PLine values ('PL.016', '-137', '', 'PS.first.ta3');
-insert into PLine values ('PL.017', '-139', '', 'PS.first.ta4');
-insert into PLine values ('PL.018', '-362', '', 'PS.first.tb1');
-insert into PLine values ('PL.019', '-363', '', 'PS.first.tb2');
-insert into PLine values ('PL.020', '-364', '', 'PS.first.tb3');
-insert into PLine values ('PL.021', '-365', '', 'PS.first.tb5');
-insert into PLine values ('PL.022', '-367', '', 'PS.first.tb6');
-insert into PLine values ('PL.028', '-501', 'Fax entrance', 'PS.base.ta2');
-insert into PLine values ('PL.029', '-502', 'Fax first floor', 'PS.first.ta1');
---
--- Buy some phones, plug them into the wall and patch the
--- phone lines to the corresponding patchfield slots.
---
-insert into PHone values ('PH.hc001', 'Hicom standard', 'WS.001.1a');
-update PSlot set slotlink = 'PS.base.ta1' where slotname = 'PS.base.a1';
-insert into PHone values ('PH.hc002', 'Hicom standard', 'WS.002.1a');
-update PSlot set slotlink = 'PS.base.ta5' where slotname = 'PS.base.b1';
-insert into PHone values ('PH.hc003', 'Hicom standard', 'WS.002.2a');
-update PSlot set slotlink = 'PS.base.tb2' where slotname = 'PS.base.b3';
-insert into PHone values ('PH.fax001', 'Canon fax', 'WS.001.2a');
-update PSlot set slotlink = 'PS.base.ta2' where slotname = 'PS.base.a3';
---
--- Install a hub at one of the patchfields, plug a computers
--- ethernet interface into the wall and patch it to the hub.
---
-insert into Hub values ('base.hub1', 'Patchfield PF0_1 hub', 16);
-insert into System values ('orion', 'PC');
-insert into IFace values ('IF', 'orion', 'eth0', 'WS.002.1b');
-update PSlot set slotlink = 'HS.base.hub1.1' where slotname = 'PS.base.b2';
---
--- Now we take a look at the patchfield
---
-select * from PField_v1 where pfname = 'PF0_1' order by slotname;
- pfname | slotname | backside | patch
---------+----------------------+----------------------------------------------------------+-----------------------------------------------
- PF0_1 | PS.base.a1 | WS.001.1a in room 001 -> Phone PH.hc001 (Hicom standard) | PS.base.ta1 -> Phone line -0 (Central call)
- PF0_1 | PS.base.a2 | WS.001.1b in room 001 -> - | -
- PF0_1 | PS.base.a3 | WS.001.2a in room 001 -> Phone PH.fax001 (Canon fax) | PS.base.ta2 -> Phone line -501 (Fax entrance)
- PF0_1 | PS.base.a4 | WS.001.2b in room 001 -> - | -
- PF0_1 | PS.base.a5 | WS.001.3a in room 001 -> - | -
- PF0_1 | PS.base.a6 | WS.001.3b in room 001 -> - | -
- PF0_1 | PS.base.b1 | WS.002.1a in room 002 -> Phone PH.hc002 (Hicom standard) | PS.base.ta5 -> Phone line -103
- PF0_1 | PS.base.b2 | WS.002.1b in room 002 -> orion IF eth0 (PC) | Patchfield PF0_1 hub slot 1
- PF0_1 | PS.base.b3 | WS.002.2a in room 002 -> Phone PH.hc003 (Hicom standard) | PS.base.tb2 -> Phone line -106
- PF0_1 | PS.base.b4 | WS.002.2b in room 002 -> - | -
- PF0_1 | PS.base.b5 | WS.002.3a in room 002 -> - | -
- PF0_1 | PS.base.b6 | WS.002.3b in room 002 -> - | -
- PF0_1 | PS.base.c1 | WS.003.1a in room 003 -> - | -
- PF0_1 | PS.base.c2 | WS.003.1b in room 003 -> - | -
- PF0_1 | PS.base.c3 | WS.003.2a in room 003 -> - | -
- PF0_1 | PS.base.c4 | WS.003.2b in room 003 -> - | -
- PF0_1 | PS.base.c5 | WS.003.3a in room 003 -> - | -
- PF0_1 | PS.base.c6 | WS.003.3b in room 003 -> - | -
-(18 rows)
-
-select * from PField_v1 where pfname = 'PF0_2' order by slotname;
- pfname | slotname | backside | patch
---------+----------------------+--------------------------------+------------------------------------------------------------------------
- PF0_2 | PS.base.ta1 | Phone line -0 (Central call) | PS.base.a1 -> WS.001.1a in room 001 -> Phone PH.hc001 (Hicom standard)
- PF0_2 | PS.base.ta2 | Phone line -501 (Fax entrance) | PS.base.a3 -> WS.001.2a in room 001 -> Phone PH.fax001 (Canon fax)
- PF0_2 | PS.base.ta3 | Phone line -102 | -
- PF0_2 | PS.base.ta4 | - | -
- PF0_2 | PS.base.ta5 | Phone line -103 | PS.base.b1 -> WS.002.1a in room 002 -> Phone PH.hc002 (Hicom standard)
- PF0_2 | PS.base.ta6 | Phone line -104 | -
- PF0_2 | PS.base.tb1 | - | -
- PF0_2 | PS.base.tb2 | Phone line -106 | PS.base.b3 -> WS.002.2a in room 002 -> Phone PH.hc003 (Hicom standard)
- PF0_2 | PS.base.tb3 | Phone line -108 | -
- PF0_2 | PS.base.tb4 | Phone line -109 | -
- PF0_2 | PS.base.tb5 | Phone line -121 | -
- PF0_2 | PS.base.tb6 | Phone line -122 | -
-(12 rows)
-
---
--- Finally we want errors
---
-insert into PField values ('PF1_1', 'should fail due to unique index');
-ERROR: duplicate key value violates unique constraint "pfield_name"
-DETAIL: Key (name)=(PF1_1) already exists.
-update PSlot set backlink = 'WS.not.there' where slotname = 'PS.base.a1';
-ERROR: WS.not.there does not exist
-CONTEXT: PL/pgSQL function tg_backlink_set(character,character) line 30 at RAISE
-PL/pgSQL function tg_backlink_a() line 17 at assignment
-update PSlot set backlink = 'XX.illegal' where slotname = 'PS.base.a1';
-ERROR: illegal backlink beginning with XX
-CONTEXT: PL/pgSQL function tg_backlink_set(character,character) line 47 at RAISE
-PL/pgSQL function tg_backlink_a() line 17 at assignment
-update PSlot set slotlink = 'PS.not.there' where slotname = 'PS.base.a1';
-ERROR: PS.not.there does not exist
-CONTEXT: PL/pgSQL function tg_slotlink_set(character,character) line 30 at RAISE
-PL/pgSQL function tg_slotlink_a() line 17 at assignment
-update PSlot set slotlink = 'XX.illegal' where slotname = 'PS.base.a1';
-ERROR: illegal slotlink beginning with XX
-CONTEXT: PL/pgSQL function tg_slotlink_set(character,character) line 77 at RAISE
-PL/pgSQL function tg_slotlink_a() line 17 at assignment
-insert into HSlot values ('HS', 'base.hub1', 1, '');
-ERROR: duplicate key value violates unique constraint "hslot_name"
-DETAIL: Key (slotname)=(HS.base.hub1.1 ) already exists.
-insert into HSlot values ('HS', 'base.hub1', 20, '');
-ERROR: no manual manipulation of HSlot
-CONTEXT: PL/pgSQL function tg_hslot_biu() line 12 at RAISE
-delete from HSlot;
-ERROR: no manual manipulation of HSlot
-CONTEXT: PL/pgSQL function tg_hslot_bd() line 12 at RAISE
-insert into IFace values ('IF', 'notthere', 'eth0', '');
-ERROR: system "notthere" does not exist
-CONTEXT: PL/pgSQL function tg_iface_biu() line 8 at RAISE
-insert into IFace values ('IF', 'orion', 'ethernet_interface_name_too_long', '');
-ERROR: IFace slotname "IF.orion.ethernet_interface_name_too_long" too long (20 char max)
-CONTEXT: PL/pgSQL function tg_iface_biu() line 14 at RAISE
---
--- The following tests are unrelated to the scenario outlined above;
--- they merely exercise specific parts of PL/pgSQL
---
---
--- Test recursion, per bug report 7-Sep-01
---
-CREATE FUNCTION recursion_test(int,int) RETURNS text AS '
-DECLARE rslt text;
-BEGIN
- IF $1 <= 0 THEN
- rslt = CAST($2 AS TEXT);
- ELSE
- rslt = CAST($1 AS TEXT) || '','' || recursion_test($1 - 1, $2);
- END IF;
- RETURN rslt;
-END;' LANGUAGE plpgsql;
-SELECT recursion_test(4,3);
- recursion_test
-----------------
- 4,3,2,1,3
-(1 row)
-
---
--- Test the FOUND magic variable
---
-CREATE TABLE found_test_tbl (a int);
-create function test_found()
- returns boolean as '
- declare
- begin
- insert into found_test_tbl values (1);
- if FOUND then
- insert into found_test_tbl values (2);
- end if;
-
- update found_test_tbl set a = 100 where a = 1;
- if FOUND then
- insert into found_test_tbl values (3);
- end if;
-
- delete from found_test_tbl where a = 9999; -- matches no rows
- if not FOUND then
- insert into found_test_tbl values (4);
- end if;
-
- for i in 1 .. 10 loop
- -- no need to do anything
- end loop;
- if FOUND then
- insert into found_test_tbl values (5);
- end if;
-
- -- never executes the loop
- for i in 2 .. 1 loop
- -- no need to do anything
- end loop;
- if not FOUND then
- insert into found_test_tbl values (6);
- end if;
- return true;
- end;' language plpgsql;
-select test_found();
- test_found
-------------
- t
-(1 row)
-
-select * from found_test_tbl;
- a
------
- 2
- 100
- 3
- 4
- 5
- 6
-(6 rows)
-
---
--- Test set-returning functions for PL/pgSQL
---
-create function test_table_func_rec() returns setof found_test_tbl as '
-DECLARE
- rec RECORD;
-BEGIN
- FOR rec IN select * from found_test_tbl LOOP
- RETURN NEXT rec;
- END LOOP;
- RETURN;
-END;' language plpgsql;
-select * from test_table_func_rec();
- a
------
- 2
- 100
- 3
- 4
- 5
- 6
-(6 rows)
-
-create function test_table_func_row() returns setof found_test_tbl as '
-DECLARE
- row found_test_tbl%ROWTYPE;
-BEGIN
- FOR row IN select * from found_test_tbl LOOP
- RETURN NEXT row;
- END LOOP;
- RETURN;
-END;' language plpgsql;
-select * from test_table_func_row();
- a
------
- 2
- 100
- 3
- 4
- 5
- 6
-(6 rows)
-
-create function test_ret_set_scalar(int,int) returns setof int as '
-DECLARE
- i int;
-BEGIN
- FOR i IN $1 .. $2 LOOP
- RETURN NEXT i + 1;
- END LOOP;
- RETURN;
-END;' language plpgsql;
-select * from test_ret_set_scalar(1,10);
- test_ret_set_scalar
----------------------
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
-(10 rows)
-
-create function test_ret_set_rec_dyn(int) returns setof record as '
-DECLARE
- retval RECORD;
-BEGIN
- IF $1 > 10 THEN
- SELECT INTO retval 5, 10, 15;
- RETURN NEXT retval;
- RETURN NEXT retval;
- ELSE
- SELECT INTO retval 50, 5::numeric, ''xxx''::text;
- RETURN NEXT retval;
- RETURN NEXT retval;
- END IF;
- RETURN;
-END;' language plpgsql;
-SELECT * FROM test_ret_set_rec_dyn(1500) AS (a int, b int, c int);
- a | b | c
----+----+----
- 5 | 10 | 15
- 5 | 10 | 15
-(2 rows)
-
-SELECT * FROM test_ret_set_rec_dyn(5) AS (a int, b numeric, c text);
- a | b | c
-----+---+-----
- 50 | 5 | xxx
- 50 | 5 | xxx
-(2 rows)
-
-create function test_ret_rec_dyn(int) returns record as '
-DECLARE
- retval RECORD;
-BEGIN
- IF $1 > 10 THEN
- SELECT INTO retval 5, 10, 15;
- RETURN retval;
- ELSE
- SELECT INTO retval 50, 5::numeric, ''xxx''::text;
- RETURN retval;
- END IF;
-END;' language plpgsql;
-SELECT * FROM test_ret_rec_dyn(1500) AS (a int, b int, c int);
- a | b | c
----+----+----
- 5 | 10 | 15
-(1 row)
-
-SELECT * FROM test_ret_rec_dyn(5) AS (a int, b numeric, c text);
- a | b | c
-----+---+-----
- 50 | 5 | xxx
-(1 row)
-
---
--- Test some simple polymorphism cases.
---
-create function f1(x anyelement) returns anyelement as $$
-begin
- return x + 1;
-end$$ language plpgsql;
-select f1(42) as int, f1(4.5) as num;
- int | num
------+-----
- 43 | 5.5
-(1 row)
-
-select f1(point(3,4)); -- fail for lack of + operator
-ERROR: operator does not exist: point + integer
-LINE 1: x + 1
- ^
-HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
-QUERY: x + 1
-CONTEXT: PL/pgSQL function f1(anyelement) line 3 at RETURN
-drop function f1(x anyelement);
-create function f1(x anyelement) returns anyarray as $$
-begin
- return array[x + 1, x + 2];
-end$$ language plpgsql;
-select f1(42) as int, f1(4.5) as num;
- int | num
----------+-----------
- {43,44} | {5.5,6.5}
-(1 row)
-
-drop function f1(x anyelement);
-create function f1(x anyarray) returns anyelement as $$
-begin
- return x[1];
-end$$ language plpgsql;
-select f1(array[2,4]) as int, f1(array[4.5, 7.7]) as num;
- int | num
------+-----
- 2 | 4.5
-(1 row)
-
-select f1(stavalues1) from pg_statistic; -- fail, can't infer element type
-ERROR: cannot determine element type of "anyarray" argument
-drop function f1(x anyarray);
-create function f1(x anyarray) returns anyarray as $$
-begin
- return x;
-end$$ language plpgsql;
-select f1(array[2,4]) as int, f1(array[4.5, 7.7]) as num;
- int | num
--------+-----------
- {2,4} | {4.5,7.7}
-(1 row)
-
-select f1(stavalues1) from pg_statistic; -- fail, can't infer element type
-ERROR: PL/pgSQL functions cannot accept type anyarray
-CONTEXT: compilation of PL/pgSQL function "f1" near line 1
-drop function f1(x anyarray);
--- fail, can't infer type:
-create function f1(x anyelement) returns anyrange as $$
-begin
- return array[x + 1, x + 2];
-end$$ language plpgsql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange.
-create function f1(x anyrange) returns anyarray as $$
-begin
- return array[lower(x), upper(x)];
-end$$ language plpgsql;
-select f1(int4range(42, 49)) as int, f1(float8range(4.5, 7.8)) as num;
- int | num
----------+-----------
- {42,49} | {4.5,7.8}
-(1 row)
-
-drop function f1(x anyrange);
-create function f1(x anycompatible, y anycompatible) returns anycompatiblearray as $$
-begin
- return array[x, y];
-end$$ language plpgsql;
-select f1(2, 4) as int, f1(2, 4.5) as num;
- int | num
--------+---------
- {2,4} | {2,4.5}
-(1 row)
-
-drop function f1(x anycompatible, y anycompatible);
-create function f1(x anycompatiblerange, y anycompatible, z anycompatible) returns anycompatiblearray as $$
-begin
- return array[lower(x), upper(x), y, z];
-end$$ language plpgsql;
-select f1(int4range(42, 49), 11, 2::smallint) as int, f1(float8range(4.5, 7.8), 7.8, 11::real) as num;
- int | num
---------------+------------------
- {42,49,11,2} | {4.5,7.8,7.8,11}
-(1 row)
-
-select f1(int4range(42, 49), 11, 4.5) as fail; -- range type doesn't fit
-ERROR: function f1(int4range, integer, numeric) does not exist
-LINE 1: select f1(int4range(42, 49), 11, 4.5) as fail;
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function f1(x anycompatiblerange, y anycompatible, z anycompatible);
--- fail, can't infer type:
-create function f1(x anycompatible) returns anycompatiblerange as $$
-begin
- return array[x + 1, x + 2];
-end$$ language plpgsql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange.
-create function f1(x anycompatiblerange, y anycompatiblearray) returns anycompatiblerange as $$
-begin
- return x;
-end$$ language plpgsql;
-select f1(int4range(42, 49), array[11]) as int, f1(float8range(4.5, 7.8), array[7]) as num;
- int | num
----------+-----------
- [42,49) | [4.5,7.8)
-(1 row)
-
-drop function f1(x anycompatiblerange, y anycompatiblearray);
-create function f1(a anyelement, b anyarray,
- c anycompatible, d anycompatible,
- OUT x anyarray, OUT y anycompatiblearray)
-as $$
-begin
- x := a || b;
- y := array[c, d];
-end$$ language plpgsql;
-select x, pg_typeof(x), y, pg_typeof(y)
- from f1(11, array[1, 2], 42, 34.5);
- x | pg_typeof | y | pg_typeof
-----------+-----------+-----------+-----------
- {11,1,2} | integer[] | {42,34.5} | numeric[]
-(1 row)
-
-select x, pg_typeof(x), y, pg_typeof(y)
- from f1(11, array[1, 2], point(1,2), point(3,4));
- x | pg_typeof | y | pg_typeof
-----------+-----------+-------------------+-----------
- {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[]
-(1 row)
-
-select x, pg_typeof(x), y, pg_typeof(y)
- from f1(11, '{1,2}', point(1,2), '(3,4)');
- x | pg_typeof | y | pg_typeof
-----------+-----------+-------------------+-----------
- {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[]
-(1 row)
-
-select x, pg_typeof(x), y, pg_typeof(y)
- from f1(11, array[1, 2.2], 42, 34.5); -- fail
-ERROR: function f1(integer, numeric[], integer, numeric) does not exist
-LINE 2: from f1(11, array[1, 2.2], 42, 34.5);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function f1(a anyelement, b anyarray,
- c anycompatible, d anycompatible);
---
--- Test handling of OUT parameters, including polymorphic cases.
--- Note that RETURN is optional with OUT params; we try both ways.
---
--- wrong way to do it:
-create function f1(in i int, out j int) returns int as $$
-begin
- return i+1;
-end$$ language plpgsql;
-ERROR: RETURN cannot have a parameter in function with OUT parameters
-LINE 3: return i+1;
- ^
-create function f1(in i int, out j int) as $$
-begin
- j := i+1;
- return;
-end$$ language plpgsql;
-select f1(42);
- f1
-----
- 43
-(1 row)
-
-select * from f1(42);
- j
-----
- 43
-(1 row)
-
-create or replace function f1(inout i int) as $$
-begin
- i := i+1;
-end$$ language plpgsql;
-select f1(42);
- f1
-----
- 43
-(1 row)
-
-select * from f1(42);
- i
-----
- 43
-(1 row)
-
-drop function f1(int);
-create function f1(in i int, out j int) returns setof int as $$
-begin
- j := i+1;
- return next;
- j := i+2;
- return next;
- return;
-end$$ language plpgsql;
-select * from f1(42);
- j
-----
- 43
- 44
-(2 rows)
-
-drop function f1(int);
-create function f1(in i int, out j int, out k text) as $$
-begin
- j := i;
- j := j+1;
- k := 'foo';
-end$$ language plpgsql;
-select f1(42);
- f1
-----------
- (43,foo)
-(1 row)
-
-select * from f1(42);
- j | k
-----+-----
- 43 | foo
-(1 row)
-
-drop function f1(int);
-create function f1(in i int, out j int, out k text) returns setof record as $$
-begin
- j := i+1;
- k := 'foo';
- return next;
- j := j+1;
- k := 'foot';
- return next;
-end$$ language plpgsql;
-select * from f1(42);
- j | k
-----+------
- 43 | foo
- 44 | foot
-(2 rows)
-
-drop function f1(int);
-create function duplic(in i anyelement, out j anyelement, out k anyarray) as $$
-begin
- j := i;
- k := array[j,j];
- return;
-end$$ language plpgsql;
-select * from duplic(42);
- j | k
-----+---------
- 42 | {42,42}
-(1 row)
-
-select * from duplic('foo'::text);
- j | k
------+-----------
- foo | {foo,foo}
-(1 row)
-
-drop function duplic(anyelement);
-create function duplic(in i anycompatiblerange, out j anycompatible, out k anycompatiblearray) as $$
-begin
- j := lower(i);
- k := array[lower(i),upper(i)];
- return;
-end$$ language plpgsql;
-select * from duplic(int4range(42,49));
- j | k
-----+---------
- 42 | {42,49}
-(1 row)
-
-select * from duplic(textrange('aaa', 'bbb'));
- j | k
------+-----------
- aaa | {aaa,bbb}
-(1 row)
-
-drop function duplic(anycompatiblerange);
---
--- test PERFORM
---
-create table perform_test (
- a INT,
- b INT
-);
-create function perform_simple_func(int) returns boolean as '
-BEGIN
- IF $1 < 20 THEN
- INSERT INTO perform_test VALUES ($1, $1 + 10);
- RETURN TRUE;
- ELSE
- RETURN FALSE;
- END IF;
-END;' language plpgsql;
-create function perform_test_func() returns void as '
-BEGIN
- IF FOUND then
- INSERT INTO perform_test VALUES (100, 100);
- END IF;
-
- PERFORM perform_simple_func(5);
-
- IF FOUND then
- INSERT INTO perform_test VALUES (100, 100);
- END IF;
-
- PERFORM perform_simple_func(50);
-
- IF FOUND then
- INSERT INTO perform_test VALUES (100, 100);
- END IF;
-
- RETURN;
-END;' language plpgsql;
-SELECT perform_test_func();
- perform_test_func
--------------------
-
-(1 row)
-
-SELECT * FROM perform_test;
- a | b
------+-----
- 5 | 15
- 100 | 100
- 100 | 100
-(3 rows)
-
-drop table perform_test;
---
--- Test proper snapshot handling in simple expressions
---
-create temp table users(login text, id serial);
-create function sp_id_user(a_login text) returns int as $$
-declare x int;
-begin
- select into x id from users where login = a_login;
- if found then return x; end if;
- return 0;
-end$$ language plpgsql stable;
-insert into users values('user1');
-select sp_id_user('user1');
- sp_id_user
-------------
- 1
-(1 row)
-
-select sp_id_user('userx');
- sp_id_user
-------------
- 0
-(1 row)
-
-create function sp_add_user(a_login text) returns int as $$
-declare my_id_user int;
-begin
- my_id_user = sp_id_user( a_login );
- IF my_id_user > 0 THEN
- RETURN -1; -- error code for existing user
- END IF;
- INSERT INTO users ( login ) VALUES ( a_login );
- my_id_user = sp_id_user( a_login );
- IF my_id_user = 0 THEN
- RETURN -2; -- error code for insertion failure
- END IF;
- RETURN my_id_user;
-end$$ language plpgsql;
-select sp_add_user('user1');
- sp_add_user
--------------
- -1
-(1 row)
-
-select sp_add_user('user2');
- sp_add_user
--------------
- 2
-(1 row)
-
-select sp_add_user('user2');
- sp_add_user
--------------
- -1
-(1 row)
-
-select sp_add_user('user3');
- sp_add_user
--------------
- 3
-(1 row)
-
-select sp_add_user('user3');
- sp_add_user
--------------
- -1
-(1 row)
-
-drop function sp_add_user(text);
-drop function sp_id_user(text);
---
--- tests for refcursors
---
-create table rc_test (a int, b int);
-copy rc_test from stdin;
-create function return_unnamed_refcursor() returns refcursor as $$
-declare
- rc refcursor;
-begin
- open rc for select a from rc_test;
- return rc;
-end
-$$ language plpgsql;
-create function use_refcursor(rc refcursor) returns int as $$
-declare
- rc refcursor;
- x record;
-begin
- rc := return_unnamed_refcursor();
- fetch next from rc into x;
- return x.a;
-end
-$$ language plpgsql;
-select use_refcursor(return_unnamed_refcursor());
- use_refcursor
----------------
- 5
-(1 row)
-
-create function return_refcursor(rc refcursor) returns refcursor as $$
-begin
- open rc for select a from rc_test;
- return rc;
-end
-$$ language plpgsql;
-create function refcursor_test1(refcursor) returns refcursor as $$
-begin
- perform return_refcursor($1);
- return $1;
-end
-$$ language plpgsql;
-begin;
-select refcursor_test1('test1');
- refcursor_test1
------------------
- test1
-(1 row)
-
-fetch next in test1;
- a
----
- 5
-(1 row)
-
-select refcursor_test1('test2');
- refcursor_test1
------------------
- test2
-(1 row)
-
-fetch all from test2;
- a
------
- 5
- 50
- 500
-(3 rows)
-
-commit;
--- should fail
-fetch next from test1;
-ERROR: cursor "test1" does not exist
-create function refcursor_test2(int, int) returns boolean as $$
-declare
- c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2;
- nonsense record;
-begin
- open c1($1, $2);
- fetch c1 into nonsense;
- close c1;
- if found then
- return true;
- else
- return false;
- end if;
-end
-$$ language plpgsql;
-select refcursor_test2(20000, 20000) as "Should be false",
- refcursor_test2(20, 20) as "Should be true";
- Should be false | Should be true
------------------+----------------
- f | t
-(1 row)
-
--- should fail
-create function constant_refcursor() returns refcursor as $$
-declare
- rc constant refcursor;
-begin
- open rc for select a from rc_test;
- return rc;
-end
-$$ language plpgsql;
-select constant_refcursor();
-ERROR: variable "rc" is declared CONSTANT
-CONTEXT: PL/pgSQL function constant_refcursor() line 5 at OPEN
--- but it's okay like this
-create or replace function constant_refcursor() returns refcursor as $$
-declare
- rc constant refcursor := 'my_cursor_name';
-begin
- open rc for select a from rc_test;
- return rc;
-end
-$$ language plpgsql;
-select constant_refcursor();
- constant_refcursor
---------------------
- my_cursor_name
-(1 row)
-
---
--- tests for cursors with named parameter arguments
---
-create function namedparmcursor_test1(int, int) returns boolean as $$
-declare
- c1 cursor (param1 int, param12 int) for select * from rc_test where a > param1 and b > param12;
- nonsense record;
-begin
- open c1(param12 := $2, param1 := $1);
- fetch c1 into nonsense;
- close c1;
- if found then
- return true;
- else
- return false;
- end if;
-end
-$$ language plpgsql;
-select namedparmcursor_test1(20000, 20000) as "Should be false",
- namedparmcursor_test1(20, 20) as "Should be true";
- Should be false | Should be true
------------------+----------------
- f | t
-(1 row)
-
--- mixing named and positional argument notations
-create function namedparmcursor_test2(int, int) returns boolean as $$
-declare
- c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2;
- nonsense record;
-begin
- open c1(param1 := $1, $2);
- fetch c1 into nonsense;
- close c1;
- if found then
- return true;
- else
- return false;
- end if;
-end
-$$ language plpgsql;
-select namedparmcursor_test2(20, 20);
- namedparmcursor_test2
------------------------
- t
-(1 row)
-
--- mixing named and positional: param2 is given twice, once in named notation
--- and second time in positional notation. Should throw an error at parse time
-create function namedparmcursor_test3() returns void as $$
-declare
- c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2;
-begin
- open c1(param2 := 20, 21);
-end
-$$ language plpgsql;
-ERROR: value for parameter "param2" of cursor "c1" specified more than once
-LINE 5: open c1(param2 := 20, 21);
- ^
--- mixing named and positional: same as previous test, but param1 is duplicated
-create function namedparmcursor_test4() returns void as $$
-declare
- c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2;
-begin
- open c1(20, param1 := 21);
-end
-$$ language plpgsql;
-ERROR: value for parameter "param1" of cursor "c1" specified more than once
-LINE 5: open c1(20, param1 := 21);
- ^
--- duplicate named parameter, should throw an error at parse time
-create function namedparmcursor_test5() returns void as $$
-declare
- c1 cursor (p1 int, p2 int) for
- select * from tenk1 where thousand = p1 and tenthous = p2;
-begin
- open c1 (p2 := 77, p2 := 42);
-end
-$$ language plpgsql;
-ERROR: value for parameter "p2" of cursor "c1" specified more than once
-LINE 6: open c1 (p2 := 77, p2 := 42);
- ^
--- not enough parameters, should throw an error at parse time
-create function namedparmcursor_test6() returns void as $$
-declare
- c1 cursor (p1 int, p2 int) for
- select * from tenk1 where thousand = p1 and tenthous = p2;
-begin
- open c1 (p2 := 77);
-end
-$$ language plpgsql;
-ERROR: not enough arguments for cursor "c1"
-LINE 6: open c1 (p2 := 77);
- ^
--- division by zero runtime error, the context given in the error message
--- should be sensible
-create function namedparmcursor_test7() returns void as $$
-declare
- c1 cursor (p1 int, p2 int) for
- select * from tenk1 where thousand = p1 and tenthous = p2;
-begin
- open c1 (p2 := 77, p1 := 42/0);
-end $$ language plpgsql;
-select namedparmcursor_test7();
-ERROR: division by zero
-CONTEXT: PL/pgSQL expression "42/0 AS p1, 77 AS p2"
-PL/pgSQL function namedparmcursor_test7() line 6 at OPEN
--- check that line comments work correctly within the argument list
--- (this used to require a special hack in the code; it no longer does,
--- but let's keep the test anyway)
-create function namedparmcursor_test8() returns int4 as $$
-declare
- c1 cursor (p1 int, p2 int) for
- select count(*) from tenk1 where thousand = p1 and tenthous = p2;
- n int4;
-begin
- open c1 (77 -- test
- , 42);
- fetch c1 into n;
- return n;
-end $$ language plpgsql;
-select namedparmcursor_test8();
- namedparmcursor_test8
------------------------
- 0
-(1 row)
-
--- cursor parameter name can match plpgsql variable or unreserved keyword
-create function namedparmcursor_test9(p1 int) returns int4 as $$
-declare
- c1 cursor (p1 int, p2 int, debug int) for
- select count(*) from tenk1 where thousand = p1 and tenthous = p2
- and four = debug;
- p2 int4 := 1006;
- n int4;
-begin
- open c1 (p1 := p1, p2 := p2, debug := 2);
- fetch c1 into n;
- return n;
-end $$ language plpgsql;
-select namedparmcursor_test9(6);
- namedparmcursor_test9
------------------------
- 1
-(1 row)
-
---
--- tests for "raise" processing
---
-create function raise_test1(int) returns int as $$
-begin
- raise notice 'This message has too many parameters!', $1;
- return $1;
-end;
-$$ language plpgsql;
-ERROR: too many parameters specified for RAISE
-CONTEXT: compilation of PL/pgSQL function "raise_test1" near line 3
-create function raise_test2(int) returns int as $$
-begin
- raise notice 'This message has too few parameters: %, %, %', $1, $1;
- return $1;
-end;
-$$ language plpgsql;
-ERROR: too few parameters specified for RAISE
-CONTEXT: compilation of PL/pgSQL function "raise_test2" near line 3
-create function raise_test3(int) returns int as $$
-begin
- raise notice 'This message has no parameters (despite having %% signs in it)!';
- return $1;
-end;
-$$ language plpgsql;
-select raise_test3(1);
-NOTICE: This message has no parameters (despite having % signs in it)!
- raise_test3
--------------
- 1
-(1 row)
-
--- Test re-RAISE inside a nested exception block. This case is allowed
--- by Oracle's PL/SQL but was handled differently by PG before 9.1.
-CREATE FUNCTION reraise_test() RETURNS void AS $$
-BEGIN
- BEGIN
- RAISE syntax_error;
- EXCEPTION
- WHEN syntax_error THEN
- BEGIN
- raise notice 'exception % thrown in inner block, reraising', sqlerrm;
- RAISE;
- EXCEPTION
- WHEN OTHERS THEN
- raise notice 'RIGHT - exception % caught in inner block', sqlerrm;
- END;
- END;
-EXCEPTION
- WHEN OTHERS THEN
- raise notice 'WRONG - exception % caught in outer block', sqlerrm;
-END;
-$$ LANGUAGE plpgsql;
-SELECT reraise_test();
-NOTICE: exception syntax_error thrown in inner block, reraising
-NOTICE: RIGHT - exception syntax_error caught in inner block
- reraise_test
---------------
-
-(1 row)
-
---
--- reject function definitions that contain malformed SQL queries at
--- compile-time, where possible
---
-create function bad_sql1() returns int as $$
-declare a int;
-begin
- a := 5;
- Johnny Yuma;
- a := 10;
- return a;
-end$$ language plpgsql;
-ERROR: syntax error at or near "Johnny"
-LINE 5: Johnny Yuma;
- ^
-create function bad_sql2() returns int as $$
-declare r record;
-begin
- for r in select I fought the law, the law won LOOP
- raise notice 'in loop';
- end loop;
- return 5;
-end;$$ language plpgsql;
-ERROR: syntax error at or near "the"
-LINE 4: for r in select I fought the law, the law won LOOP
- ^
--- a RETURN expression is mandatory, except for void-returning
--- functions, where it is not allowed
-create function missing_return_expr() returns int as $$
-begin
- return ;
-end;$$ language plpgsql;
-ERROR: missing expression at or near ";"
-LINE 3: return ;
- ^
-create function void_return_expr() returns void as $$
-begin
- return 5;
-end;$$ language plpgsql;
-ERROR: RETURN cannot have a parameter in function returning void
-LINE 3: return 5;
- ^
--- VOID functions are allowed to omit RETURN
-create function void_return_expr() returns void as $$
-begin
- perform 2+2;
-end;$$ language plpgsql;
-select void_return_expr();
- void_return_expr
-------------------
-
-(1 row)
-
--- but ordinary functions are not
-create function missing_return_expr() returns int as $$
-begin
- perform 2+2;
-end;$$ language plpgsql;
-select missing_return_expr();
-ERROR: control reached end of function without RETURN
-CONTEXT: PL/pgSQL function missing_return_expr()
-drop function void_return_expr();
-drop function missing_return_expr();
---
--- EXECUTE ... INTO test
---
-create table eifoo (i integer, y integer);
-create type eitype as (i integer, y integer);
-create or replace function execute_into_test(varchar) returns record as $$
-declare
- _r record;
- _rt eifoo%rowtype;
- _v eitype;
- i int;
- j int;
- k int;
-begin
- execute 'insert into '||$1||' values(10,15)';
- execute 'select (row).* from (select row(10,1)::eifoo) s' into _r;
- raise notice '% %', _r.i, _r.y;
- execute 'select * from '||$1||' limit 1' into _rt;
- raise notice '% %', _rt.i, _rt.y;
- execute 'select *, 20 from '||$1||' limit 1' into i, j, k;
- raise notice '% % %', i, j, k;
- execute 'select 1,2' into _v;
- return _v;
-end; $$ language plpgsql;
-select execute_into_test('eifoo');
-NOTICE: 10 1
-NOTICE: 10 15
-NOTICE: 10 15 20
- execute_into_test
--------------------
- (1,2)
-(1 row)
-
-drop table eifoo cascade;
-drop type eitype cascade;
---
--- SQLSTATE and SQLERRM test
---
-create function excpt_test1() returns void as $$
-begin
- raise notice '% %', sqlstate, sqlerrm;
-end; $$ language plpgsql;
--- should fail: SQLSTATE and SQLERRM are only in defined EXCEPTION
--- blocks
-select excpt_test1();
-ERROR: column "sqlstate" does not exist
-LINE 1: sqlstate
- ^
-QUERY: sqlstate
-CONTEXT: PL/pgSQL function excpt_test1() line 3 at RAISE
-create function excpt_test2() returns void as $$
-begin
- begin
- begin
- raise notice '% %', sqlstate, sqlerrm;
- end;
- end;
-end; $$ language plpgsql;
--- should fail
-select excpt_test2();
-ERROR: column "sqlstate" does not exist
-LINE 1: sqlstate
- ^
-QUERY: sqlstate
-CONTEXT: PL/pgSQL function excpt_test2() line 5 at RAISE
-create function excpt_test3() returns void as $$
-begin
- begin
- raise exception 'user exception';
- exception when others then
- raise notice 'caught exception % %', sqlstate, sqlerrm;
- begin
- raise notice '% %', sqlstate, sqlerrm;
- perform 10/0;
- exception
- when substring_error then
- -- this exception handler shouldn't be invoked
- raise notice 'unexpected exception: % %', sqlstate, sqlerrm;
- when division_by_zero then
- raise notice 'caught exception % %', sqlstate, sqlerrm;
- end;
- raise notice '% %', sqlstate, sqlerrm;
- end;
-end; $$ language plpgsql;
-select excpt_test3();
-NOTICE: caught exception P0001 user exception
-NOTICE: P0001 user exception
-NOTICE: caught exception 22012 division by zero
-NOTICE: P0001 user exception
- excpt_test3
--------------
-
-(1 row)
-
-create function excpt_test4() returns text as $$
-begin
- begin perform 1/0;
- exception when others then return sqlerrm; end;
-end; $$ language plpgsql;
-select excpt_test4();
- excpt_test4
-------------------
- division by zero
-(1 row)
-
-drop function excpt_test1();
-drop function excpt_test2();
-drop function excpt_test3();
-drop function excpt_test4();
--- parameters of raise stmt can be expressions
-create function raise_exprs() returns void as $$
-declare
- a integer[] = '{10,20,30}';
- c varchar = 'xyz';
- i integer;
-begin
- i := 2;
- raise notice '%; %; %; %; %; %', a, a[i], c, (select c || 'abc'), row(10,'aaa',NULL,30), NULL;
-end;$$ language plpgsql;
-select raise_exprs();
-NOTICE: {10,20,30}; 20; xyz; xyzabc; (10,aaa,,30);
- raise_exprs
--------------
-
-(1 row)
-
-drop function raise_exprs();
--- regression test: verify that multiple uses of same plpgsql datum within
--- a SQL command all get mapped to the same $n parameter. The return value
--- of the SELECT is not important, we only care that it doesn't fail with
--- a complaint about an ungrouped column reference.
-create function multi_datum_use(p1 int) returns bool as $$
-declare
- x int;
- y int;
-begin
- select into x,y unique1/p1, unique1/$1 from tenk1 group by unique1/p1;
- return x = y;
-end$$ language plpgsql;
-select multi_datum_use(42);
- multi_datum_use
------------------
- t
-(1 row)
-
---
--- Test STRICT limiter in both planned and EXECUTE invocations.
--- Note that a data-modifying query is quasi strict (disallow multi rows)
--- by default in the planned case, but not in EXECUTE.
---
-create temp table foo (f1 int, f2 int);
-insert into foo values (1,2), (3,4);
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- should work
- insert into foo values(5,6) returning * into x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-NOTICE: x.f1 = 5, x.f2 = 6
- stricttest
-------------
-
-(1 row)
-
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- should fail due to implicit strict
- insert into foo values(7,8),(9,10) returning * into x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned more than one row
-HINT: Make sure the query returns a single row, or use LIMIT 1.
-CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- should work
- execute 'insert into foo values(5,6) returning *' into x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-NOTICE: x.f1 = 5, x.f2 = 6
- stricttest
-------------
-
-(1 row)
-
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- this should work since EXECUTE isn't as picky
- execute 'insert into foo values(7,8),(9,10) returning *' into x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-NOTICE: x.f1 = 7, x.f2 = 8
- stricttest
-------------
-
-(1 row)
-
-select * from foo;
- f1 | f2
-----+----
- 1 | 2
- 3 | 4
- 5 | 6
- 5 | 6
- 7 | 8
- 9 | 10
-(6 rows)
-
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- should work
- select * from foo where f1 = 3 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-NOTICE: x.f1 = 3, x.f2 = 4
- stricttest
-------------
-
-(1 row)
-
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- should fail, no rows
- select * from foo where f1 = 0 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned no rows
-CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- should fail, too many rows
- select * from foo where f1 > 3 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned more than one row
-HINT: Make sure the query returns a single row, or use LIMIT 1.
-CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- should work
- execute 'select * from foo where f1 = 3' into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-NOTICE: x.f1 = 3, x.f2 = 4
- stricttest
-------------
-
-(1 row)
-
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- should fail, no rows
- execute 'select * from foo where f1 = 0' into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned no rows
-CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- should fail, too many rows
- execute 'select * from foo where f1 > 3' into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned more than one row
-CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE
-drop function stricttest();
--- test printing parameters after failure due to STRICT
-set plpgsql.print_strict_params to true;
-create or replace function stricttest() returns void as $$
-declare
-x record;
-p1 int := 2;
-p3 text := 'foo';
-begin
- -- no rows
- select * from foo where f1 = p1 and f1::text = p3 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned no rows
-DETAIL: parameters: p1 = '2', p3 = 'foo'
-CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement
-create or replace function stricttest() returns void as $$
-declare
-x record;
-p1 int := 2;
-p3 text := $a$'Valame Dios!' dijo Sancho; 'no le dije yo a vuestra merced que mirase bien lo que hacia?'$a$;
-begin
- -- no rows
- select * from foo where f1 = p1 and f1::text = p3 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned no rows
-DETAIL: parameters: p1 = '2', p3 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia?'''
-CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement
-create or replace function stricttest() returns void as $$
-declare
-x record;
-p1 int := 2;
-p3 text := 'foo';
-begin
- -- too many rows
- select * from foo where f1 > p1 or f1::text = p3 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned more than one row
-DETAIL: parameters: p1 = '2', p3 = 'foo'
-HINT: Make sure the query returns a single row, or use LIMIT 1.
-CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- too many rows, no params
- select * from foo where f1 > 3 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned more than one row
-HINT: Make sure the query returns a single row, or use LIMIT 1.
-CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- no rows
- execute 'select * from foo where f1 = $1 or f1::text = $2' using 0, 'foo' into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned no rows
-DETAIL: parameters: $1 = '0', $2 = 'foo'
-CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- too many rows
- execute 'select * from foo where f1 > $1' using 1 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned more than one row
-DETAIL: parameters: $1 = '1'
-CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE
-create or replace function stricttest() returns void as $$
-declare x record;
-begin
- -- too many rows, no parameters
- execute 'select * from foo where f1 > 3' into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned more than one row
-CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE
-create or replace function stricttest() returns void as $$
--- override the global
-#print_strict_params off
-declare
-x record;
-p1 int := 2;
-p3 text := 'foo';
-begin
- -- too many rows
- select * from foo where f1 > p1 or f1::text = p3 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned more than one row
-HINT: Make sure the query returns a single row, or use LIMIT 1.
-CONTEXT: PL/pgSQL function stricttest() line 10 at SQL statement
-reset plpgsql.print_strict_params;
-create or replace function stricttest() returns void as $$
--- override the global
-#print_strict_params on
-declare
-x record;
-p1 int := 2;
-p3 text := 'foo';
-begin
- -- too many rows
- select * from foo where f1 > p1 or f1::text = p3 into strict x;
- raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2;
-end$$ language plpgsql;
-select stricttest();
-ERROR: query returned more than one row
-DETAIL: parameters: p1 = '2', p3 = 'foo'
-HINT: Make sure the query returns a single row, or use LIMIT 1.
-CONTEXT: PL/pgSQL function stricttest() line 10 at SQL statement
--- test warnings and errors
-set plpgsql.extra_warnings to 'all';
-set plpgsql.extra_warnings to 'none';
-set plpgsql.extra_errors to 'all';
-set plpgsql.extra_errors to 'none';
--- test warnings when shadowing a variable
-set plpgsql.extra_warnings to 'shadowed_variables';
--- simple shadowing of input and output parameters
-create or replace function shadowtest(in1 int)
- returns table (out1 int) as $$
-declare
-in1 int;
-out1 int;
-begin
-end
-$$ language plpgsql;
-WARNING: variable "in1" shadows a previously defined variable
-LINE 4: in1 int;
- ^
-WARNING: variable "out1" shadows a previously defined variable
-LINE 5: out1 int;
- ^
-select shadowtest(1);
- shadowtest
-------------
-(0 rows)
-
-set plpgsql.extra_warnings to 'shadowed_variables';
-select shadowtest(1);
- shadowtest
-------------
-(0 rows)
-
-create or replace function shadowtest(in1 int)
- returns table (out1 int) as $$
-declare
-in1 int;
-out1 int;
-begin
-end
-$$ language plpgsql;
-WARNING: variable "in1" shadows a previously defined variable
-LINE 4: in1 int;
- ^
-WARNING: variable "out1" shadows a previously defined variable
-LINE 5: out1 int;
- ^
-select shadowtest(1);
- shadowtest
-------------
-(0 rows)
-
-drop function shadowtest(int);
--- shadowing in a second DECLARE block
-create or replace function shadowtest()
- returns void as $$
-declare
-f1 int;
-begin
- declare
- f1 int;
- begin
- end;
-end$$ language plpgsql;
-WARNING: variable "f1" shadows a previously defined variable
-LINE 7: f1 int;
- ^
-drop function shadowtest();
--- several levels of shadowing
-create or replace function shadowtest(in1 int)
- returns void as $$
-declare
-in1 int;
-begin
- declare
- in1 int;
- begin
- end;
-end$$ language plpgsql;
-WARNING: variable "in1" shadows a previously defined variable
-LINE 4: in1 int;
- ^
-WARNING: variable "in1" shadows a previously defined variable
-LINE 7: in1 int;
- ^
-drop function shadowtest(int);
--- shadowing in cursor definitions
-create or replace function shadowtest()
- returns void as $$
-declare
-f1 int;
-c1 cursor (f1 int) for select 1;
-begin
-end$$ language plpgsql;
-WARNING: variable "f1" shadows a previously defined variable
-LINE 5: c1 cursor (f1 int) for select 1;
- ^
-drop function shadowtest();
--- test errors when shadowing a variable
-set plpgsql.extra_errors to 'shadowed_variables';
-create or replace function shadowtest(f1 int)
- returns boolean as $$
-declare f1 int; begin return 1; end $$ language plpgsql;
-ERROR: variable "f1" shadows a previously defined variable
-LINE 3: declare f1 int; begin return 1; end $$ language plpgsql;
- ^
-select shadowtest(1);
-ERROR: function shadowtest(integer) does not exist
-LINE 1: select shadowtest(1);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-reset plpgsql.extra_errors;
-reset plpgsql.extra_warnings;
-create or replace function shadowtest(f1 int)
- returns boolean as $$
-declare f1 int; begin return 1; end $$ language plpgsql;
-select shadowtest(1);
- shadowtest
-------------
- t
-(1 row)
-
--- runtime extra checks
-set plpgsql.extra_warnings to 'too_many_rows';
-do $$
-declare x int;
-begin
- select v from generate_series(1,2) g(v) into x;
-end;
-$$;
-WARNING: query returned more than one row
-HINT: Make sure the query returns a single row, or use LIMIT 1.
-set plpgsql.extra_errors to 'too_many_rows';
-do $$
-declare x int;
-begin
- select v from generate_series(1,2) g(v) into x;
-end;
-$$;
-ERROR: query returned more than one row
-HINT: Make sure the query returns a single row, or use LIMIT 1.
-CONTEXT: PL/pgSQL function inline_code_block line 4 at SQL statement
-reset plpgsql.extra_errors;
-reset plpgsql.extra_warnings;
-set plpgsql.extra_warnings to 'strict_multi_assignment';
-do $$
-declare
- x int;
- y int;
-begin
- select 1 into x, y;
- select 1,2 into x, y;
- select 1,2,3 into x, y;
-end
-$$;
-WARNING: number of source and target fields in assignment does not match
-DETAIL: strict_multi_assignment check of extra_warnings is active.
-HINT: Make sure the query returns the exact list of columns.
-WARNING: number of source and target fields in assignment does not match
-DETAIL: strict_multi_assignment check of extra_warnings is active.
-HINT: Make sure the query returns the exact list of columns.
-set plpgsql.extra_errors to 'strict_multi_assignment';
-do $$
-declare
- x int;
- y int;
-begin
- select 1 into x, y;
- select 1,2 into x, y;
- select 1,2,3 into x, y;
-end
-$$;
-ERROR: number of source and target fields in assignment does not match
-DETAIL: strict_multi_assignment check of extra_errors is active.
-HINT: Make sure the query returns the exact list of columns.
-CONTEXT: PL/pgSQL function inline_code_block line 6 at SQL statement
-create table test_01(a int, b int, c int);
-alter table test_01 drop column a;
--- the check is active only when source table is not empty
-insert into test_01 values(10,20);
-do $$
-declare
- x int;
- y int;
-begin
- select * from test_01 into x, y; -- should be ok
- raise notice 'ok';
- select * from test_01 into x; -- should to fail
-end;
-$$;
-NOTICE: ok
-ERROR: number of source and target fields in assignment does not match
-DETAIL: strict_multi_assignment check of extra_errors is active.
-HINT: Make sure the query returns the exact list of columns.
-CONTEXT: PL/pgSQL function inline_code_block line 8 at SQL statement
-do $$
-declare
- t test_01;
-begin
- select 1, 2 into t; -- should be ok
- raise notice 'ok';
- select 1, 2, 3 into t; -- should fail;
-end;
-$$;
-NOTICE: ok
-ERROR: number of source and target fields in assignment does not match
-DETAIL: strict_multi_assignment check of extra_errors is active.
-HINT: Make sure the query returns the exact list of columns.
-CONTEXT: PL/pgSQL function inline_code_block line 7 at SQL statement
-do $$
-declare
- t test_01;
-begin
- select 1 into t; -- should fail;
-end;
-$$;
-ERROR: number of source and target fields in assignment does not match
-DETAIL: strict_multi_assignment check of extra_errors is active.
-HINT: Make sure the query returns the exact list of columns.
-CONTEXT: PL/pgSQL function inline_code_block line 5 at SQL statement
-drop table test_01;
-reset plpgsql.extra_errors;
-reset plpgsql.extra_warnings;
--- test scrollable cursor support
-create function sc_test() returns setof integer as $$
-declare
- c scroll cursor for select f1 from int4_tbl;
- x integer;
-begin
- open c;
- fetch last from c into x;
- while found loop
- return next x;
- fetch prior from c into x;
- end loop;
- close c;
-end;
-$$ language plpgsql;
-select * from sc_test();
- sc_test
--------------
- -2147483647
- 2147483647
- -123456
- 123456
- 0
-(5 rows)
-
-create or replace function sc_test() returns setof integer as $$
-declare
- c no scroll cursor for select f1 from int4_tbl;
- x integer;
-begin
- open c;
- fetch last from c into x;
- while found loop
- return next x;
- fetch prior from c into x;
- end loop;
- close c;
-end;
-$$ language plpgsql;
-select * from sc_test(); -- fails because of NO SCROLL specification
-ERROR: cursor can only scan forward
-HINT: Declare it with SCROLL option to enable backward scan.
-CONTEXT: PL/pgSQL function sc_test() line 7 at FETCH
-create or replace function sc_test() returns setof integer as $$
-declare
- c refcursor;
- x integer;
-begin
- open c scroll for select f1 from int4_tbl;
- fetch last from c into x;
- while found loop
- return next x;
- fetch prior from c into x;
- end loop;
- close c;
-end;
-$$ language plpgsql;
-select * from sc_test();
- sc_test
--------------
- -2147483647
- 2147483647
- -123456
- 123456
- 0
-(5 rows)
-
-create or replace function sc_test() returns setof integer as $$
-declare
- c refcursor;
- x integer;
-begin
- open c scroll for execute 'select f1 from int4_tbl';
- fetch last from c into x;
- while found loop
- return next x;
- fetch relative -2 from c into x;
- end loop;
- close c;
-end;
-$$ language plpgsql;
-select * from sc_test();
- sc_test
--------------
- -2147483647
- -123456
- 0
-(3 rows)
-
-create or replace function sc_test() returns setof integer as $$
-declare
- c refcursor;
- x integer;
-begin
- open c scroll for execute 'select f1 from int4_tbl';
- fetch last from c into x;
- while found loop
- return next x;
- move backward 2 from c;
- fetch relative -1 from c into x;
- end loop;
- close c;
-end;
-$$ language plpgsql;
-select * from sc_test();
- sc_test
--------------
- -2147483647
- 123456
-(2 rows)
-
-create or replace function sc_test() returns setof integer as $$
-declare
- c cursor for select * from generate_series(1, 10);
- x integer;
-begin
- open c;
- loop
- move relative 2 in c;
- if not found then
- exit;
- end if;
- fetch next from c into x;
- if found then
- return next x;
- end if;
- end loop;
- close c;
-end;
-$$ language plpgsql;
-select * from sc_test();
- sc_test
----------
- 3
- 6
- 9
-(3 rows)
-
-create or replace function sc_test() returns setof integer as $$
-declare
- c cursor for select * from generate_series(1, 10);
- x integer;
-begin
- open c;
- move forward all in c;
- fetch backward from c into x;
- if found then
- return next x;
- end if;
- close c;
-end;
-$$ language plpgsql;
-select * from sc_test();
- sc_test
----------
- 10
-(1 row)
-
-drop function sc_test();
--- test qualified variable names
-create function pl_qual_names (param1 int) returns void as $$
-<>
-declare
- param1 int := 1;
-begin
- <>
- declare
- param1 int := 2;
- begin
- raise notice 'param1 = %', param1;
- raise notice 'pl_qual_names.param1 = %', pl_qual_names.param1;
- raise notice 'outerblock.param1 = %', outerblock.param1;
- raise notice 'innerblock.param1 = %', innerblock.param1;
- end;
-end;
-$$ language plpgsql;
-select pl_qual_names(42);
-NOTICE: param1 = 2
-NOTICE: pl_qual_names.param1 = 42
-NOTICE: outerblock.param1 = 1
-NOTICE: innerblock.param1 = 2
- pl_qual_names
----------------
-
-(1 row)
-
-drop function pl_qual_names(int);
--- tests for RETURN QUERY
-create function ret_query1(out int, out int) returns setof record as $$
-begin
- $1 := -1;
- $2 := -2;
- return next;
- return query select x + 1, x * 10 from generate_series(0, 10) s (x);
- return next;
-end;
-$$ language plpgsql;
-select * from ret_query1();
- column1 | column2
----------+---------
- -1 | -2
- 1 | 0
- 2 | 10
- 3 | 20
- 4 | 30
- 5 | 40
- 6 | 50
- 7 | 60
- 8 | 70
- 9 | 80
- 10 | 90
- 11 | 100
- -1 | -2
-(13 rows)
-
-create type record_type as (x text, y int, z boolean);
-create or replace function ret_query2(lim int) returns setof record_type as $$
-begin
- return query select fipshash(s.x::text), s.x, s.x > 0
- from generate_series(-8, lim) s (x) where s.x % 2 = 0;
-end;
-$$ language plpgsql;
-select * from ret_query2(8);
- x | y | z
-----------------------------------+----+---
- e91592205d3881e3ea35d66973bb4898 | -8 | f
- 03b26944890929ff751653acb2f2af79 | -6 | f
- e5e0093f285a4fb94c3fcc2ad7fd04ed | -4 | f
- cf3bae39dd692048a8bf961182e6a34d | -2 | f
- 5feceb66ffc86f38d952786c6d696c79 | 0 | f
- d4735e3a265e16eee03f59718b9b5d03 | 2 | t
- 4b227777d4dd1fc61c6f884f48641d02 | 4 | t
- e7f6c011776e8db7cd330b54174fd76f | 6 | t
- 2c624232cdd221771294dfbb310aca00 | 8 | t
-(9 rows)
-
--- test EXECUTE USING
-create function exc_using(int, text) returns int as $$
-declare i int;
-begin
- for i in execute 'select * from generate_series(1,$1)' using $1+1 loop
- raise notice '%', i;
- end loop;
- execute 'select $2 + $2*3 + length($1)' into i using $2,$1;
- return i;
-end
-$$ language plpgsql;
-select exc_using(5, 'foobar');
-NOTICE: 1
-NOTICE: 2
-NOTICE: 3
-NOTICE: 4
-NOTICE: 5
-NOTICE: 6
- exc_using
------------
- 26
-(1 row)
-
-drop function exc_using(int, text);
-create or replace function exc_using(int) returns void as $$
-declare
- c refcursor;
- i int;
-begin
- open c for execute 'select * from generate_series(1,$1)' using $1+1;
- loop
- fetch c into i;
- exit when not found;
- raise notice '%', i;
- end loop;
- close c;
- return;
-end;
-$$ language plpgsql;
-select exc_using(5);
-NOTICE: 1
-NOTICE: 2
-NOTICE: 3
-NOTICE: 4
-NOTICE: 5
-NOTICE: 6
- exc_using
------------
-
-(1 row)
-
-drop function exc_using(int);
--- test FOR-over-cursor
-create or replace function forc01() returns void as $$
-declare
- c cursor(r1 integer, r2 integer)
- for select * from generate_series(r1,r2) i;
- c2 cursor
- for select * from generate_series(41,43) i;
-begin
- -- assign portal names to cursors to get stable output
- c := 'c';
- c2 := 'c2';
- for r in c(5,7) loop
- raise notice '% from %', r.i, c;
- end loop;
- -- again, to test if cursor was closed properly
- for r in c(9,10) loop
- raise notice '% from %', r.i, c;
- end loop;
- -- and test a parameterless cursor
- for r in c2 loop
- raise notice '% from %', r.i, c2;
- end loop;
- -- and try it with a hand-assigned name
- raise notice 'after loop, c2 = %', c2;
- c2 := 'special_name';
- for r in c2 loop
- raise notice '% from %', r.i, c2;
- end loop;
- raise notice 'after loop, c2 = %', c2;
- -- and try it with a generated name
- -- (which we can't show in the output because it's variable)
- c2 := null;
- for r in c2 loop
- raise notice '%', r.i;
- end loop;
- raise notice 'after loop, c2 = %', c2;
- return;
-end;
-$$ language plpgsql;
-select forc01();
-NOTICE: 5 from c
-NOTICE: 6 from c
-NOTICE: 7 from c
-NOTICE: 9 from c
-NOTICE: 10 from c
-NOTICE: 41 from c2
-NOTICE: 42 from c2
-NOTICE: 43 from c2
-NOTICE: after loop, c2 = c2
-NOTICE: 41 from special_name
-NOTICE: 42 from special_name
-NOTICE: 43 from special_name
-NOTICE: after loop, c2 = special_name
-NOTICE: 41
-NOTICE: 42
-NOTICE: 43
-NOTICE: after loop, c2 =
- forc01
---------
-
-(1 row)
-
--- try updating the cursor's current row
-create temp table forc_test as
- select n as i, n as j from generate_series(1,10) n;
-create or replace function forc01() returns void as $$
-declare
- c cursor for select * from forc_test;
-begin
- for r in c loop
- raise notice '%, %', r.i, r.j;
- update forc_test set i = i * 100, j = r.j * 2 where current of c;
- end loop;
-end;
-$$ language plpgsql;
-select forc01();
-NOTICE: 1, 1
-NOTICE: 2, 2
-NOTICE: 3, 3
-NOTICE: 4, 4
-NOTICE: 5, 5
-NOTICE: 6, 6
-NOTICE: 7, 7
-NOTICE: 8, 8
-NOTICE: 9, 9
-NOTICE: 10, 10
- forc01
---------
-
-(1 row)
-
-select * from forc_test;
- i | j
-------+----
- 100 | 2
- 200 | 4
- 300 | 6
- 400 | 8
- 500 | 10
- 600 | 12
- 700 | 14
- 800 | 16
- 900 | 18
- 1000 | 20
-(10 rows)
-
--- same, with a cursor whose portal name doesn't match variable name
-create or replace function forc01() returns void as $$
-declare
- c refcursor := 'fooled_ya';
- r record;
-begin
- open c for select * from forc_test;
- loop
- fetch c into r;
- exit when not found;
- raise notice '%, %', r.i, r.j;
- update forc_test set i = i * 100, j = r.j * 2 where current of c;
- end loop;
-end;
-$$ language plpgsql;
-select forc01();
-NOTICE: 100, 2
-NOTICE: 200, 4
-NOTICE: 300, 6
-NOTICE: 400, 8
-NOTICE: 500, 10
-NOTICE: 600, 12
-NOTICE: 700, 14
-NOTICE: 800, 16
-NOTICE: 900, 18
-NOTICE: 1000, 20
- forc01
---------
-
-(1 row)
-
-select * from forc_test;
- i | j
---------+----
- 10000 | 4
- 20000 | 8
- 30000 | 12
- 40000 | 16
- 50000 | 20
- 60000 | 24
- 70000 | 28
- 80000 | 32
- 90000 | 36
- 100000 | 40
-(10 rows)
-
-drop function forc01();
--- it's okay to re-use a cursor variable name, even when bound
-do $$
-declare cnt int := 0;
- c1 cursor for select * from forc_test;
-begin
- for r1 in c1 loop
- declare c1 cursor for select * from forc_test;
- begin
- for r2 in c1 loop
- cnt := cnt + 1;
- end loop;
- end;
- end loop;
- raise notice 'cnt = %', cnt;
-end $$;
-NOTICE: cnt = 100
--- fail because cursor has no query bound to it
-create or replace function forc_bad() returns void as $$
-declare
- c refcursor;
-begin
- for r in c loop
- raise notice '%', r.i;
- end loop;
-end;
-$$ language plpgsql;
-ERROR: cursor FOR loop must use a bound cursor variable
-LINE 5: for r in c loop
- ^
--- test RETURN QUERY EXECUTE
-create or replace function return_dquery()
-returns setof int as $$
-begin
- return query execute 'select * from (values(10),(20)) f';
- return query execute 'select * from (values($1),($2)) f' using 40,50;
-end;
-$$ language plpgsql;
-select * from return_dquery();
- return_dquery
----------------
- 10
- 20
- 40
- 50
-(4 rows)
-
-drop function return_dquery();
--- test RETURN QUERY with dropped columns
-create table tabwithcols(a int, b int, c int, d int);
-insert into tabwithcols values(10,20,30,40),(50,60,70,80);
-create or replace function returnqueryf()
-returns setof tabwithcols as $$
-begin
- return query select * from tabwithcols;
- return query execute 'select * from tabwithcols';
-end;
-$$ language plpgsql;
-select * from returnqueryf();
- a | b | c | d
-----+----+----+----
- 10 | 20 | 30 | 40
- 50 | 60 | 70 | 80
- 10 | 20 | 30 | 40
- 50 | 60 | 70 | 80
-(4 rows)
-
-alter table tabwithcols drop column b;
-select * from returnqueryf();
- a | c | d
-----+----+----
- 10 | 30 | 40
- 50 | 70 | 80
- 10 | 30 | 40
- 50 | 70 | 80
-(4 rows)
-
-alter table tabwithcols drop column d;
-select * from returnqueryf();
- a | c
-----+----
- 10 | 30
- 50 | 70
- 10 | 30
- 50 | 70
-(4 rows)
-
-alter table tabwithcols add column d int;
-select * from returnqueryf();
- a | c | d
-----+----+---
- 10 | 30 |
- 50 | 70 |
- 10 | 30 |
- 50 | 70 |
-(4 rows)
-
-drop function returnqueryf();
-drop table tabwithcols;
---
--- Tests for composite-type results
---
-create type compostype as (x int, y varchar);
--- test: use of variable of composite type in return statement
-create or replace function compos() returns compostype as $$
-declare
- v compostype;
-begin
- v := (1, 'hello');
- return v;
-end;
-$$ language plpgsql;
-select compos();
- compos
------------
- (1,hello)
-(1 row)
-
--- test: use of variable of record type in return statement
-create or replace function compos() returns compostype as $$
-declare
- v record;
-begin
- v := (1, 'hello'::varchar);
- return v;
-end;
-$$ language plpgsql;
-select compos();
- compos
------------
- (1,hello)
-(1 row)
-
--- test: use of row expr in return statement
-create or replace function compos() returns compostype as $$
-begin
- return (1, 'hello'::varchar);
-end;
-$$ language plpgsql;
-select compos();
- compos
------------
- (1,hello)
-(1 row)
-
--- this does not work currently (no implicit casting)
-create or replace function compos() returns compostype as $$
-begin
- return (1, 'hello');
-end;
-$$ language plpgsql;
-select compos();
-ERROR: returned record type does not match expected record type
-DETAIL: Returned type unknown does not match expected type character varying in column 2.
-CONTEXT: PL/pgSQL function compos() while casting return value to function's return type
--- ... but this does
-create or replace function compos() returns compostype as $$
-begin
- return (1, 'hello')::compostype;
-end;
-$$ language plpgsql;
-select compos();
- compos
------------
- (1,hello)
-(1 row)
-
-drop function compos();
--- test: return a row expr as record.
-create or replace function composrec() returns record as $$
-declare
- v record;
-begin
- v := (1, 'hello');
- return v;
-end;
-$$ language plpgsql;
-select composrec();
- composrec
------------
- (1,hello)
-(1 row)
-
--- test: return row expr in return statement.
-create or replace function composrec() returns record as $$
-begin
- return (1, 'hello');
-end;
-$$ language plpgsql;
-select composrec();
- composrec
------------
- (1,hello)
-(1 row)
-
-drop function composrec();
--- test: row expr in RETURN NEXT statement.
-create or replace function compos() returns setof compostype as $$
-begin
- for i in 1..3
- loop
- return next (1, 'hello'::varchar);
- end loop;
- return next null::compostype;
- return next (2, 'goodbye')::compostype;
-end;
-$$ language plpgsql;
-select * from compos();
- x | y
----+---------
- 1 | hello
- 1 | hello
- 1 | hello
- |
- 2 | goodbye
-(5 rows)
-
-drop function compos();
--- test: use invalid expr in return statement.
-create or replace function compos() returns compostype as $$
-begin
- return 1 + 1;
-end;
-$$ language plpgsql;
-select compos();
-ERROR: cannot return non-composite value from function returning composite type
-CONTEXT: PL/pgSQL function compos() line 3 at RETURN
--- RETURN variable is a different code path ...
-create or replace function compos() returns compostype as $$
-declare x int := 42;
-begin
- return x;
-end;
-$$ language plpgsql;
-select * from compos();
-ERROR: cannot return non-composite value from function returning composite type
-CONTEXT: PL/pgSQL function compos() line 4 at RETURN
-drop function compos();
--- test: invalid use of composite variable in scalar-returning function
-create or replace function compos() returns int as $$
-declare
- v compostype;
-begin
- v := (1, 'hello');
- return v;
-end;
-$$ language plpgsql;
-select compos();
-ERROR: invalid input syntax for type integer: "(1,hello)"
-CONTEXT: PL/pgSQL function compos() while casting return value to function's return type
--- test: invalid use of composite expression in scalar-returning function
-create or replace function compos() returns int as $$
-begin
- return (1, 'hello')::compostype;
-end;
-$$ language plpgsql;
-select compos();
-ERROR: invalid input syntax for type integer: "(1,hello)"
-CONTEXT: PL/pgSQL function compos() while casting return value to function's return type
-drop function compos();
-drop type compostype;
---
--- Tests for 8.4's new RAISE features
---
-create or replace function raise_test() returns void as $$
-begin
- raise notice '% % %', 1, 2, 3
- using errcode = '55001', detail = 'some detail info', hint = 'some hint';
- raise '% % %', 1, 2, 3
- using errcode = 'division_by_zero', detail = 'some detail info';
-end;
-$$ language plpgsql;
-select raise_test();
-NOTICE: 1 2 3
-DETAIL: some detail info
-HINT: some hint
-ERROR: 1 2 3
-DETAIL: some detail info
-CONTEXT: PL/pgSQL function raise_test() line 5 at RAISE
--- Since we can't actually see the thrown SQLSTATE in default psql output,
--- test it like this; this also tests re-RAISE
-create or replace function raise_test() returns void as $$
-begin
- raise 'check me'
- using errcode = 'division_by_zero', detail = 'some detail info';
- exception
- when others then
- raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm;
- raise;
-end;
-$$ language plpgsql;
-select raise_test();
-NOTICE: SQLSTATE: 22012 SQLERRM: check me
-ERROR: check me
-DETAIL: some detail info
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
-create or replace function raise_test() returns void as $$
-begin
- raise 'check me'
- using errcode = '1234F', detail = 'some detail info';
- exception
- when others then
- raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm;
- raise;
-end;
-$$ language plpgsql;
-select raise_test();
-NOTICE: SQLSTATE: 1234F SQLERRM: check me
-ERROR: check me
-DETAIL: some detail info
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
--- SQLSTATE specification in WHEN
-create or replace function raise_test() returns void as $$
-begin
- raise 'check me'
- using errcode = '1234F', detail = 'some detail info';
- exception
- when sqlstate '1234F' then
- raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm;
- raise;
-end;
-$$ language plpgsql;
-select raise_test();
-NOTICE: SQLSTATE: 1234F SQLERRM: check me
-ERROR: check me
-DETAIL: some detail info
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
-create or replace function raise_test() returns void as $$
-begin
- raise division_by_zero using detail = 'some detail info';
- exception
- when others then
- raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm;
- raise;
-end;
-$$ language plpgsql;
-select raise_test();
-NOTICE: SQLSTATE: 22012 SQLERRM: division_by_zero
-ERROR: division_by_zero
-DETAIL: some detail info
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
-create or replace function raise_test() returns void as $$
-begin
- raise division_by_zero;
-end;
-$$ language plpgsql;
-select raise_test();
-ERROR: division_by_zero
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
-create or replace function raise_test() returns void as $$
-begin
- raise sqlstate '1234F';
-end;
-$$ language plpgsql;
-select raise_test();
-ERROR: 1234F
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
-create or replace function raise_test() returns void as $$
-begin
- raise division_by_zero using message = 'custom' || ' message';
-end;
-$$ language plpgsql;
-select raise_test();
-ERROR: custom message
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
-create or replace function raise_test() returns void as $$
-begin
- raise using message = 'custom' || ' message', errcode = '22012';
-end;
-$$ language plpgsql;
-select raise_test();
-ERROR: custom message
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
--- conflict on message
-create or replace function raise_test() returns void as $$
-begin
- raise notice 'some message' using message = 'custom' || ' message', errcode = '22012';
-end;
-$$ language plpgsql;
-select raise_test();
-ERROR: RAISE option already specified: MESSAGE
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
--- conflict on errcode
-create or replace function raise_test() returns void as $$
-begin
- raise division_by_zero using message = 'custom' || ' message', errcode = '22012';
-end;
-$$ language plpgsql;
-select raise_test();
-ERROR: RAISE option already specified: ERRCODE
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
--- nothing to re-RAISE
-create or replace function raise_test() returns void as $$
-begin
- raise;
-end;
-$$ language plpgsql;
-select raise_test();
-ERROR: RAISE without parameters cannot be used outside an exception handler
-CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE
--- test access to exception data
-create function zero_divide() returns int as $$
-declare v int := 0;
-begin
- return 10 / v;
-end;
-$$ language plpgsql parallel safe;
-create or replace function raise_test() returns void as $$
-begin
- raise exception 'custom exception'
- using detail = 'some detail of custom exception',
- hint = 'some hint related to custom exception';
-end;
-$$ language plpgsql;
-create function stacked_diagnostics_test() returns void as $$
-declare _sqlstate text;
- _message text;
- _context text;
-begin
- perform zero_divide();
-exception when others then
- get stacked diagnostics
- _sqlstate = returned_sqlstate,
- _message = message_text,
- _context = pg_exception_context;
- raise notice 'sqlstate: %, message: %, context: [%]',
- _sqlstate, _message, replace(_context, E'\n', ' <- ');
-end;
-$$ language plpgsql;
-select stacked_diagnostics_test();
-NOTICE: sqlstate: 22012, message: division by zero, context: [PL/pgSQL function zero_divide() line 4 at RETURN <- SQL statement "SELECT zero_divide()" <- PL/pgSQL function stacked_diagnostics_test() line 6 at PERFORM]
- stacked_diagnostics_test
---------------------------
-
-(1 row)
-
-create or replace function stacked_diagnostics_test() returns void as $$
-declare _detail text;
- _hint text;
- _message text;
-begin
- perform raise_test();
-exception when others then
- get stacked diagnostics
- _message = message_text,
- _detail = pg_exception_detail,
- _hint = pg_exception_hint;
- raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint;
-end;
-$$ language plpgsql;
-select stacked_diagnostics_test();
-NOTICE: message: custom exception, detail: some detail of custom exception, hint: some hint related to custom exception
- stacked_diagnostics_test
---------------------------
-
-(1 row)
-
--- fail, cannot use stacked diagnostics statement outside handler
-create or replace function stacked_diagnostics_test() returns void as $$
-declare _detail text;
- _hint text;
- _message text;
-begin
- get stacked diagnostics
- _message = message_text,
- _detail = pg_exception_detail,
- _hint = pg_exception_hint;
- raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint;
-end;
-$$ language plpgsql;
-select stacked_diagnostics_test();
-ERROR: GET STACKED DIAGNOSTICS cannot be used outside an exception handler
-CONTEXT: PL/pgSQL function stacked_diagnostics_test() line 6 at GET STACKED DIAGNOSTICS
-drop function stacked_diagnostics_test();
--- Test that an error recovery subtransaction is parallel safe
-create function error_trap_test() returns text as $$
-begin
- perform zero_divide();
- return 'no error detected!';
-exception when division_by_zero then
- return 'division_by_zero detected';
-end;
-$$ language plpgsql parallel safe;
-set debug_parallel_query to on;
-explain (verbose, costs off) select error_trap_test();
- QUERY PLAN
------------------------------------
- Gather
- Output: (error_trap_test())
- Workers Planned: 1
- Single Copy: true
- -> Result
- Output: error_trap_test()
-(6 rows)
-
-select error_trap_test();
- error_trap_test
----------------------------
- division_by_zero detected
-(1 row)
-
-reset debug_parallel_query;
-drop function error_trap_test();
-drop function zero_divide();
--- check cases where implicit SQLSTATE variable could be confused with
--- SQLSTATE as a keyword, cf bug #5524
-create or replace function raise_test() returns void as $$
-begin
- perform 1/0;
-exception
- when sqlstate '22012' then
- raise notice using message = sqlstate;
- raise sqlstate '22012' using message = 'substitute message';
-end;
-$$ language plpgsql;
-select raise_test();
-NOTICE: 22012
-ERROR: substitute message
-CONTEXT: PL/pgSQL function raise_test() line 7 at RAISE
-drop function raise_test();
--- test passing column_name, constraint_name, datatype_name, table_name
--- and schema_name error fields
-create or replace function stacked_diagnostics_test() returns void as $$
-declare _column_name text;
- _constraint_name text;
- _datatype_name text;
- _table_name text;
- _schema_name text;
-begin
- raise exception using
- column = '>>some column name<<',
- constraint = '>>some constraint name<<',
- datatype = '>>some datatype name<<',
- table = '>>some table name<<',
- schema = '>>some schema name<<';
-exception when others then
- get stacked diagnostics
- _column_name = column_name,
- _constraint_name = constraint_name,
- _datatype_name = pg_datatype_name,
- _table_name = table_name,
- _schema_name = schema_name;
- raise notice 'column %, constraint %, type %, table %, schema %',
- _column_name, _constraint_name, _datatype_name, _table_name, _schema_name;
-end;
-$$ language plpgsql;
-select stacked_diagnostics_test();
-NOTICE: column >>some column name<<, constraint >>some constraint name<<, type >>some datatype name<<, table >>some table name<<, schema >>some schema name<<
- stacked_diagnostics_test
---------------------------
-
-(1 row)
-
-drop function stacked_diagnostics_test();
--- test variadic functions
-create or replace function vari(variadic int[])
-returns void as $$
-begin
- for i in array_lower($1,1)..array_upper($1,1) loop
- raise notice '%', $1[i];
- end loop; end;
-$$ language plpgsql;
-select vari(1,2,3,4,5);
-NOTICE: 1
-NOTICE: 2
-NOTICE: 3
-NOTICE: 4
-NOTICE: 5
- vari
-------
-
-(1 row)
-
-select vari(3,4,5);
-NOTICE: 3
-NOTICE: 4
-NOTICE: 5
- vari
-------
-
-(1 row)
-
-select vari(variadic array[5,6,7]);
-NOTICE: 5
-NOTICE: 6
-NOTICE: 7
- vari
-------
-
-(1 row)
-
-drop function vari(int[]);
--- coercion test
-create or replace function pleast(variadic numeric[])
-returns numeric as $$
-declare aux numeric = $1[array_lower($1,1)];
-begin
- for i in array_lower($1,1)+1..array_upper($1,1) loop
- if $1[i] < aux then aux := $1[i]; end if;
- end loop;
- return aux;
-end;
-$$ language plpgsql immutable strict;
-select pleast(10,1,2,3,-16);
- pleast
---------
- -16
-(1 row)
-
-select pleast(10.2,2.2,-1.1);
- pleast
---------
- -1.1
-(1 row)
-
-select pleast(10.2,10, -20);
- pleast
---------
- -20
-(1 row)
-
-select pleast(10,20, -1.0);
- pleast
---------
- -1.0
-(1 row)
-
--- in case of conflict, non-variadic version is preferred
-create or replace function pleast(numeric)
-returns numeric as $$
-begin
- raise notice 'non-variadic function called';
- return $1;
-end;
-$$ language plpgsql immutable strict;
-select pleast(10);
-NOTICE: non-variadic function called
- pleast
---------
- 10
-(1 row)
-
-drop function pleast(numeric[]);
-drop function pleast(numeric);
--- test table functions
-create function tftest(int) returns table(a int, b int) as $$
-begin
- return query select $1, $1+i from generate_series(1,5) g(i);
-end;
-$$ language plpgsql immutable strict;
-select * from tftest(10);
- a | b
-----+----
- 10 | 11
- 10 | 12
- 10 | 13
- 10 | 14
- 10 | 15
-(5 rows)
-
-create or replace function tftest(a1 int) returns table(a int, b int) as $$
-begin
- a := a1; b := a1 + 1;
- return next;
- a := a1 * 10; b := a1 * 10 + 1;
- return next;
-end;
-$$ language plpgsql immutable strict;
-select * from tftest(10);
- a | b
------+-----
- 10 | 11
- 100 | 101
-(2 rows)
-
-drop function tftest(int);
-create function rttest()
-returns setof int as $$
-declare rc int;
-begin
- return query values(10),(20);
- get diagnostics rc = row_count;
- raise notice '% %', found, rc;
- return query select * from (values(10),(20)) f(a) where false;
- get diagnostics rc = row_count;
- raise notice '% %', found, rc;
- return query execute 'values(10),(20)';
- get diagnostics rc = row_count;
- raise notice '% %', found, rc;
- return query execute 'select * from (values(10),(20)) f(a) where false';
- get diagnostics rc = row_count;
- raise notice '% %', found, rc;
-end;
-$$ language plpgsql;
-select * from rttest();
-NOTICE: t 2
-NOTICE: f 0
-NOTICE: t 2
-NOTICE: f 0
- rttest
---------
- 10
- 20
- 10
- 20
-(4 rows)
-
--- check some error cases, too
-create or replace function rttest()
-returns setof int as $$
-begin
- return query select 10 into no_such_table;
-end;
-$$ language plpgsql;
-select * from rttest();
-ERROR: SELECT INTO query does not return tuples
-CONTEXT: SQL statement "select 10 into no_such_table"
-PL/pgSQL function rttest() line 3 at RETURN QUERY
-create or replace function rttest()
-returns setof int as $$
-begin
- return query execute 'select 10 into no_such_table';
-end;
-$$ language plpgsql;
-select * from rttest();
-ERROR: SELECT INTO query does not return tuples
-CONTEXT: SQL statement "select 10 into no_such_table"
-PL/pgSQL function rttest() line 3 at RETURN QUERY
-select * from no_such_table;
-ERROR: relation "no_such_table" does not exist
-LINE 1: select * from no_such_table;
- ^
-drop function rttest();
--- Test for proper cleanup at subtransaction exit. This example
--- exposed a bug in PG 8.2.
-CREATE FUNCTION leaker_1(fail BOOL) RETURNS INTEGER AS $$
-DECLARE
- v_var INTEGER;
-BEGIN
- BEGIN
- v_var := (leaker_2(fail)).error_code;
- EXCEPTION
- WHEN others THEN RETURN 0;
- END;
- RETURN 1;
-END;
-$$ LANGUAGE plpgsql;
-CREATE FUNCTION leaker_2(fail BOOL, OUT error_code INTEGER, OUT new_id INTEGER)
- RETURNS RECORD AS $$
-BEGIN
- IF fail THEN
- RAISE EXCEPTION 'fail ...';
- END IF;
- error_code := 1;
- new_id := 1;
- RETURN;
-END;
-$$ LANGUAGE plpgsql;
-SELECT * FROM leaker_1(false);
- leaker_1
-----------
- 1
-(1 row)
-
-SELECT * FROM leaker_1(true);
- leaker_1
-----------
- 0
-(1 row)
-
-DROP FUNCTION leaker_1(bool);
-DROP FUNCTION leaker_2(bool);
--- Test for appropriate cleanup of non-simple expression evaluations
--- (bug in all versions prior to August 2010)
-CREATE FUNCTION nonsimple_expr_test() RETURNS text[] AS $$
-DECLARE
- arr text[];
- lr text;
- i integer;
-BEGIN
- arr := array[array['foo','bar'], array['baz', 'quux']];
- lr := 'fool';
- i := 1;
- -- use sub-SELECTs to make expressions non-simple
- arr[(SELECT i)][(SELECT i+1)] := (SELECT lr);
- RETURN arr;
-END;
-$$ LANGUAGE plpgsql;
-SELECT nonsimple_expr_test();
- nonsimple_expr_test
--------------------------
- {{foo,fool},{baz,quux}}
-(1 row)
-
-DROP FUNCTION nonsimple_expr_test();
-CREATE FUNCTION nonsimple_expr_test() RETURNS integer AS $$
-declare
- i integer NOT NULL := 0;
-begin
- begin
- i := (SELECT NULL::integer); -- should throw error
- exception
- WHEN OTHERS THEN
- i := (SELECT 1::integer);
- end;
- return i;
-end;
-$$ LANGUAGE plpgsql;
-SELECT nonsimple_expr_test();
- nonsimple_expr_test
----------------------
- 1
-(1 row)
-
-DROP FUNCTION nonsimple_expr_test();
---
--- Test cases involving recursion and error recovery in simple expressions
--- (bugs in all versions before October 2010). The problems are most
--- easily exposed by mutual recursion between plpgsql and sql functions.
---
-create function recurse(float8) returns float8 as
-$$
-begin
- if ($1 > 0) then
- return sql_recurse($1 - 1);
- else
- return $1;
- end if;
-end;
-$$ language plpgsql;
--- "limit" is to prevent this from being inlined
-create function sql_recurse(float8) returns float8 as
-$$ select recurse($1) limit 1; $$ language sql;
-select recurse(10);
- recurse
----------
- 0
-(1 row)
-
-create function error1(text) returns text language sql as
-$$ SELECT relname::text FROM pg_class c WHERE c.oid = $1::regclass $$;
-create function error2(p_name_table text) returns text language plpgsql as $$
-begin
- return error1(p_name_table);
-end$$;
-BEGIN;
-create table public.stuffs (stuff text);
-SAVEPOINT a;
-select error2('nonexistent.stuffs');
-ERROR: schema "nonexistent" does not exist
-CONTEXT: SQL function "error1" statement 1
-PL/pgSQL function error2(text) line 3 at RETURN
-ROLLBACK TO a;
-select error2('public.stuffs');
- error2
---------
- stuffs
-(1 row)
-
-rollback;
-drop function error2(p_name_table text);
-drop function error1(text);
--- Test for proper handling of cast-expression caching
-create function sql_to_date(integer) returns date as $$
-select $1::text::date
-$$ language sql immutable strict;
-create cast (integer as date) with function sql_to_date(integer) as assignment;
-create function cast_invoker(integer) returns date as $$
-begin
- return $1;
-end$$ language plpgsql;
-select cast_invoker(20150717);
- cast_invoker
---------------
- 07-17-2015
-(1 row)
-
-select cast_invoker(20150718); -- second call crashed in pre-release 9.5
- cast_invoker
---------------
- 07-18-2015
-(1 row)
-
-begin;
-select cast_invoker(20150717);
- cast_invoker
---------------
- 07-17-2015
-(1 row)
-
-select cast_invoker(20150718);
- cast_invoker
---------------
- 07-18-2015
-(1 row)
-
-savepoint s1;
-select cast_invoker(20150718);
- cast_invoker
---------------
- 07-18-2015
-(1 row)
-
-select cast_invoker(-1); -- fails
-ERROR: invalid input syntax for type date: "-1"
-CONTEXT: SQL function "sql_to_date" statement 1
-PL/pgSQL function cast_invoker(integer) while casting return value to function's return type
-rollback to savepoint s1;
-select cast_invoker(20150719);
- cast_invoker
---------------
- 07-19-2015
-(1 row)
-
-select cast_invoker(20150720);
- cast_invoker
---------------
- 07-20-2015
-(1 row)
-
-commit;
-drop function cast_invoker(integer);
-drop function sql_to_date(integer) cascade;
-NOTICE: drop cascades to cast from integer to date
--- Test handling of cast cache inside DO blocks
--- (to check the original crash case, this must be a cast not previously
--- used in this session)
-begin;
-do $$ declare x text[]; begin x := '{1.23, 4.56}'::numeric[]; end $$;
-do $$ declare x text[]; begin x := '{1.23, 4.56}'::numeric[]; end $$;
-end;
--- Test for consistent reporting of error context
-create function fail() returns int language plpgsql as $$
-begin
- return 1/0;
-end
-$$;
-select fail();
-ERROR: division by zero
-CONTEXT: PL/pgSQL expression "1/0"
-PL/pgSQL function fail() line 3 at RETURN
-select fail();
-ERROR: division by zero
-CONTEXT: PL/pgSQL expression "1/0"
-PL/pgSQL function fail() line 3 at RETURN
-drop function fail();
--- Test handling of string literals.
-set standard_conforming_strings = off;
-create or replace function strtest() returns text as $$
-begin
- raise notice 'foo\\bar\041baz';
- return 'foo\\bar\041baz';
-end
-$$ language plpgsql;
-WARNING: nonstandard use of \\ in a string literal
-LINE 3: raise notice 'foo\\bar\041baz';
- ^
-HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
-WARNING: nonstandard use of \\ in a string literal
-LINE 4: return 'foo\\bar\041baz';
- ^
-HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
-WARNING: nonstandard use of \\ in a string literal
-LINE 4: return 'foo\\bar\041baz';
- ^
-HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
-select strtest();
-NOTICE: foo\bar!baz
-WARNING: nonstandard use of \\ in a string literal
-LINE 1: 'foo\\bar\041baz'
- ^
-HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
-QUERY: 'foo\\bar\041baz'
- strtest
--------------
- foo\bar!baz
-(1 row)
-
-create or replace function strtest() returns text as $$
-begin
- raise notice E'foo\\bar\041baz';
- return E'foo\\bar\041baz';
-end
-$$ language plpgsql;
-select strtest();
-NOTICE: foo\bar!baz
- strtest
--------------
- foo\bar!baz
-(1 row)
-
-set standard_conforming_strings = on;
-create or replace function strtest() returns text as $$
-begin
- raise notice 'foo\\bar\041baz\';
- return 'foo\\bar\041baz\';
-end
-$$ language plpgsql;
-select strtest();
-NOTICE: foo\\bar\041baz\
- strtest
-------------------
- foo\\bar\041baz\
-(1 row)
-
-create or replace function strtest() returns text as $$
-begin
- raise notice E'foo\\bar\041baz';
- return E'foo\\bar\041baz';
-end
-$$ language plpgsql;
-select strtest();
-NOTICE: foo\bar!baz
- strtest
--------------
- foo\bar!baz
-(1 row)
-
-drop function strtest();
--- Test anonymous code blocks.
-DO $$
-DECLARE r record;
-BEGIN
- FOR r IN SELECT rtrim(roomno) AS roomno, comment FROM Room ORDER BY roomno
- LOOP
- RAISE NOTICE '%, %', r.roomno, r.comment;
- END LOOP;
-END$$;
-NOTICE: 001, Entrance
-NOTICE: 002, Office
-NOTICE: 003, Office
-NOTICE: 004, Technical
-NOTICE: 101, Office
-NOTICE: 102, Conference
-NOTICE: 103, Restroom
-NOTICE: 104, Technical
-NOTICE: 105, Office
-NOTICE: 106, Office
--- these are to check syntax error reporting
-DO LANGUAGE plpgsql $$begin return 1; end$$;
-ERROR: RETURN cannot have a parameter in function returning void
-LINE 1: DO LANGUAGE plpgsql $$begin return 1; end$$;
- ^
-DO $$
-DECLARE r record;
-BEGIN
- FOR r IN SELECT rtrim(roomno) AS roomno, foo FROM Room ORDER BY roomno
- LOOP
- RAISE NOTICE '%, %', r.roomno, r.comment;
- END LOOP;
-END$$;
-ERROR: column "foo" does not exist
-LINE 1: SELECT rtrim(roomno) AS roomno, foo FROM Room ORDER BY roomn...
- ^
-QUERY: SELECT rtrim(roomno) AS roomno, foo FROM Room ORDER BY roomno
-CONTEXT: PL/pgSQL function inline_code_block line 4 at FOR over SELECT rows
--- Check handling of errors thrown from/into anonymous code blocks.
-do $outer$
-begin
- for i in 1..10 loop
- begin
- execute $ex$
- do $$
- declare x int = 0;
- begin
- x := 1 / x;
- end;
- $$;
- $ex$;
- exception when division_by_zero then
- raise notice 'caught division by zero';
- end;
- end loop;
-end;
-$outer$;
-NOTICE: caught division by zero
-NOTICE: caught division by zero
-NOTICE: caught division by zero
-NOTICE: caught division by zero
-NOTICE: caught division by zero
-NOTICE: caught division by zero
-NOTICE: caught division by zero
-NOTICE: caught division by zero
-NOTICE: caught division by zero
-NOTICE: caught division by zero
--- Check variable scoping -- a var is not available in its own or prior
--- default expressions, but it is available in later ones.
-do $$
-declare x int := x + 1; -- error
-begin
- raise notice 'x = %', x;
-end;
-$$;
-ERROR: column "x" does not exist
-LINE 1: x + 1
- ^
-QUERY: x + 1
-CONTEXT: PL/pgSQL function inline_code_block line 2 during statement block local variable initialization
-do $$
-declare y int := x + 1; -- error
- x int := 42;
-begin
- raise notice 'x = %, y = %', x, y;
-end;
-$$;
-ERROR: column "x" does not exist
-LINE 1: x + 1
- ^
-QUERY: x + 1
-CONTEXT: PL/pgSQL function inline_code_block line 2 during statement block local variable initialization
-do $$
-declare x int := 42;
- y int := x + 1;
-begin
- raise notice 'x = %, y = %', x, y;
-end;
-$$;
-NOTICE: x = 42, y = 43
-do $$
-declare x int := 42;
-begin
- declare y int := x + 1;
- x int := x + 2;
- z int := x * 10;
- begin
- raise notice 'x = %, y = %, z = %', x, y, z;
- end;
-end;
-$$;
-NOTICE: x = 44, y = 43, z = 440
--- Check handling of conflicts between plpgsql vars and table columns.
-set plpgsql.variable_conflict = error;
-create function conflict_test() returns setof int8_tbl as $$
-declare r record;
- q1 bigint := 42;
-begin
- for r in select q1,q2 from int8_tbl loop
- return next r;
- end loop;
-end;
-$$ language plpgsql;
-select * from conflict_test();
-ERROR: column reference "q1" is ambiguous
-LINE 1: select q1,q2 from int8_tbl
- ^
-DETAIL: It could refer to either a PL/pgSQL variable or a table column.
-QUERY: select q1,q2 from int8_tbl
-CONTEXT: PL/pgSQL function conflict_test() line 5 at FOR over SELECT rows
-create or replace function conflict_test() returns setof int8_tbl as $$
-#variable_conflict use_variable
-declare r record;
- q1 bigint := 42;
-begin
- for r in select q1,q2 from int8_tbl loop
- return next r;
- end loop;
-end;
-$$ language plpgsql;
-select * from conflict_test();
- q1 | q2
-----+-------------------
- 42 | 456
- 42 | 4567890123456789
- 42 | 123
- 42 | 4567890123456789
- 42 | -4567890123456789
-(5 rows)
-
-create or replace function conflict_test() returns setof int8_tbl as $$
-#variable_conflict use_column
-declare r record;
- q1 bigint := 42;
-begin
- for r in select q1,q2 from int8_tbl loop
- return next r;
- end loop;
-end;
-$$ language plpgsql;
-select * from conflict_test();
- q1 | q2
-------------------+-------------------
- 123 | 456
- 123 | 4567890123456789
- 4567890123456789 | 123
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
-(5 rows)
-
-drop function conflict_test();
--- Check that an unreserved keyword can be used as a variable name
-create function unreserved_test() returns int as $$
-declare
- forward int := 21;
-begin
- forward := forward * 2;
- return forward;
-end
-$$ language plpgsql;
-select unreserved_test();
- unreserved_test
------------------
- 42
-(1 row)
-
-create or replace function unreserved_test() returns int as $$
-declare
- return int := 42;
-begin
- return := return + 1;
- return return;
-end
-$$ language plpgsql;
-select unreserved_test();
- unreserved_test
------------------
- 43
-(1 row)
-
-create or replace function unreserved_test() returns int as $$
-declare
- comment int := 21;
-begin
- comment := comment * 2;
- comment on function unreserved_test() is 'this is a test';
- return comment;
-end
-$$ language plpgsql;
-select unreserved_test();
- unreserved_test
------------------
- 42
-(1 row)
-
-select obj_description('unreserved_test()'::regprocedure, 'pg_proc');
- obj_description
------------------
- this is a test
-(1 row)
-
-drop function unreserved_test();
---
--- Test FOREACH over arrays
---
-create function foreach_test(anyarray)
-returns void as $$
-declare x int;
-begin
- foreach x in array $1
- loop
- raise notice '%', x;
- end loop;
- end;
-$$ language plpgsql;
-select foreach_test(ARRAY[1,2,3,4]);
-NOTICE: 1
-NOTICE: 2
-NOTICE: 3
-NOTICE: 4
- foreach_test
---------------
-
-(1 row)
-
-select foreach_test(ARRAY[[1,2],[3,4]]);
-NOTICE: 1
-NOTICE: 2
-NOTICE: 3
-NOTICE: 4
- foreach_test
---------------
-
-(1 row)
-
-create or replace function foreach_test(anyarray)
-returns void as $$
-declare x int;
-begin
- foreach x slice 1 in array $1
- loop
- raise notice '%', x;
- end loop;
- end;
-$$ language plpgsql;
--- should fail
-select foreach_test(ARRAY[1,2,3,4]);
-ERROR: FOREACH ... SLICE loop variable must be of an array type
-CONTEXT: PL/pgSQL function foreach_test(anyarray) line 4 at FOREACH over array
-select foreach_test(ARRAY[[1,2],[3,4]]);
-ERROR: FOREACH ... SLICE loop variable must be of an array type
-CONTEXT: PL/pgSQL function foreach_test(anyarray) line 4 at FOREACH over array
-create or replace function foreach_test(anyarray)
-returns void as $$
-declare x int[];
-begin
- foreach x slice 1 in array $1
- loop
- raise notice '%', x;
- end loop;
- end;
-$$ language plpgsql;
-select foreach_test(ARRAY[1,2,3,4]);
-NOTICE: {1,2,3,4}
- foreach_test
---------------
-
-(1 row)
-
-select foreach_test(ARRAY[[1,2],[3,4]]);
-NOTICE: {1,2}
-NOTICE: {3,4}
- foreach_test
---------------
-
-(1 row)
-
--- higher level of slicing
-create or replace function foreach_test(anyarray)
-returns void as $$
-declare x int[];
-begin
- foreach x slice 2 in array $1
- loop
- raise notice '%', x;
- end loop;
- end;
-$$ language plpgsql;
--- should fail
-select foreach_test(ARRAY[1,2,3,4]);
-ERROR: slice dimension (2) is out of the valid range 0..1
-CONTEXT: PL/pgSQL function foreach_test(anyarray) line 4 at FOREACH over array
--- ok
-select foreach_test(ARRAY[[1,2],[3,4]]);
-NOTICE: {{1,2},{3,4}}
- foreach_test
---------------
-
-(1 row)
-
-select foreach_test(ARRAY[[[1,2]],[[3,4]]]);
-NOTICE: {{1,2}}
-NOTICE: {{3,4}}
- foreach_test
---------------
-
-(1 row)
-
-create type xy_tuple AS (x int, y int);
--- iteration over array of records
-create or replace function foreach_test(anyarray)
-returns void as $$
-declare r record;
-begin
- foreach r in array $1
- loop
- raise notice '%', r;
- end loop;
- end;
-$$ language plpgsql;
-select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]);
-NOTICE: (10,20)
-NOTICE: (40,69)
-NOTICE: (35,78)
- foreach_test
---------------
-
-(1 row)
-
-select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]);
-NOTICE: (10,20)
-NOTICE: (40,69)
-NOTICE: (35,78)
-NOTICE: (88,76)
- foreach_test
---------------
-
-(1 row)
-
-create or replace function foreach_test(anyarray)
-returns void as $$
-declare x int; y int;
-begin
- foreach x, y in array $1
- loop
- raise notice 'x = %, y = %', x, y;
- end loop;
- end;
-$$ language plpgsql;
-select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]);
-NOTICE: x = 10, y = 20
-NOTICE: x = 40, y = 69
-NOTICE: x = 35, y = 78
- foreach_test
---------------
-
-(1 row)
-
-select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]);
-NOTICE: x = 10, y = 20
-NOTICE: x = 40, y = 69
-NOTICE: x = 35, y = 78
-NOTICE: x = 88, y = 76
- foreach_test
---------------
-
-(1 row)
-
--- slicing over array of composite types
-create or replace function foreach_test(anyarray)
-returns void as $$
-declare x xy_tuple[];
-begin
- foreach x slice 1 in array $1
- loop
- raise notice '%', x;
- end loop;
- end;
-$$ language plpgsql;
-select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]);
-NOTICE: {"(10,20)","(40,69)","(35,78)"}
- foreach_test
---------------
-
-(1 row)
-
-select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]);
-NOTICE: {"(10,20)","(40,69)"}
-NOTICE: {"(35,78)","(88,76)"}
- foreach_test
---------------
-
-(1 row)
-
-drop function foreach_test(anyarray);
-drop type xy_tuple;
---
--- Assorted tests for array subscript assignment
---
-create temp table rtype (id int, ar text[]);
-create function arrayassign1() returns text[] language plpgsql as $$
-declare
- r record;
-begin
- r := row(12, '{foo,bar,baz}')::rtype;
- r.ar[2] := 'replace';
- return r.ar;
-end$$;
-select arrayassign1();
- arrayassign1
--------------------
- {foo,replace,baz}
-(1 row)
-
-select arrayassign1(); -- try again to exercise internal caching
- arrayassign1
--------------------
- {foo,replace,baz}
-(1 row)
-
-create domain orderedarray as int[2]
- constraint sorted check (value[1] < value[2]);
-select '{1,2}'::orderedarray;
- orderedarray
---------------
- {1,2}
-(1 row)
-
-select '{2,1}'::orderedarray; -- fail
-ERROR: value for domain orderedarray violates check constraint "sorted"
-create function testoa(x1 int, x2 int, x3 int) returns orderedarray
-language plpgsql as $$
-declare res orderedarray;
-begin
- res := array[x1, x2];
- res[2] := x3;
- return res;
-end$$;
-select testoa(1,2,3);
- testoa
---------
- {1,3}
-(1 row)
-
-select testoa(1,2,3); -- try again to exercise internal caching
- testoa
---------
- {1,3}
-(1 row)
-
-select testoa(2,1,3); -- fail at initial assign
-ERROR: value for domain orderedarray violates check constraint "sorted"
-CONTEXT: PL/pgSQL function testoa(integer,integer,integer) line 4 at assignment
-select testoa(1,2,1); -- fail at update
-ERROR: value for domain orderedarray violates check constraint "sorted"
-CONTEXT: PL/pgSQL function testoa(integer,integer,integer) line 5 at assignment
-drop function arrayassign1();
-drop function testoa(x1 int, x2 int, x3 int);
---
--- Test handling of expanded arrays
---
-create function returns_rw_array(int) returns int[]
-language plpgsql as $$
- declare r int[];
- begin r := array[$1, $1]; return r; end;
-$$ stable;
-create function consumes_rw_array(int[]) returns int
-language plpgsql as $$
- begin return $1[1]; end;
-$$ stable;
-select consumes_rw_array(returns_rw_array(42));
- consumes_rw_array
--------------------
- 42
-(1 row)
-
--- bug #14174
-explain (verbose, costs off)
-select i, a from
- (select returns_rw_array(1) as a offset 0) ss,
- lateral consumes_rw_array(a) i;
- QUERY PLAN
------------------------------------------------------------------
- Nested Loop
- Output: i.i, (returns_rw_array(1))
- -> Result
- Output: returns_rw_array(1)
- -> Function Scan on public.consumes_rw_array i
- Output: i.i
- Function Call: consumes_rw_array((returns_rw_array(1)))
-(7 rows)
-
-select i, a from
- (select returns_rw_array(1) as a offset 0) ss,
- lateral consumes_rw_array(a) i;
- i | a
----+-------
- 1 | {1,1}
-(1 row)
-
-explain (verbose, costs off)
-select consumes_rw_array(a), a from returns_rw_array(1) a;
- QUERY PLAN
---------------------------------------------
- Function Scan on public.returns_rw_array a
- Output: consumes_rw_array(a), a
- Function Call: returns_rw_array(1)
-(3 rows)
-
-select consumes_rw_array(a), a from returns_rw_array(1) a;
- consumes_rw_array | a
--------------------+-------
- 1 | {1,1}
-(1 row)
-
-explain (verbose, costs off)
-select consumes_rw_array(a), a from
- (values (returns_rw_array(1)), (returns_rw_array(2))) v(a);
- QUERY PLAN
----------------------------------------------------------------------
- Values Scan on "*VALUES*"
- Output: consumes_rw_array("*VALUES*".column1), "*VALUES*".column1
-(2 rows)
-
-select consumes_rw_array(a), a from
- (values (returns_rw_array(1)), (returns_rw_array(2))) v(a);
- consumes_rw_array | a
--------------------+-------
- 1 | {1,1}
- 2 | {2,2}
-(2 rows)
-
-do $$
-declare a int[] := array[1,2];
-begin
- a := a || 3;
- raise notice 'a = %', a;
-end$$;
-NOTICE: a = {1,2,3}
---
--- Test access to call stack
---
-create function inner_func(int)
-returns int as $$
-declare _context text;
-begin
- get diagnostics _context = pg_context;
- raise notice '***%***', _context;
- -- lets do it again, just for fun..
- get diagnostics _context = pg_context;
- raise notice '***%***', _context;
- raise notice 'lets make sure we didnt break anything';
- return 2 * $1;
-end;
-$$ language plpgsql;
-create or replace function outer_func(int)
-returns int as $$
-declare
- myresult int;
-begin
- raise notice 'calling down into inner_func()';
- myresult := inner_func($1);
- raise notice 'inner_func() done';
- return myresult;
-end;
-$$ language plpgsql;
-create or replace function outer_outer_func(int)
-returns int as $$
-declare
- myresult int;
-begin
- raise notice 'calling down into outer_func()';
- myresult := outer_func($1);
- raise notice 'outer_func() done';
- return myresult;
-end;
-$$ language plpgsql;
-select outer_outer_func(10);
-NOTICE: calling down into outer_func()
-NOTICE: calling down into inner_func()
-NOTICE: ***PL/pgSQL function inner_func(integer) line 4 at GET DIAGNOSTICS
-PL/pgSQL function outer_func(integer) line 6 at assignment
-PL/pgSQL function outer_outer_func(integer) line 6 at assignment***
-NOTICE: ***PL/pgSQL function inner_func(integer) line 7 at GET DIAGNOSTICS
-PL/pgSQL function outer_func(integer) line 6 at assignment
-PL/pgSQL function outer_outer_func(integer) line 6 at assignment***
-NOTICE: lets make sure we didnt break anything
-NOTICE: inner_func() done
-NOTICE: outer_func() done
- outer_outer_func
-------------------
- 20
-(1 row)
-
--- repeated call should work
-select outer_outer_func(20);
-NOTICE: calling down into outer_func()
-NOTICE: calling down into inner_func()
-NOTICE: ***PL/pgSQL function inner_func(integer) line 4 at GET DIAGNOSTICS
-PL/pgSQL function outer_func(integer) line 6 at assignment
-PL/pgSQL function outer_outer_func(integer) line 6 at assignment***
-NOTICE: ***PL/pgSQL function inner_func(integer) line 7 at GET DIAGNOSTICS
-PL/pgSQL function outer_func(integer) line 6 at assignment
-PL/pgSQL function outer_outer_func(integer) line 6 at assignment***
-NOTICE: lets make sure we didnt break anything
-NOTICE: inner_func() done
-NOTICE: outer_func() done
- outer_outer_func
-------------------
- 40
-(1 row)
-
-drop function outer_outer_func(int);
-drop function outer_func(int);
-drop function inner_func(int);
--- access to call stack from exception
-create function inner_func(int)
-returns int as $$
-declare
- _context text;
- sx int := 5;
-begin
- begin
- perform sx / 0;
- exception
- when division_by_zero then
- get diagnostics _context = pg_context;
- raise notice '***%***', _context;
- end;
-
- -- lets do it again, just for fun..
- get diagnostics _context = pg_context;
- raise notice '***%***', _context;
- raise notice 'lets make sure we didnt break anything';
- return 2 * $1;
-end;
-$$ language plpgsql;
-create or replace function outer_func(int)
-returns int as $$
-declare
- myresult int;
-begin
- raise notice 'calling down into inner_func()';
- myresult := inner_func($1);
- raise notice 'inner_func() done';
- return myresult;
-end;
-$$ language plpgsql;
-create or replace function outer_outer_func(int)
-returns int as $$
-declare
- myresult int;
-begin
- raise notice 'calling down into outer_func()';
- myresult := outer_func($1);
- raise notice 'outer_func() done';
- return myresult;
-end;
-$$ language plpgsql;
-select outer_outer_func(10);
-NOTICE: calling down into outer_func()
-NOTICE: calling down into inner_func()
-NOTICE: ***PL/pgSQL function inner_func(integer) line 10 at GET DIAGNOSTICS
-PL/pgSQL function outer_func(integer) line 6 at assignment
-PL/pgSQL function outer_outer_func(integer) line 6 at assignment***
-NOTICE: ***PL/pgSQL function inner_func(integer) line 15 at GET DIAGNOSTICS
-PL/pgSQL function outer_func(integer) line 6 at assignment
-PL/pgSQL function outer_outer_func(integer) line 6 at assignment***
-NOTICE: lets make sure we didnt break anything
-NOTICE: inner_func() done
-NOTICE: outer_func() done
- outer_outer_func
-------------------
- 20
-(1 row)
-
--- repeated call should work
-select outer_outer_func(20);
-NOTICE: calling down into outer_func()
-NOTICE: calling down into inner_func()
-NOTICE: ***PL/pgSQL function inner_func(integer) line 10 at GET DIAGNOSTICS
-PL/pgSQL function outer_func(integer) line 6 at assignment
-PL/pgSQL function outer_outer_func(integer) line 6 at assignment***
-NOTICE: ***PL/pgSQL function inner_func(integer) line 15 at GET DIAGNOSTICS
-PL/pgSQL function outer_func(integer) line 6 at assignment
-PL/pgSQL function outer_outer_func(integer) line 6 at assignment***
-NOTICE: lets make sure we didnt break anything
-NOTICE: inner_func() done
-NOTICE: outer_func() done
- outer_outer_func
-------------------
- 40
-(1 row)
-
-drop function outer_outer_func(int);
-drop function outer_func(int);
-drop function inner_func(int);
--- Test pg_routine_oid
-create function current_function(text)
-returns regprocedure as $$
-declare
- fn_oid regprocedure;
-begin
- get diagnostics fn_oid = pg_routine_oid;
- return fn_oid;
-end;
-$$ language plpgsql;
-select current_function('foo');
- current_function
-------------------------
- current_function(text)
-(1 row)
-
-drop function current_function(text);
--- shouldn't fail in DO, even though there's no useful data
-do $$
-declare
- fn_oid oid;
-begin
- get diagnostics fn_oid = pg_routine_oid;
- raise notice 'pg_routine_oid = %', fn_oid;
-end;
-$$;
-NOTICE: pg_routine_oid = 0
---
--- Test ASSERT
---
-do $$
-begin
- assert 1=1; -- should succeed
-end;
-$$;
-do $$
-begin
- assert 1=0; -- should fail
-end;
-$$;
-ERROR: assertion failed
-CONTEXT: PL/pgSQL function inline_code_block line 3 at ASSERT
-do $$
-begin
- assert NULL; -- should fail
-end;
-$$;
-ERROR: assertion failed
-CONTEXT: PL/pgSQL function inline_code_block line 3 at ASSERT
--- check controlling GUC
-set plpgsql.check_asserts = off;
-do $$
-begin
- assert 1=0; -- won't be tested
-end;
-$$;
-reset plpgsql.check_asserts;
--- test custom message
-do $$
-declare var text := 'some value';
-begin
- assert 1=0, format('assertion failed, var = "%s"', var);
-end;
-$$;
-ERROR: assertion failed, var = "some value"
-CONTEXT: PL/pgSQL function inline_code_block line 4 at ASSERT
--- ensure assertions are not trapped by 'others'
-do $$
-begin
- assert 1=0, 'unhandled assertion';
-exception when others then
- null; -- do nothing
-end;
-$$;
-ERROR: unhandled assertion
-CONTEXT: PL/pgSQL function inline_code_block line 3 at ASSERT
--- Test use of plpgsql in a domain check constraint (cf. bug #14414)
-create function plpgsql_domain_check(val int) returns boolean as $$
-begin return val > 0; end
-$$ language plpgsql immutable;
-create domain plpgsql_domain as integer check(plpgsql_domain_check(value));
-do $$
-declare v_test plpgsql_domain;
-begin
- v_test := 1;
-end;
-$$;
-do $$
-declare v_test plpgsql_domain := 1;
-begin
- v_test := 0; -- fail
-end;
-$$;
-ERROR: value for domain plpgsql_domain violates check constraint "plpgsql_domain_check"
-CONTEXT: PL/pgSQL function inline_code_block line 4 at assignment
--- Test handling of expanded array passed to a domain constraint (bug #14472)
-create function plpgsql_arr_domain_check(val int[]) returns boolean as $$
-begin return val[1] > 0; end
-$$ language plpgsql immutable;
-create domain plpgsql_arr_domain as int[] check(plpgsql_arr_domain_check(value));
-do $$
-declare v_test plpgsql_arr_domain;
-begin
- v_test := array[1];
- v_test := v_test || 2;
-end;
-$$;
-do $$
-declare v_test plpgsql_arr_domain := array[1];
-begin
- v_test := 0 || v_test; -- fail
-end;
-$$;
-ERROR: value for domain plpgsql_arr_domain violates check constraint "plpgsql_arr_domain_check"
-CONTEXT: PL/pgSQL function inline_code_block line 4 at assignment
---
--- test usage of transition tables in AFTER triggers
---
-CREATE TABLE transition_table_base (id int PRIMARY KEY, val text);
-CREATE FUNCTION transition_table_base_ins_func()
- RETURNS trigger
- LANGUAGE plpgsql
-AS $$
-DECLARE
- t text;
- l text;
-BEGIN
- t = '';
- FOR l IN EXECUTE
- $q$
- EXPLAIN (TIMING off, COSTS off, VERBOSE on)
- SELECT * FROM newtable
- $q$ LOOP
- t = t || l || E'\n';
- END LOOP;
-
- RAISE INFO '%', t;
- RETURN new;
-END;
-$$;
-CREATE TRIGGER transition_table_base_ins_trig
- AFTER INSERT ON transition_table_base
- REFERENCING OLD TABLE AS oldtable NEW TABLE AS newtable
- FOR EACH STATEMENT
- EXECUTE PROCEDURE transition_table_base_ins_func();
-ERROR: OLD TABLE can only be specified for a DELETE or UPDATE trigger
-CREATE TRIGGER transition_table_base_ins_trig
- AFTER INSERT ON transition_table_base
- REFERENCING NEW TABLE AS newtable
- FOR EACH STATEMENT
- EXECUTE PROCEDURE transition_table_base_ins_func();
-INSERT INTO transition_table_base VALUES (1, 'One'), (2, 'Two');
-INFO: Named Tuplestore Scan
- Output: id, val
-
-INSERT INTO transition_table_base VALUES (3, 'Three'), (4, 'Four');
-INFO: Named Tuplestore Scan
- Output: id, val
-
-CREATE OR REPLACE FUNCTION transition_table_base_upd_func()
- RETURNS trigger
- LANGUAGE plpgsql
-AS $$
-DECLARE
- t text;
- l text;
-BEGIN
- t = '';
- FOR l IN EXECUTE
- $q$
- EXPLAIN (TIMING off, COSTS off, VERBOSE on)
- SELECT * FROM oldtable ot FULL JOIN newtable nt USING (id)
- $q$ LOOP
- t = t || l || E'\n';
- END LOOP;
-
- RAISE INFO '%', t;
- RETURN new;
-END;
-$$;
-CREATE TRIGGER transition_table_base_upd_trig
- AFTER UPDATE ON transition_table_base
- REFERENCING OLD TABLE AS oldtable NEW TABLE AS newtable
- FOR EACH STATEMENT
- EXECUTE PROCEDURE transition_table_base_upd_func();
-UPDATE transition_table_base
- SET val = '*' || val || '*'
- WHERE id BETWEEN 2 AND 3;
-INFO: Hash Full Join
- Output: COALESCE(ot.id, nt.id), ot.val, nt.val
- Hash Cond: (ot.id = nt.id)
- -> Named Tuplestore Scan
- Output: ot.id, ot.val
- -> Hash
- Output: nt.id, nt.val
- -> Named Tuplestore Scan
- Output: nt.id, nt.val
-
-CREATE TABLE transition_table_level1
-(
- level1_no serial NOT NULL ,
- level1_node_name varchar(255),
- PRIMARY KEY (level1_no)
-) WITHOUT OIDS;
-CREATE TABLE transition_table_level2
-(
- level2_no serial NOT NULL ,
- parent_no int NOT NULL,
- level1_node_name varchar(255),
- PRIMARY KEY (level2_no)
-) WITHOUT OIDS;
-CREATE TABLE transition_table_status
-(
- level int NOT NULL,
- node_no int NOT NULL,
- status int,
- PRIMARY KEY (level, node_no)
-) WITHOUT OIDS;
-CREATE FUNCTION transition_table_level1_ri_parent_del_func()
- RETURNS TRIGGER
- LANGUAGE plpgsql
-AS $$
- DECLARE n bigint;
- BEGIN
- PERFORM FROM p JOIN transition_table_level2 c ON c.parent_no = p.level1_no;
- IF FOUND THEN
- RAISE EXCEPTION 'RI error';
- END IF;
- RETURN NULL;
- END;
-$$;
-CREATE TRIGGER transition_table_level1_ri_parent_del_trigger
- AFTER DELETE ON transition_table_level1
- REFERENCING OLD TABLE AS p
- FOR EACH STATEMENT EXECUTE PROCEDURE
- transition_table_level1_ri_parent_del_func();
-CREATE FUNCTION transition_table_level1_ri_parent_upd_func()
- RETURNS TRIGGER
- LANGUAGE plpgsql
-AS $$
- DECLARE
- x int;
- BEGIN
- WITH p AS (SELECT level1_no, sum(delta) cnt
- FROM (SELECT level1_no, 1 AS delta FROM i
- UNION ALL
- SELECT level1_no, -1 AS delta FROM d) w
- GROUP BY level1_no
- HAVING sum(delta) < 0)
- SELECT level1_no
- FROM p JOIN transition_table_level2 c ON c.parent_no = p.level1_no
- INTO x;
- IF FOUND THEN
- RAISE EXCEPTION 'RI error';
- END IF;
- RETURN NULL;
- END;
-$$;
-CREATE TRIGGER transition_table_level1_ri_parent_upd_trigger
- AFTER UPDATE ON transition_table_level1
- REFERENCING OLD TABLE AS d NEW TABLE AS i
- FOR EACH STATEMENT EXECUTE PROCEDURE
- transition_table_level1_ri_parent_upd_func();
-CREATE FUNCTION transition_table_level2_ri_child_insupd_func()
- RETURNS TRIGGER
- LANGUAGE plpgsql
-AS $$
- BEGIN
- PERFORM FROM i
- LEFT JOIN transition_table_level1 p
- ON p.level1_no IS NOT NULL AND p.level1_no = i.parent_no
- WHERE p.level1_no IS NULL;
- IF FOUND THEN
- RAISE EXCEPTION 'RI error';
- END IF;
- RETURN NULL;
- END;
-$$;
-CREATE TRIGGER transition_table_level2_ri_child_ins_trigger
- AFTER INSERT ON transition_table_level2
- REFERENCING NEW TABLE AS i
- FOR EACH STATEMENT EXECUTE PROCEDURE
- transition_table_level2_ri_child_insupd_func();
-CREATE TRIGGER transition_table_level2_ri_child_upd_trigger
- AFTER UPDATE ON transition_table_level2
- REFERENCING NEW TABLE AS i
- FOR EACH STATEMENT EXECUTE PROCEDURE
- transition_table_level2_ri_child_insupd_func();
--- create initial test data
-INSERT INTO transition_table_level1 (level1_no)
- SELECT generate_series(1,200);
-ANALYZE transition_table_level1;
-INSERT INTO transition_table_level2 (level2_no, parent_no)
- SELECT level2_no, level2_no / 50 + 1 AS parent_no
- FROM generate_series(1,9999) level2_no;
-ANALYZE transition_table_level2;
-INSERT INTO transition_table_status (level, node_no, status)
- SELECT 1, level1_no, 0 FROM transition_table_level1;
-INSERT INTO transition_table_status (level, node_no, status)
- SELECT 2, level2_no, 0 FROM transition_table_level2;
-ANALYZE transition_table_status;
-INSERT INTO transition_table_level1(level1_no)
- SELECT generate_series(201,1000);
-ANALYZE transition_table_level1;
--- behave reasonably if someone tries to modify a transition table
-CREATE FUNCTION transition_table_level2_bad_usage_func()
- RETURNS TRIGGER
- LANGUAGE plpgsql
-AS $$
- BEGIN
- INSERT INTO dx VALUES (1000000, 1000000, 'x');
- RETURN NULL;
- END;
-$$;
-CREATE TRIGGER transition_table_level2_bad_usage_trigger
- AFTER DELETE ON transition_table_level2
- REFERENCING OLD TABLE AS dx
- FOR EACH STATEMENT EXECUTE PROCEDURE
- transition_table_level2_bad_usage_func();
-DELETE FROM transition_table_level2
- WHERE level2_no BETWEEN 301 AND 305;
-ERROR: relation "dx" cannot be the target of a modifying statement
-CONTEXT: SQL statement "INSERT INTO dx VALUES (1000000, 1000000, 'x')"
-PL/pgSQL function transition_table_level2_bad_usage_func() line 3 at SQL statement
-DROP TRIGGER transition_table_level2_bad_usage_trigger
- ON transition_table_level2;
--- attempt modifications which would break RI (should all fail)
-DELETE FROM transition_table_level1
- WHERE level1_no = 25;
-ERROR: RI error
-CONTEXT: PL/pgSQL function transition_table_level1_ri_parent_del_func() line 6 at RAISE
-UPDATE transition_table_level1 SET level1_no = -1
- WHERE level1_no = 30;
-ERROR: RI error
-CONTEXT: PL/pgSQL function transition_table_level1_ri_parent_upd_func() line 15 at RAISE
-INSERT INTO transition_table_level2 (level2_no, parent_no)
- VALUES (10000, 10000);
-ERROR: RI error
-CONTEXT: PL/pgSQL function transition_table_level2_ri_child_insupd_func() line 8 at RAISE
-UPDATE transition_table_level2 SET parent_no = 2000
- WHERE level2_no = 40;
-ERROR: RI error
-CONTEXT: PL/pgSQL function transition_table_level2_ri_child_insupd_func() line 8 at RAISE
--- attempt modifications which would not break RI (should all succeed)
-DELETE FROM transition_table_level1
- WHERE level1_no BETWEEN 201 AND 1000;
-DELETE FROM transition_table_level1
- WHERE level1_no BETWEEN 100000000 AND 100000010;
-SELECT count(*) FROM transition_table_level1;
- count
--------
- 200
-(1 row)
-
-DELETE FROM transition_table_level2
- WHERE level2_no BETWEEN 211 AND 220;
-SELECT count(*) FROM transition_table_level2;
- count
--------
- 9989
-(1 row)
-
-CREATE TABLE alter_table_under_transition_tables
-(
- id int PRIMARY KEY,
- name text
-);
-CREATE FUNCTION alter_table_under_transition_tables_upd_func()
- RETURNS TRIGGER
- LANGUAGE plpgsql
-AS $$
-BEGIN
- RAISE WARNING 'old table = %, new table = %',
- (SELECT string_agg(id || '=' || name, ',') FROM d),
- (SELECT string_agg(id || '=' || name, ',') FROM i);
- RAISE NOTICE 'one = %', (SELECT 1 FROM alter_table_under_transition_tables LIMIT 1);
- RETURN NULL;
-END;
-$$;
--- should fail, TRUNCATE is not compatible with transition tables
-CREATE TRIGGER alter_table_under_transition_tables_upd_trigger
- AFTER TRUNCATE OR UPDATE ON alter_table_under_transition_tables
- REFERENCING OLD TABLE AS d NEW TABLE AS i
- FOR EACH STATEMENT EXECUTE PROCEDURE
- alter_table_under_transition_tables_upd_func();
-ERROR: TRUNCATE triggers with transition tables are not supported
--- should work
-CREATE TRIGGER alter_table_under_transition_tables_upd_trigger
- AFTER UPDATE ON alter_table_under_transition_tables
- REFERENCING OLD TABLE AS d NEW TABLE AS i
- FOR EACH STATEMENT EXECUTE PROCEDURE
- alter_table_under_transition_tables_upd_func();
-INSERT INTO alter_table_under_transition_tables
- VALUES (1, '1'), (2, '2'), (3, '3');
-UPDATE alter_table_under_transition_tables
- SET name = name || name;
-WARNING: old table = 1=1,2=2,3=3, new table = 1=11,2=22,3=33
-NOTICE: one = 1
--- now change 'name' to an integer to see what happens...
-ALTER TABLE alter_table_under_transition_tables
- ALTER COLUMN name TYPE int USING name::integer;
-UPDATE alter_table_under_transition_tables
- SET name = (name::text || name::text)::integer;
-WARNING: old table = 1=11,2=22,3=33, new table = 1=1111,2=2222,3=3333
-NOTICE: one = 1
--- now drop column 'name'
-ALTER TABLE alter_table_under_transition_tables
- DROP column name;
-UPDATE alter_table_under_transition_tables
- SET id = id;
-ERROR: column "name" does not exist
-LINE 1: (SELECT string_agg(id || '=' || name, ',') FROM d)
- ^
-QUERY: (SELECT string_agg(id || '=' || name, ',') FROM d)
-CONTEXT: PL/pgSQL function alter_table_under_transition_tables_upd_func() line 3 at RAISE
---
--- Test multiple reference to a transition table
---
-CREATE TABLE multi_test (i int);
-INSERT INTO multi_test VALUES (1);
-CREATE OR REPLACE FUNCTION multi_test_trig() RETURNS trigger
-LANGUAGE plpgsql AS $$
-BEGIN
- RAISE NOTICE 'count = %', (SELECT COUNT(*) FROM new_test);
- RAISE NOTICE 'count union = %',
- (SELECT COUNT(*)
- FROM (SELECT * FROM new_test UNION ALL SELECT * FROM new_test) ss);
- RETURN NULL;
-END$$;
-CREATE TRIGGER my_trigger AFTER UPDATE ON multi_test
- REFERENCING NEW TABLE AS new_test OLD TABLE as old_test
- FOR EACH STATEMENT EXECUTE PROCEDURE multi_test_trig();
-UPDATE multi_test SET i = i;
-NOTICE: count = 1
-NOTICE: count union = 2
-DROP TABLE multi_test;
-DROP FUNCTION multi_test_trig();
---
--- Check type parsing and record fetching from partitioned tables
---
-CREATE TABLE partitioned_table (a int, b text) PARTITION BY LIST (a);
-CREATE TABLE pt_part1 PARTITION OF partitioned_table FOR VALUES IN (1);
-CREATE TABLE pt_part2 PARTITION OF partitioned_table FOR VALUES IN (2);
-INSERT INTO partitioned_table VALUES (1, 'Row 1');
-INSERT INTO partitioned_table VALUES (2, 'Row 2');
-CREATE OR REPLACE FUNCTION get_from_partitioned_table(partitioned_table.a%type)
-RETURNS partitioned_table AS $$
-DECLARE
- a_val partitioned_table.a%TYPE;
- result partitioned_table%ROWTYPE;
-BEGIN
- a_val := $1;
- SELECT * INTO result FROM partitioned_table WHERE a = a_val;
- RETURN result;
-END; $$ LANGUAGE plpgsql;
-NOTICE: type reference partitioned_table.a%TYPE converted to integer
-SELECT * FROM get_from_partitioned_table(1) AS t;
- a | b
----+-------
- 1 | Row 1
-(1 row)
-
-CREATE OR REPLACE FUNCTION list_partitioned_table()
-RETURNS SETOF public.partitioned_table.a%TYPE AS $$
-DECLARE
- row public.partitioned_table%ROWTYPE;
- a_val public.partitioned_table.a%TYPE;
-BEGIN
- FOR row IN SELECT * FROM public.partitioned_table ORDER BY a LOOP
- a_val := row.a;
- RETURN NEXT a_val;
- END LOOP;
- RETURN;
-END; $$ LANGUAGE plpgsql;
-NOTICE: type reference public.partitioned_table.a%TYPE converted to integer
-SELECT * FROM list_partitioned_table() AS t;
- t
----
- 1
- 2
-(2 rows)
-
---
--- Check argument name is used instead of $n in error message
---
-CREATE FUNCTION fx(x WSlot) RETURNS void AS $$
-BEGIN
- GET DIAGNOSTICS x = ROW_COUNT;
- RETURN;
-END; $$ LANGUAGE plpgsql;
-ERROR: "x" is not a scalar variable
-LINE 3: GET DIAGNOSTICS x = ROW_COUNT;
- ^
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/copy2.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/copy2.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/copy2.out 2024-11-15 02:50:52.426154377 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/copy2.out 2024-11-15 02:59:18.189116977 +0000
@@ -1,931 +1,2 @@
-CREATE TEMP TABLE x (
- a serial,
- b int,
- c text not null default 'stuff',
- d text,
- e text
-) ;
-CREATE FUNCTION fn_x_before () RETURNS TRIGGER AS '
- BEGIN
- NEW.e := ''before trigger fired''::text;
- return NEW;
- END;
-' LANGUAGE plpgsql;
-CREATE FUNCTION fn_x_after () RETURNS TRIGGER AS '
- BEGIN
- UPDATE x set e=''after trigger fired'' where c=''stuff'';
- return NULL;
- END;
-' LANGUAGE plpgsql;
-CREATE TRIGGER trg_x_after AFTER INSERT ON x
-FOR EACH ROW EXECUTE PROCEDURE fn_x_after();
-CREATE TRIGGER trg_x_before BEFORE INSERT ON x
-FOR EACH ROW EXECUTE PROCEDURE fn_x_before();
-COPY x (a, b, c, d, e) from stdin;
-COPY x (b, d) from stdin;
-COPY x (b, d) from stdin;
-COPY x (a, b, c, d, e) from stdin;
--- non-existent column in column list: should fail
-COPY x (xyz) from stdin;
-ERROR: column "xyz" of relation "x" does not exist
--- redundant options
-COPY x from stdin (format CSV, FORMAT CSV);
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (format CSV, FORMAT CSV);
- ^
-COPY x from stdin (freeze off, freeze on);
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (freeze off, freeze on);
- ^
-COPY x from stdin (delimiter ',', delimiter ',');
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (delimiter ',', delimiter ',');
- ^
-COPY x from stdin (null ' ', null ' ');
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (null ' ', null ' ');
- ^
-COPY x from stdin (header off, header on);
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (header off, header on);
- ^
-COPY x from stdin (quote ':', quote ':');
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (quote ':', quote ':');
- ^
-COPY x from stdin (escape ':', escape ':');
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (escape ':', escape ':');
- ^
-COPY x from stdin (force_quote (a), force_quote *);
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (force_quote (a), force_quote *);
- ^
-COPY x from stdin (force_not_null (a), force_not_null (b));
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (force_not_null (a), force_not_null (b));
- ^
-COPY x from stdin (force_null (a), force_null (b));
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (force_null (a), force_null (b));
- ^
-COPY x from stdin (convert_selectively (a), convert_selectively (b));
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (convert_selectively (a), convert_selectiv...
- ^
-COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii');
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii...
- ^
-COPY x from stdin (on_error ignore, on_error ignore);
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (on_error ignore, on_error ignore);
- ^
-COPY x from stdin (log_verbosity default, log_verbosity verbose);
-ERROR: conflicting or redundant options
-LINE 1: COPY x from stdin (log_verbosity default, log_verbosity verb...
- ^
--- incorrect options
-COPY x from stdin (format BINARY, delimiter ',');
-ERROR: cannot specify DELIMITER in BINARY mode
-COPY x from stdin (format BINARY, null 'x');
-ERROR: cannot specify NULL in BINARY mode
-COPY x from stdin (format BINARY, on_error ignore);
-ERROR: only ON_ERROR STOP is allowed in BINARY mode
-COPY x from stdin (on_error unsupported);
-ERROR: COPY ON_ERROR "unsupported" not recognized
-LINE 1: COPY x from stdin (on_error unsupported);
- ^
-COPY x from stdin (format TEXT, force_quote(a));
-ERROR: COPY FORCE_QUOTE requires CSV mode
-COPY x from stdin (format TEXT, force_quote *);
-ERROR: COPY FORCE_QUOTE requires CSV mode
-COPY x from stdin (format CSV, force_quote(a));
-ERROR: COPY FORCE_QUOTE cannot be used with COPY FROM
-COPY x from stdin (format CSV, force_quote *);
-ERROR: COPY FORCE_QUOTE cannot be used with COPY FROM
-COPY x from stdin (format TEXT, force_not_null(a));
-ERROR: COPY FORCE_NOT_NULL requires CSV mode
-COPY x from stdin (format TEXT, force_not_null *);
-ERROR: COPY FORCE_NOT_NULL requires CSV mode
-COPY x to stdout (format CSV, force_not_null(a));
-ERROR: COPY FORCE_NOT_NULL cannot be used with COPY TO
-COPY x to stdout (format CSV, force_not_null *);
-ERROR: COPY FORCE_NOT_NULL cannot be used with COPY TO
-COPY x from stdin (format TEXT, force_null(a));
-ERROR: COPY FORCE_NULL requires CSV mode
-COPY x from stdin (format TEXT, force_null *);
-ERROR: COPY FORCE_NULL requires CSV mode
-COPY x to stdout (format CSV, force_null(a));
-ERROR: COPY FORCE_NULL cannot be used with COPY TO
-COPY x to stdout (format CSV, force_null *);
-ERROR: COPY FORCE_NULL cannot be used with COPY TO
-COPY x to stdout (format BINARY, on_error unsupported);
-ERROR: COPY ON_ERROR cannot be used with COPY TO
-LINE 1: COPY x to stdout (format BINARY, on_error unsupported);
- ^
-COPY x from stdin (log_verbosity unsupported);
-ERROR: COPY LOG_VERBOSITY "unsupported" not recognized
-LINE 1: COPY x from stdin (log_verbosity unsupported);
- ^
-COPY x from stdin with (reject_limit 1);
-ERROR: COPY REJECT_LIMIT requires ON_ERROR to be set to IGNORE
-COPY x from stdin with (on_error ignore, reject_limit 0);
-ERROR: REJECT_LIMIT (0) must be greater than zero
--- too many columns in column list: should fail
-COPY x (a, b, c, d, e, d, c) from stdin;
-ERROR: column "d" specified more than once
--- missing data: should fail
-COPY x from stdin;
-ERROR: invalid input syntax for type integer: ""
-CONTEXT: COPY x, line 1, column a: ""
-COPY x from stdin;
-ERROR: missing data for column "e"
-CONTEXT: COPY x, line 1: "2000 230 23 23"
-COPY x from stdin;
-ERROR: missing data for column "e"
-CONTEXT: COPY x, line 1: "2001 231 \N \N"
--- extra data: should fail
-COPY x from stdin;
-ERROR: extra data after last expected column
-CONTEXT: COPY x, line 1: "2002 232 40 50 60 70 80"
--- various COPY options: delimiters, oids, NULL string, encoding
-COPY x (b, c, d, e) from stdin delimiter ',' null 'x';
-COPY x from stdin WITH DELIMITER AS ';' NULL AS '';
-COPY x from stdin WITH DELIMITER AS ':' NULL AS E'\\X' ENCODING 'sql_ascii';
-COPY x TO stdout WHERE a = 1;
-ERROR: WHERE clause not allowed with COPY TO
-LINE 1: COPY x TO stdout WHERE a = 1;
- ^
-COPY x from stdin WHERE a = 50004;
-COPY x from stdin WHERE a > 60003;
-COPY x from stdin WHERE f > 60003;
-ERROR: column "f" does not exist
-LINE 1: COPY x from stdin WHERE f > 60003;
- ^
-COPY x from stdin WHERE a = max(x.b);
-ERROR: aggregate functions are not allowed in COPY FROM WHERE conditions
-LINE 1: COPY x from stdin WHERE a = max(x.b);
- ^
-COPY x from stdin WHERE a IN (SELECT 1 FROM x);
-ERROR: cannot use subquery in COPY FROM WHERE condition
-LINE 1: COPY x from stdin WHERE a IN (SELECT 1 FROM x);
- ^
-COPY x from stdin WHERE a IN (generate_series(1,5));
-ERROR: set-returning functions are not allowed in COPY FROM WHERE conditions
-LINE 1: COPY x from stdin WHERE a IN (generate_series(1,5));
- ^
-COPY x from stdin WHERE a = row_number() over(b);
-ERROR: window functions are not allowed in COPY FROM WHERE conditions
-LINE 1: COPY x from stdin WHERE a = row_number() over(b);
- ^
--- check results of copy in
-SELECT * FROM x;
- a | b | c | d | e
--------+----+------------+--------+----------------------
- 9999 | | \N | NN | before trigger fired
- 10000 | 21 | 31 | 41 | before trigger fired
- 10001 | 22 | 32 | 42 | before trigger fired
- 10002 | 23 | 33 | 43 | before trigger fired
- 10003 | 24 | 34 | 44 | before trigger fired
- 10004 | 25 | 35 | 45 | before trigger fired
- 10005 | 26 | 36 | 46 | before trigger fired
- 6 | | 45 | 80 | before trigger fired
- 7 | | x | \x | before trigger fired
- 8 | | , | \, | before trigger fired
- 3000 | | c | | before trigger fired
- 4000 | | C | | before trigger fired
- 4001 | 1 | empty | | before trigger fired
- 4002 | 2 | null | | before trigger fired
- 4003 | 3 | Backslash | \ | before trigger fired
- 4004 | 4 | BackslashX | \X | before trigger fired
- 4005 | 5 | N | N | before trigger fired
- 4006 | 6 | BackslashN | \N | before trigger fired
- 4007 | 7 | XX | XX | before trigger fired
- 4008 | 8 | Delimiter | : | before trigger fired
- 50004 | 25 | 35 | 45 | before trigger fired
- 60004 | 25 | 35 | 45 | before trigger fired
- 60005 | 26 | 36 | 46 | before trigger fired
- 1 | 1 | stuff | test_1 | after trigger fired
- 2 | 2 | stuff | test_2 | after trigger fired
- 3 | 3 | stuff | test_3 | after trigger fired
- 4 | 4 | stuff | test_4 | after trigger fired
- 5 | 5 | stuff | test_5 | after trigger fired
-(28 rows)
-
--- check copy out
-COPY x TO stdout;
-9999 \N \\N NN before trigger fired
-10000 21 31 41 before trigger fired
-10001 22 32 42 before trigger fired
-10002 23 33 43 before trigger fired
-10003 24 34 44 before trigger fired
-10004 25 35 45 before trigger fired
-10005 26 36 46 before trigger fired
-6 \N 45 80 before trigger fired
-7 \N x \\x before trigger fired
-8 \N , \\, before trigger fired
-3000 \N c \N before trigger fired
-4000 \N C \N before trigger fired
-4001 1 empty before trigger fired
-4002 2 null \N before trigger fired
-4003 3 Backslash \\ before trigger fired
-4004 4 BackslashX \\X before trigger fired
-4005 5 N N before trigger fired
-4006 6 BackslashN \\N before trigger fired
-4007 7 XX XX before trigger fired
-4008 8 Delimiter : before trigger fired
-50004 25 35 45 before trigger fired
-60004 25 35 45 before trigger fired
-60005 26 36 46 before trigger fired
-1 1 stuff test_1 after trigger fired
-2 2 stuff test_2 after trigger fired
-3 3 stuff test_3 after trigger fired
-4 4 stuff test_4 after trigger fired
-5 5 stuff test_5 after trigger fired
-COPY x (c, e) TO stdout;
-\\N before trigger fired
-31 before trigger fired
-32 before trigger fired
-33 before trigger fired
-34 before trigger fired
-35 before trigger fired
-36 before trigger fired
-45 before trigger fired
-x before trigger fired
-, before trigger fired
-c before trigger fired
-C before trigger fired
-empty before trigger fired
-null before trigger fired
-Backslash before trigger fired
-BackslashX before trigger fired
-N before trigger fired
-BackslashN before trigger fired
-XX before trigger fired
-Delimiter before trigger fired
-35 before trigger fired
-35 before trigger fired
-36 before trigger fired
-stuff after trigger fired
-stuff after trigger fired
-stuff after trigger fired
-stuff after trigger fired
-stuff after trigger fired
-COPY x (b, e) TO stdout WITH NULL 'I''m null';
-I'm null before trigger fired
-21 before trigger fired
-22 before trigger fired
-23 before trigger fired
-24 before trigger fired
-25 before trigger fired
-26 before trigger fired
-I'm null before trigger fired
-I'm null before trigger fired
-I'm null before trigger fired
-I'm null before trigger fired
-I'm null before trigger fired
-1 before trigger fired
-2 before trigger fired
-3 before trigger fired
-4 before trigger fired
-5 before trigger fired
-6 before trigger fired
-7 before trigger fired
-8 before trigger fired
-25 before trigger fired
-25 before trigger fired
-26 before trigger fired
-1 after trigger fired
-2 after trigger fired
-3 after trigger fired
-4 after trigger fired
-5 after trigger fired
-CREATE TEMP TABLE y (
- col1 text,
- col2 text
-);
-INSERT INTO y VALUES ('Jackson, Sam', E'\\h');
-INSERT INTO y VALUES ('It is "perfect".',E'\t');
-INSERT INTO y VALUES ('', NULL);
-COPY y TO stdout WITH CSV;
-"Jackson, Sam",\h
-"It is ""perfect"".",
-"",
-COPY y TO stdout WITH CSV QUOTE '''' DELIMITER '|';
-Jackson, Sam|\h
-It is "perfect".|
-''|
-COPY y TO stdout WITH CSV FORCE QUOTE col2 ESCAPE E'\\' ENCODING 'sql_ascii';
-"Jackson, Sam","\\h"
-"It is \"perfect\"."," "
-"",
-COPY y TO stdout WITH CSV FORCE QUOTE *;
-"Jackson, Sam","\h"
-"It is ""perfect""."," "
-"",
--- Repeat above tests with new 9.0 option syntax
-COPY y TO stdout (FORMAT CSV);
-"Jackson, Sam",\h
-"It is ""perfect"".",
-"",
-COPY y TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|');
-Jackson, Sam|\h
-It is "perfect".|
-''|
-COPY y TO stdout (FORMAT CSV, FORCE_QUOTE (col2), ESCAPE E'\\');
-"Jackson, Sam","\\h"
-"It is \"perfect\"."," "
-"",
-COPY y TO stdout (FORMAT CSV, FORCE_QUOTE *);
-"Jackson, Sam","\h"
-"It is ""perfect""."," "
-"",
-\copy y TO stdout (FORMAT CSV)
-"Jackson, Sam",\h
-"It is ""perfect"".",
-"",
-\copy y TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|')
-Jackson, Sam|\h
-It is "perfect".|
-''|
-\copy y TO stdout (FORMAT CSV, FORCE_QUOTE (col2), ESCAPE E'\\')
-"Jackson, Sam","\\h"
-"It is \"perfect\"."," "
-"",
-\copy y TO stdout (FORMAT CSV, FORCE_QUOTE *)
-"Jackson, Sam","\h"
-"It is ""perfect""."," "
-"",
---test that we read consecutive LFs properly
-CREATE TEMP TABLE testnl (a int, b text, c int);
-COPY testnl FROM stdin CSV;
--- test end of copy marker
-CREATE TEMP TABLE testeoc (a text);
-COPY testeoc FROM stdin CSV;
-COPY testeoc TO stdout CSV;
-a\.
-\.b
-c\.d
-"\."
--- test handling of nonstandard null marker that violates escaping rules
-CREATE TEMP TABLE testnull(a int, b text);
-INSERT INTO testnull VALUES (1, E'\\0'), (NULL, NULL);
-COPY testnull TO stdout WITH NULL AS E'\\0';
-1 \\0
-\0 \0
-COPY testnull FROM stdin WITH NULL AS E'\\0';
-SELECT * FROM testnull;
- a | b
-----+----
- 1 | \0
- |
- 42 | \0
- |
-(4 rows)
-
-BEGIN;
-CREATE TABLE vistest (LIKE testeoc);
-COPY vistest FROM stdin CSV;
-COMMIT;
-SELECT * FROM vistest;
- a
-----
- a0
- b
-(2 rows)
-
-BEGIN;
-TRUNCATE vistest;
-COPY vistest FROM stdin CSV;
-SELECT * FROM vistest;
- a
-----
- a1
- b
-(2 rows)
-
-SAVEPOINT s1;
-TRUNCATE vistest;
-COPY vistest FROM stdin CSV;
-SELECT * FROM vistest;
- a
-----
- d1
- e
-(2 rows)
-
-COMMIT;
-SELECT * FROM vistest;
- a
-----
- d1
- e
-(2 rows)
-
-BEGIN;
-TRUNCATE vistest;
-COPY vistest FROM stdin CSV FREEZE;
-SELECT * FROM vistest;
- a
-----
- a2
- b
-(2 rows)
-
-SAVEPOINT s1;
-TRUNCATE vistest;
-COPY vistest FROM stdin CSV FREEZE;
-SELECT * FROM vistest;
- a
-----
- d2
- e
-(2 rows)
-
-COMMIT;
-SELECT * FROM vistest;
- a
-----
- d2
- e
-(2 rows)
-
-BEGIN;
-TRUNCATE vistest;
-COPY vistest FROM stdin CSV FREEZE;
-SELECT * FROM vistest;
- a
----
- x
- y
-(2 rows)
-
-COMMIT;
-TRUNCATE vistest;
-COPY vistest FROM stdin CSV FREEZE;
-ERROR: cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction
-BEGIN;
-TRUNCATE vistest;
-SAVEPOINT s1;
-COPY vistest FROM stdin CSV FREEZE;
-ERROR: cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction
-COMMIT;
-BEGIN;
-INSERT INTO vistest VALUES ('z');
-SAVEPOINT s1;
-TRUNCATE vistest;
-ROLLBACK TO SAVEPOINT s1;
-COPY vistest FROM stdin CSV FREEZE;
-ERROR: cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction
-COMMIT;
-CREATE FUNCTION truncate_in_subxact() RETURNS VOID AS
-$$
-BEGIN
- TRUNCATE vistest;
-EXCEPTION
- WHEN OTHERS THEN
- INSERT INTO vistest VALUES ('subxact failure');
-END;
-$$ language plpgsql;
-BEGIN;
-INSERT INTO vistest VALUES ('z');
-SELECT truncate_in_subxact();
- truncate_in_subxact
----------------------
-
-(1 row)
-
-COPY vistest FROM stdin CSV FREEZE;
-SELECT * FROM vistest;
- a
-----
- d4
- e
-(2 rows)
-
-COMMIT;
-SELECT * FROM vistest;
- a
-----
- d4
- e
-(2 rows)
-
--- Test FORCE_NOT_NULL and FORCE_NULL options
-CREATE TEMP TABLE forcetest (
- a INT NOT NULL,
- b TEXT NOT NULL,
- c TEXT,
- d TEXT,
- e TEXT
-);
-\pset null NULL
--- should succeed with no effect ("b" remains an empty string, "c" remains NULL)
-BEGIN;
-COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(b), FORCE_NULL(c));
-COMMIT;
-SELECT b, c FROM forcetest WHERE a = 1;
- b | c
----+------
- | NULL
-(1 row)
-
--- should succeed, FORCE_NULL and FORCE_NOT_NULL can be both specified
-BEGIN;
-COPY forcetest (a, b, c, d) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(c,d), FORCE_NULL(c,d));
-COMMIT;
-SELECT c, d FROM forcetest WHERE a = 2;
- c | d
----+------
- | NULL
-(1 row)
-
--- should fail with not-null constraint violation
-BEGIN;
-COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL(b), FORCE_NOT_NULL(c));
-ERROR: null value in column "b" of relation "forcetest" violates not-null constraint
-DETAIL: Failing row contains (3, null, , null, null).
-CONTEXT: COPY forcetest, line 1: "3,,"""
-ROLLBACK;
--- should fail with "not referenced by COPY" error
-BEGIN;
-COPY forcetest (d, e) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(b));
-ERROR: FORCE_NOT_NULL column "b" not referenced by COPY
-ROLLBACK;
--- should fail with "not referenced by COPY" error
-BEGIN;
-COPY forcetest (d, e) FROM STDIN WITH (FORMAT csv, FORCE_NULL(b));
-ERROR: FORCE_NULL column "b" not referenced by COPY
-ROLLBACK;
--- should succeed with no effect ("b" remains an empty string, "c" remains NULL)
-BEGIN;
-COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *, FORCE_NULL *);
-COMMIT;
-SELECT b, c FROM forcetest WHERE a = 4;
- b | c
----+------
- | NULL
-(1 row)
-
--- should succeed with effect ("b" remains an empty string)
-BEGIN;
-COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *);
-COMMIT;
-SELECT b, c FROM forcetest WHERE a = 5;
- b | c
----+---
- |
-(1 row)
-
--- should succeed with effect ("c" remains NULL)
-BEGIN;
-COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL *);
-COMMIT;
-SELECT b, c FROM forcetest WHERE a = 6;
- b | c
----+------
- b | NULL
-(1 row)
-
--- should fail with "conflicting or redundant options" error
-BEGIN;
-COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *, FORCE_NOT_NULL(b));
-ERROR: conflicting or redundant options
-LINE 1: ...c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *, FORCE_NOT_...
- ^
-ROLLBACK;
--- should fail with "conflicting or redundant options" error
-BEGIN;
-COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL *, FORCE_NULL(b));
-ERROR: conflicting or redundant options
-LINE 1: ... b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL *, FORCE_NULL...
- ^
-ROLLBACK;
-\pset null ''
--- test case with whole-row Var in a check constraint
-create table check_con_tbl (f1 int);
-create function check_con_function(check_con_tbl) returns bool as $$
-begin
- raise notice 'input = %', row_to_json($1);
- return $1.f1 > 0;
-end $$ language plpgsql immutable;
-alter table check_con_tbl add check (check_con_function(check_con_tbl.*));
-\d+ check_con_tbl
- Table "public.check_con_tbl"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- f1 | integer | | | | plain | |
-Check constraints:
- "check_con_tbl_check" CHECK (check_con_function(check_con_tbl.*))
-
-copy check_con_tbl from stdin;
-NOTICE: input = {"f1":1}
-NOTICE: input = {"f1":null}
-copy check_con_tbl from stdin;
-NOTICE: input = {"f1":0}
-ERROR: new row for relation "check_con_tbl" violates check constraint "check_con_tbl_check"
-DETAIL: Failing row contains (0).
-CONTEXT: COPY check_con_tbl, line 1: "0"
-select * from check_con_tbl;
- f1
-----
- 1
-
-(2 rows)
-
--- test with RLS enabled.
-CREATE ROLE regress_rls_copy_user;
-CREATE ROLE regress_rls_copy_user_colperms;
-CREATE TABLE rls_t1 (a int, b int, c int);
-COPY rls_t1 (a, b, c) from stdin;
-CREATE POLICY p1 ON rls_t1 FOR SELECT USING (a % 2 = 0);
-ALTER TABLE rls_t1 ENABLE ROW LEVEL SECURITY;
-ALTER TABLE rls_t1 FORCE ROW LEVEL SECURITY;
-GRANT SELECT ON TABLE rls_t1 TO regress_rls_copy_user;
-GRANT SELECT (a, b) ON TABLE rls_t1 TO regress_rls_copy_user_colperms;
--- all columns
-COPY rls_t1 TO stdout;
-1 4 1
-2 3 2
-3 2 3
-4 1 4
-COPY rls_t1 (a, b, c) TO stdout;
-1 4 1
-2 3 2
-3 2 3
-4 1 4
--- subset of columns
-COPY rls_t1 (a) TO stdout;
-1
-2
-3
-4
-COPY rls_t1 (a, b) TO stdout;
-1 4
-2 3
-3 2
-4 1
--- column reordering
-COPY rls_t1 (b, a) TO stdout;
-4 1
-3 2
-2 3
-1 4
-SET SESSION AUTHORIZATION regress_rls_copy_user;
--- all columns
-COPY rls_t1 TO stdout;
-2 3 2
-4 1 4
-COPY rls_t1 (a, b, c) TO stdout;
-2 3 2
-4 1 4
--- subset of columns
-COPY rls_t1 (a) TO stdout;
-2
-4
-COPY rls_t1 (a, b) TO stdout;
-2 3
-4 1
--- column reordering
-COPY rls_t1 (b, a) TO stdout;
-3 2
-1 4
-RESET SESSION AUTHORIZATION;
-SET SESSION AUTHORIZATION regress_rls_copy_user_colperms;
--- attempt all columns (should fail)
-COPY rls_t1 TO stdout;
-ERROR: permission denied for table rls_t1
-COPY rls_t1 (a, b, c) TO stdout;
-ERROR: permission denied for table rls_t1
--- try to copy column with no privileges (should fail)
-COPY rls_t1 (c) TO stdout;
-ERROR: permission denied for table rls_t1
--- subset of columns (should succeed)
-COPY rls_t1 (a) TO stdout;
-2
-4
-COPY rls_t1 (a, b) TO stdout;
-2 3
-4 1
-RESET SESSION AUTHORIZATION;
--- test with INSTEAD OF INSERT trigger on a view
-CREATE TABLE instead_of_insert_tbl(id serial, name text);
-CREATE VIEW instead_of_insert_tbl_view AS SELECT ''::text AS str;
-COPY instead_of_insert_tbl_view FROM stdin; -- fail
-ERROR: cannot copy to view "instead_of_insert_tbl_view"
-HINT: To enable copying to a view, provide an INSTEAD OF INSERT trigger.
-CREATE FUNCTION fun_instead_of_insert_tbl() RETURNS trigger AS $$
-BEGIN
- INSERT INTO instead_of_insert_tbl (name) VALUES (NEW.str);
- RETURN NULL;
-END;
-$$ LANGUAGE plpgsql;
-CREATE TRIGGER trig_instead_of_insert_tbl_view
- INSTEAD OF INSERT ON instead_of_insert_tbl_view
- FOR EACH ROW EXECUTE PROCEDURE fun_instead_of_insert_tbl();
-COPY instead_of_insert_tbl_view FROM stdin;
-SELECT * FROM instead_of_insert_tbl;
- id | name
-----+-------
- 1 | test1
-(1 row)
-
--- Test of COPY optimization with view using INSTEAD OF INSERT
--- trigger when relation is created in the same transaction as
--- when COPY is executed.
-BEGIN;
-CREATE VIEW instead_of_insert_tbl_view_2 as select ''::text as str;
-CREATE TRIGGER trig_instead_of_insert_tbl_view_2
- INSTEAD OF INSERT ON instead_of_insert_tbl_view_2
- FOR EACH ROW EXECUTE PROCEDURE fun_instead_of_insert_tbl();
-COPY instead_of_insert_tbl_view_2 FROM stdin;
-SELECT * FROM instead_of_insert_tbl;
- id | name
-----+-------
- 1 | test1
- 2 | test1
-(2 rows)
-
-COMMIT;
--- tests for on_error option
-CREATE TABLE check_ign_err (n int, m int[], k int);
-COPY check_ign_err FROM STDIN WITH (on_error stop);
-ERROR: invalid input syntax for type integer: "a"
-CONTEXT: COPY check_ign_err, line 2, column n: "a"
--- want context for notices
-\set SHOW_CONTEXT always
-COPY check_ign_err FROM STDIN WITH (on_error ignore, log_verbosity verbose);
-NOTICE: skipping row due to data type incompatibility at line 2 for column "n": "a"
-CONTEXT: COPY check_ign_err
-NOTICE: skipping row due to data type incompatibility at line 3 for column "k": "3333333333"
-CONTEXT: COPY check_ign_err
-NOTICE: skipping row due to data type incompatibility at line 4 for column "m": "{a, 4}"
-CONTEXT: COPY check_ign_err
-NOTICE: skipping row due to data type incompatibility at line 5 for column "n": ""
-CONTEXT: COPY check_ign_err
-NOTICE: skipping row due to data type incompatibility at line 7 for column "m": "a"
-CONTEXT: COPY check_ign_err
-NOTICE: skipping row due to data type incompatibility at line 8 for column "k": "a"
-CONTEXT: COPY check_ign_err
-NOTICE: 6 rows were skipped due to data type incompatibility
--- tests for on_error option with log_verbosity and null constraint via domain
-CREATE DOMAIN dcheck_ign_err2 varchar(15) NOT NULL;
-CREATE TABLE check_ign_err2 (n int, m int[], k int, l dcheck_ign_err2);
-COPY check_ign_err2 FROM STDIN WITH (on_error ignore, log_verbosity verbose);
-NOTICE: skipping row due to data type incompatibility at line 2 for column "l": null input
-CONTEXT: COPY check_ign_err2
-NOTICE: 1 row was skipped due to data type incompatibility
-COPY check_ign_err2 FROM STDIN WITH (on_error ignore, log_verbosity silent);
--- reset context choice
-\set SHOW_CONTEXT errors
-SELECT * FROM check_ign_err;
- n | m | k
----+-----+---
- 1 | {1} | 1
- 5 | {5} | 5
- 8 | {8} | 8
-(3 rows)
-
-SELECT * FROM check_ign_err2;
- n | m | k | l
----+-----+---+-------
- 1 | {1} | 1 | 'foo'
- 3 | {3} | 3 | 'bar'
-(2 rows)
-
--- test datatype error that can't be handled as soft: should fail
-CREATE TABLE hard_err(foo widget);
-COPY hard_err FROM STDIN WITH (on_error ignore);
-ERROR: invalid input syntax for type widget: "1"
-CONTEXT: COPY hard_err, line 1, column foo: "1"
--- test missing data: should fail
-COPY check_ign_err FROM STDIN WITH (on_error ignore);
-ERROR: missing data for column "k"
-CONTEXT: COPY check_ign_err, line 1: "1 {1}"
--- test extra data: should fail
-COPY check_ign_err FROM STDIN WITH (on_error ignore);
-ERROR: extra data after last expected column
-CONTEXT: COPY check_ign_err, line 1: "1 {1} 3 abc"
--- tests for reject_limit option
-COPY check_ign_err FROM STDIN WITH (on_error ignore, reject_limit 3);
-ERROR: skipped more than REJECT_LIMIT (3) rows due to data type incompatibility
-CONTEXT: COPY check_ign_err, line 5, column n: ""
-COPY check_ign_err FROM STDIN WITH (on_error ignore, reject_limit 4);
-NOTICE: 4 rows were skipped due to data type incompatibility
--- clean up
-DROP TABLE forcetest;
-DROP TABLE vistest;
-DROP FUNCTION truncate_in_subxact();
-DROP TABLE x, y;
-DROP TABLE rls_t1 CASCADE;
-DROP ROLE regress_rls_copy_user;
-DROP ROLE regress_rls_copy_user_colperms;
-DROP FUNCTION fn_x_before();
-DROP FUNCTION fn_x_after();
-DROP TABLE instead_of_insert_tbl;
-DROP VIEW instead_of_insert_tbl_view;
-DROP VIEW instead_of_insert_tbl_view_2;
-DROP FUNCTION fun_instead_of_insert_tbl();
-DROP TABLE check_ign_err;
-DROP TABLE check_ign_err2;
-DROP DOMAIN dcheck_ign_err2;
-DROP TABLE hard_err;
---
--- COPY FROM ... DEFAULT
---
-create temp table copy_default (
- id integer primary key,
- text_value text not null default 'test',
- ts_value timestamp without time zone not null default '2022-07-05'
-);
--- if DEFAULT is not specified, then the marker will be regular data
-copy copy_default from stdin;
-select id, text_value, ts_value from copy_default;
- id | text_value | ts_value
-----+------------+--------------------------
- 1 | value | Mon Jul 04 00:00:00 2022
- 2 | D | Tue Jul 05 00:00:00 2022
-(2 rows)
-
-truncate copy_default;
-copy copy_default from stdin with (format csv);
-select id, text_value, ts_value from copy_default;
- id | text_value | ts_value
-----+------------+--------------------------
- 1 | value | Mon Jul 04 00:00:00 2022
- 2 | \D | Tue Jul 05 00:00:00 2022
-(2 rows)
-
-truncate copy_default;
--- DEFAULT cannot be used in binary mode
-copy copy_default from stdin with (format binary, default '\D');
-ERROR: cannot specify DEFAULT in BINARY mode
--- DEFAULT cannot be new line nor carriage return
-copy copy_default from stdin with (default E'\n');
-ERROR: COPY default representation cannot use newline or carriage return
-copy copy_default from stdin with (default E'\r');
-ERROR: COPY default representation cannot use newline or carriage return
--- DELIMITER cannot appear in DEFAULT spec
-copy copy_default from stdin with (delimiter ';', default 'test;test');
-ERROR: COPY delimiter character must not appear in the DEFAULT specification
--- CSV quote cannot appear in DEFAULT spec
-copy copy_default from stdin with (format csv, quote '"', default 'test"test');
-ERROR: CSV quote character must not appear in the DEFAULT specification
--- NULL and DEFAULT spec must be different
-copy copy_default from stdin with (default '\N');
-ERROR: NULL specification and DEFAULT specification cannot be the same
--- cannot use DEFAULT marker in column that has no DEFAULT value
-copy copy_default from stdin with (default '\D');
-ERROR: unexpected default marker in COPY data
-DETAIL: Column "id" has no default value.
-CONTEXT: COPY copy_default, line 1: "\D value '2022-07-04'"
-copy copy_default from stdin with (format csv, default '\D');
-ERROR: unexpected default marker in COPY data
-DETAIL: Column "id" has no default value.
-CONTEXT: COPY copy_default, line 1: "\D,value,2022-07-04"
--- The DEFAULT marker must be unquoted and unescaped or it's not recognized
-copy copy_default from stdin with (default '\D');
-select id, text_value, ts_value from copy_default;
- id | text_value | ts_value
-----+------------+--------------------------
- 1 | test | Mon Jul 04 00:00:00 2022
- 2 | \D | Mon Jul 04 00:00:00 2022
- 3 | "D" | Mon Jul 04 00:00:00 2022
-(3 rows)
-
-truncate copy_default;
-copy copy_default from stdin with (format csv, default '\D');
-select id, text_value, ts_value from copy_default;
- id | text_value | ts_value
-----+------------+--------------------------
- 1 | test | Mon Jul 04 00:00:00 2022
- 2 | \\D | Mon Jul 04 00:00:00 2022
- 3 | \D | Mon Jul 04 00:00:00 2022
-(3 rows)
-
-truncate copy_default;
--- successful usage of DEFAULT option in COPY
-copy copy_default from stdin with (default '\D');
-select id, text_value, ts_value from copy_default;
- id | text_value | ts_value
-----+------------+--------------------------
- 1 | value | Mon Jul 04 00:00:00 2022
- 2 | test | Sun Jul 03 00:00:00 2022
- 3 | test | Tue Jul 05 00:00:00 2022
-(3 rows)
-
-truncate copy_default;
-copy copy_default from stdin with (format csv, default '\D');
-select id, text_value, ts_value from copy_default;
- id | text_value | ts_value
-----+------------+--------------------------
- 1 | value | Mon Jul 04 00:00:00 2022
- 2 | test | Sun Jul 03 00:00:00 2022
- 3 | test | Tue Jul 05 00:00:00 2022
-(3 rows)
-
-truncate copy_default;
--- DEFAULT cannot be used in COPY TO
-copy (select 1 as test) TO stdout with (default '\D');
-ERROR: COPY DEFAULT cannot be used with COPY TO
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/temp.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/temp.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/temp.out 2024-11-15 02:50:52.506022717 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/temp.out 2024-11-15 02:59:18.185116971 +0000
@@ -1,412 +1,2 @@
---
--- TEMP
--- Test temp relations and indexes
---
--- test temp table/index masking
-CREATE TABLE temptest(col int);
-CREATE INDEX i_temptest ON temptest(col);
-CREATE TEMP TABLE temptest(tcol int);
-CREATE INDEX i_temptest ON temptest(tcol);
-SELECT * FROM temptest;
- tcol
-------
-(0 rows)
-
-DROP INDEX i_temptest;
-DROP TABLE temptest;
-SELECT * FROM temptest;
- col
------
-(0 rows)
-
-DROP INDEX i_temptest;
-DROP TABLE temptest;
--- test temp table selects
-CREATE TABLE temptest(col int);
-INSERT INTO temptest VALUES (1);
-CREATE TEMP TABLE temptest(tcol float);
-INSERT INTO temptest VALUES (2.1);
-SELECT * FROM temptest;
- tcol
-------
- 2.1
-(1 row)
-
-DROP TABLE temptest;
-SELECT * FROM temptest;
- col
------
- 1
-(1 row)
-
-DROP TABLE temptest;
--- test temp table deletion
-CREATE TEMP TABLE temptest(col int);
-\c
-SELECT * FROM temptest;
-ERROR: relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
- ^
--- Test ON COMMIT DELETE ROWS
-CREATE TEMP TABLE temptest(col int) ON COMMIT DELETE ROWS;
--- while we're here, verify successful truncation of index with SQL function
-CREATE INDEX ON temptest(bit_length(''));
-BEGIN;
-INSERT INTO temptest VALUES (1);
-INSERT INTO temptest VALUES (2);
-SELECT * FROM temptest;
- col
------
- 1
- 2
-(2 rows)
-
-COMMIT;
-SELECT * FROM temptest;
- col
------
-(0 rows)
-
-DROP TABLE temptest;
-BEGIN;
-CREATE TEMP TABLE temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
-SELECT * FROM temptest;
- col
------
- 1
-(1 row)
-
-COMMIT;
-SELECT * FROM temptest;
- col
------
-(0 rows)
-
-DROP TABLE temptest;
--- Test ON COMMIT DROP
-BEGIN;
-CREATE TEMP TABLE temptest(col int) ON COMMIT DROP;
-INSERT INTO temptest VALUES (1);
-INSERT INTO temptest VALUES (2);
-SELECT * FROM temptest;
- col
------
- 1
- 2
-(2 rows)
-
-COMMIT;
-SELECT * FROM temptest;
-ERROR: relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
- ^
-BEGIN;
-CREATE TEMP TABLE temptest(col) ON COMMIT DROP AS SELECT 1;
-SELECT * FROM temptest;
- col
------
- 1
-(1 row)
-
-COMMIT;
-SELECT * FROM temptest;
-ERROR: relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
- ^
--- Test it with a CHECK condition that produces a toasted pg_constraint entry
-BEGIN;
-do $$
-begin
- execute format($cmd$
- CREATE TEMP TABLE temptest (col text CHECK (col < %L)) ON COMMIT DROP
- $cmd$,
- (SELECT string_agg(g.i::text || ':' || random()::text, '|')
- FROM generate_series(1, 100) g(i)));
-end$$;
-SELECT * FROM temptest;
- col
------
-(0 rows)
-
-COMMIT;
-SELECT * FROM temptest;
-ERROR: relation "temptest" does not exist
-LINE 1: SELECT * FROM temptest;
- ^
--- ON COMMIT is only allowed for TEMP
-CREATE TABLE temptest(col int) ON COMMIT DELETE ROWS;
-ERROR: ON COMMIT can only be used on temporary tables
-CREATE TABLE temptest(col) ON COMMIT DELETE ROWS AS SELECT 1;
-ERROR: ON COMMIT can only be used on temporary tables
--- Test foreign keys
-BEGIN;
-CREATE TEMP TABLE temptest1(col int PRIMARY KEY);
-CREATE TEMP TABLE temptest2(col int REFERENCES temptest1)
- ON COMMIT DELETE ROWS;
-INSERT INTO temptest1 VALUES (1);
-INSERT INTO temptest2 VALUES (1);
-COMMIT;
-SELECT * FROM temptest1;
- col
------
- 1
-(1 row)
-
-SELECT * FROM temptest2;
- col
------
-(0 rows)
-
-BEGIN;
-CREATE TEMP TABLE temptest3(col int PRIMARY KEY) ON COMMIT DELETE ROWS;
-CREATE TEMP TABLE temptest4(col int REFERENCES temptest3);
-COMMIT;
-ERROR: unsupported ON COMMIT and foreign key combination
-DETAIL: Table "temptest4" references "temptest3", but they do not have the same ON COMMIT setting.
--- Test manipulation of temp schema's placement in search path
-create table public.whereami (f1 text);
-insert into public.whereami values ('public');
-create temp table whereami (f1 text);
-insert into whereami values ('temp');
-create function public.whoami() returns text
- as $$select 'public'::text$$ language sql;
-create function pg_temp.whoami() returns text
- as $$select 'temp'::text$$ language sql;
--- default should have pg_temp implicitly first, but only for tables
-select * from whereami;
- f1
-------
- temp
-(1 row)
-
-select whoami();
- whoami
---------
- public
-(1 row)
-
--- can list temp first explicitly, but it still doesn't affect functions
-set search_path = pg_temp, public;
-select * from whereami;
- f1
-------
- temp
-(1 row)
-
-select whoami();
- whoami
---------
- public
-(1 row)
-
--- or put it last for security
-set search_path = public, pg_temp;
-select * from whereami;
- f1
---------
- public
-(1 row)
-
-select whoami();
- whoami
---------
- public
-(1 row)
-
--- you can invoke a temp function explicitly, though
-select pg_temp.whoami();
- whoami
---------
- temp
-(1 row)
-
-drop table public.whereami;
--- types in temp schema
-set search_path = pg_temp, public;
-create domain pg_temp.nonempty as text check (value <> '');
--- function-syntax invocation of types matches rules for functions
-select nonempty('');
-ERROR: function nonempty(unknown) does not exist
-LINE 1: select nonempty('');
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select pg_temp.nonempty('');
-ERROR: value for domain nonempty violates check constraint "nonempty_check"
--- other syntax matches rules for tables
-select ''::nonempty;
-ERROR: value for domain nonempty violates check constraint "nonempty_check"
-reset search_path;
--- For partitioned temp tables, ON COMMIT actions ignore storage-less
--- partitioned tables.
-begin;
-create temp table temp_parted_oncommit (a int)
- partition by list (a) on commit delete rows;
-create temp table temp_parted_oncommit_1
- partition of temp_parted_oncommit
- for values in (1) on commit delete rows;
-insert into temp_parted_oncommit values (1);
-commit;
--- partitions are emptied by the previous commit
-select * from temp_parted_oncommit;
- a
----
-(0 rows)
-
-drop table temp_parted_oncommit;
--- Check dependencies between ON COMMIT actions with a partitioned
--- table and its partitions. Using ON COMMIT DROP on a parent removes
--- the whole set.
-begin;
-create temp table temp_parted_oncommit_test (a int)
- partition by list (a) on commit drop;
-create temp table temp_parted_oncommit_test1
- partition of temp_parted_oncommit_test
- for values in (1) on commit delete rows;
-create temp table temp_parted_oncommit_test2
- partition of temp_parted_oncommit_test
- for values in (2) on commit drop;
-insert into temp_parted_oncommit_test values (1), (2);
-commit;
--- no relations remain in this case.
-select relname from pg_class where relname ~ '^temp_parted_oncommit_test';
- relname
----------
-(0 rows)
-
--- Using ON COMMIT DELETE on a partitioned table does not remove
--- all rows if partitions preserve their data.
-begin;
-create temp table temp_parted_oncommit_test (a int)
- partition by list (a) on commit delete rows;
-create temp table temp_parted_oncommit_test1
- partition of temp_parted_oncommit_test
- for values in (1) on commit preserve rows;
-create temp table temp_parted_oncommit_test2
- partition of temp_parted_oncommit_test
- for values in (2) on commit drop;
-insert into temp_parted_oncommit_test values (1), (2);
-commit;
--- Data from the remaining partition is still here as its rows are
--- preserved.
-select * from temp_parted_oncommit_test;
- a
----
- 1
-(1 row)
-
--- two relations remain in this case.
-select relname from pg_class where relname ~ '^temp_parted_oncommit_test'
- order by relname;
- relname
-----------------------------
- temp_parted_oncommit_test
- temp_parted_oncommit_test1
-(2 rows)
-
-drop table temp_parted_oncommit_test;
--- Check dependencies between ON COMMIT actions with inheritance trees.
--- Using ON COMMIT DROP on a parent removes the whole set.
-begin;
-create temp table temp_inh_oncommit_test (a int) on commit drop;
-create temp table temp_inh_oncommit_test1 ()
- inherits(temp_inh_oncommit_test) on commit delete rows;
-insert into temp_inh_oncommit_test1 values (1);
-commit;
--- no relations remain in this case
-select relname from pg_class where relname ~ '^temp_inh_oncommit_test';
- relname
----------
-(0 rows)
-
--- Data on the parent is removed, and the child goes away.
-begin;
-create temp table temp_inh_oncommit_test (a int) on commit delete rows;
-create temp table temp_inh_oncommit_test1 ()
- inherits(temp_inh_oncommit_test) on commit drop;
-insert into temp_inh_oncommit_test1 values (1);
-insert into temp_inh_oncommit_test values (1);
-commit;
-select * from temp_inh_oncommit_test;
- a
----
-(0 rows)
-
--- one relation remains
-select relname from pg_class where relname ~ '^temp_inh_oncommit_test';
- relname
-------------------------
- temp_inh_oncommit_test
-(1 row)
-
-drop table temp_inh_oncommit_test;
--- Tests with two-phase commit
--- Transactions creating objects in a temporary namespace cannot be used
--- with two-phase commit.
--- These cases generate errors about temporary namespace.
--- Function creation
-begin;
-create function pg_temp.twophase_func() returns void as
- $$ select '2pc_func'::text $$ language sql;
-prepare transaction 'twophase_func';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
--- Function drop
-create function pg_temp.twophase_func() returns void as
- $$ select '2pc_func'::text $$ language sql;
-begin;
-drop function pg_temp.twophase_func();
-prepare transaction 'twophase_func';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
--- Operator creation
-begin;
-create operator pg_temp.@@ (leftarg = int4, rightarg = int4, procedure = int4mi);
-prepare transaction 'twophase_operator';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
--- These generate errors about temporary tables.
-begin;
-create type pg_temp.twophase_type as (a int);
-prepare transaction 'twophase_type';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
-begin;
-create view pg_temp.twophase_view as select 1;
-prepare transaction 'twophase_view';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
-begin;
-create sequence pg_temp.twophase_seq;
-prepare transaction 'twophase_sequence';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
--- Temporary tables cannot be used with two-phase commit.
-create temp table twophase_tab (a int);
-begin;
-select a from twophase_tab;
- a
----
-(0 rows)
-
-prepare transaction 'twophase_tab';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
-begin;
-insert into twophase_tab values (1);
-prepare transaction 'twophase_tab';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
-begin;
-lock twophase_tab in access exclusive mode;
-prepare transaction 'twophase_tab';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
-begin;
-drop table twophase_tab;
-prepare transaction 'twophase_tab';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
--- Corner case: current_schema may create a temporary schema if namespace
--- creation is pending, so check after that. First reset the connection
--- to remove the temporary namespace.
-\c -
-SET search_path TO 'pg_temp';
-BEGIN;
-SELECT current_schema() ~ 'pg_temp' AS is_temp_schema;
- is_temp_schema
-----------------
- t
-(1 row)
-
-PREPARE TRANSACTION 'twophase_search';
-ERROR: cannot PREPARE a transaction that has operated on temporary objects
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/domain.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/domain.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/domain.out 2024-11-15 02:50:52.434141211 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/domain.out 2024-11-15 02:59:18.193116981 +0000
@@ -1,1333 +1,2 @@
---
--- Test domains.
---
--- Test Comment / Drop
-create domain domaindroptest int4;
-comment on domain domaindroptest is 'About to drop this..';
-create domain dependenttypetest domaindroptest;
--- fail because of dependent type
-drop domain domaindroptest;
-ERROR: cannot drop type domaindroptest because other objects depend on it
-DETAIL: type dependenttypetest depends on type domaindroptest
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-drop domain domaindroptest cascade;
-NOTICE: drop cascades to type dependenttypetest
--- this should fail because already gone
-drop domain domaindroptest cascade;
-ERROR: type "domaindroptest" does not exist
--- Test domain input.
--- Note: the point of checking both INSERT and COPY FROM is that INSERT
--- exercises CoerceToDomain while COPY exercises domain_in.
-create domain domainvarchar varchar(5);
-create domain domainnumeric numeric(8,2);
-create domain domainint4 int4;
-create domain domaintext text;
--- Test explicit coercions --- these should succeed (and truncate)
-SELECT cast('123456' as domainvarchar);
- domainvarchar
----------------
- 12345
-(1 row)
-
-SELECT cast('12345' as domainvarchar);
- domainvarchar
----------------
- 12345
-(1 row)
-
--- Test tables using domains
-create table basictest
- ( testint4 domainint4
- , testtext domaintext
- , testvarchar domainvarchar
- , testnumeric domainnumeric
- );
-INSERT INTO basictest values ('88', 'haha', 'short', '123.12'); -- Good
-INSERT INTO basictest values ('88', 'haha', 'short text', '123.12'); -- Bad varchar
-ERROR: value too long for type character varying(5)
-INSERT INTO basictest values ('88', 'haha', 'short', '123.1212'); -- Truncate numeric
--- Test copy
-COPY basictest (testvarchar) FROM stdin; -- fail
-ERROR: value too long for type character varying(5)
-CONTEXT: COPY basictest, line 1, column testvarchar: "notsoshorttext"
-COPY basictest (testvarchar) FROM stdin;
-select * from basictest;
- testint4 | testtext | testvarchar | testnumeric
-----------+----------+-------------+-------------
- 88 | haha | short | 123.12
- 88 | haha | short | 123.12
- | | short |
-(3 rows)
-
--- check that domains inherit operations from base types
-select testtext || testvarchar as concat, testnumeric + 42 as sum
-from basictest;
- concat | sum
------------+--------
- hahashort | 165.12
- hahashort | 165.12
- |
-(3 rows)
-
--- check that union/case/coalesce type resolution handles domains properly
-select pg_typeof(coalesce(4::domainint4, 7));
- pg_typeof
------------
- integer
-(1 row)
-
-select pg_typeof(coalesce(4::domainint4, 7::domainint4));
- pg_typeof
-------------
- domainint4
-(1 row)
-
-drop table basictest;
-drop domain domainvarchar restrict;
-drop domain domainnumeric restrict;
-drop domain domainint4 restrict;
-drop domain domaintext;
--- Test non-error-throwing input
-create domain positiveint int4 check(value > 0);
-create domain weirdfloat float8 check((1 / value) < 10);
-select pg_input_is_valid('1', 'positiveint');
- pg_input_is_valid
--------------------
- t
-(1 row)
-
-select pg_input_is_valid('junk', 'positiveint');
- pg_input_is_valid
--------------------
- f
-(1 row)
-
-select pg_input_is_valid('-1', 'positiveint');
- pg_input_is_valid
--------------------
- f
-(1 row)
-
-select * from pg_input_error_info('junk', 'positiveint');
- message | detail | hint | sql_error_code
------------------------------------------------+--------+------+----------------
- invalid input syntax for type integer: "junk" | | | 22P02
-(1 row)
-
-select * from pg_input_error_info('-1', 'positiveint');
- message | detail | hint | sql_error_code
-----------------------------------------------------------------------------+--------+------+----------------
- value for domain positiveint violates check constraint "positiveint_check" | | | 23514
-(1 row)
-
-select * from pg_input_error_info('junk', 'weirdfloat');
- message | detail | hint | sql_error_code
---------------------------------------------------------+--------+------+----------------
- invalid input syntax for type double precision: "junk" | | | 22P02
-(1 row)
-
-select * from pg_input_error_info('0.01', 'weirdfloat');
- message | detail | hint | sql_error_code
---------------------------------------------------------------------------+--------+------+----------------
- value for domain weirdfloat violates check constraint "weirdfloat_check" | | | 23514
-(1 row)
-
--- We currently can't trap errors raised in the CHECK expression itself
-select * from pg_input_error_info('0', 'weirdfloat');
-ERROR: division by zero
-drop domain positiveint;
-drop domain weirdfloat;
--- Test domains over array types
-create domain domainint4arr int4[1];
-create domain domainchar4arr varchar(4)[2][3];
-create table domarrtest
- ( testint4arr domainint4arr
- , testchar4arr domainchar4arr
- );
-INSERT INTO domarrtest values ('{2,2}', '{{"a","b"},{"c","d"}}');
-INSERT INTO domarrtest values ('{{2,2},{2,2}}', '{{"a","b"}}');
-INSERT INTO domarrtest values ('{2,2}', '{{"a","b"},{"c","d"},{"e","f"}}');
-INSERT INTO domarrtest values ('{2,2}', '{{"a"},{"c"}}');
-INSERT INTO domarrtest values (NULL, '{{"a","b","c"},{"d","e","f"}}');
-INSERT INTO domarrtest values (NULL, '{{"toolong","b","c"},{"d","e","f"}}');
-ERROR: value too long for type character varying(4)
-INSERT INTO domarrtest (testint4arr[1], testint4arr[3]) values (11,22);
-select * from domarrtest;
- testint4arr | testchar4arr
----------------+---------------------
- {2,2} | {{a,b},{c,d}}
- {{2,2},{2,2}} | {{a,b}}
- {2,2} | {{a,b},{c,d},{e,f}}
- {2,2} | {{a},{c}}
- | {{a,b,c},{d,e,f}}
- {11,NULL,22} |
-(6 rows)
-
-select testint4arr[1], testchar4arr[2:2] from domarrtest;
- testint4arr | testchar4arr
--------------+--------------
- 2 | {{c,d}}
- | {}
- 2 | {{c,d}}
- 2 | {{c}}
- | {{d,e,f}}
- 11 |
-(6 rows)
-
-select array_dims(testint4arr), array_dims(testchar4arr) from domarrtest;
- array_dims | array_dims
-------------+------------
- [1:2] | [1:2][1:2]
- [1:2][1:2] | [1:1][1:2]
- [1:2] | [1:3][1:2]
- [1:2] | [1:2][1:1]
- | [1:2][1:3]
- [1:3] |
-(6 rows)
-
-COPY domarrtest FROM stdin;
-COPY domarrtest FROM stdin; -- fail
-ERROR: value too long for type character varying(4)
-CONTEXT: COPY domarrtest, line 1, column testchar4arr: "{qwerty,w,e}"
-select * from domarrtest;
- testint4arr | testchar4arr
----------------+---------------------
- {2,2} | {{a,b},{c,d}}
- {{2,2},{2,2}} | {{a,b}}
- {2,2} | {{a,b},{c,d},{e,f}}
- {2,2} | {{a},{c}}
- | {{a,b,c},{d,e,f}}
- {11,NULL,22} |
- {3,4} | {q,w,e}
- |
-(8 rows)
-
-update domarrtest set
- testint4arr[1] = testint4arr[1] + 1,
- testint4arr[3] = testint4arr[3] - 1
-where testchar4arr is null;
-select * from domarrtest where testchar4arr is null;
- testint4arr | testchar4arr
-------------------+--------------
- {12,NULL,21} |
- {NULL,NULL,NULL} |
-(2 rows)
-
-drop table domarrtest;
-drop domain domainint4arr restrict;
-drop domain domainchar4arr restrict;
-create domain dia as int[];
-select '{1,2,3}'::dia;
- dia
----------
- {1,2,3}
-(1 row)
-
-select array_dims('{1,2,3}'::dia);
- array_dims
-------------
- [1:3]
-(1 row)
-
-select pg_typeof('{1,2,3}'::dia);
- pg_typeof
------------
- dia
-(1 row)
-
-select pg_typeof('{1,2,3}'::dia || 42); -- should be int[] not dia
- pg_typeof
------------
- integer[]
-(1 row)
-
-drop domain dia;
--- Test domains over composites
-create type comptype as (r float8, i float8);
-create domain dcomptype as comptype;
-create table dcomptable (d1 dcomptype unique);
-insert into dcomptable values (row(1,2)::dcomptype);
-insert into dcomptable values (row(3,4)::comptype);
-insert into dcomptable values (row(1,2)::dcomptype); -- fail on uniqueness
-ERROR: duplicate key value violates unique constraint "dcomptable_d1_key"
-DETAIL: Key (d1)=((1,2)) already exists.
-insert into dcomptable (d1.r) values(11);
-select * from dcomptable;
- d1
--------
- (1,2)
- (3,4)
- (11,)
-(3 rows)
-
-select (d1).r, (d1).i, (d1).* from dcomptable;
- r | i | r | i
-----+---+----+---
- 1 | 2 | 1 | 2
- 3 | 4 | 3 | 4
- 11 | | 11 |
-(3 rows)
-
-update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0;
-select * from dcomptable;
- d1
--------
- (11,)
- (2,2)
- (4,4)
-(3 rows)
-
-alter domain dcomptype add constraint c1 check ((value).r <= (value).i);
-alter domain dcomptype add constraint c2 check ((value).r > (value).i); -- fail
-ERROR: column "d1" of table "dcomptable" contains values that violate the new constraint
-select row(2,1)::dcomptype; -- fail
-ERROR: value for domain dcomptype violates check constraint "c1"
-insert into dcomptable values (row(1,2)::comptype);
-insert into dcomptable values (row(2,1)::comptype); -- fail
-ERROR: value for domain dcomptype violates check constraint "c1"
-insert into dcomptable (d1.r) values(99);
-insert into dcomptable (d1.r, d1.i) values(99, 100);
-insert into dcomptable (d1.r, d1.i) values(100, 99); -- fail
-ERROR: value for domain dcomptype violates check constraint "c1"
-update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0; -- fail
-ERROR: value for domain dcomptype violates check constraint "c1"
-update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0;
-select * from dcomptable;
- d1
-----------
- (11,)
- (99,)
- (1,3)
- (3,5)
- (0,3)
- (98,101)
-(6 rows)
-
-explain (verbose, costs off)
- update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0;
- QUERY PLAN
------------------------------------------------------------------------------------------------
- Update on public.dcomptable
- -> Seq Scan on public.dcomptable
- Output: ROW(((d1).r - '1'::double precision), ((d1).i + '1'::double precision)), ctid
- Filter: ((dcomptable.d1).i > '0'::double precision)
-(4 rows)
-
-create rule silly as on delete to dcomptable do instead
- update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0;
-\d+ dcomptable
- Table "public.dcomptable"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+-----------+-----------+----------+---------+----------+--------------+-------------
- d1 | dcomptype | | | | extended | |
-Indexes:
- "dcomptable_d1_key" UNIQUE CONSTRAINT, btree (d1)
-Rules:
- silly AS
- ON DELETE TO dcomptable DO INSTEAD UPDATE dcomptable SET d1.r = (dcomptable.d1).r - 1::double precision, d1.i = (dcomptable.d1).i + 1::double precision
- WHERE (dcomptable.d1).i > 0::double precision
-
-create function makedcomp(r float8, i float8) returns dcomptype
-as 'select row(r, i)' language sql;
-select makedcomp(1,2);
- makedcomp
------------
- (1,2)
-(1 row)
-
-select makedcomp(2,1); -- fail
-ERROR: value for domain dcomptype violates check constraint "c1"
-select * from makedcomp(1,2) m;
- r | i
----+---
- 1 | 2
-(1 row)
-
-select m, m is not null from makedcomp(1,2) m;
- m | ?column?
--------+----------
- (1,2) | t
-(1 row)
-
-drop function makedcomp(float8, float8);
-drop table dcomptable;
-drop type comptype cascade;
-NOTICE: drop cascades to type dcomptype
--- check altering and dropping columns used by domain constraints
-create type comptype as (r float8, i float8);
-create domain dcomptype as comptype;
-alter domain dcomptype add constraint c1 check ((value).r > 0);
-comment on constraint c1 on domain dcomptype is 'random commentary';
-select row(0,1)::dcomptype; -- fail
-ERROR: value for domain dcomptype violates check constraint "c1"
-alter type comptype alter attribute r type varchar; -- fail
-ERROR: operator does not exist: character varying > double precision
-HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
-alter type comptype alter attribute r type bigint;
-alter type comptype drop attribute r; -- fail
-ERROR: cannot drop column r of composite type comptype because other objects depend on it
-DETAIL: constraint c1 depends on column r of composite type comptype
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-alter type comptype drop attribute i;
-select conname, obj_description(oid, 'pg_constraint') from pg_constraint
- where contypid = 'dcomptype'::regtype; -- check comment is still there
- conname | obj_description
----------+-------------------
- c1 | random commentary
-(1 row)
-
-drop type comptype cascade;
-NOTICE: drop cascades to type dcomptype
--- Test domains over arrays of composite
-create type comptype as (r float8, i float8);
-create domain dcomptypea as comptype[];
-create table dcomptable (d1 dcomptypea unique);
-insert into dcomptable values (array[row(1,2)]::dcomptypea);
-insert into dcomptable values (array[row(3,4), row(5,6)]::comptype[]);
-insert into dcomptable values (array[row(7,8)::comptype, row(9,10)::comptype]);
-insert into dcomptable values (array[row(1,2)]::dcomptypea); -- fail on uniqueness
-ERROR: duplicate key value violates unique constraint "dcomptable_d1_key"
-DETAIL: Key (d1)=({"(1,2)"}) already exists.
-insert into dcomptable (d1[1]) values(row(9,10));
-insert into dcomptable (d1[1].r) values(11);
-select * from dcomptable;
- d1
---------------------
- {"(1,2)"}
- {"(3,4)","(5,6)"}
- {"(7,8)","(9,10)"}
- {"(9,10)"}
- {"(11,)"}
-(5 rows)
-
-select d1[2], d1[1].r, d1[1].i from dcomptable;
- d1 | r | i
---------+----+----
- | 1 | 2
- (5,6) | 3 | 4
- (9,10) | 7 | 8
- | 9 | 10
- | 11 |
-(5 rows)
-
-update dcomptable set d1[2] = row(d1[2].i, d1[2].r);
-select * from dcomptable;
- d1
---------------------
- {"(1,2)","(,)"}
- {"(3,4)","(6,5)"}
- {"(7,8)","(10,9)"}
- {"(9,10)","(,)"}
- {"(11,)","(,)"}
-(5 rows)
-
-update dcomptable set d1[1].r = d1[1].r + 1 where d1[1].i > 0;
-select * from dcomptable;
- d1
---------------------
- {"(11,)","(,)"}
- {"(2,2)","(,)"}
- {"(4,4)","(6,5)"}
- {"(8,8)","(10,9)"}
- {"(10,10)","(,)"}
-(5 rows)
-
-alter domain dcomptypea add constraint c1 check (value[1].r <= value[1].i);
-alter domain dcomptypea add constraint c2 check (value[1].r > value[1].i); -- fail
-ERROR: column "d1" of table "dcomptable" contains values that violate the new constraint
-select array[row(2,1)]::dcomptypea; -- fail
-ERROR: value for domain dcomptypea violates check constraint "c1"
-insert into dcomptable values (array[row(1,2)]::comptype[]);
-insert into dcomptable values (array[row(2,1)]::comptype[]); -- fail
-ERROR: value for domain dcomptypea violates check constraint "c1"
-insert into dcomptable (d1[1].r) values(99);
-insert into dcomptable (d1[1].r, d1[1].i) values(99, 100);
-insert into dcomptable (d1[1].r, d1[1].i) values(100, 99); -- fail
-ERROR: value for domain dcomptypea violates check constraint "c1"
-update dcomptable set d1[1].r = d1[1].r + 1 where d1[1].i > 0; -- fail
-ERROR: value for domain dcomptypea violates check constraint "c1"
-update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1
- where d1[1].i > 0;
-select * from dcomptable;
- d1
---------------------
- {"(11,)","(,)"}
- {"(99,)"}
- {"(1,3)","(,)"}
- {"(3,5)","(6,5)"}
- {"(7,9)","(10,9)"}
- {"(9,11)","(,)"}
- {"(0,3)"}
- {"(98,101)"}
-(8 rows)
-
-explain (verbose, costs off)
- update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1
- where d1[1].i > 0;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
- Update on public.dcomptable
- -> Seq Scan on public.dcomptable
- Output: (d1[1].r := (d1[1].r - '1'::double precision))[1].i := (d1[1].i + '1'::double precision), ctid
- Filter: (dcomptable.d1[1].i > '0'::double precision)
-(4 rows)
-
-create rule silly as on delete to dcomptable do instead
- update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1
- where d1[1].i > 0;
-\d+ dcomptable
- Table "public.dcomptable"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+------------+-----------+----------+---------+----------+--------------+-------------
- d1 | dcomptypea | | | | extended | |
-Indexes:
- "dcomptable_d1_key" UNIQUE CONSTRAINT, btree (d1)
-Rules:
- silly AS
- ON DELETE TO dcomptable DO INSTEAD UPDATE dcomptable SET d1[1].r = dcomptable.d1[1].r - 1::double precision, d1[1].i = dcomptable.d1[1].i + 1::double precision
- WHERE dcomptable.d1[1].i > 0::double precision
-
-drop table dcomptable;
-drop type comptype cascade;
-NOTICE: drop cascades to type dcomptypea
--- Test arrays over domains
-create domain posint as int check (value > 0);
-create table pitable (f1 posint[]);
-insert into pitable values(array[42]);
-insert into pitable values(array[-1]); -- fail
-ERROR: value for domain posint violates check constraint "posint_check"
-insert into pitable values('{0}'); -- fail
-ERROR: value for domain posint violates check constraint "posint_check"
-LINE 1: insert into pitable values('{0}');
- ^
-update pitable set f1[1] = f1[1] + 1;
-update pitable set f1[1] = 0; -- fail
-ERROR: value for domain posint violates check constraint "posint_check"
-select * from pitable;
- f1
-------
- {43}
-(1 row)
-
-drop table pitable;
-create domain vc4 as varchar(4);
-create table vc4table (f1 vc4[]);
-insert into vc4table values(array['too long']); -- fail
-ERROR: value too long for type character varying(4)
-insert into vc4table values(array['too long']::vc4[]); -- cast truncates
-select * from vc4table;
- f1
-----------
- {"too "}
-(1 row)
-
-drop table vc4table;
-drop type vc4;
--- You can sort of fake arrays-of-arrays by putting a domain in between
-create domain dposinta as posint[];
-create table dposintatable (f1 dposinta[]);
-insert into dposintatable values(array[array[42]]); -- fail
-ERROR: column "f1" is of type dposinta[] but expression is of type integer[]
-LINE 1: insert into dposintatable values(array[array[42]]);
- ^
-HINT: You will need to rewrite or cast the expression.
-insert into dposintatable values(array[array[42]::posint[]]); -- still fail
-ERROR: column "f1" is of type dposinta[] but expression is of type posint[]
-LINE 1: insert into dposintatable values(array[array[42]::posint[]])...
- ^
-HINT: You will need to rewrite or cast the expression.
-insert into dposintatable values(array[array[42]::dposinta]); -- but this works
-select f1, f1[1], (f1[1])[1] from dposintatable;
- f1 | f1 | f1
-----------+------+----
- {"{42}"} | {42} | 42
-(1 row)
-
-select pg_typeof(f1) from dposintatable;
- pg_typeof
-------------
- dposinta[]
-(1 row)
-
-select pg_typeof(f1[1]) from dposintatable;
- pg_typeof
------------
- dposinta
-(1 row)
-
-select pg_typeof(f1[1][1]) from dposintatable;
- pg_typeof
------------
- dposinta
-(1 row)
-
-select pg_typeof((f1[1])[1]) from dposintatable;
- pg_typeof
------------
- posint
-(1 row)
-
-update dposintatable set f1[2] = array[99];
-select f1, f1[1], (f1[2])[1] from dposintatable;
- f1 | f1 | f1
------------------+------+----
- {"{42}","{99}"} | {42} | 99
-(1 row)
-
--- it'd be nice if you could do something like this, but for now you can't:
-update dposintatable set f1[2][1] = array[97];
-ERROR: wrong number of array subscripts
--- maybe someday we can make this syntax work:
-update dposintatable set (f1[2])[1] = array[98];
-ERROR: syntax error at or near "["
-LINE 1: update dposintatable set (f1[2])[1] = array[98];
- ^
-drop table dposintatable;
-drop domain posint cascade;
-NOTICE: drop cascades to type dposinta
--- Test arrays over domains of composite
-create type comptype as (cf1 int, cf2 int);
-create domain dcomptype as comptype check ((value).cf1 > 0);
-create table dcomptable (f1 dcomptype[]);
-insert into dcomptable values (null);
-update dcomptable set f1[1].cf2 = 5;
-table dcomptable;
- f1
-----------
- {"(,5)"}
-(1 row)
-
-update dcomptable set f1[1].cf1 = -1; -- fail
-ERROR: value for domain dcomptype violates check constraint "dcomptype_check"
-update dcomptable set f1[1].cf1 = 1;
-table dcomptable;
- f1
------------
- {"(1,5)"}
-(1 row)
-
--- if there's no constraints, a different code path is taken:
-alter domain dcomptype drop constraint dcomptype_check;
-update dcomptable set f1[1].cf1 = -1; -- now ok
-table dcomptable;
- f1
-------------
- {"(-1,5)"}
-(1 row)
-
-drop table dcomptable;
-drop type comptype cascade;
-NOTICE: drop cascades to type dcomptype
--- Test not-null restrictions
-create domain dnotnull varchar(15) NOT NULL;
-create domain dnull varchar(15);
-create domain dcheck varchar(15) NOT NULL CHECK (VALUE = 'a' OR VALUE = 'c' OR VALUE = 'd');
-create table nulltest
- ( col1 dnotnull
- , col2 dnotnull NULL -- NOT NULL in the domain cannot be overridden
- , col3 dnull NOT NULL
- , col4 dnull
- , col5 dcheck CHECK (col5 IN ('c', 'd'))
- );
-INSERT INTO nulltest DEFAULT VALUES;
-ERROR: domain dnotnull does not allow null values
-INSERT INTO nulltest values ('a', 'b', 'c', 'd', 'c'); -- Good
-insert into nulltest values ('a', 'b', 'c', 'd', NULL);
-ERROR: domain dcheck does not allow null values
-insert into nulltest values ('a', 'b', 'c', 'd', 'a');
-ERROR: new row for relation "nulltest" violates check constraint "nulltest_col5_check"
-DETAIL: Failing row contains (a, b, c, d, a).
-INSERT INTO nulltest values (NULL, 'b', 'c', 'd', 'd');
-ERROR: domain dnotnull does not allow null values
-INSERT INTO nulltest values ('a', NULL, 'c', 'd', 'c');
-ERROR: domain dnotnull does not allow null values
-INSERT INTO nulltest values ('a', 'b', NULL, 'd', 'c');
-ERROR: null value in column "col3" of relation "nulltest" violates not-null constraint
-DETAIL: Failing row contains (a, b, null, d, c).
-INSERT INTO nulltest values ('a', 'b', 'c', NULL, 'd'); -- Good
--- Test copy
-COPY nulltest FROM stdin; --fail
-ERROR: null value in column "col3" of relation "nulltest" violates not-null constraint
-DETAIL: Failing row contains (a, b, null, d, d).
-CONTEXT: COPY nulltest, line 1: "a b \N d d"
-COPY nulltest FROM stdin; --fail
-ERROR: domain dcheck does not allow null values
-CONTEXT: COPY nulltest, line 1, column col5: null input
--- Last row is bad
-COPY nulltest FROM stdin;
-ERROR: new row for relation "nulltest" violates check constraint "nulltest_col5_check"
-DETAIL: Failing row contains (a, b, c, null, a).
-CONTEXT: COPY nulltest, line 3: "a b c \N a"
-select * from nulltest;
- col1 | col2 | col3 | col4 | col5
-------+------+------+------+------
- a | b | c | d | c
- a | b | c | | d
-(2 rows)
-
--- Test out coerced (casted) constraints
-SELECT cast('1' as dnotnull);
- dnotnull
-----------
- 1
-(1 row)
-
-SELECT cast(NULL as dnotnull); -- fail
-ERROR: domain dnotnull does not allow null values
-SELECT cast(cast(NULL as dnull) as dnotnull); -- fail
-ERROR: domain dnotnull does not allow null values
-SELECT cast(col4 as dnotnull) from nulltest; -- fail
-ERROR: domain dnotnull does not allow null values
--- cleanup
-drop table nulltest;
-drop domain dnotnull restrict;
-drop domain dnull restrict;
-drop domain dcheck restrict;
-create domain ddef1 int4 DEFAULT 3;
-create domain ddef2 oid DEFAULT '12';
--- Type mixing, function returns int8
-create domain ddef3 text DEFAULT 5;
-create sequence ddef4_seq;
-create domain ddef4 int4 DEFAULT nextval('ddef4_seq');
-create domain ddef5 numeric(8,2) NOT NULL DEFAULT '12.12';
-create table defaulttest
- ( col1 ddef1
- , col2 ddef2
- , col3 ddef3
- , col4 ddef4 PRIMARY KEY
- , col5 ddef1 NOT NULL DEFAULT NULL
- , col6 ddef2 DEFAULT '88'
- , col7 ddef4 DEFAULT 8000
- , col8 ddef5
- );
-insert into defaulttest(col4) values(0); -- fails, col5 defaults to null
-ERROR: null value in column "col5" of relation "defaulttest" violates not-null constraint
-DETAIL: Failing row contains (3, 12, 5, 0, null, 88, 8000, 12.12).
-alter table defaulttest alter column col5 drop default;
-insert into defaulttest default values; -- succeeds, inserts domain default
--- We used to treat SET DEFAULT NULL as equivalent to DROP DEFAULT; wrong
-alter table defaulttest alter column col5 set default null;
-insert into defaulttest(col4) values(0); -- fails
-ERROR: null value in column "col5" of relation "defaulttest" violates not-null constraint
-DETAIL: Failing row contains (3, 12, 5, 0, null, 88, 8000, 12.12).
-alter table defaulttest alter column col5 drop default;
-insert into defaulttest default values;
-insert into defaulttest default values;
--- Test defaults with copy
-COPY defaulttest(col5) FROM stdin;
-select * from defaulttest;
- col1 | col2 | col3 | col4 | col5 | col6 | col7 | col8
-------+------+------+------+------+------+------+-------
- 3 | 12 | 5 | 1 | 3 | 88 | 8000 | 12.12
- 3 | 12 | 5 | 2 | 3 | 88 | 8000 | 12.12
- 3 | 12 | 5 | 3 | 3 | 88 | 8000 | 12.12
- 3 | 12 | 5 | 4 | 42 | 88 | 8000 | 12.12
-(4 rows)
-
-drop table defaulttest cascade;
--- Test ALTER DOMAIN .. NOT NULL
-create domain dnotnulltest integer;
-create table domnotnull
-( col1 dnotnulltest
-, col2 dnotnulltest
-);
-insert into domnotnull default values;
-alter domain dnotnulltest set not null; -- fails
-ERROR: column "col1" of table "domnotnull" contains null values
-update domnotnull set col1 = 5;
-alter domain dnotnulltest set not null; -- fails
-ERROR: column "col2" of table "domnotnull" contains null values
-update domnotnull set col2 = 6;
-alter domain dnotnulltest set not null;
-update domnotnull set col1 = null; -- fails
-ERROR: domain dnotnulltest does not allow null values
-alter domain dnotnulltest drop not null;
-update domnotnull set col1 = null;
-drop domain dnotnulltest cascade;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to column col2 of table domnotnull
-drop cascades to column col1 of table domnotnull
--- Test ALTER DOMAIN .. DEFAULT ..
-create table domdeftest (col1 ddef1);
-insert into domdeftest default values;
-select * from domdeftest;
- col1
-------
- 3
-(1 row)
-
-alter domain ddef1 set default '42';
-insert into domdeftest default values;
-select * from domdeftest;
- col1
-------
- 3
- 42
-(2 rows)
-
-alter domain ddef1 drop default;
-insert into domdeftest default values;
-select * from domdeftest;
- col1
-------
- 3
- 42
-
-(3 rows)
-
-drop table domdeftest;
--- Test ALTER DOMAIN .. CONSTRAINT ..
-create domain con as integer;
-create table domcontest (col1 con);
-insert into domcontest values (1);
-insert into domcontest values (2);
-alter domain con add constraint t check (VALUE < 1); -- fails
-ERROR: column "col1" of table "domcontest" contains values that violate the new constraint
-alter domain con add constraint t check (VALUE < 34);
-alter domain con add check (VALUE > 0);
-\dD con
- List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check
---------+------+---------+-----------+----------+---------+--------------------------------------
- public | con | integer | | | | CHECK (VALUE > 0) CHECK (VALUE < 34)
-(1 row)
-
-insert into domcontest values (-5); -- fails
-ERROR: value for domain con violates check constraint "con_check"
-insert into domcontest values (42); -- fails
-ERROR: value for domain con violates check constraint "t"
-insert into domcontest values (5);
-alter domain con drop constraint t;
-insert into domcontest values (-5); --fails
-ERROR: value for domain con violates check constraint "con_check"
-insert into domcontest values (42);
-alter domain con drop constraint nonexistent;
-ERROR: constraint "nonexistent" of domain "con" does not exist
-alter domain con drop constraint if exists nonexistent;
-NOTICE: constraint "nonexistent" of domain "con" does not exist, skipping
--- not-null constraints
-create domain connotnull integer;
-create table domconnotnulltest
-( col1 connotnull
-, col2 connotnull
-);
-insert into domconnotnulltest default values;
-alter domain connotnull add not null; -- fails
-ERROR: column "col1" of table "domconnotnulltest" contains null values
-update domconnotnulltest set col1 = 5;
-alter domain connotnull add not null; -- fails
-ERROR: column "col2" of table "domconnotnulltest" contains null values
-update domconnotnulltest set col2 = 6;
-alter domain connotnull add constraint constr1 not null;
-select count(*) from pg_constraint where contypid = 'connotnull'::regtype and contype = 'n';
- count
--------
- 1
-(1 row)
-
-alter domain connotnull add constraint constr1bis not null; -- redundant
-select count(*) from pg_constraint where contypid = 'connotnull'::regtype and contype = 'n';
- count
--------
- 1
-(1 row)
-
-\dD connotnull
- List of domains
- Schema | Name | Type | Collation | Nullable | Default | Check
---------+------------+---------+-----------+----------+---------+-------
- public | connotnull | integer | | not null | |
-(1 row)
-
-update domconnotnulltest set col1 = null; -- fails
-ERROR: domain connotnull does not allow null values
-alter domain connotnull drop constraint constr1;
-update domconnotnulltest set col1 = null;
-drop domain connotnull cascade;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to column col2 of table domconnotnulltest
-drop cascades to column col1 of table domconnotnulltest
-drop table domconnotnulltest;
--- Test ALTER DOMAIN .. CONSTRAINT .. NOT VALID
-create domain things AS INT;
-CREATE TABLE thethings (stuff things);
-INSERT INTO thethings (stuff) VALUES (55);
-ALTER DOMAIN things ADD CONSTRAINT meow CHECK (VALUE < 11);
-ERROR: column "stuff" of table "thethings" contains values that violate the new constraint
-ALTER DOMAIN things ADD CONSTRAINT meow CHECK (VALUE < 11) NOT VALID;
-ALTER DOMAIN things VALIDATE CONSTRAINT meow;
-ERROR: column "stuff" of table "thethings" contains values that violate the new constraint
-UPDATE thethings SET stuff = 10;
-ALTER DOMAIN things VALIDATE CONSTRAINT meow;
--- Confirm ALTER DOMAIN with RULES.
-create table domtab (col1 integer);
-create domain dom as integer;
-create view domview as select cast(col1 as dom) from domtab;
-insert into domtab (col1) values (null);
-insert into domtab (col1) values (5);
-select * from domview;
- col1
-------
-
- 5
-(2 rows)
-
-alter domain dom set not null;
-select * from domview; -- fail
-ERROR: domain dom does not allow null values
-alter domain dom drop not null;
-select * from domview;
- col1
-------
-
- 5
-(2 rows)
-
-alter domain dom add constraint domchkgt6 check(value > 6);
-select * from domview; --fail
-ERROR: value for domain dom violates check constraint "domchkgt6"
-alter domain dom drop constraint domchkgt6 restrict;
-select * from domview;
- col1
-------
-
- 5
-(2 rows)
-
--- cleanup
-drop domain ddef1 restrict;
-drop domain ddef2 restrict;
-drop domain ddef3 restrict;
-drop domain ddef4 restrict;
-drop domain ddef5 restrict;
-drop sequence ddef4_seq;
--- Test domains over domains
-create domain vchar4 varchar(4);
-create domain dinter vchar4 check (substring(VALUE, 1, 1) = 'x');
-create domain dtop dinter check (substring(VALUE, 2, 1) = '1');
-select 'x123'::dtop;
- dtop
-------
- x123
-(1 row)
-
-select 'x1234'::dtop; -- explicit coercion should truncate
- dtop
-------
- x123
-(1 row)
-
-select 'y1234'::dtop; -- fail
-ERROR: value for domain dtop violates check constraint "dinter_check"
-select 'y123'::dtop; -- fail
-ERROR: value for domain dtop violates check constraint "dinter_check"
-select 'yz23'::dtop; -- fail
-ERROR: value for domain dtop violates check constraint "dinter_check"
-select 'xz23'::dtop; -- fail
-ERROR: value for domain dtop violates check constraint "dtop_check"
-create temp table dtest(f1 dtop);
-insert into dtest values('x123');
-insert into dtest values('x1234'); -- fail, implicit coercion
-ERROR: value too long for type character varying(4)
-insert into dtest values('y1234'); -- fail, implicit coercion
-ERROR: value too long for type character varying(4)
-insert into dtest values('y123'); -- fail
-ERROR: value for domain dtop violates check constraint "dinter_check"
-insert into dtest values('yz23'); -- fail
-ERROR: value for domain dtop violates check constraint "dinter_check"
-insert into dtest values('xz23'); -- fail
-ERROR: value for domain dtop violates check constraint "dtop_check"
-drop table dtest;
-drop domain vchar4 cascade;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to type dinter
-drop cascades to type dtop
--- Make sure that constraints of newly-added domain columns are
--- enforced correctly, even if there's no default value for the new
--- column. Per bug #1433
-create domain str_domain as text not null;
-create table domain_test (a int, b int);
-insert into domain_test values (1, 2);
-insert into domain_test values (1, 2);
--- should fail
-alter table domain_test add column c str_domain;
-ERROR: domain str_domain does not allow null values
-create domain str_domain2 as text check (value <> 'foo') default 'foo';
--- should fail
-alter table domain_test add column d str_domain2;
-ERROR: value for domain str_domain2 violates check constraint "str_domain2_check"
--- Check that domain constraints on prepared statement parameters of
--- unknown type are enforced correctly.
-create domain pos_int as int4 check (value > 0) not null;
-prepare s1 as select $1::pos_int = 10 as "is_ten";
-execute s1(10);
- is_ten
---------
- t
-(1 row)
-
-execute s1(0); -- should fail
-ERROR: value for domain pos_int violates check constraint "pos_int_check"
-execute s1(NULL); -- should fail
-ERROR: domain pos_int does not allow null values
--- Check that domain constraints on plpgsql function parameters, results,
--- and local variables are enforced correctly.
-create function doubledecrement(p1 pos_int) returns pos_int as $$
-declare v pos_int;
-begin
- return p1;
-end$$ language plpgsql;
-select doubledecrement(3); -- fail because of implicit null assignment
-ERROR: domain pos_int does not allow null values
-CONTEXT: PL/pgSQL function doubledecrement(pos_int) line 2 during statement block local variable initialization
-create or replace function doubledecrement(p1 pos_int) returns pos_int as $$
-declare v pos_int := 0;
-begin
- return p1;
-end$$ language plpgsql;
-select doubledecrement(3); -- fail at initialization assignment
-ERROR: value for domain pos_int violates check constraint "pos_int_check"
-CONTEXT: PL/pgSQL function doubledecrement(pos_int) line 2 during statement block local variable initialization
-create or replace function doubledecrement(p1 pos_int) returns pos_int as $$
-declare v pos_int := 1;
-begin
- v := p1 - 1;
- return v - 1;
-end$$ language plpgsql;
-select doubledecrement(null); -- fail before call
-ERROR: domain pos_int does not allow null values
-select doubledecrement(0); -- fail before call
-ERROR: value for domain pos_int violates check constraint "pos_int_check"
-select doubledecrement(1); -- fail at assignment to v
-ERROR: value for domain pos_int violates check constraint "pos_int_check"
-CONTEXT: PL/pgSQL function doubledecrement(pos_int) line 4 at assignment
-select doubledecrement(2); -- fail at return
-ERROR: value for domain pos_int violates check constraint "pos_int_check"
-CONTEXT: PL/pgSQL function doubledecrement(pos_int) while casting return value to function's return type
-select doubledecrement(3); -- good
- doubledecrement
------------------
- 1
-(1 row)
-
--- Check that ALTER DOMAIN tests columns of derived types
-create domain posint as int4;
--- Currently, this doesn't work for composite types, but verify it complains
-create type ddtest1 as (f1 posint);
-create table ddtest2(f1 ddtest1);
-insert into ddtest2 values(row(-1));
-alter domain posint add constraint c1 check(value >= 0);
-ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
--- Likewise for domains within arrays of composite
-create table ddtest2(f1 ddtest1[]);
-insert into ddtest2 values('{(-1)}');
-alter domain posint add constraint c1 check(value >= 0);
-ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
--- Likewise for domains within domains over composite
-create domain ddtest1d as ddtest1;
-create table ddtest2(f1 ddtest1d);
-insert into ddtest2 values('(-1)');
-alter domain posint add constraint c1 check(value >= 0);
-ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
-drop domain ddtest1d;
--- Likewise for domains within domains over array of composite
-create domain ddtest1d as ddtest1[];
-create table ddtest2(f1 ddtest1d);
-insert into ddtest2 values('{(-1)}');
-alter domain posint add constraint c1 check(value >= 0);
-ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
-drop domain ddtest1d;
--- Doesn't work for ranges, either
-create type rposint as range (subtype = posint);
-create table ddtest2(f1 rposint);
-insert into ddtest2 values('(-1,3]');
-alter domain posint add constraint c1 check(value >= 0);
-ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it
-drop table ddtest2;
-drop type rposint;
-alter domain posint add constraint c1 check(value >= 0);
-create domain posint2 as posint check (value % 2 = 0);
-create table ddtest2(f1 posint2);
-insert into ddtest2 values(11); -- fail
-ERROR: value for domain posint2 violates check constraint "posint2_check"
-insert into ddtest2 values(-2); -- fail
-ERROR: value for domain posint2 violates check constraint "c1"
-insert into ddtest2 values(2);
-alter domain posint add constraint c2 check(value >= 10); -- fail
-ERROR: column "f1" of table "ddtest2" contains values that violate the new constraint
-alter domain posint add constraint c2 check(value > 0); -- OK
-drop table ddtest2;
-drop type ddtest1;
-drop domain posint cascade;
-NOTICE: drop cascades to type posint2
---
--- Check enforcement of domain-related typmod in plpgsql (bug #5717)
---
-create or replace function array_elem_check(numeric) returns numeric as $$
-declare
- x numeric(4,2)[1];
-begin
- x[1] := $1;
- return x[1];
-end$$ language plpgsql;
-select array_elem_check(121.00);
-ERROR: numeric field overflow
-DETAIL: A field with precision 4, scale 2 must round to an absolute value less than 10^2.
-CONTEXT: PL/pgSQL function array_elem_check(numeric) line 5 at assignment
-select array_elem_check(1.23456);
- array_elem_check
-------------------
- 1.23
-(1 row)
-
-create domain mynums as numeric(4,2)[1];
-create or replace function array_elem_check(numeric) returns numeric as $$
-declare
- x mynums;
-begin
- x[1] := $1;
- return x[1];
-end$$ language plpgsql;
-select array_elem_check(121.00);
-ERROR: numeric field overflow
-DETAIL: A field with precision 4, scale 2 must round to an absolute value less than 10^2.
-CONTEXT: PL/pgSQL function array_elem_check(numeric) line 5 at assignment
-select array_elem_check(1.23456);
- array_elem_check
-------------------
- 1.23
-(1 row)
-
-create domain mynums2 as mynums;
-create or replace function array_elem_check(numeric) returns numeric as $$
-declare
- x mynums2;
-begin
- x[1] := $1;
- return x[1];
-end$$ language plpgsql;
-select array_elem_check(121.00);
-ERROR: numeric field overflow
-DETAIL: A field with precision 4, scale 2 must round to an absolute value less than 10^2.
-CONTEXT: PL/pgSQL function array_elem_check(numeric) line 5 at assignment
-select array_elem_check(1.23456);
- array_elem_check
-------------------
- 1.23
-(1 row)
-
-drop function array_elem_check(numeric);
---
--- Check enforcement of array-level domain constraints
---
-create domain orderedpair as int[2] check (value[1] < value[2]);
-select array[1,2]::orderedpair;
- array
--------
- {1,2}
-(1 row)
-
-select array[2,1]::orderedpair; -- fail
-ERROR: value for domain orderedpair violates check constraint "orderedpair_check"
-create temp table op (f1 orderedpair);
-insert into op values (array[1,2]);
-insert into op values (array[2,1]); -- fail
-ERROR: value for domain orderedpair violates check constraint "orderedpair_check"
-update op set f1[2] = 3;
-update op set f1[2] = 0; -- fail
-ERROR: value for domain orderedpair violates check constraint "orderedpair_check"
-select * from op;
- f1
--------
- {1,3}
-(1 row)
-
-create or replace function array_elem_check(int) returns int as $$
-declare
- x orderedpair := '{1,2}';
-begin
- x[2] := $1;
- return x[2];
-end$$ language plpgsql;
-select array_elem_check(3);
- array_elem_check
-------------------
- 3
-(1 row)
-
-select array_elem_check(-1);
-ERROR: value for domain orderedpair violates check constraint "orderedpair_check"
-CONTEXT: PL/pgSQL function array_elem_check(integer) line 5 at assignment
-drop function array_elem_check(int);
---
--- Check enforcement of changing constraints in plpgsql
---
-create domain di as int;
-create function dom_check(int) returns di as $$
-declare d di;
-begin
- d := $1::di;
- return d;
-end
-$$ language plpgsql immutable;
-select dom_check(0);
- dom_check
------------
- 0
-(1 row)
-
-alter domain di add constraint pos check (value > 0);
-select dom_check(0); -- fail
-ERROR: value for domain di violates check constraint "pos"
-CONTEXT: PL/pgSQL function dom_check(integer) line 4 at assignment
-alter domain di drop constraint pos;
-select dom_check(0);
- dom_check
------------
- 0
-(1 row)
-
--- implicit cast during assignment is a separate code path, test that too
-create or replace function dom_check(int) returns di as $$
-declare d di;
-begin
- d := $1;
- return d;
-end
-$$ language plpgsql immutable;
-select dom_check(0);
- dom_check
------------
- 0
-(1 row)
-
-alter domain di add constraint pos check (value > 0);
-select dom_check(0); -- fail
-ERROR: value for domain di violates check constraint "pos"
-CONTEXT: PL/pgSQL function dom_check(integer) line 4 at assignment
-alter domain di drop constraint pos;
-select dom_check(0);
- dom_check
------------
- 0
-(1 row)
-
-drop function dom_check(int);
-drop domain di;
---
--- Check use of a (non-inline-able) SQL function in a domain constraint;
--- this has caused issues in the past
---
-create function sql_is_distinct_from(anyelement, anyelement)
-returns boolean language sql
-as 'select $1 is distinct from $2 limit 1';
-create domain inotnull int
- check (sql_is_distinct_from(value, null));
-select 1::inotnull;
- inotnull
-----------
- 1
-(1 row)
-
-select null::inotnull;
-ERROR: value for domain inotnull violates check constraint "inotnull_check"
-create table dom_table (x inotnull);
-insert into dom_table values ('1');
-insert into dom_table values (1);
-insert into dom_table values (null);
-ERROR: value for domain inotnull violates check constraint "inotnull_check"
-drop table dom_table;
-drop domain inotnull;
-drop function sql_is_distinct_from(anyelement, anyelement);
---
--- Renaming
---
-create domain testdomain1 as int;
-alter domain testdomain1 rename to testdomain2;
-alter type testdomain2 rename to testdomain3; -- alter type also works
-drop domain testdomain3;
---
--- Renaming domain constraints
---
-create domain testdomain1 as int constraint unsigned check (value > 0);
-alter domain testdomain1 rename constraint unsigned to unsigned_foo;
-alter domain testdomain1 drop constraint unsigned_foo;
-drop domain testdomain1;
---
--- Get the base type of a domain
---
-create domain mytext as text;
-create domain mytext_child_1 as mytext;
-select pg_basetype('mytext'::regtype);
- pg_basetype
--------------
- text
-(1 row)
-
-select pg_basetype('mytext_child_1'::regtype);
- pg_basetype
--------------
- text
-(1 row)
-
-select pg_basetype(1); -- expect NULL not error
- pg_basetype
--------------
-
-(1 row)
-
-drop domain mytext cascade;
-NOTICE: drop cascades to type mytext_child_1
---
--- Information schema
---
-SELECT * FROM information_schema.column_domain_usage
- WHERE domain_name IN ('con', 'dom', 'pos_int', 'things')
- ORDER BY domain_name;
- domain_catalog | domain_schema | domain_name | table_catalog | table_schema | table_name | column_name
-----------------+---------------+-------------+---------------+--------------+------------+-------------
- regression | public | con | regression | public | domcontest | col1
- regression | public | dom | regression | public | domview | col1
- regression | public | things | regression | public | thethings | stuff
-(3 rows)
-
-SELECT * FROM information_schema.domain_constraints
- WHERE domain_name IN ('con', 'dom', 'pos_int', 'things')
- ORDER BY constraint_name;
- constraint_catalog | constraint_schema | constraint_name | domain_catalog | domain_schema | domain_name | is_deferrable | initially_deferred
---------------------+-------------------+------------------+----------------+---------------+-------------+---------------+--------------------
- regression | public | con_check | regression | public | con | NO | NO
- regression | public | meow | regression | public | things | NO | NO
- regression | public | pos_int_check | regression | public | pos_int | NO | NO
- regression | public | pos_int_not_null | regression | public | pos_int | NO | NO
-(4 rows)
-
-SELECT * FROM information_schema.domains
- WHERE domain_name IN ('con', 'dom', 'pos_int', 'things')
- ORDER BY domain_name;
- domain_catalog | domain_schema | domain_name | data_type | character_maximum_length | character_octet_length | character_set_catalog | character_set_schema | character_set_name | collation_catalog | collation_schema | collation_name | numeric_precision | numeric_precision_radix | numeric_scale | datetime_precision | interval_type | interval_precision | domain_default | udt_catalog | udt_schema | udt_name | scope_catalog | scope_schema | scope_name | maximum_cardinality | dtd_identifier
-----------------+---------------+-------------+-----------+--------------------------+------------------------+-----------------------+----------------------+--------------------+-------------------+------------------+----------------+-------------------+-------------------------+---------------+--------------------+---------------+--------------------+----------------+-------------+------------+----------+---------------+--------------+------------+---------------------+----------------
- regression | public | con | integer | | | | | | | | | 32 | 2 | 0 | | | | | regression | pg_catalog | int4 | | | | | 1
- regression | public | dom | integer | | | | | | | | | 32 | 2 | 0 | | | | | regression | pg_catalog | int4 | | | | | 1
- regression | public | pos_int | integer | | | | | | | | | 32 | 2 | 0 | | | | | regression | pg_catalog | int4 | | | | | 1
- regression | public | things | integer | | | | | | | | | 32 | 2 | 0 | | | | | regression | pg_catalog | int4 | | | | | 1
-(4 rows)
-
-SELECT * FROM information_schema.check_constraints
- WHERE (constraint_schema, constraint_name)
- IN (SELECT constraint_schema, constraint_name
- FROM information_schema.domain_constraints
- WHERE domain_name IN ('con', 'dom', 'pos_int', 'things'))
- ORDER BY constraint_name;
- constraint_catalog | constraint_schema | constraint_name | check_clause
---------------------+-------------------+------------------+-------------------
- regression | public | con_check | (VALUE > 0)
- regression | public | meow | (VALUE < 11)
- regression | public | pos_int_check | (VALUE > 0)
- regression | public | pos_int_not_null | VALUE IS NOT NULL
-(4 rows)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/rangefuncs.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/rangefuncs.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/rangefuncs.out 2024-11-15 02:50:52.490049049 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/rangefuncs.out 2024-11-15 02:59:18.181116966 +0000
@@ -1,2503 +1,2 @@
-CREATE TABLE rngfunc2(rngfuncid int, f2 int);
-INSERT INTO rngfunc2 VALUES(1, 11);
-INSERT INTO rngfunc2 VALUES(2, 22);
-INSERT INTO rngfunc2 VALUES(1, 111);
-CREATE FUNCTION rngfunct(int) returns setof rngfunc2 as 'SELECT * FROM rngfunc2 WHERE rngfuncid = $1 ORDER BY f2;' LANGUAGE SQL;
--- function with ORDINALITY
-select * from rngfunct(1) with ordinality as z(a,b,ord);
- a | b | ord
----+-----+-----
- 1 | 11 | 1
- 1 | 111 | 2
-(2 rows)
-
-select * from rngfunct(1) with ordinality as z(a,b,ord) where b > 100; -- ordinal 2, not 1
- a | b | ord
----+-----+-----
- 1 | 111 | 2
-(1 row)
-
--- ordinality vs. column names and types
-select a,b,ord from rngfunct(1) with ordinality as z(a,b,ord);
- a | b | ord
----+-----+-----
- 1 | 11 | 1
- 1 | 111 | 2
-(2 rows)
-
-select a,ord from unnest(array['a','b']) with ordinality as z(a,ord);
- a | ord
----+-----
- a | 1
- b | 2
-(2 rows)
-
-select * from unnest(array['a','b']) with ordinality as z(a,ord);
- a | ord
----+-----
- a | 1
- b | 2
-(2 rows)
-
-select a,ord from unnest(array[1.0::float8]) with ordinality as z(a,ord);
- a | ord
----+-----
- 1 | 1
-(1 row)
-
-select * from unnest(array[1.0::float8]) with ordinality as z(a,ord);
- a | ord
----+-----
- 1 | 1
-(1 row)
-
-select row_to_json(s.*) from generate_series(11,14) with ordinality s;
- row_to_json
--------------------------
- {"s":11,"ordinality":1}
- {"s":12,"ordinality":2}
- {"s":13,"ordinality":3}
- {"s":14,"ordinality":4}
-(4 rows)
-
--- ordinality vs. views
-create temporary view vw_ord as select * from (values (1)) v(n) join rngfunct(1) with ordinality as z(a,b,ord) on (n=ord);
-select * from vw_ord;
- n | a | b | ord
----+---+----+-----
- 1 | 1 | 11 | 1
-(1 row)
-
-select definition from pg_views where viewname='vw_ord';
- definition
--------------------------------------------------------------------------
- SELECT v.n, +
- z.a, +
- z.b, +
- z.ord +
- FROM (( VALUES (1)) v(n) +
- JOIN rngfunct(1) WITH ORDINALITY z(a, b, ord) ON ((v.n = z.ord)));
-(1 row)
-
-drop view vw_ord;
--- multiple functions
-select * from rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord);
- a | b | c | d | ord
----+-----+---+----+-----
- 1 | 11 | 2 | 22 | 1
- 1 | 111 | | | 2
-(2 rows)
-
-create temporary view vw_ord as select * from (values (1)) v(n) join rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord) on (n=ord);
-select * from vw_ord;
- n | a | b | c | d | ord
----+---+----+---+----+-----
- 1 | 1 | 11 | 2 | 22 | 1
-(1 row)
-
-select definition from pg_views where viewname='vw_ord';
- definition
--------------------------------------------------------------------------------------------------------
- SELECT v.n, +
- z.a, +
- z.b, +
- z.c, +
- z.d, +
- z.ord +
- FROM (( VALUES (1)) v(n) +
- JOIN ROWS FROM(rngfunct(1), rngfunct(2)) WITH ORDINALITY z(a, b, c, d, ord) ON ((v.n = z.ord)));
-(1 row)
-
-drop view vw_ord;
--- expansions of unnest()
-select * from unnest(array[10,20],array['foo','bar'],array[1.0]);
- unnest | unnest | unnest
---------+--------+--------
- 10 | foo | 1.0
- 20 | bar |
-(2 rows)
-
-select * from unnest(array[10,20],array['foo','bar'],array[1.0]) with ordinality as z(a,b,c,ord);
- a | b | c | ord
-----+-----+-----+-----
- 10 | foo | 1.0 | 1
- 20 | bar | | 2
-(2 rows)
-
-select * from rows from(unnest(array[10,20],array['foo','bar'],array[1.0])) with ordinality as z(a,b,c,ord);
- a | b | c | ord
-----+-----+-----+-----
- 10 | foo | 1.0 | 1
- 20 | bar | | 2
-(2 rows)
-
-select * from rows from(unnest(array[10,20],array['foo','bar']), generate_series(101,102)) with ordinality as z(a,b,c,ord);
- a | b | c | ord
-----+-----+-----+-----
- 10 | foo | 101 | 1
- 20 | bar | 102 | 2
-(2 rows)
-
-create temporary view vw_ord as select * from unnest(array[10,20],array['foo','bar'],array[1.0]) as z(a,b,c);
-select * from vw_ord;
- a | b | c
-----+-----+-----
- 10 | foo | 1.0
- 20 | bar |
-(2 rows)
-
-select definition from pg_views where viewname='vw_ord';
- definition
-----------------------------------------------------------------------------------------
- SELECT a, +
- b, +
- c +
- FROM UNNEST(ARRAY[10, 20], ARRAY['foo'::text, 'bar'::text], ARRAY[1.0]) z(a, b, c);
-(1 row)
-
-drop view vw_ord;
-create temporary view vw_ord as select * from rows from(unnest(array[10,20],array['foo','bar'],array[1.0])) as z(a,b,c);
-select * from vw_ord;
- a | b | c
-----+-----+-----
- 10 | foo | 1.0
- 20 | bar |
-(2 rows)
-
-select definition from pg_views where viewname='vw_ord';
- definition
-----------------------------------------------------------------------------------------
- SELECT a, +
- b, +
- c +
- FROM UNNEST(ARRAY[10, 20], ARRAY['foo'::text, 'bar'::text], ARRAY[1.0]) z(a, b, c);
-(1 row)
-
-drop view vw_ord;
-create temporary view vw_ord as select * from rows from(unnest(array[10,20],array['foo','bar']), generate_series(1,2)) as z(a,b,c);
-select * from vw_ord;
- a | b | c
-----+-----+---
- 10 | foo | 1
- 20 | bar | 2
-(2 rows)
-
-select definition from pg_views where viewname='vw_ord';
- definition
-----------------------------------------------------------------------------------------------------------------------
- SELECT a, +
- b, +
- c +
- FROM ROWS FROM(unnest(ARRAY[10, 20]), unnest(ARRAY['foo'::text, 'bar'::text]), generate_series(1, 2)) z(a, b, c);
-(1 row)
-
-drop view vw_ord;
--- ordinality and multiple functions vs. rewind and reverse scan
-begin;
-declare rf_cur scroll cursor for select * from rows from(generate_series(1,5),generate_series(1,2)) with ordinality as g(i,j,o);
-fetch all from rf_cur;
- i | j | o
----+---+---
- 1 | 1 | 1
- 2 | 2 | 2
- 3 | | 3
- 4 | | 4
- 5 | | 5
-(5 rows)
-
-fetch backward all from rf_cur;
- i | j | o
----+---+---
- 5 | | 5
- 4 | | 4
- 3 | | 3
- 2 | 2 | 2
- 1 | 1 | 1
-(5 rows)
-
-fetch all from rf_cur;
- i | j | o
----+---+---
- 1 | 1 | 1
- 2 | 2 | 2
- 3 | | 3
- 4 | | 4
- 5 | | 5
-(5 rows)
-
-fetch next from rf_cur;
- i | j | o
----+---+---
-(0 rows)
-
-fetch next from rf_cur;
- i | j | o
----+---+---
-(0 rows)
-
-fetch prior from rf_cur;
- i | j | o
----+---+---
- 5 | | 5
-(1 row)
-
-fetch absolute 1 from rf_cur;
- i | j | o
----+---+---
- 1 | 1 | 1
-(1 row)
-
-fetch next from rf_cur;
- i | j | o
----+---+---
- 2 | 2 | 2
-(1 row)
-
-fetch next from rf_cur;
- i | j | o
----+---+---
- 3 | | 3
-(1 row)
-
-fetch next from rf_cur;
- i | j | o
----+---+---
- 4 | | 4
-(1 row)
-
-fetch prior from rf_cur;
- i | j | o
----+---+---
- 3 | | 3
-(1 row)
-
-fetch prior from rf_cur;
- i | j | o
----+---+---
- 2 | 2 | 2
-(1 row)
-
-fetch prior from rf_cur;
- i | j | o
----+---+---
- 1 | 1 | 1
-(1 row)
-
-commit;
--- function with implicit LATERAL
-select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) z where rngfunc2.f2 = z.f2;
- rngfuncid | f2 | rngfuncid | f2
------------+-----+-----------+-----
- 1 | 11 | 1 | 11
- 2 | 22 | 2 | 22
- 1 | 111 | 1 | 111
-(3 rows)
-
--- function with implicit LATERAL and explicit ORDINALITY
-select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) with ordinality as z(rngfuncid,f2,ord) where rngfunc2.f2 = z.f2;
- rngfuncid | f2 | rngfuncid | f2 | ord
------------+-----+-----------+-----+-----
- 1 | 11 | 1 | 11 | 1
- 2 | 22 | 2 | 22 | 1
- 1 | 111 | 1 | 111 | 2
-(3 rows)
-
--- function in subselect
-select * from rngfunc2 where f2 in (select f2 from rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2;
- rngfuncid | f2
------------+-----
- 1 | 11
- 1 | 111
- 2 | 22
-(3 rows)
-
--- function in subselect
-select * from rngfunc2 where f2 in (select f2 from rngfunct(1) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2;
- rngfuncid | f2
------------+-----
- 1 | 11
- 1 | 111
-(2 rows)
-
--- function in subselect
-select * from rngfunc2 where f2 in (select f2 from rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = 1) ORDER BY 1,2;
- rngfuncid | f2
------------+-----
- 1 | 11
- 1 | 111
-(2 rows)
-
--- nested functions
-select rngfunct.rngfuncid, rngfunct.f2 from rngfunct(sin(pi()/2)::int) ORDER BY 1,2;
- rngfuncid | f2
------------+-----
- 1 | 11
- 1 | 111
-(2 rows)
-
-CREATE TABLE rngfunc (rngfuncid int, rngfuncsubid int, rngfuncname text, primary key(rngfuncid,rngfuncsubid));
-INSERT INTO rngfunc VALUES(1,1,'Joe');
-INSERT INTO rngfunc VALUES(1,2,'Ed');
-INSERT INTO rngfunc VALUES(2,1,'Mary');
--- sql, proretset = f, prorettype = b
-CREATE FUNCTION getrngfunc1(int) RETURNS int AS 'SELECT $1;' LANGUAGE SQL;
-SELECT * FROM getrngfunc1(1) AS t1;
- t1
-----
- 1
-(1 row)
-
-SELECT * FROM getrngfunc1(1) WITH ORDINALITY AS t1(v,o);
- v | o
----+---
- 1 | 1
-(1 row)
-
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc1(1);
-SELECT * FROM vw_getrngfunc;
- getrngfunc1
--------------
- 1
-(1 row)
-
-DROP VIEW vw_getrngfunc;
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc1(1) WITH ORDINALITY as t1(v,o);
-SELECT * FROM vw_getrngfunc;
- v | o
----+---
- 1 | 1
-(1 row)
-
-DROP VIEW vw_getrngfunc;
--- sql, proretset = t, prorettype = b
-CREATE FUNCTION getrngfunc2(int) RETURNS setof int AS 'SELECT rngfuncid FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL;
-SELECT * FROM getrngfunc2(1) AS t1;
- t1
-----
- 1
- 1
-(2 rows)
-
-SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o);
- v | o
----+---
- 1 | 1
- 1 | 2
-(2 rows)
-
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1);
-SELECT * FROM vw_getrngfunc;
- getrngfunc2
--------------
- 1
- 1
-(2 rows)
-
-DROP VIEW vw_getrngfunc;
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o);
-SELECT * FROM vw_getrngfunc;
- v | o
----+---
- 1 | 1
- 1 | 2
-(2 rows)
-
-DROP VIEW vw_getrngfunc;
--- sql, proretset = t, prorettype = b
-CREATE FUNCTION getrngfunc3(int) RETURNS setof text AS 'SELECT rngfuncname FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL;
-SELECT * FROM getrngfunc3(1) AS t1;
- t1
------
- Joe
- Ed
-(2 rows)
-
-SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o);
- v | o
------+---
- Joe | 1
- Ed | 2
-(2 rows)
-
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1);
-SELECT * FROM vw_getrngfunc;
- getrngfunc3
--------------
- Joe
- Ed
-(2 rows)
-
-DROP VIEW vw_getrngfunc;
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o);
-SELECT * FROM vw_getrngfunc;
- v | o
------+---
- Joe | 1
- Ed | 2
-(2 rows)
-
-DROP VIEW vw_getrngfunc;
--- sql, proretset = f, prorettype = c
-CREATE FUNCTION getrngfunc4(int) RETURNS rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL;
-SELECT * FROM getrngfunc4(1) AS t1;
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
-(1 row)
-
-SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o);
- a | b | c | o
----+---+-----+---
- 1 | 1 | Joe | 1
-(1 row)
-
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1);
-SELECT * FROM vw_getrngfunc;
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
-(1 row)
-
-DROP VIEW vw_getrngfunc;
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o);
-SELECT * FROM vw_getrngfunc;
- a | b | c | o
----+---+-----+---
- 1 | 1 | Joe | 1
-(1 row)
-
-DROP VIEW vw_getrngfunc;
--- sql, proretset = t, prorettype = c
-CREATE FUNCTION getrngfunc5(int) RETURNS setof rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL;
-SELECT * FROM getrngfunc5(1) AS t1;
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
- 1 | 2 | Ed
-(2 rows)
-
-SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o);
- a | b | c | o
----+---+-----+---
- 1 | 1 | Joe | 1
- 1 | 2 | Ed | 2
-(2 rows)
-
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1);
-SELECT * FROM vw_getrngfunc;
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
- 1 | 2 | Ed
-(2 rows)
-
-DROP VIEW vw_getrngfunc;
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o);
-SELECT * FROM vw_getrngfunc;
- a | b | c | o
----+---+-----+---
- 1 | 1 | Joe | 1
- 1 | 2 | Ed | 2
-(2 rows)
-
-DROP VIEW vw_getrngfunc;
--- sql, proretset = f, prorettype = record
-CREATE FUNCTION getrngfunc6(int) RETURNS RECORD AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL;
-SELECT * FROM getrngfunc6(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text);
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
-(1 row)
-
-SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY;
- rngfuncid | rngfuncsubid | rngfuncname | ordinality
------------+--------------+-------------+------------
- 1 | 1 | Joe | 1
-(1 row)
-
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc6(1) AS
-(rngfuncid int, rngfuncsubid int, rngfuncname text);
-SELECT * FROM vw_getrngfunc;
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
-(1 row)
-
-DROP VIEW vw_getrngfunc;
-CREATE VIEW vw_getrngfunc AS
- SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) )
- WITH ORDINALITY;
-SELECT * FROM vw_getrngfunc;
- rngfuncid | rngfuncsubid | rngfuncname | ordinality
------------+--------------+-------------+------------
- 1 | 1 | Joe | 1
-(1 row)
-
-DROP VIEW vw_getrngfunc;
--- sql, proretset = t, prorettype = record
-CREATE FUNCTION getrngfunc7(int) RETURNS setof record AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL;
-SELECT * FROM getrngfunc7(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text);
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
- 1 | 2 | Ed
-(2 rows)
-
-SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY;
- rngfuncid | rngfuncsubid | rngfuncname | ordinality
------------+--------------+-------------+------------
- 1 | 1 | Joe | 1
- 1 | 2 | Ed | 2
-(2 rows)
-
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc7(1) AS
-(rngfuncid int, rngfuncsubid int, rngfuncname text);
-SELECT * FROM vw_getrngfunc;
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
- 1 | 2 | Ed
-(2 rows)
-
-DROP VIEW vw_getrngfunc;
-CREATE VIEW vw_getrngfunc AS
- SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) )
- WITH ORDINALITY;
-SELECT * FROM vw_getrngfunc;
- rngfuncid | rngfuncsubid | rngfuncname | ordinality
------------+--------------+-------------+------------
- 1 | 1 | Joe | 1
- 1 | 2 | Ed | 2
-(2 rows)
-
-DROP VIEW vw_getrngfunc;
--- plpgsql, proretset = f, prorettype = b
-CREATE FUNCTION getrngfunc8(int) RETURNS int AS 'DECLARE rngfuncint int; BEGIN SELECT rngfuncid into rngfuncint FROM rngfunc WHERE rngfuncid = $1; RETURN rngfuncint; END;' LANGUAGE plpgsql;
-SELECT * FROM getrngfunc8(1) AS t1;
- t1
-----
- 1
-(1 row)
-
-SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o);
- v | o
----+---
- 1 | 1
-(1 row)
-
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1);
-SELECT * FROM vw_getrngfunc;
- getrngfunc8
--------------
- 1
-(1 row)
-
-DROP VIEW vw_getrngfunc;
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o);
-SELECT * FROM vw_getrngfunc;
- v | o
----+---
- 1 | 1
-(1 row)
-
-DROP VIEW vw_getrngfunc;
--- plpgsql, proretset = f, prorettype = c
-CREATE FUNCTION getrngfunc9(int) RETURNS rngfunc AS 'DECLARE rngfunctup rngfunc%ROWTYPE; BEGIN SELECT * into rngfunctup FROM rngfunc WHERE rngfuncid = $1; RETURN rngfunctup; END;' LANGUAGE plpgsql;
-SELECT * FROM getrngfunc9(1) AS t1;
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
-(1 row)
-
-SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o);
- a | b | c | o
----+---+-----+---
- 1 | 1 | Joe | 1
-(1 row)
-
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1);
-SELECT * FROM vw_getrngfunc;
- rngfuncid | rngfuncsubid | rngfuncname
------------+--------------+-------------
- 1 | 1 | Joe
-(1 row)
-
-DROP VIEW vw_getrngfunc;
-CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o);
-SELECT * FROM vw_getrngfunc;
- a | b | c | o
----+---+-----+---
- 1 | 1 | Joe | 1
-(1 row)
-
-DROP VIEW vw_getrngfunc;
--- mix 'n match kinds, to exercise expandRTE and related logic
-select * from rows from(getrngfunc1(1),getrngfunc2(1),getrngfunc3(1),getrngfunc4(1),getrngfunc5(1),
- getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text),
- getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text),
- getrngfunc8(1),getrngfunc9(1))
- with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u);
- a | b | c | d | e | f | g | h | i | j | k | l | m | o | p | q | r | s | t | u
----+---+-----+---+---+-----+---+---+-----+---+---+-----+---+---+-----+---+---+---+-----+---
- 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | 1 | Joe | 1
- | 1 | Ed | | | | 1 | 2 | Ed | | | | 1 | 2 | Ed | | | | | 2
-(2 rows)
-
-select * from rows from(getrngfunc9(1),getrngfunc8(1),
- getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text),
- getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text),
- getrngfunc5(1),getrngfunc4(1),getrngfunc3(1),getrngfunc2(1),getrngfunc1(1))
- with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u);
- a | b | c | d | e | f | g | h | i | j | k | l | m | o | p | q | r | s | t | u
----+---+-----+---+---+---+-----+---+---+-----+---+---+-----+---+---+-----+-----+---+---+---
- 1 | 1 | Joe | 1 | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | Joe | 1 | 1 | 1
- | | | | 1 | 2 | Ed | | | | 1 | 2 | Ed | | | | Ed | 1 | | 2
-(2 rows)
-
-create temporary view vw_rngfunc as
- select * from rows from(getrngfunc9(1),
- getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text),
- getrngfunc1(1))
- with ordinality as t1(a,b,c,d,e,f,g,n);
-select * from vw_rngfunc;
- a | b | c | d | e | f | g | n
----+---+-----+---+---+-----+---+---
- 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1
- | | | 1 | 2 | Ed | | 2
-(2 rows)
-
-select pg_get_viewdef('vw_rngfunc');
- pg_get_viewdef
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- SELECT a, +
- b, +
- c, +
- d, +
- e, +
- f, +
- g, +
- n +
- FROM ROWS FROM(getrngfunc9(1), getrngfunc7(1) AS (rngfuncid integer, rngfuncsubid integer, rngfuncname text), getrngfunc1(1)) WITH ORDINALITY t1(a, b, c, d, e, f, g, n);
-(1 row)
-
-drop view vw_rngfunc;
-DROP FUNCTION getrngfunc1(int);
-DROP FUNCTION getrngfunc2(int);
-DROP FUNCTION getrngfunc3(int);
-DROP FUNCTION getrngfunc4(int);
-DROP FUNCTION getrngfunc5(int);
-DROP FUNCTION getrngfunc6(int);
-DROP FUNCTION getrngfunc7(int);
-DROP FUNCTION getrngfunc8(int);
-DROP FUNCTION getrngfunc9(int);
-DROP FUNCTION rngfunct(int);
-DROP TABLE rngfunc2;
-DROP TABLE rngfunc;
--- Rescan tests --
-CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq1;
-CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq2;
-CREATE TYPE rngfunc_rescan_t AS (i integer, s bigint);
-CREATE FUNCTION rngfunc_sql(int,int) RETURNS setof rngfunc_rescan_t AS 'SELECT i, nextval(''rngfunc_rescan_seq1'') FROM generate_series($1,$2) i;' LANGUAGE SQL;
--- plpgsql functions use materialize mode
-CREATE FUNCTION rngfunc_mat(int,int) RETURNS setof rngfunc_rescan_t AS 'begin for i in $1..$2 loop return next (i, nextval(''rngfunc_rescan_seq2'')); end loop; end;' LANGUAGE plpgsql;
---invokes ExecReScanFunctionScan - all these cases should materialize the function only once
--- LEFT JOIN on a condition that the planner can't prove to be true is used to ensure the function
--- is on the inner path of a nestloop join
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) ON (r+i)<100;
- r | i | s
----+----+---
- 1 | 11 | 1
- 1 | 12 | 2
- 1 | 13 | 3
- 2 | 11 | 1
- 2 | 12 | 2
- 2 | 13 | 3
- 3 | 11 | 1
- 3 | 12 | 2
- 3 | 13 | 3
-(9 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100;
- r | i | s | o
----+----+---+---
- 1 | 11 | 1 | 1
- 1 | 12 | 2 | 2
- 1 | 13 | 3 | 3
- 2 | 11 | 1 | 1
- 2 | 12 | 2 | 2
- 2 | 13 | 3 | 3
- 3 | 11 | 1 | 1
- 3 | 12 | 2 | 2
- 3 | 13 | 3 | 3
-(9 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) ON (r+i)<100;
- r | i | s
----+----+---
- 1 | 11 | 1
- 1 | 12 | 2
- 1 | 13 | 3
- 2 | 11 | 1
- 2 | 12 | 2
- 2 | 13 | 3
- 3 | 11 | 1
- 3 | 12 | 2
- 3 | 13 | 3
-(9 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100;
- r | i | s | o
----+----+---+---
- 1 | 11 | 1 | 1
- 1 | 12 | 2 | 2
- 1 | 13 | 3 | 3
- 2 | 11 | 1 | 1
- 2 | 12 | 2 | 2
- 2 | 13 | 3 | 3
- 3 | 11 | 1 | 1
- 3 | 12 | 2 | 2
- 3 | 13 | 3 | 3
-(9 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN ROWS FROM( rngfunc_sql(11,13), rngfunc_mat(11,13) ) WITH ORDINALITY AS f(i1,s1,i2,s2,o) ON (r+i1+i2)<100;
- r | i1 | s1 | i2 | s2 | o
----+----+----+----+----+---
- 1 | 11 | 1 | 11 | 1 | 1
- 1 | 12 | 2 | 12 | 2 | 2
- 1 | 13 | 3 | 13 | 3 | 3
- 2 | 11 | 1 | 11 | 1 | 1
- 2 | 12 | 2 | 12 | 2 | 2
- 2 | 13 | 3 | 13 | 3 | 3
- 3 | 11 | 1 | 11 | 1 | 1
- 3 | 12 | 2 | 12 | 2 | 2
- 3 | 13 | 3 | 13 | 3 | 3
-(9 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN generate_series(11,13) f(i) ON (r+i)<100;
- r | i
----+----
- 1 | 11
- 1 | 12
- 1 | 13
- 2 | 11
- 2 | 12
- 2 | 13
- 3 | 11
- 3 | 12
- 3 | 13
-(9 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN generate_series(11,13) WITH ORDINALITY AS f(i,o) ON (r+i)<100;
- r | i | o
----+----+---
- 1 | 11 | 1
- 1 | 12 | 2
- 1 | 13 | 3
- 2 | 11 | 1
- 2 | 12 | 2
- 2 | 13 | 3
- 3 | 11 | 1
- 3 | 12 | 2
- 3 | 13 | 3
-(9 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN unnest(array[10,20,30]) f(i) ON (r+i)<100;
- r | i
----+----
- 1 | 10
- 1 | 20
- 1 | 30
- 2 | 10
- 2 | 20
- 2 | 30
- 3 | 10
- 3 | 20
- 3 | 30
-(9 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN unnest(array[10,20,30]) WITH ORDINALITY AS f(i,o) ON (r+i)<100;
- r | i | o
----+----+---
- 1 | 10 | 1
- 1 | 20 | 2
- 1 | 30 | 3
- 2 | 10 | 1
- 2 | 20 | 2
- 2 | 30 | 3
- 3 | 10 | 1
- 3 | 20 | 2
- 3 | 30 | 3
-(9 rows)
-
---invokes ExecReScanFunctionScan with chgParam != NULL (using implied LATERAL)
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13);
- r | i | s
----+----+---
- 1 | 11 | 1
- 1 | 12 | 2
- 1 | 13 | 3
- 2 | 12 | 4
- 2 | 13 | 5
- 3 | 13 | 6
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13) WITH ORDINALITY AS f(i,s,o);
- r | i | s | o
----+----+---+---
- 1 | 11 | 1 | 1
- 1 | 12 | 2 | 2
- 1 | 13 | 3 | 3
- 2 | 12 | 4 | 1
- 2 | 13 | 5 | 2
- 3 | 13 | 6 | 1
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r);
- r | i | s
----+----+---
- 1 | 11 | 1
- 2 | 11 | 2
- 2 | 12 | 3
- 3 | 11 | 4
- 3 | 12 | 5
- 3 | 13 | 6
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r) WITH ORDINALITY AS f(i,s,o);
- r | i | s | o
----+----+---+---
- 1 | 11 | 1 | 1
- 2 | 11 | 2 | 1
- 2 | 12 | 3 | 2
- 3 | 11 | 4 | 1
- 3 | 12 | 5 | 2
- 3 | 13 | 6 | 3
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2);
- r1 | r2 | i | s
-----+----+----+----
- 11 | 12 | 11 | 1
- 11 | 12 | 12 | 2
- 13 | 15 | 13 | 3
- 13 | 15 | 14 | 4
- 13 | 15 | 15 | 5
- 16 | 20 | 16 | 6
- 16 | 20 | 17 | 7
- 16 | 20 | 18 | 8
- 16 | 20 | 19 | 9
- 16 | 20 | 20 | 10
-(10 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2) WITH ORDINALITY AS f(i,s,o);
- r1 | r2 | i | s | o
-----+----+----+----+---
- 11 | 12 | 11 | 1 | 1
- 11 | 12 | 12 | 2 | 2
- 13 | 15 | 13 | 3 | 1
- 13 | 15 | 14 | 4 | 2
- 13 | 15 | 15 | 5 | 3
- 16 | 20 | 16 | 6 | 1
- 16 | 20 | 17 | 7 | 2
- 16 | 20 | 18 | 8 | 3
- 16 | 20 | 19 | 9 | 4
- 16 | 20 | 20 | 10 | 5
-(10 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13);
- r | i | s
----+----+---
- 1 | 11 | 1
- 1 | 12 | 2
- 1 | 13 | 3
- 2 | 12 | 4
- 2 | 13 | 5
- 3 | 13 | 6
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13) WITH ORDINALITY AS f(i,s,o);
- r | i | s | o
----+----+---+---
- 1 | 11 | 1 | 1
- 1 | 12 | 2 | 2
- 1 | 13 | 3 | 3
- 2 | 12 | 4 | 1
- 2 | 13 | 5 | 2
- 3 | 13 | 6 | 1
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(11,10+r);
- r | i | s
----+----+---
- 1 | 11 | 1
- 2 | 11 | 2
- 2 | 12 | 3
- 3 | 11 | 4
- 3 | 12 | 5
- 3 | 13 | 6
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(11,10+r) WITH ORDINALITY AS f(i,s,o);
- r | i | s | o
----+----+---+---
- 1 | 11 | 1 | 1
- 2 | 11 | 2 | 1
- 2 | 12 | 3 | 2
- 3 | 11 | 4 | 1
- 3 | 12 | 5 | 2
- 3 | 13 | 6 | 3
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2);
- r1 | r2 | i | s
-----+----+----+----
- 11 | 12 | 11 | 1
- 11 | 12 | 12 | 2
- 13 | 15 | 13 | 3
- 13 | 15 | 14 | 4
- 13 | 15 | 15 | 5
- 16 | 20 | 16 | 6
- 16 | 20 | 17 | 7
- 16 | 20 | 18 | 8
- 16 | 20 | 19 | 9
- 16 | 20 | 20 | 10
-(10 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2) WITH ORDINALITY AS f(i,s,o);
- r1 | r2 | i | s | o
-----+----+----+----+---
- 11 | 12 | 11 | 1 | 1
- 11 | 12 | 12 | 2 | 2
- 13 | 15 | 13 | 3 | 1
- 13 | 15 | 14 | 4 | 2
- 13 | 15 | 15 | 5 | 3
- 16 | 20 | 16 | 6 | 1
- 16 | 20 | 17 | 7 | 2
- 16 | 20 | 18 | 8 | 3
- 16 | 20 | 19 | 9 | 4
- 16 | 20 | 20 | 10 | 5
-(10 rows)
-
--- selective rescan of multiple functions:
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(11,11), rngfunc_mat(10+r,13) );
- r | i | s | i | s
----+----+---+----+---
- 1 | 11 | 1 | 11 | 1
- 1 | | | 12 | 2
- 1 | | | 13 | 3
- 2 | 11 | 1 | 12 | 4
- 2 | | | 13 | 5
- 3 | 11 | 1 | 13 | 6
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(11,11) );
- r | i | s | i | s
----+----+---+----+---
- 1 | 11 | 1 | 11 | 1
- 1 | 12 | 2 | |
- 1 | 13 | 3 | |
- 2 | 12 | 4 | 11 | 1
- 2 | 13 | 5 | |
- 3 | 13 | 6 | 11 | 1
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(10+r,13) );
- r | i | s | i | s
----+----+---+----+---
- 1 | 11 | 1 | 11 | 1
- 1 | 12 | 2 | 12 | 2
- 1 | 13 | 3 | 13 | 3
- 2 | 12 | 4 | 12 | 4
- 2 | 13 | 5 | 13 | 5
- 3 | 13 | 6 | 13 | 6
-(6 rows)
-
-SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false);
- setval | setval
---------+--------
- 1 | 1
-(1 row)
-
-SELECT * FROM generate_series(1,2) r1, generate_series(r1,3) r2, ROWS FROM( rngfunc_sql(10+r1,13), rngfunc_mat(10+r2,13) );
- r1 | r2 | i | s | i | s
-----+----+----+----+----+---
- 1 | 1 | 11 | 1 | 11 | 1
- 1 | 1 | 12 | 2 | 12 | 2
- 1 | 1 | 13 | 3 | 13 | 3
- 1 | 2 | 11 | 4 | 12 | 4
- 1 | 2 | 12 | 5 | 13 | 5
- 1 | 2 | 13 | 6 | |
- 1 | 3 | 11 | 7 | 13 | 6
- 1 | 3 | 12 | 8 | |
- 1 | 3 | 13 | 9 | |
- 2 | 2 | 12 | 10 | 12 | 7
- 2 | 2 | 13 | 11 | 13 | 8
- 2 | 3 | 12 | 12 | 13 | 9
- 2 | 3 | 13 | 13 | |
-(13 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), generate_series(10+r,20-r) f(i);
- r | i
----+----
- 1 | 11
- 1 | 12
- 1 | 13
- 1 | 14
- 1 | 15
- 1 | 16
- 1 | 17
- 1 | 18
- 1 | 19
- 2 | 12
- 2 | 13
- 2 | 14
- 2 | 15
- 2 | 16
- 2 | 17
- 2 | 18
- 3 | 13
- 3 | 14
- 3 | 15
- 3 | 16
- 3 | 17
-(21 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), generate_series(10+r,20-r) WITH ORDINALITY AS f(i,o);
- r | i | o
----+----+---
- 1 | 11 | 1
- 1 | 12 | 2
- 1 | 13 | 3
- 1 | 14 | 4
- 1 | 15 | 5
- 1 | 16 | 6
- 1 | 17 | 7
- 1 | 18 | 8
- 1 | 19 | 9
- 2 | 12 | 1
- 2 | 13 | 2
- 2 | 14 | 3
- 2 | 15 | 4
- 2 | 16 | 5
- 2 | 17 | 6
- 2 | 18 | 7
- 3 | 13 | 1
- 3 | 14 | 2
- 3 | 15 | 3
- 3 | 16 | 4
- 3 | 17 | 5
-(21 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), unnest(array[r*10,r*20,r*30]) f(i);
- r | i
----+----
- 1 | 10
- 1 | 20
- 1 | 30
- 2 | 20
- 2 | 40
- 2 | 60
- 3 | 30
- 3 | 60
- 3 | 90
-(9 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v(r), unnest(array[r*10,r*20,r*30]) WITH ORDINALITY AS f(i,o);
- r | i | o
----+----+---
- 1 | 10 | 1
- 1 | 20 | 2
- 1 | 30 | 3
- 2 | 20 | 1
- 2 | 40 | 2
- 2 | 60 | 3
- 3 | 30 | 1
- 3 | 60 | 2
- 3 | 90 | 3
-(9 rows)
-
--- deep nesting
-SELECT * FROM (VALUES (1),(2),(3)) v1(r1),
- LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2)
- LEFT JOIN generate_series(21,23) f(i) ON ((r2+i)<100) OFFSET 0) s1;
- r1 | r1 | r2 | i
-----+----+----+----
- 1 | 1 | 10 | 21
- 1 | 1 | 10 | 22
- 1 | 1 | 10 | 23
- 1 | 1 | 20 | 21
- 1 | 1 | 20 | 22
- 1 | 1 | 20 | 23
- 1 | 1 | 30 | 21
- 1 | 1 | 30 | 22
- 1 | 1 | 30 | 23
- 2 | 2 | 10 | 21
- 2 | 2 | 10 | 22
- 2 | 2 | 10 | 23
- 2 | 2 | 20 | 21
- 2 | 2 | 20 | 22
- 2 | 2 | 20 | 23
- 2 | 2 | 30 | 21
- 2 | 2 | 30 | 22
- 2 | 2 | 30 | 23
- 3 | 3 | 10 | 21
- 3 | 3 | 10 | 22
- 3 | 3 | 10 | 23
- 3 | 3 | 20 | 21
- 3 | 3 | 20 | 22
- 3 | 3 | 20 | 23
- 3 | 3 | 30 | 21
- 3 | 3 | 30 | 22
- 3 | 3 | 30 | 23
-(27 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v1(r1),
- LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2)
- LEFT JOIN generate_series(20+r1,23) f(i) ON ((r2+i)<100) OFFSET 0) s1;
- r1 | r1 | r2 | i
-----+----+----+----
- 1 | 1 | 10 | 21
- 1 | 1 | 10 | 22
- 1 | 1 | 10 | 23
- 1 | 1 | 20 | 21
- 1 | 1 | 20 | 22
- 1 | 1 | 20 | 23
- 1 | 1 | 30 | 21
- 1 | 1 | 30 | 22
- 1 | 1 | 30 | 23
- 2 | 2 | 10 | 22
- 2 | 2 | 10 | 23
- 2 | 2 | 20 | 22
- 2 | 2 | 20 | 23
- 2 | 2 | 30 | 22
- 2 | 2 | 30 | 23
- 3 | 3 | 10 | 23
- 3 | 3 | 20 | 23
- 3 | 3 | 30 | 23
-(18 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v1(r1),
- LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2)
- LEFT JOIN generate_series(r2,r2+3) f(i) ON ((r2+i)<100) OFFSET 0) s1;
- r1 | r1 | r2 | i
-----+----+----+----
- 1 | 1 | 10 | 10
- 1 | 1 | 10 | 11
- 1 | 1 | 10 | 12
- 1 | 1 | 10 | 13
- 1 | 1 | 20 | 20
- 1 | 1 | 20 | 21
- 1 | 1 | 20 | 22
- 1 | 1 | 20 | 23
- 1 | 1 | 30 | 30
- 1 | 1 | 30 | 31
- 1 | 1 | 30 | 32
- 1 | 1 | 30 | 33
- 2 | 2 | 10 | 10
- 2 | 2 | 10 | 11
- 2 | 2 | 10 | 12
- 2 | 2 | 10 | 13
- 2 | 2 | 20 | 20
- 2 | 2 | 20 | 21
- 2 | 2 | 20 | 22
- 2 | 2 | 20 | 23
- 2 | 2 | 30 | 30
- 2 | 2 | 30 | 31
- 2 | 2 | 30 | 32
- 2 | 2 | 30 | 33
- 3 | 3 | 10 | 10
- 3 | 3 | 10 | 11
- 3 | 3 | 10 | 12
- 3 | 3 | 10 | 13
- 3 | 3 | 20 | 20
- 3 | 3 | 20 | 21
- 3 | 3 | 20 | 22
- 3 | 3 | 20 | 23
- 3 | 3 | 30 | 30
- 3 | 3 | 30 | 31
- 3 | 3 | 30 | 32
- 3 | 3 | 30 | 33
-(36 rows)
-
-SELECT * FROM (VALUES (1),(2),(3)) v1(r1),
- LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2)
- LEFT JOIN generate_series(r1,2+r2/5) f(i) ON ((r2+i)<100) OFFSET 0) s1;
- r1 | r1 | r2 | i
-----+----+----+---
- 1 | 1 | 10 | 1
- 1 | 1 | 10 | 2
- 1 | 1 | 10 | 3
- 1 | 1 | 10 | 4
- 1 | 1 | 20 | 1
- 1 | 1 | 20 | 2
- 1 | 1 | 20 | 3
- 1 | 1 | 20 | 4
- 1 | 1 | 20 | 5
- 1 | 1 | 20 | 6
- 1 | 1 | 30 | 1
- 1 | 1 | 30 | 2
- 1 | 1 | 30 | 3
- 1 | 1 | 30 | 4
- 1 | 1 | 30 | 5
- 1 | 1 | 30 | 6
- 1 | 1 | 30 | 7
- 1 | 1 | 30 | 8
- 2 | 2 | 10 | 2
- 2 | 2 | 10 | 3
- 2 | 2 | 10 | 4
- 2 | 2 | 20 | 2
- 2 | 2 | 20 | 3
- 2 | 2 | 20 | 4
- 2 | 2 | 20 | 5
- 2 | 2 | 20 | 6
- 2 | 2 | 30 | 2
- 2 | 2 | 30 | 3
- 2 | 2 | 30 | 4
- 2 | 2 | 30 | 5
- 2 | 2 | 30 | 6
- 2 | 2 | 30 | 7
- 2 | 2 | 30 | 8
- 3 | 3 | 10 | 3
- 3 | 3 | 10 | 4
- 3 | 3 | 20 | 3
- 3 | 3 | 20 | 4
- 3 | 3 | 20 | 5
- 3 | 3 | 20 | 6
- 3 | 3 | 30 | 3
- 3 | 3 | 30 | 4
- 3 | 3 | 30 | 5
- 3 | 3 | 30 | 6
- 3 | 3 | 30 | 7
- 3 | 3 | 30 | 8
-(45 rows)
-
--- check handling of FULL JOIN with multiple lateral references (bug #15741)
-SELECT *
-FROM (VALUES (1),(2)) v1(r1)
- LEFT JOIN LATERAL (
- SELECT *
- FROM generate_series(1, v1.r1) AS gs1
- LEFT JOIN LATERAL (
- SELECT *
- FROM generate_series(1, gs1) AS gs2
- LEFT JOIN generate_series(1, gs2) AS gs3 ON TRUE
- ) AS ss1 ON TRUE
- FULL JOIN generate_series(1, v1.r1) AS gs4 ON FALSE
- ) AS ss0 ON TRUE;
- r1 | gs1 | gs2 | gs3 | gs4
-----+-----+-----+-----+-----
- 1 | | | | 1
- 1 | 1 | 1 | 1 |
- 2 | | | | 1
- 2 | | | | 2
- 2 | 1 | 1 | 1 |
- 2 | 2 | 1 | 1 |
- 2 | 2 | 2 | 1 |
- 2 | 2 | 2 | 2 |
-(8 rows)
-
-DROP FUNCTION rngfunc_sql(int,int);
-DROP FUNCTION rngfunc_mat(int,int);
-DROP SEQUENCE rngfunc_rescan_seq1;
-DROP SEQUENCE rngfunc_rescan_seq2;
---
--- Test cases involving OUT parameters
---
-CREATE FUNCTION rngfunc(in f1 int, out f2 int)
-AS 'select $1+1' LANGUAGE sql;
-SELECT rngfunc(42);
- rngfunc
----------
- 43
-(1 row)
-
-SELECT * FROM rngfunc(42);
- f2
-----
- 43
-(1 row)
-
-SELECT * FROM rngfunc(42) AS p(x);
- x
-----
- 43
-(1 row)
-
--- explicit spec of return type is OK
-CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS int
-AS 'select $1+1' LANGUAGE sql;
--- error, wrong result type
-CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS float
-AS 'select $1+1' LANGUAGE sql;
-ERROR: function result type must be integer because of OUT parameters
--- with multiple OUT params you must get a RECORD result
-CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text) RETURNS int
-AS 'select $1+1' LANGUAGE sql;
-ERROR: function result type must be record because of OUT parameters
-CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text)
-RETURNS record
-AS 'select $1+1' LANGUAGE sql;
-ERROR: cannot change return type of existing function
-HINT: Use DROP FUNCTION rngfunc(integer) first.
-CREATE OR REPLACE FUNCTION rngfuncr(in f1 int, out f2 int, out text)
-AS $$select $1-1, $1::text || 'z'$$ LANGUAGE sql;
-SELECT f1, rngfuncr(f1) FROM int4_tbl;
- f1 | rngfuncr
--------------+----------------------------
- 0 | (-1,0z)
- 123456 | (123455,123456z)
- -123456 | (-123457,-123456z)
- 2147483647 | (2147483646,2147483647z)
- -2147483647 | (-2147483648,-2147483647z)
-(5 rows)
-
-SELECT * FROM rngfuncr(42);
- f2 | column2
-----+---------
- 41 | 42z
-(1 row)
-
-SELECT * FROM rngfuncr(42) AS p(a,b);
- a | b
-----+-----
- 41 | 42z
-(1 row)
-
-CREATE OR REPLACE FUNCTION rngfuncb(in f1 int, inout f2 int, out text)
-AS $$select $2-1, $1::text || 'z'$$ LANGUAGE sql;
-SELECT f1, rngfuncb(f1, f1/2) FROM int4_tbl;
- f1 | rngfuncb
--------------+----------------------------
- 0 | (-1,0z)
- 123456 | (61727,123456z)
- -123456 | (-61729,-123456z)
- 2147483647 | (1073741822,2147483647z)
- -2147483647 | (-1073741824,-2147483647z)
-(5 rows)
-
-SELECT * FROM rngfuncb(42, 99);
- f2 | column2
-----+---------
- 98 | 42z
-(1 row)
-
-SELECT * FROM rngfuncb(42, 99) AS p(a,b);
- a | b
-----+-----
- 98 | 42z
-(1 row)
-
--- Can reference function with or without OUT params for DROP, etc
-DROP FUNCTION rngfunc(int);
-DROP FUNCTION rngfuncr(in f2 int, out f1 int, out text);
-DROP FUNCTION rngfuncb(in f1 int, inout f2 int);
---
--- For my next trick, polymorphic OUT parameters
---
-CREATE FUNCTION dup (f1 anyelement, f2 out anyelement, f3 out anyarray)
-AS 'select $1, array[$1,$1]' LANGUAGE sql;
-SELECT dup(22);
- dup
-----------------
- (22,"{22,22}")
-(1 row)
-
-SELECT dup('xyz'); -- fails
-ERROR: could not determine polymorphic type because input has type unknown
-SELECT dup('xyz'::text);
- dup
--------------------
- (xyz,"{xyz,xyz}")
-(1 row)
-
-SELECT * FROM dup('xyz'::text);
- f2 | f3
------+-----------
- xyz | {xyz,xyz}
-(1 row)
-
--- fails, as we are attempting to rename first argument
-CREATE OR REPLACE FUNCTION dup (inout f2 anyelement, out f3 anyarray)
-AS 'select $1, array[$1,$1]' LANGUAGE sql;
-ERROR: cannot change name of input parameter "f1"
-HINT: Use DROP FUNCTION dup(anyelement) first.
-DROP FUNCTION dup(anyelement);
--- equivalent behavior, though different name exposed for input arg
-CREATE OR REPLACE FUNCTION dup (inout f2 anyelement, out f3 anyarray)
-AS 'select $1, array[$1,$1]' LANGUAGE sql;
-SELECT dup(22);
- dup
-----------------
- (22,"{22,22}")
-(1 row)
-
-DROP FUNCTION dup(anyelement);
--- fails, no way to deduce outputs
-CREATE FUNCTION bad (f1 int, out f2 anyelement, out f3 anyarray)
-AS 'select $1, array[$1,$1]' LANGUAGE sql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anyelement requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
-CREATE FUNCTION dup (f1 anycompatible, f2 anycompatiblearray, f3 out anycompatible, f4 out anycompatiblearray)
-AS 'select $1, $2' LANGUAGE sql;
-SELECT dup(22, array[44]);
- dup
------------
- (22,{44})
-(1 row)
-
-SELECT dup(4.5, array[44]);
- dup
-------------
- (4.5,{44})
-(1 row)
-
-SELECT dup(22, array[44::bigint]);
- dup
------------
- (22,{44})
-(1 row)
-
-SELECT *, pg_typeof(f3), pg_typeof(f4) FROM dup(22, array[44::bigint]);
- f3 | f4 | pg_typeof | pg_typeof
-----+------+-----------+-----------
- 22 | {44} | bigint | bigint[]
-(1 row)
-
-DROP FUNCTION dup(f1 anycompatible, f2 anycompatiblearray);
-CREATE FUNCTION dup (f1 anycompatiblerange, f2 out anycompatible, f3 out anycompatiblearray, f4 out anycompatiblerange)
-AS 'select lower($1), array[lower($1), upper($1)], $1' LANGUAGE sql;
-SELECT dup(int4range(4,7));
- dup
----------------------
- (4,"{4,7}","[4,7)")
-(1 row)
-
-SELECT dup(numrange(4,7));
- dup
----------------------
- (4,"{4,7}","[4,7)")
-(1 row)
-
-SELECT dup(textrange('aaa', 'bbb'));
- dup
--------------------------------
- (aaa,"{aaa,bbb}","[aaa,bbb)")
-(1 row)
-
-DROP FUNCTION dup(f1 anycompatiblerange);
--- fails, no way to deduce outputs
-CREATE FUNCTION bad (f1 anyarray, out f2 anycompatible, out f3 anycompatiblearray)
-AS 'select $1, array[$1,$1]' LANGUAGE sql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anycompatible requires at least one input of type anycompatible, anycompatiblearray, anycompatiblenonarray, anycompatiblerange, or anycompatiblemultirange.
---
--- table functions
---
-CREATE OR REPLACE FUNCTION rngfunc()
-RETURNS TABLE(a int)
-AS $$ SELECT a FROM generate_series(1,5) a(a) $$ LANGUAGE sql;
-SELECT * FROM rngfunc();
- a
----
- 1
- 2
- 3
- 4
- 5
-(5 rows)
-
-DROP FUNCTION rngfunc();
-CREATE OR REPLACE FUNCTION rngfunc(int)
-RETURNS TABLE(a int, b int)
-AS $$ SELECT a, b
- FROM generate_series(1,$1) a(a),
- generate_series(1,$1) b(b) $$ LANGUAGE sql;
-SELECT * FROM rngfunc(3);
- a | b
----+---
- 1 | 1
- 1 | 2
- 1 | 3
- 2 | 1
- 2 | 2
- 2 | 3
- 3 | 1
- 3 | 2
- 3 | 3
-(9 rows)
-
-DROP FUNCTION rngfunc(int);
--- case that causes change of typmod knowledge during inlining
-CREATE OR REPLACE FUNCTION rngfunc()
-RETURNS TABLE(a varchar(5))
-AS $$ SELECT 'hello'::varchar(5) $$ LANGUAGE sql STABLE;
-SELECT * FROM rngfunc() GROUP BY 1;
- a
--------
- hello
-(1 row)
-
-DROP FUNCTION rngfunc();
---
--- some tests on SQL functions with RETURNING
---
-create temp table tt(f1 serial, data text);
-create function insert_tt(text) returns int as
-$$ insert into tt(data) values($1) returning f1 $$
-language sql;
-select insert_tt('foo');
- insert_tt
------------
- 1
-(1 row)
-
-select insert_tt('bar');
- insert_tt
------------
- 2
-(1 row)
-
-select * from tt;
- f1 | data
-----+------
- 1 | foo
- 2 | bar
-(2 rows)
-
--- insert will execute to completion even if function needs just 1 row
-create or replace function insert_tt(text) returns int as
-$$ insert into tt(data) values($1),($1||$1) returning f1 $$
-language sql;
-select insert_tt('fool');
- insert_tt
------------
- 3
-(1 row)
-
-select * from tt;
- f1 | data
-----+----------
- 1 | foo
- 2 | bar
- 3 | fool
- 4 | foolfool
-(4 rows)
-
--- setof does what's expected
-create or replace function insert_tt2(text,text) returns setof int as
-$$ insert into tt(data) values($1),($2) returning f1 $$
-language sql;
-select insert_tt2('foolish','barrish');
- insert_tt2
-------------
- 5
- 6
-(2 rows)
-
-select * from insert_tt2('baz','quux');
- insert_tt2
-------------
- 7
- 8
-(2 rows)
-
-select * from tt;
- f1 | data
-----+----------
- 1 | foo
- 2 | bar
- 3 | fool
- 4 | foolfool
- 5 | foolish
- 6 | barrish
- 7 | baz
- 8 | quux
-(8 rows)
-
--- limit doesn't prevent execution to completion
-select insert_tt2('foolish','barrish') limit 1;
- insert_tt2
-------------
- 9
-(1 row)
-
-select * from tt;
- f1 | data
-----+----------
- 1 | foo
- 2 | bar
- 3 | fool
- 4 | foolfool
- 5 | foolish
- 6 | barrish
- 7 | baz
- 8 | quux
- 9 | foolish
- 10 | barrish
-(10 rows)
-
--- triggers will fire, too
-create function noticetrigger() returns trigger as $$
-begin
- raise notice 'noticetrigger % %', new.f1, new.data;
- return null;
-end $$ language plpgsql;
-create trigger tnoticetrigger after insert on tt for each row
-execute procedure noticetrigger();
-select insert_tt2('foolme','barme') limit 1;
-NOTICE: noticetrigger 11 foolme
-NOTICE: noticetrigger 12 barme
- insert_tt2
-------------
- 11
-(1 row)
-
-select * from tt;
- f1 | data
-----+----------
- 1 | foo
- 2 | bar
- 3 | fool
- 4 | foolfool
- 5 | foolish
- 6 | barrish
- 7 | baz
- 8 | quux
- 9 | foolish
- 10 | barrish
- 11 | foolme
- 12 | barme
-(12 rows)
-
--- and rules work
-create temp table tt_log(f1 int, data text);
-create rule insert_tt_rule as on insert to tt do also
- insert into tt_log values(new.*);
-select insert_tt2('foollog','barlog') limit 1;
-NOTICE: noticetrigger 13 foollog
-NOTICE: noticetrigger 14 barlog
- insert_tt2
-------------
- 13
-(1 row)
-
-select * from tt;
- f1 | data
-----+----------
- 1 | foo
- 2 | bar
- 3 | fool
- 4 | foolfool
- 5 | foolish
- 6 | barrish
- 7 | baz
- 8 | quux
- 9 | foolish
- 10 | barrish
- 11 | foolme
- 12 | barme
- 13 | foollog
- 14 | barlog
-(14 rows)
-
--- note that nextval() gets executed a second time in the rule expansion,
--- which is expected.
-select * from tt_log;
- f1 | data
-----+---------
- 15 | foollog
- 16 | barlog
-(2 rows)
-
--- test case for a whole-row-variable bug
-create function rngfunc1(n integer, out a text, out b text)
- returns setof record
- language sql
- as $$ select 'foo ' || i, 'bar ' || i from generate_series(1,$1) i $$;
-set work_mem='64kB';
-select t.a, t, t.a from rngfunc1(10000) t limit 1;
- a | t | a
--------+-------------------+-------
- foo 1 | ("foo 1","bar 1") | foo 1
-(1 row)
-
-reset work_mem;
-select t.a, t, t.a from rngfunc1(10000) t limit 1;
- a | t | a
--------+-------------------+-------
- foo 1 | ("foo 1","bar 1") | foo 1
-(1 row)
-
-drop function rngfunc1(n integer);
--- test use of SQL functions returning record
--- this is supported in some cases where the query doesn't specify
--- the actual record type ...
-create function array_to_set(anyarray) returns setof record as $$
- select i AS "index", $1[i] AS "value" from generate_subscripts($1, 1) i
-$$ language sql strict immutable;
-select array_to_set(array['one', 'two']);
- array_to_set
---------------
- (1,one)
- (2,two)
-(2 rows)
-
-select * from array_to_set(array['one', 'two']) as t(f1 int,f2 text);
- f1 | f2
-----+-----
- 1 | one
- 2 | two
-(2 rows)
-
-select * from array_to_set(array['one', 'two']); -- fail
-ERROR: a column definition list is required for functions returning "record"
-LINE 1: select * from array_to_set(array['one', 'two']);
- ^
--- after-the-fact coercion of the columns is now possible, too
-select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text);
- f1 | f2
-------+-----
- 1.00 | one
- 2.00 | two
-(2 rows)
-
--- and if it doesn't work, you get a compile-time not run-time error
-select * from array_to_set(array['one', 'two']) as t(f1 point,f2 text);
-ERROR: return type mismatch in function declared to return record
-DETAIL: Final statement returns integer instead of point at column 1.
-CONTEXT: SQL function "array_to_set" during startup
--- with "strict", this function can't be inlined in FROM
-explain (verbose, costs off)
- select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text);
- QUERY PLAN
-----------------------------------------------------
- Function Scan on public.array_to_set t
- Output: f1, f2
- Function Call: array_to_set('{one,two}'::text[])
-(3 rows)
-
--- but without, it can be:
-create or replace function array_to_set(anyarray) returns setof record as $$
- select i AS "index", $1[i] AS "value" from generate_subscripts($1, 1) i
-$$ language sql immutable;
-select array_to_set(array['one', 'two']);
- array_to_set
---------------
- (1,one)
- (2,two)
-(2 rows)
-
-select * from array_to_set(array['one', 'two']) as t(f1 int,f2 text);
- f1 | f2
-----+-----
- 1 | one
- 2 | two
-(2 rows)
-
-select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text);
- f1 | f2
-------+-----
- 1.00 | one
- 2.00 | two
-(2 rows)
-
-select * from array_to_set(array['one', 'two']) as t(f1 point,f2 text);
-ERROR: return type mismatch in function declared to return record
-DETAIL: Final statement returns integer instead of point at column 1.
-CONTEXT: SQL function "array_to_set" during inlining
-explain (verbose, costs off)
- select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text);
- QUERY PLAN
---------------------------------------------------------------
- Function Scan on pg_catalog.generate_subscripts i
- Output: i.i, ('{one,two}'::text[])[i.i]
- Function Call: generate_subscripts('{one,two}'::text[], 1)
-(3 rows)
-
-create temp table rngfunc(f1 int8, f2 int8);
-create function testrngfunc() returns record as $$
- insert into rngfunc values (1,2) returning *;
-$$ language sql;
-select testrngfunc();
- testrngfunc
--------------
- (1,2)
-(1 row)
-
-select * from testrngfunc() as t(f1 int8,f2 int8);
- f1 | f2
-----+----
- 1 | 2
-(1 row)
-
-select * from testrngfunc(); -- fail
-ERROR: a column definition list is required for functions returning "record"
-LINE 1: select * from testrngfunc();
- ^
-drop function testrngfunc();
-create function testrngfunc() returns setof record as $$
- insert into rngfunc values (1,2), (3,4) returning *;
-$$ language sql;
-select testrngfunc();
- testrngfunc
--------------
- (1,2)
- (3,4)
-(2 rows)
-
-select * from testrngfunc() as t(f1 int8,f2 int8);
- f1 | f2
-----+----
- 1 | 2
- 3 | 4
-(2 rows)
-
-select * from testrngfunc(); -- fail
-ERROR: a column definition list is required for functions returning "record"
-LINE 1: select * from testrngfunc();
- ^
-drop function testrngfunc();
--- Check that typmod imposed by a composite type is honored
-create type rngfunc_type as (f1 numeric(35,6), f2 numeric(35,2));
-create function testrngfunc() returns rngfunc_type as $$
- select 7.136178319899999964, 7.136178319899999964;
-$$ language sql immutable;
-explain (verbose, costs off)
-select testrngfunc();
- QUERY PLAN
--------------------------------------------
- Result
- Output: '(7.136178,7.14)'::rngfunc_type
-(2 rows)
-
-select testrngfunc();
- testrngfunc
------------------
- (7.136178,7.14)
-(1 row)
-
-explain (verbose, costs off)
-select * from testrngfunc();
- QUERY PLAN
---------------------------------------------------
- Function Scan on testrngfunc
- Output: f1, f2
- Function Call: '(7.136178,7.14)'::rngfunc_type
-(3 rows)
-
-select * from testrngfunc();
- f1 | f2
-----------+------
- 7.136178 | 7.14
-(1 row)
-
-create or replace function testrngfunc() returns rngfunc_type as $$
- select 7.136178319899999964, 7.136178319899999964;
-$$ language sql volatile;
-explain (verbose, costs off)
-select testrngfunc();
- QUERY PLAN
--------------------------
- Result
- Output: testrngfunc()
-(2 rows)
-
-select testrngfunc();
- testrngfunc
------------------
- (7.136178,7.14)
-(1 row)
-
-explain (verbose, costs off)
-select * from testrngfunc();
- QUERY PLAN
--------------------------------------
- Function Scan on public.testrngfunc
- Output: f1, f2
- Function Call: testrngfunc()
-(3 rows)
-
-select * from testrngfunc();
- f1 | f2
-----------+------
- 7.136178 | 7.14
-(1 row)
-
-drop function testrngfunc();
-create function testrngfunc() returns setof rngfunc_type as $$
- select 7.136178319899999964, 7.136178319899999964;
-$$ language sql immutable;
-explain (verbose, costs off)
-select testrngfunc();
- QUERY PLAN
--------------------------
- ProjectSet
- Output: testrngfunc()
- -> Result
-(3 rows)
-
-select testrngfunc();
- testrngfunc
------------------
- (7.136178,7.14)
-(1 row)
-
-explain (verbose, costs off)
-select * from testrngfunc();
- QUERY PLAN
---------------------------------------------------------
- Result
- Output: 7.136178::numeric(35,6), 7.14::numeric(35,2)
-(2 rows)
-
-select * from testrngfunc();
- f1 | f2
-----------+------
- 7.136178 | 7.14
-(1 row)
-
-create or replace function testrngfunc() returns setof rngfunc_type as $$
- select 7.136178319899999964, 7.136178319899999964;
-$$ language sql volatile;
-explain (verbose, costs off)
-select testrngfunc();
- QUERY PLAN
--------------------------
- ProjectSet
- Output: testrngfunc()
- -> Result
-(3 rows)
-
-select testrngfunc();
- testrngfunc
------------------
- (7.136178,7.14)
-(1 row)
-
-explain (verbose, costs off)
-select * from testrngfunc();
- QUERY PLAN
--------------------------------------
- Function Scan on public.testrngfunc
- Output: f1, f2
- Function Call: testrngfunc()
-(3 rows)
-
-select * from testrngfunc();
- f1 | f2
-----------+------
- 7.136178 | 7.14
-(1 row)
-
-create or replace function testrngfunc() returns setof rngfunc_type as $$
- select 1, 2 union select 3, 4 order by 1;
-$$ language sql immutable;
-explain (verbose, costs off)
-select testrngfunc();
- QUERY PLAN
--------------------------
- ProjectSet
- Output: testrngfunc()
- -> Result
-(3 rows)
-
-select testrngfunc();
- testrngfunc
------------------
- (1.000000,2.00)
- (3.000000,4.00)
-(2 rows)
-
-explain (verbose, costs off)
-select * from testrngfunc();
- QUERY PLAN
-----------------------------------------------------------
- Subquery Scan on "*SELECT*"
- Output: "*SELECT*"."?column?", "*SELECT*"."?column?_1"
- -> Unique
- Output: (1), (2)
- -> Sort
- Output: (1), (2)
- Sort Key: (1), (2)
- -> Append
- -> Result
- Output: 1, 2
- -> Result
- Output: 3, 4
-(12 rows)
-
-select * from testrngfunc();
- f1 | f2
-----------+------
- 1.000000 | 2.00
- 3.000000 | 4.00
-(2 rows)
-
--- Check a couple of error cases while we're here
-select * from testrngfunc() as t(f1 int8,f2 int8); -- fail, composite result
-ERROR: a column definition list is redundant for a function returning a named composite type
-LINE 1: select * from testrngfunc() as t(f1 int8,f2 int8);
- ^
-select * from pg_get_keywords() as t(f1 int8,f2 int8); -- fail, OUT params
-ERROR: a column definition list is redundant for a function with OUT parameters
-LINE 1: select * from pg_get_keywords() as t(f1 int8,f2 int8);
- ^
-select * from sin(3) as t(f1 int8,f2 int8); -- fail, scalar result type
-ERROR: a column definition list is only allowed for functions returning "record"
-LINE 1: select * from sin(3) as t(f1 int8,f2 int8);
- ^
-drop type rngfunc_type cascade;
-NOTICE: drop cascades to function testrngfunc()
---
--- Check some cases involving added/dropped columns in a rowtype result
---
-create temp table users (userid text, seq int, email text, todrop bool, moredrop int, enabled bool);
-insert into users values ('id',1,'email',true,11,true);
-insert into users values ('id2',2,'email2',true,12,true);
-alter table users drop column todrop;
-create or replace function get_first_user() returns users as
-$$ SELECT * FROM users ORDER BY userid LIMIT 1; $$
-language sql stable;
-SELECT get_first_user();
- get_first_user
--------------------
- (id,1,email,11,t)
-(1 row)
-
-SELECT * FROM get_first_user();
- userid | seq | email | moredrop | enabled
---------+-----+-------+----------+---------
- id | 1 | email | 11 | t
-(1 row)
-
-create or replace function get_users() returns setof users as
-$$ SELECT * FROM users ORDER BY userid; $$
-language sql stable;
-SELECT get_users();
- get_users
----------------------
- (id,1,email,11,t)
- (id2,2,email2,12,t)
-(2 rows)
-
-SELECT * FROM get_users();
- userid | seq | email | moredrop | enabled
---------+-----+--------+----------+---------
- id | 1 | email | 11 | t
- id2 | 2 | email2 | 12 | t
-(2 rows)
-
-SELECT * FROM get_users() WITH ORDINALITY; -- make sure ordinality copes
- userid | seq | email | moredrop | enabled | ordinality
---------+-----+--------+----------+---------+------------
- id | 1 | email | 11 | t | 1
- id2 | 2 | email2 | 12 | t | 2
-(2 rows)
-
--- multiple functions vs. dropped columns
-SELECT * FROM ROWS FROM(generate_series(10,11), get_users()) WITH ORDINALITY;
- generate_series | userid | seq | email | moredrop | enabled | ordinality
------------------+--------+-----+--------+----------+---------+------------
- 10 | id | 1 | email | 11 | t | 1
- 11 | id2 | 2 | email2 | 12 | t | 2
-(2 rows)
-
-SELECT * FROM ROWS FROM(get_users(), generate_series(10,11)) WITH ORDINALITY;
- userid | seq | email | moredrop | enabled | generate_series | ordinality
---------+-----+--------+----------+---------+-----------------+------------
- id | 1 | email | 11 | t | 10 | 1
- id2 | 2 | email2 | 12 | t | 11 | 2
-(2 rows)
-
--- check that we can cope with post-parsing changes in rowtypes
-create temp view usersview as
-SELECT * FROM ROWS FROM(get_users(), generate_series(10,11)) WITH ORDINALITY;
-select * from usersview;
- userid | seq | email | moredrop | enabled | generate_series | ordinality
---------+-----+--------+----------+---------+-----------------+------------
- id | 1 | email | 11 | t | 10 | 1
- id2 | 2 | email2 | 12 | t | 11 | 2
-(2 rows)
-
-alter table users add column junk text;
-select * from usersview;
- userid | seq | email | moredrop | enabled | generate_series | ordinality
---------+-----+--------+----------+---------+-----------------+------------
- id | 1 | email | 11 | t | 10 | 1
- id2 | 2 | email2 | 12 | t | 11 | 2
-(2 rows)
-
-alter table users drop column moredrop; -- fail, view has reference
-ERROR: cannot drop column moredrop of table users because other objects depend on it
-DETAIL: view usersview depends on column moredrop of table users
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
--- We used to have a bug that would allow the above to succeed, posing
--- hazards for later execution of the view. Check that the internal
--- defenses for those hazards haven't bit-rotted, in case some other
--- bug with similar symptoms emerges.
-begin;
--- destroy the dependency entry that prevents the DROP:
-delete from pg_depend where
- objid = (select oid from pg_rewrite
- where ev_class = 'usersview'::regclass and rulename = '_RETURN')
- and refobjsubid = 5
-returning pg_describe_object(classid, objid, objsubid) as obj,
- pg_describe_object(refclassid, refobjid, refobjsubid) as ref,
- deptype;
- obj | ref | deptype
---------------------------------+--------------------------------+---------
- rule _RETURN on view usersview | column moredrop of table users | n
-(1 row)
-
-alter table users drop column moredrop;
-select * from usersview; -- expect clean failure
-ERROR: attribute 5 of type record has been dropped
-rollback;
-alter table users alter column seq type numeric; -- fail, view has reference
-ERROR: cannot alter type of a column used by a view or rule
-DETAIL: rule _RETURN on view usersview depends on column "seq"
--- likewise, check we don't crash if the dependency goes wrong
-begin;
--- destroy the dependency entry that prevents the ALTER:
-delete from pg_depend where
- objid = (select oid from pg_rewrite
- where ev_class = 'usersview'::regclass and rulename = '_RETURN')
- and refobjsubid = 2
-returning pg_describe_object(classid, objid, objsubid) as obj,
- pg_describe_object(refclassid, refobjid, refobjsubid) as ref,
- deptype;
- obj | ref | deptype
---------------------------------+---------------------------+---------
- rule _RETURN on view usersview | column seq of table users | n
-(1 row)
-
-alter table users alter column seq type numeric;
-select * from usersview; -- expect clean failure
-ERROR: attribute 2 of type record has wrong type
-DETAIL: Table has type numeric, but query expects integer.
-rollback;
-drop view usersview;
-drop function get_first_user();
-drop function get_users();
-drop table users;
--- check behavior with type coercion required for a set-op
-create or replace function rngfuncbar() returns setof text as
-$$ select 'foo'::varchar union all select 'bar'::varchar ; $$
-language sql stable;
-select rngfuncbar();
- rngfuncbar
-------------
- foo
- bar
-(2 rows)
-
-select * from rngfuncbar();
- rngfuncbar
-------------
- foo
- bar
-(2 rows)
-
--- this function is now inlinable, too:
-explain (verbose, costs off) select * from rngfuncbar();
- QUERY PLAN
-------------------------------------------------
- Result
- Output: ('foo'::character varying)
- -> Append
- -> Result
- Output: 'foo'::character varying
- -> Result
- Output: 'bar'::character varying
-(7 rows)
-
-drop function rngfuncbar();
--- check handling of a SQL function with multiple OUT params (bug #5777)
-create or replace function rngfuncbar(out integer, out numeric) as
-$$ select (1, 2.1) $$ language sql;
-select * from rngfuncbar();
- column1 | column2
----------+---------
- 1 | 2.1
-(1 row)
-
-create or replace function rngfuncbar(out integer, out numeric) as
-$$ select (1, 2) $$ language sql;
-select * from rngfuncbar(); -- fail
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned type integer at ordinal position 2, but query expects numeric.
-create or replace function rngfuncbar(out integer, out numeric) as
-$$ select (1, 2.1, 3) $$ language sql;
-select * from rngfuncbar(); -- fail
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned row contains 3 attributes, but query expects 2.
-drop function rngfuncbar();
--- check whole-row-Var handling in nested lateral functions (bug #11703)
-create function extractq2(t int8_tbl) returns int8 as $$
- select t.q2
-$$ language sql immutable;
-explain (verbose, costs off)
-select x from int8_tbl, extractq2(int8_tbl) f(x);
- QUERY PLAN
-------------------------------------------
- Nested Loop
- Output: f.x
- -> Seq Scan on public.int8_tbl
- Output: int8_tbl.q1, int8_tbl.q2
- -> Function Scan on f
- Output: f.x
- Function Call: int8_tbl.q2
-(7 rows)
-
-select x from int8_tbl, extractq2(int8_tbl) f(x);
- x
--------------------
- 456
- 4567890123456789
- 123
- 4567890123456789
- -4567890123456789
-(5 rows)
-
-create function extractq2_2(t int8_tbl) returns table(ret1 int8) as $$
- select extractq2(t) offset 0
-$$ language sql immutable;
-explain (verbose, costs off)
-select x from int8_tbl, extractq2_2(int8_tbl) f(x);
- QUERY PLAN
------------------------------------
- Nested Loop
- Output: ((int8_tbl.*).q2)
- -> Seq Scan on public.int8_tbl
- Output: int8_tbl.*
- -> Result
- Output: (int8_tbl.*).q2
-(6 rows)
-
-select x from int8_tbl, extractq2_2(int8_tbl) f(x);
- x
--------------------
- 456
- 4567890123456789
- 123
- 4567890123456789
- -4567890123456789
-(5 rows)
-
--- without the "offset 0", this function gets optimized quite differently
-create function extractq2_2_opt(t int8_tbl) returns table(ret1 int8) as $$
- select extractq2(t)
-$$ language sql immutable;
-explain (verbose, costs off)
-select x from int8_tbl, extractq2_2_opt(int8_tbl) f(x);
- QUERY PLAN
------------------------------
- Seq Scan on public.int8_tbl
- Output: int8_tbl.q2
-(2 rows)
-
-select x from int8_tbl, extractq2_2_opt(int8_tbl) f(x);
- x
--------------------
- 456
- 4567890123456789
- 123
- 4567890123456789
- -4567890123456789
-(5 rows)
-
--- check handling of nulls in SRF results (bug #7808)
-create type rngfunc2 as (a integer, b text);
-select *, row_to_json(u) from unnest(array[(1,'foo')::rngfunc2, null::rngfunc2]) u;
- a | b | row_to_json
----+-----+---------------------
- 1 | foo | {"a":1,"b":"foo"}
- | | {"a":null,"b":null}
-(2 rows)
-
-select *, row_to_json(u) from unnest(array[null::rngfunc2, null::rngfunc2]) u;
- a | b | row_to_json
----+---+---------------------
- | | {"a":null,"b":null}
- | | {"a":null,"b":null}
-(2 rows)
-
-select *, row_to_json(u) from unnest(array[null::rngfunc2, (1,'foo')::rngfunc2, null::rngfunc2]) u;
- a | b | row_to_json
----+-----+---------------------
- | | {"a":null,"b":null}
- 1 | foo | {"a":1,"b":"foo"}
- | | {"a":null,"b":null}
-(3 rows)
-
-select *, row_to_json(u) from unnest(array[]::rngfunc2[]) u;
- a | b | row_to_json
----+---+-------------
-(0 rows)
-
-drop type rngfunc2;
--- check handling of functions pulled up into function RTEs (bug #17227)
-explain (verbose, costs off)
-select * from
- (select jsonb_path_query_array(module->'lectures', '$[*]') as lecture
- from unnest(array['{"lectures": [{"id": "1"}]}'::jsonb])
- as unnested_modules(module)) as ss,
- jsonb_to_recordset(ss.lecture) as j (id text);
- QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------
- Nested Loop
- Output: jsonb_path_query_array((unnested_modules.module -> 'lectures'::text), '$[*]'::jsonpath, '{}'::jsonb, false), j.id
- -> Function Scan on pg_catalog.unnest unnested_modules
- Output: unnested_modules.module
- Function Call: unnest('{"{\"lectures\": [{\"id\": \"1\"}]}"}'::jsonb[])
- -> Function Scan on pg_catalog.jsonb_to_recordset j
- Output: j.id
- Function Call: jsonb_to_recordset(jsonb_path_query_array((unnested_modules.module -> 'lectures'::text), '$[*]'::jsonpath, '{}'::jsonb, false))
-(8 rows)
-
-select * from
- (select jsonb_path_query_array(module->'lectures', '$[*]') as lecture
- from unnest(array['{"lectures": [{"id": "1"}]}'::jsonb])
- as unnested_modules(module)) as ss,
- jsonb_to_recordset(ss.lecture) as j (id text);
- lecture | id
----------------+----
- [{"id": "1"}] | 1
-(1 row)
-
--- check detection of mismatching record types with a const-folded expression
-with a(b) as (values (row(1,2,3)))
-select * from a, coalesce(b) as c(d int, e int); -- fail
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned row contains 3 attributes, but query expects 2.
-with a(b) as (values (row(1,2,3)))
-select * from a, coalesce(b) as c(d int, e int, f int, g int); -- fail
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned row contains 3 attributes, but query expects 4.
-with a(b) as (values (row(1,2,3)))
-select * from a, coalesce(b) as c(d int, e int, f float); -- fail
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned type integer at ordinal position 3, but query expects double precision.
-select * from int8_tbl, coalesce(row(1)) as (a int, b int); -- fail
-ERROR: function return row and query-specified return row do not match
-DETAIL: Returned row contains 1 attribute, but query expects 2.
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/prepare.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/prepare.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/prepare.out 2024-11-15 02:50:52.486055632 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/prepare.out 2024-11-15 02:59:18.185116971 +0000
@@ -1,194 +1,2 @@
--- Regression tests for prepareable statements. We query the content
--- of the pg_prepared_statements view as prepared statements are
--- created and removed.
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name | statement | parameter_types | result_types
-------+-----------+-----------------+--------------
-(0 rows)
-
-PREPARE q1 AS SELECT 1 AS a;
-EXECUTE q1;
- a
----
- 1
-(1 row)
-
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name | statement | parameter_types | result_types
-------+------------------------------+-----------------+--------------
- q1 | PREPARE q1 AS SELECT 1 AS a; | {} | {integer}
-(1 row)
-
--- should fail
-PREPARE q1 AS SELECT 2;
-ERROR: prepared statement "q1" already exists
--- should succeed
-DEALLOCATE q1;
-PREPARE q1 AS SELECT 2;
-EXECUTE q1;
- ?column?
-----------
- 2
-(1 row)
-
-PREPARE q2 AS SELECT 2 AS b;
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name | statement | parameter_types | result_types
-------+------------------------------+-----------------+--------------
- q1 | PREPARE q1 AS SELECT 2; | {} | {integer}
- q2 | PREPARE q2 AS SELECT 2 AS b; | {} | {integer}
-(2 rows)
-
--- sql92 syntax
-DEALLOCATE PREPARE q1;
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name | statement | parameter_types | result_types
-------+------------------------------+-----------------+--------------
- q2 | PREPARE q2 AS SELECT 2 AS b; | {} | {integer}
-(1 row)
-
-DEALLOCATE PREPARE q2;
--- the view should return the empty set again
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements;
- name | statement | parameter_types | result_types
-------+-----------+-----------------+--------------
-(0 rows)
-
--- parameterized queries
-PREPARE q2(text) AS
- SELECT datname, datistemplate, datallowconn
- FROM pg_database WHERE datname = $1;
-EXECUTE q2('postgres');
- datname | datistemplate | datallowconn
-----------+---------------+--------------
- postgres | f | t
-(1 row)
-
-PREPARE q3(text, int, float, boolean, smallint) AS
- SELECT * FROM tenk1 WHERE string4 = $1 AND (four = $2 OR
- ten = $3::bigint OR true = $4 OR odd = $5::int)
- ORDER BY unique1;
-EXECUTE q3('AAAAxx', 5::smallint, 10.5::float, false, 4::bigint);
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 2 | 2716 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 4 | 5 | CAAAAA | MAEAAA | AAAAxx
- 102 | 612 | 0 | 2 | 2 | 2 | 2 | 102 | 102 | 102 | 102 | 4 | 5 | YDAAAA | OXAAAA | AAAAxx
- 802 | 2908 | 0 | 2 | 2 | 2 | 2 | 802 | 802 | 802 | 802 | 4 | 5 | WEAAAA | WHEAAA | AAAAxx
- 902 | 1104 | 0 | 2 | 2 | 2 | 2 | 902 | 902 | 902 | 902 | 4 | 5 | SIAAAA | MQBAAA | AAAAxx
- 1002 | 2580 | 0 | 2 | 2 | 2 | 2 | 2 | 1002 | 1002 | 1002 | 4 | 5 | OMAAAA | GVDAAA | AAAAxx
- 1602 | 8148 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 1602 | 1602 | 4 | 5 | QJAAAA | KBMAAA | AAAAxx
- 1702 | 7940 | 0 | 2 | 2 | 2 | 2 | 702 | 1702 | 1702 | 1702 | 4 | 5 | MNAAAA | KTLAAA | AAAAxx
- 2102 | 6184 | 0 | 2 | 2 | 2 | 2 | 102 | 102 | 2102 | 2102 | 4 | 5 | WCAAAA | WDJAAA | AAAAxx
- 2202 | 8028 | 0 | 2 | 2 | 2 | 2 | 202 | 202 | 2202 | 2202 | 4 | 5 | SGAAAA | UWLAAA | AAAAxx
- 2302 | 7112 | 0 | 2 | 2 | 2 | 2 | 302 | 302 | 2302 | 2302 | 4 | 5 | OKAAAA | ONKAAA | AAAAxx
- 2902 | 6816 | 0 | 2 | 2 | 2 | 2 | 902 | 902 | 2902 | 2902 | 4 | 5 | QHAAAA | ECKAAA | AAAAxx
- 3202 | 7128 | 0 | 2 | 2 | 2 | 2 | 202 | 1202 | 3202 | 3202 | 4 | 5 | ETAAAA | EOKAAA | AAAAxx
- 3902 | 9224 | 0 | 2 | 2 | 2 | 2 | 902 | 1902 | 3902 | 3902 | 4 | 5 | CUAAAA | UQNAAA | AAAAxx
- 4102 | 7676 | 0 | 2 | 2 | 2 | 2 | 102 | 102 | 4102 | 4102 | 4 | 5 | UBAAAA | GJLAAA | AAAAxx
- 4202 | 6628 | 0 | 2 | 2 | 2 | 2 | 202 | 202 | 4202 | 4202 | 4 | 5 | QFAAAA | YUJAAA | AAAAxx
- 4502 | 412 | 0 | 2 | 2 | 2 | 2 | 502 | 502 | 4502 | 4502 | 4 | 5 | ERAAAA | WPAAAA | AAAAxx
- 4702 | 2520 | 0 | 2 | 2 | 2 | 2 | 702 | 702 | 4702 | 4702 | 4 | 5 | WYAAAA | YSDAAA | AAAAxx
- 4902 | 1600 | 0 | 2 | 2 | 2 | 2 | 902 | 902 | 4902 | 4902 | 4 | 5 | OGAAAA | OJCAAA | AAAAxx
- 5602 | 8796 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 602 | 5602 | 4 | 5 | MHAAAA | IANAAA | AAAAxx
- 6002 | 8932 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 1002 | 6002 | 4 | 5 | WWAAAA | OFNAAA | AAAAxx
- 6402 | 3808 | 0 | 2 | 2 | 2 | 2 | 402 | 402 | 1402 | 6402 | 4 | 5 | GMAAAA | MQFAAA | AAAAxx
- 7602 | 1040 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 2602 | 7602 | 4 | 5 | KGAAAA | AOBAAA | AAAAxx
- 7802 | 7508 | 0 | 2 | 2 | 2 | 2 | 802 | 1802 | 2802 | 7802 | 4 | 5 | COAAAA | UCLAAA | AAAAxx
- 8002 | 9980 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 3002 | 8002 | 4 | 5 | UVAAAA | WTOAAA | AAAAxx
- 8302 | 7800 | 0 | 2 | 2 | 2 | 2 | 302 | 302 | 3302 | 8302 | 4 | 5 | IHAAAA | AOLAAA | AAAAxx
- 8402 | 5708 | 0 | 2 | 2 | 2 | 2 | 402 | 402 | 3402 | 8402 | 4 | 5 | ELAAAA | OLIAAA | AAAAxx
- 8602 | 5440 | 0 | 2 | 2 | 2 | 2 | 602 | 602 | 3602 | 8602 | 4 | 5 | WSAAAA | GBIAAA | AAAAxx
- 9502 | 1812 | 0 | 2 | 2 | 2 | 2 | 502 | 1502 | 4502 | 9502 | 4 | 5 | MBAAAA | SRCAAA | AAAAxx
- 9602 | 9972 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 4602 | 9602 | 4 | 5 | IFAAAA | OTOAAA | AAAAxx
-(29 rows)
-
--- too few params
-EXECUTE q3('bool');
-ERROR: wrong number of parameters for prepared statement "q3"
-DETAIL: Expected 5 parameters but got 1.
--- too many params
-EXECUTE q3('bytea', 5::smallint, 10.5::float, false, 4::bigint, true);
-ERROR: wrong number of parameters for prepared statement "q3"
-DETAIL: Expected 5 parameters but got 6.
--- wrong param types
-EXECUTE q3(5::smallint, 10.5::float, false, 4::bigint, 'bytea');
-ERROR: parameter $3 of type boolean cannot be coerced to the expected type double precision
-LINE 1: EXECUTE q3(5::smallint, 10.5::float, false, 4::bigint, 'byte...
- ^
-HINT: You will need to rewrite or cast the expression.
--- invalid type
-PREPARE q4(nonexistenttype) AS SELECT $1;
-ERROR: type "nonexistenttype" does not exist
-LINE 1: PREPARE q4(nonexistenttype) AS SELECT $1;
- ^
--- create table as execute
-PREPARE q5(int, text) AS
- SELECT * FROM tenk1 WHERE unique1 = $1 OR stringu1 = $2
- ORDER BY unique1;
-CREATE TEMPORARY TABLE q5_prep_results AS EXECUTE q5(200, 'DTAAAA');
-SELECT * FROM q5_prep_results;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
- 200 | 9441 | 0 | 0 | 0 | 0 | 0 | 200 | 200 | 200 | 200 | 0 | 1 | SHAAAA | DZNAAA | HHHHxx
- 497 | 9092 | 1 | 1 | 7 | 17 | 97 | 497 | 497 | 497 | 497 | 194 | 195 | DTAAAA | SLNAAA | AAAAxx
- 1173 | 6699 | 1 | 1 | 3 | 13 | 73 | 173 | 1173 | 1173 | 1173 | 146 | 147 | DTAAAA | RXJAAA | VVVVxx
- 1849 | 8143 | 1 | 1 | 9 | 9 | 49 | 849 | 1849 | 1849 | 1849 | 98 | 99 | DTAAAA | FBMAAA | VVVVxx
- 2525 | 64 | 1 | 1 | 5 | 5 | 25 | 525 | 525 | 2525 | 2525 | 50 | 51 | DTAAAA | MCAAAA | AAAAxx
- 3201 | 7309 | 1 | 1 | 1 | 1 | 1 | 201 | 1201 | 3201 | 3201 | 2 | 3 | DTAAAA | DVKAAA | HHHHxx
- 3877 | 4060 | 1 | 1 | 7 | 17 | 77 | 877 | 1877 | 3877 | 3877 | 154 | 155 | DTAAAA | EAGAAA | AAAAxx
- 4553 | 4113 | 1 | 1 | 3 | 13 | 53 | 553 | 553 | 4553 | 4553 | 106 | 107 | DTAAAA | FCGAAA | HHHHxx
- 5229 | 6407 | 1 | 1 | 9 | 9 | 29 | 229 | 1229 | 229 | 5229 | 58 | 59 | DTAAAA | LMJAAA | VVVVxx
- 5905 | 9537 | 1 | 1 | 5 | 5 | 5 | 905 | 1905 | 905 | 5905 | 10 | 11 | DTAAAA | VCOAAA | HHHHxx
- 6581 | 4686 | 1 | 1 | 1 | 1 | 81 | 581 | 581 | 1581 | 6581 | 162 | 163 | DTAAAA | GYGAAA | OOOOxx
- 7257 | 1895 | 1 | 1 | 7 | 17 | 57 | 257 | 1257 | 2257 | 7257 | 114 | 115 | DTAAAA | XUCAAA | VVVVxx
- 7933 | 4514 | 1 | 1 | 3 | 13 | 33 | 933 | 1933 | 2933 | 7933 | 66 | 67 | DTAAAA | QRGAAA | OOOOxx
- 8609 | 5918 | 1 | 1 | 9 | 9 | 9 | 609 | 609 | 3609 | 8609 | 18 | 19 | DTAAAA | QTIAAA | OOOOxx
- 9285 | 8469 | 1 | 1 | 5 | 5 | 85 | 285 | 1285 | 4285 | 9285 | 170 | 171 | DTAAAA | TNMAAA | HHHHxx
- 9961 | 2058 | 1 | 1 | 1 | 1 | 61 | 961 | 1961 | 4961 | 9961 | 122 | 123 | DTAAAA | EBDAAA | OOOOxx
-(16 rows)
-
-CREATE TEMPORARY TABLE q5_prep_nodata AS EXECUTE q5(200, 'DTAAAA')
- WITH NO DATA;
-SELECT * FROM q5_prep_nodata;
- unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
----------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
-(0 rows)
-
--- unknown or unspecified parameter types: should succeed
-PREPARE q6 AS
- SELECT * FROM tenk1 WHERE unique1 = $1 AND stringu1 = $2;
-PREPARE q7(unknown) AS
- SELECT * FROM road WHERE thepath = $1;
--- DML statements
-PREPARE q8 AS
- UPDATE tenk1 SET stringu1 = $2 WHERE unique1 = $1;
-SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements
- ORDER BY name;
- name | statement | parameter_types | result_types
-------+------------------------------------------------------------------+----------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------
- q2 | PREPARE q2(text) AS +| {text} | {name,boolean,boolean}
- | SELECT datname, datistemplate, datallowconn +| |
- | FROM pg_database WHERE datname = $1; | |
- q3 | PREPARE q3(text, int, float, boolean, smallint) AS +| {text,integer,"double precision",boolean,smallint} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name}
- | SELECT * FROM tenk1 WHERE string4 = $1 AND (four = $2 OR+| |
- | ten = $3::bigint OR true = $4 OR odd = $5::int) +| |
- | ORDER BY unique1; | |
- q5 | PREPARE q5(int, text) AS +| {integer,text} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name}
- | SELECT * FROM tenk1 WHERE unique1 = $1 OR stringu1 = $2 +| |
- | ORDER BY unique1; | |
- q6 | PREPARE q6 AS +| {integer,name} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name}
- | SELECT * FROM tenk1 WHERE unique1 = $1 AND stringu1 = $2; | |
- q7 | PREPARE q7(unknown) AS +| {path} | {text,path}
- | SELECT * FROM road WHERE thepath = $1; | |
- q8 | PREPARE q8 AS +| {integer,name} |
- | UPDATE tenk1 SET stringu1 = $2 WHERE unique1 = $1; | |
-(6 rows)
-
--- test DEALLOCATE ALL;
-DEALLOCATE ALL;
-SELECT name, statement, parameter_types FROM pg_prepared_statements
- ORDER BY name;
- name | statement | parameter_types
-------+-----------+-----------------
-(0 rows)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/conversion.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/conversion.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/conversion.out 2024-11-15 02:50:52.426154377 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/conversion.out 2024-11-15 02:59:18.185116971 +0000
@@ -1,734 +1,2 @@
---
--- create user defined conversion
---
--- directory paths and dlsuffix are passed to us in environment variables
-\getenv libdir PG_LIBDIR
-\getenv dlsuffix PG_DLSUFFIX
-\set regresslib :libdir '/regress' :dlsuffix
-CREATE FUNCTION test_enc_conversion(bytea, name, name, bool, validlen OUT int, result OUT bytea)
- AS :'regresslib', 'test_enc_conversion'
- LANGUAGE C STRICT;
-CREATE USER regress_conversion_user WITH NOCREATEDB NOCREATEROLE;
-SET SESSION AUTHORIZATION regress_conversion_user;
-CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
---
--- cannot make same name conversion in same schema
---
-CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
-ERROR: conversion "myconv" already exists
---
--- create default conversion with qualified name
---
-CREATE DEFAULT CONVERSION public.mydef FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
---
--- cannot make default conversion with same schema/for_encoding/to_encoding
---
-CREATE DEFAULT CONVERSION public.mydef2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;
-ERROR: default conversion for LATIN1 to UTF8 already exists
--- test comments
-COMMENT ON CONVERSION myconv_bad IS 'foo';
-ERROR: conversion "myconv_bad" does not exist
-COMMENT ON CONVERSION myconv IS 'bar';
-COMMENT ON CONVERSION myconv IS NULL;
---
--- drop user defined conversion
---
-DROP CONVERSION myconv;
-DROP CONVERSION mydef;
---
--- Note: the built-in conversions are exercised in opr_sanity.sql,
--- so there's no need to do that here.
---
---
--- return to the superuser
---
-RESET SESSION AUTHORIZATION;
-DROP USER regress_conversion_user;
---
--- Test built-in conversion functions.
---
--- Helper function to test a conversion. Uses the test_enc_conversion function
--- that was created in the create_function_0 test.
-create or replace function test_conv(
- input IN bytea,
- src_encoding IN text,
- dst_encoding IN text,
- result OUT bytea,
- errorat OUT bytea,
- error OUT text)
-language plpgsql as
-$$
-declare
- validlen int;
-begin
- -- First try to perform the conversion with noError = false. If that errors out,
- -- capture the error message, and try again with noError = true. The second call
- -- should succeed and return the position of the error, return that too.
- begin
- select * into validlen, result from test_enc_conversion(input, src_encoding, dst_encoding, false);
- errorat = NULL;
- error := NULL;
- exception when others then
- error := sqlerrm;
- select * into validlen, result from test_enc_conversion(input, src_encoding, dst_encoding, true);
- errorat = substr(input, validlen + 1);
- end;
- return;
-end;
-$$;
---
--- UTF-8
---
--- The description column must be unique.
-CREATE TABLE utf8_verification_inputs (inbytes bytea, description text PRIMARY KEY);
-insert into utf8_verification_inputs values
- ('\x66006f', 'NUL byte'),
- ('\xaf', 'bare continuation'),
- ('\xc5', 'missing second byte in 2-byte char'),
- ('\xc080', 'smallest 2-byte overlong'),
- ('\xc1bf', 'largest 2-byte overlong'),
- ('\xc280', 'next 2-byte after overlongs'),
- ('\xdfbf', 'largest 2-byte'),
- ('\xe9af', 'missing third byte in 3-byte char'),
- ('\xe08080', 'smallest 3-byte overlong'),
- ('\xe09fbf', 'largest 3-byte overlong'),
- ('\xe0a080', 'next 3-byte after overlong'),
- ('\xed9fbf', 'last before surrogates'),
- ('\xeda080', 'smallest surrogate'),
- ('\xedbfbf', 'largest surrogate'),
- ('\xee8080', 'next after surrogates'),
- ('\xefbfbf', 'largest 3-byte'),
- ('\xf1afbf', 'missing fourth byte in 4-byte char'),
- ('\xf0808080', 'smallest 4-byte overlong'),
- ('\xf08fbfbf', 'largest 4-byte overlong'),
- ('\xf0908080', 'next 4-byte after overlong'),
- ('\xf48fbfbf', 'largest 4-byte'),
- ('\xf4908080', 'smallest too large'),
- ('\xfa9a9a8a8a', '5-byte');
--- Test UTF-8 verification slow path
-select description, (test_conv(inbytes, 'utf8', 'utf8')).* from utf8_verification_inputs;
- description | result | errorat | error
-------------------------------------+------------+--------------+----------------------------------------------------------------
- NUL byte | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- bare continuation | \x | \xaf | invalid byte sequence for encoding "UTF8": 0xaf
- missing second byte in 2-byte char | \x | \xc5 | invalid byte sequence for encoding "UTF8": 0xc5
- smallest 2-byte overlong | \x | \xc080 | invalid byte sequence for encoding "UTF8": 0xc0 0x80
- largest 2-byte overlong | \x | \xc1bf | invalid byte sequence for encoding "UTF8": 0xc1 0xbf
- next 2-byte after overlongs | \xc280 | |
- largest 2-byte | \xdfbf | |
- missing third byte in 3-byte char | \x | \xe9af | invalid byte sequence for encoding "UTF8": 0xe9 0xaf
- smallest 3-byte overlong | \x | \xe08080 | invalid byte sequence for encoding "UTF8": 0xe0 0x80 0x80
- largest 3-byte overlong | \x | \xe09fbf | invalid byte sequence for encoding "UTF8": 0xe0 0x9f 0xbf
- next 3-byte after overlong | \xe0a080 | |
- last before surrogates | \xed9fbf | |
- smallest surrogate | \x | \xeda080 | invalid byte sequence for encoding "UTF8": 0xed 0xa0 0x80
- largest surrogate | \x | \xedbfbf | invalid byte sequence for encoding "UTF8": 0xed 0xbf 0xbf
- next after surrogates | \xee8080 | |
- largest 3-byte | \xefbfbf | |
- missing fourth byte in 4-byte char | \x | \xf1afbf | invalid byte sequence for encoding "UTF8": 0xf1 0xaf 0xbf
- smallest 4-byte overlong | \x | \xf0808080 | invalid byte sequence for encoding "UTF8": 0xf0 0x80 0x80 0x80
- largest 4-byte overlong | \x | \xf08fbfbf | invalid byte sequence for encoding "UTF8": 0xf0 0x8f 0xbf 0xbf
- next 4-byte after overlong | \xf0908080 | |
- largest 4-byte | \xf48fbfbf | |
- smallest too large | \x | \xf4908080 | invalid byte sequence for encoding "UTF8": 0xf4 0x90 0x80 0x80
- 5-byte | \x | \xfa9a9a8a8a | invalid byte sequence for encoding "UTF8": 0xfa
-(23 rows)
-
--- Test UTF-8 verification with ASCII padding appended to provide
--- coverage for algorithms that work on multiple bytes at a time.
--- The error message for a sequence starting with a 4-byte lead
--- will contain all 4 bytes if they are present, so various
--- expressions below add 3 ASCII bytes to the end to ensure
--- consistent error messages.
--- The number 64 below needs to be at least the value of STRIDE_LENGTH in wchar.c.
--- Test multibyte verification in fast path
-with test_bytes as (
- select
- inbytes,
- description,
- (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
- from utf8_verification_inputs
-), test_padded as (
- select
- description,
- (test_conv(inbytes || repeat('.', 64)::bytea, 'utf8', 'utf8')).error
- from test_bytes
-)
-select
- description,
- b.error as orig_error,
- p.error as error_after_padding
-from test_padded p
-join test_bytes b
-using (description)
-where p.error is distinct from b.error
-order by description;
- description | orig_error | error_after_padding
--------------+------------+---------------------
-(0 rows)
-
--- Test ASCII verification in fast path where incomplete
--- UTF-8 sequences fall at the end of the preceding chunk.
-with test_bytes as (
- select
- inbytes,
- description,
- (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
- from utf8_verification_inputs
-), test_padded as (
- select
- description,
- (test_conv(repeat('.', 64 - length(inbytes))::bytea || inbytes || repeat('.', 64)::bytea, 'utf8', 'utf8')).error
- from test_bytes
-)
-select
- description,
- b.error as orig_error,
- p.error as error_after_padding
-from test_padded p
-join test_bytes b
-using (description)
-where p.error is distinct from b.error
-order by description;
- description | orig_error | error_after_padding
--------------+------------+---------------------
-(0 rows)
-
--- Test cases where UTF-8 sequences within short text
--- come after the fast path returns.
-with test_bytes as (
- select
- inbytes,
- description,
- (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
- from utf8_verification_inputs
-), test_padded as (
- select
- description,
- (test_conv(repeat('.', 64)::bytea || inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
- from test_bytes
-)
-select
- description,
- b.error as orig_error,
- p.error as error_after_padding
-from test_padded p
-join test_bytes b
-using (description)
-where p.error is distinct from b.error
-order by description;
- description | orig_error | error_after_padding
--------------+------------+---------------------
-(0 rows)
-
--- Test cases where incomplete UTF-8 sequences fall at the
--- end of the part checked by the fast path.
-with test_bytes as (
- select
- inbytes,
- description,
- (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
- from utf8_verification_inputs
-), test_padded as (
- select
- description,
- (test_conv(repeat('.', 64 - length(inbytes))::bytea || inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error
- from test_bytes
-)
-select
- description,
- b.error as orig_error,
- p.error as error_after_padding
-from test_padded p
-join test_bytes b
-using (description)
-where p.error is distinct from b.error
-order by description;
- description | orig_error | error_after_padding
--------------+------------+---------------------
-(0 rows)
-
-CREATE TABLE utf8_inputs (inbytes bytea, description text);
-insert into utf8_inputs values
- ('\x666f6f', 'valid, pure ASCII'),
- ('\xc3a4c3b6', 'valid, extra latin chars'),
- ('\xd184d0bed0be', 'valid, cyrillic'),
- ('\x666f6fe8b1a1', 'valid, kanji/Chinese'),
- ('\xe382abe3829a', 'valid, two chars that combine to one in EUC_JIS_2004'),
- ('\xe382ab', 'only first half of combined char in EUC_JIS_2004'),
- ('\xe382abe382', 'incomplete combination when converted EUC_JIS_2004'),
- ('\xecbd94eb81bceba6ac', 'valid, Hangul, Korean'),
- ('\x666f6fefa8aa', 'valid, needs mapping function to convert to GB18030'),
- ('\x66e8b1ff6f6f', 'invalid byte sequence'),
- ('\x66006f', 'invalid, NUL byte'),
- ('\x666f6fe8b100', 'invalid, NUL byte'),
- ('\x666f6fe8b1', 'incomplete character at end');
--- Test UTF-8 verification
-select description, (test_conv(inbytes, 'utf8', 'utf8')).* from utf8_inputs;
- description | result | errorat | error
-------------------------------------------------------+----------------------+--------------+-----------------------------------------------------------
- valid, pure ASCII | \x666f6f | |
- valid, extra latin chars | \xc3a4c3b6 | |
- valid, cyrillic | \xd184d0bed0be | |
- valid, kanji/Chinese | \x666f6fe8b1a1 | |
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | |
- only first half of combined char in EUC_JIS_2004 | \xe382ab | |
- incomplete combination when converted EUC_JIS_2004 | \xe382ab | \xe382 | invalid byte sequence for encoding "UTF8": 0xe3 0x82
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | |
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | |
- invalid byte sequence | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
--- Test conversions from UTF-8
-select description, inbytes, (test_conv(inbytes, 'utf8', 'euc_jis_2004')).* from utf8_inputs;
- description | inbytes | result | errorat | error
-------------------------------------------------------+----------------------+----------------+----------------------+-------------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid, extra latin chars | \xc3a4c3b6 | \xa9daa9ec | |
- valid, cyrillic | \xd184d0bed0be | \xa7e6a7e0a7e0 | |
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6fbedd | |
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \xa5f7 | |
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \xa5ab | |
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | invalid byte sequence for encoding "UTF8": 0xe3 0x82
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "EUC_JIS_2004"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "EUC_JIS_2004"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'latin1')).* from utf8_inputs;
- description | inbytes | result | errorat | error
-------------------------------------------------------+----------------------+----------+----------------------+-------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid, extra latin chars | \xc3a4c3b6 | \xe4f6 | |
- valid, cyrillic | \xd184d0bed0be | \x | \xd184d0bed0be | character with byte sequence 0xd1 0x84 in encoding "UTF8" has no equivalent in encoding "LATIN1"
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "LATIN1"
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN1"
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN1"
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN1"
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "LATIN1"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "LATIN1"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'latin2')).* from utf8_inputs;
- description | inbytes | result | errorat | error
-------------------------------------------------------+----------------------+----------+----------------------+-------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid, extra latin chars | \xc3a4c3b6 | \xe4f6 | |
- valid, cyrillic | \xd184d0bed0be | \x | \xd184d0bed0be | character with byte sequence 0xd1 0x84 in encoding "UTF8" has no equivalent in encoding "LATIN2"
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "LATIN2"
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN2"
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN2"
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN2"
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "LATIN2"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "LATIN2"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'latin5')).* from utf8_inputs;
- description | inbytes | result | errorat | error
-------------------------------------------------------+----------------------+----------+----------------------+-------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid, extra latin chars | \xc3a4c3b6 | \xe4f6 | |
- valid, cyrillic | \xd184d0bed0be | \x | \xd184d0bed0be | character with byte sequence 0xd1 0x84 in encoding "UTF8" has no equivalent in encoding "LATIN5"
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "LATIN5"
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN5"
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN5"
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN5"
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "LATIN5"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "LATIN5"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'koi8r')).* from utf8_inputs;
- description | inbytes | result | errorat | error
-------------------------------------------------------+----------------------+----------+----------------------+------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid, extra latin chars | \xc3a4c3b6 | \x | \xc3a4c3b6 | character with byte sequence 0xc3 0xa4 in encoding "UTF8" has no equivalent in encoding "KOI8R"
- valid, cyrillic | \xd184d0bed0be | \xc6cfcf | |
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "KOI8R"
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "KOI8R"
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "KOI8R"
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "KOI8R"
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "KOI8R"
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "KOI8R"
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
-select description, inbytes, (test_conv(inbytes, 'utf8', 'gb18030')).* from utf8_inputs;
- description | inbytes | result | errorat | error
-------------------------------------------------------+----------------------+----------------------------+--------------+-----------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid, extra latin chars | \xc3a4c3b6 | \x81308a3181308b32 | |
- valid, cyrillic | \xd184d0bed0be | \xa7e6a7e0a7e0 | |
- valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6fcff3 | |
- valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \xa5ab8139a732 | |
- only first half of combined char in EUC_JIS_2004 | \xe382ab | \xa5ab | |
- incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \xa5ab | \xe382 | invalid byte sequence for encoding "UTF8": 0xe3 0x82
- valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x8334e5398238c4338330b335 | |
- valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f84309c38 | |
- invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff
- invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00
- invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00
- incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1
-(13 rows)
-
---
--- EUC_JIS_2004
---
-CREATE TABLE euc_jis_2004_inputs (inbytes bytea, description text);
-insert into euc_jis_2004_inputs values
- ('\x666f6f', 'valid, pure ASCII'),
- ('\x666f6fbedd', 'valid'),
- ('\xa5f7', 'valid, translates to two UTF-8 chars '),
- ('\xbeddbe', 'incomplete char '),
- ('\x666f6f00bedd', 'invalid, NUL byte'),
- ('\x666f6fbe00dd', 'invalid, NUL byte'),
- ('\x666f6fbedd00', 'invalid, NUL byte'),
- ('\xbe04', 'invalid byte sequence');
--- Test EUC_JIS_2004 verification
-select description, inbytes, (test_conv(inbytes, 'euc_jis_2004', 'euc_jis_2004')).* from euc_jis_2004_inputs;
- description | inbytes | result | errorat | error
----------------------------------------+----------------+--------------+----------+--------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6fbedd | \x666f6fbedd | |
- valid, translates to two UTF-8 chars | \xa5f7 | \xa5f7 | |
- incomplete char | \xbeddbe | \xbedd | \xbe | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe
- invalid, NUL byte | \x666f6f00bedd | \x666f6f | \x00bedd | invalid byte sequence for encoding "EUC_JIS_2004": 0x00
- invalid, NUL byte | \x666f6fbe00dd | \x666f6f | \xbe00dd | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x00
- invalid, NUL byte | \x666f6fbedd00 | \x666f6fbedd | \x00 | invalid byte sequence for encoding "EUC_JIS_2004": 0x00
- invalid byte sequence | \xbe04 | \x | \xbe04 | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x04
-(8 rows)
-
--- Test conversions from EUC_JIS_2004
-select description, inbytes, (test_conv(inbytes, 'euc_jis_2004', 'utf8')).* from euc_jis_2004_inputs;
- description | inbytes | result | errorat | error
----------------------------------------+----------------+----------------+----------+--------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6fbedd | \x666f6fe8b1a1 | |
- valid, translates to two UTF-8 chars | \xa5f7 | \xe382abe3829a | |
- incomplete char | \xbeddbe | \xe8b1a1 | \xbe | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe
- invalid, NUL byte | \x666f6f00bedd | \x666f6f | \x00bedd | invalid byte sequence for encoding "EUC_JIS_2004": 0x00
- invalid, NUL byte | \x666f6fbe00dd | \x666f6f | \xbe00dd | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x00
- invalid, NUL byte | \x666f6fbedd00 | \x666f6fe8b1a1 | \x00 | invalid byte sequence for encoding "EUC_JIS_2004": 0x00
- invalid byte sequence | \xbe04 | \x | \xbe04 | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x04
-(8 rows)
-
---
--- SHIFT-JIS-2004
---
-CREATE TABLE shiftjis2004_inputs (inbytes bytea, description text);
-insert into shiftjis2004_inputs values
- ('\x666f6f', 'valid, pure ASCII'),
- ('\x666f6f8fdb', 'valid'),
- ('\x666f6f81c0', 'valid, no translation to UTF-8'),
- ('\x666f6f82f5', 'valid, translates to two UTF-8 chars '),
- ('\x666f6f8fdb8f', 'incomplete char '),
- ('\x666f6f820a', 'incomplete char, followed by newline '),
- ('\x666f6f008fdb', 'invalid, NUL byte'),
- ('\x666f6f8f00db', 'invalid, NUL byte'),
- ('\x666f6f8fdb00', 'invalid, NUL byte');
--- Test SHIFT-JIS-2004 verification
-select description, inbytes, (test_conv(inbytes, 'shiftjis2004', 'shiftjis2004')).* from shiftjis2004_inputs;
- description | inbytes | result | errorat | error
----------------------------------------+----------------+--------------+----------+----------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6f8fdb | \x666f6f8fdb | |
- valid, no translation to UTF-8 | \x666f6f81c0 | \x666f6f81c0 | |
- valid, translates to two UTF-8 chars | \x666f6f82f5 | \x666f6f82f5 | |
- incomplete char | \x666f6f8fdb8f | \x666f6f8fdb | \x8f | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f
- incomplete char, followed by newline | \x666f6f820a | \x666f6f | \x820a | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x82 0x0a
- invalid, NUL byte | \x666f6f008fdb | \x666f6f | \x008fdb | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
- invalid, NUL byte | \x666f6f8f00db | \x666f6f | \x8f00db | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f 0x00
- invalid, NUL byte | \x666f6f8fdb00 | \x666f6f8fdb | \x00 | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
-(9 rows)
-
--- Test conversions from SHIFT-JIS-2004
-select description, inbytes, (test_conv(inbytes, 'shiftjis2004', 'utf8')).* from shiftjis2004_inputs;
- description | inbytes | result | errorat | error
----------------------------------------+----------------+----------------------+----------+----------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6f8fdb | \x666f6fe8b1a1 | |
- valid, no translation to UTF-8 | \x666f6f81c0 | \x666f6fe28a84 | |
- valid, translates to two UTF-8 chars | \x666f6f82f5 | \x666f6fe3818be3829a | |
- incomplete char | \x666f6f8fdb8f | \x666f6fe8b1a1 | \x8f | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f
- incomplete char, followed by newline | \x666f6f820a | \x666f6f | \x820a | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x82 0x0a
- invalid, NUL byte | \x666f6f008fdb | \x666f6f | \x008fdb | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
- invalid, NUL byte | \x666f6f8f00db | \x666f6f | \x8f00db | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f 0x00
- invalid, NUL byte | \x666f6f8fdb00 | \x666f6fe8b1a1 | \x00 | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
-(9 rows)
-
-select description, inbytes, (test_conv(inbytes, 'shiftjis2004', 'euc_jis_2004')).* from shiftjis2004_inputs;
- description | inbytes | result | errorat | error
----------------------------------------+----------------+--------------+----------+----------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6f8fdb | \x666f6fbedd | |
- valid, no translation to UTF-8 | \x666f6f81c0 | \x666f6fa2c2 | |
- valid, translates to two UTF-8 chars | \x666f6f82f5 | \x666f6fa4f7 | |
- incomplete char | \x666f6f8fdb8f | \x666f6fbedd | \x8f | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f
- incomplete char, followed by newline | \x666f6f820a | \x666f6f | \x820a | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x82 0x0a
- invalid, NUL byte | \x666f6f008fdb | \x666f6f | \x008fdb | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
- invalid, NUL byte | \x666f6f8f00db | \x666f6f | \x8f00db | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f 0x00
- invalid, NUL byte | \x666f6f8fdb00 | \x666f6fbedd | \x00 | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00
-(9 rows)
-
---
--- GB18030
---
-CREATE TABLE gb18030_inputs (inbytes bytea, description text);
-insert into gb18030_inputs values
- ('\x666f6f', 'valid, pure ASCII'),
- ('\x666f6fcff3', 'valid'),
- ('\x666f6f8431a530', 'valid, no translation to UTF-8'),
- ('\x666f6f84309c38', 'valid, translates to UTF-8 by mapping function'),
- ('\x666f6f84309c', 'incomplete char '),
- ('\x666f6f84309c0a', 'incomplete char, followed by newline '),
- ('\x666f6f84309c3800', 'invalid, NUL byte'),
- ('\x666f6f84309c0038', 'invalid, NUL byte');
--- Test GB18030 verification
-select description, inbytes, (test_conv(inbytes, 'gb18030', 'gb18030')).* from gb18030_inputs;
- description | inbytes | result | errorat | error
-------------------------------------------------+--------------------+------------------+--------------+-------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6fcff3 | \x666f6fcff3 | |
- valid, no translation to UTF-8 | \x666f6f8431a530 | \x666f6f8431a530 | |
- valid, translates to UTF-8 by mapping function | \x666f6f84309c38 | \x666f6f84309c38 | |
- incomplete char | \x666f6f84309c | \x666f6f | \x84309c | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c
- incomplete char, followed by newline | \x666f6f84309c0a | \x666f6f | \x84309c0a | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x0a
- invalid, NUL byte | \x666f6f84309c3800 | \x666f6f84309c38 | \x00 | invalid byte sequence for encoding "GB18030": 0x00
- invalid, NUL byte | \x666f6f84309c0038 | \x666f6f | \x84309c0038 | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x00
-(8 rows)
-
--- Test conversions from GB18030
-select description, inbytes, (test_conv(inbytes, 'gb18030', 'utf8')).* from gb18030_inputs;
- description | inbytes | result | errorat | error
-------------------------------------------------+--------------------+----------------+--------------+-------------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6fcff3 | \x666f6fe8b1a1 | |
- valid, no translation to UTF-8 | \x666f6f8431a530 | \x666f6f | \x8431a530 | character with byte sequence 0x84 0x31 0xa5 0x30 in encoding "GB18030" has no equivalent in encoding "UTF8"
- valid, translates to UTF-8 by mapping function | \x666f6f84309c38 | \x666f6fefa8aa | |
- incomplete char | \x666f6f84309c | \x666f6f | \x84309c | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c
- incomplete char, followed by newline | \x666f6f84309c0a | \x666f6f | \x84309c0a | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x0a
- invalid, NUL byte | \x666f6f84309c3800 | \x666f6fefa8aa | \x00 | invalid byte sequence for encoding "GB18030": 0x00
- invalid, NUL byte | \x666f6f84309c0038 | \x666f6f | \x84309c0038 | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x00
-(8 rows)
-
---
--- ISO-8859-5
---
-CREATE TABLE iso8859_5_inputs (inbytes bytea, description text);
-insert into iso8859_5_inputs values
- ('\x666f6f', 'valid, pure ASCII'),
- ('\xe4dede', 'valid'),
- ('\x00', 'invalid, NUL byte'),
- ('\xe400dede', 'invalid, NUL byte'),
- ('\xe4dede00', 'invalid, NUL byte');
--- Test ISO-8859-5 verification
-select description, inbytes, (test_conv(inbytes, 'iso8859-5', 'iso8859-5')).* from iso8859_5_inputs;
- description | inbytes | result | errorat | error
--------------------+------------+----------+----------+-------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \xe4dede | \xe4dede | |
- invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe400dede | \xe4 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe4dede00 | \xe4dede | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
-(5 rows)
-
--- Test conversions from ISO-8859-5
-select description, inbytes, (test_conv(inbytes, 'iso8859-5', 'utf8')).* from iso8859_5_inputs;
- description | inbytes | result | errorat | error
--------------------+------------+----------------+----------+-------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \xe4dede | \xd184d0bed0be | |
- invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe400dede | \xd184 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe4dede00 | \xd184d0bed0be | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
-(5 rows)
-
-select description, inbytes, (test_conv(inbytes, 'iso8859-5', 'koi8r')).* from iso8859_5_inputs;
- description | inbytes | result | errorat | error
--------------------+------------+----------+----------+-------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \xe4dede | \xc6cfcf | |
- invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe400dede | \xc6 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe4dede00 | \xc6cfcf | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
-(5 rows)
-
-select description, inbytes, (test_conv(inbytes, 'iso8859_5', 'mule_internal')).* from iso8859_5_inputs;
- description | inbytes | result | errorat | error
--------------------+------------+----------------+----------+-------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \xe4dede | \x8bc68bcf8bcf | |
- invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe400dede | \x8bc6 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00
- invalid, NUL byte | \xe4dede00 | \x8bc68bcf8bcf | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00
-(5 rows)
-
---
--- Big5
---
-CREATE TABLE big5_inputs (inbytes bytea, description text);
-insert into big5_inputs values
- ('\x666f6f', 'valid, pure ASCII'),
- ('\x666f6fb648', 'valid'),
- ('\x666f6fa27f', 'valid, no translation to UTF-8'),
- ('\x666f6fb60048', 'invalid, NUL byte'),
- ('\x666f6fb64800', 'invalid, NUL byte');
--- Test Big5 verification
-select description, inbytes, (test_conv(inbytes, 'big5', 'big5')).* from big5_inputs;
- description | inbytes | result | errorat | error
---------------------------------+----------------+--------------+----------+------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6fb648 | \x666f6fb648 | |
- valid, no translation to UTF-8 | \x666f6fa27f | \x666f6fa27f | |
- invalid, NUL byte | \x666f6fb60048 | \x666f6f | \xb60048 | invalid byte sequence for encoding "BIG5": 0xb6 0x00
- invalid, NUL byte | \x666f6fb64800 | \x666f6fb648 | \x00 | invalid byte sequence for encoding "BIG5": 0x00
-(5 rows)
-
--- Test conversions from Big5
-select description, inbytes, (test_conv(inbytes, 'big5', 'utf8')).* from big5_inputs;
- description | inbytes | result | errorat | error
---------------------------------+----------------+----------------+----------+------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6fb648 | \x666f6fe8b1a1 | |
- valid, no translation to UTF-8 | \x666f6fa27f | \x666f6f | \xa27f | character with byte sequence 0xa2 0x7f in encoding "BIG5" has no equivalent in encoding "UTF8"
- invalid, NUL byte | \x666f6fb60048 | \x666f6f | \xb60048 | invalid byte sequence for encoding "BIG5": 0xb6 0x00
- invalid, NUL byte | \x666f6fb64800 | \x666f6fe8b1a1 | \x00 | invalid byte sequence for encoding "BIG5": 0x00
-(5 rows)
-
-select description, inbytes, (test_conv(inbytes, 'big5', 'mule_internal')).* from big5_inputs;
- description | inbytes | result | errorat | error
---------------------------------+----------------+----------------+----------+------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid | \x666f6fb648 | \x666f6f95e2af | |
- valid, no translation to UTF-8 | \x666f6fa27f | \x666f6f95a3c1 | |
- invalid, NUL byte | \x666f6fb60048 | \x666f6f | \xb60048 | invalid byte sequence for encoding "BIG5": 0xb6 0x00
- invalid, NUL byte | \x666f6fb64800 | \x666f6f95e2af | \x00 | invalid byte sequence for encoding "BIG5": 0x00
-(5 rows)
-
---
--- MULE_INTERNAL
---
-CREATE TABLE mic_inputs (inbytes bytea, description text);
-insert into mic_inputs values
- ('\x666f6f', 'valid, pure ASCII'),
- ('\x8bc68bcf8bcf', 'valid (in KOI8R)'),
- ('\x8bc68bcf8b', 'invalid,incomplete char'),
- ('\x92bedd', 'valid (in SHIFT_JIS)'),
- ('\x92be', 'invalid, incomplete char)'),
- ('\x666f6f95a3c1', 'valid (in Big5)'),
- ('\x666f6f95a3', 'invalid, incomplete char'),
- ('\x9200bedd', 'invalid, NUL byte'),
- ('\x92bedd00', 'invalid, NUL byte'),
- ('\x8b00c68bcf8bcf', 'invalid, NUL byte');
--- Test MULE_INTERNAL verification
-select description, inbytes, (test_conv(inbytes, 'mule_internal', 'mule_internal')).* from mic_inputs;
- description | inbytes | result | errorat | error
----------------------------+------------------+----------------+------------------+--------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid (in KOI8R) | \x8bc68bcf8bcf | \x8bc68bcf8bcf | |
- invalid,incomplete char | \x8bc68bcf8b | \x8bc68bcf | \x8b | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b
- valid (in SHIFT_JIS) | \x92bedd | \x92bedd | |
- invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe
- valid (in Big5) | \x666f6f95a3c1 | \x666f6f95a3c1 | |
- invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3
- invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe
- invalid, NUL byte | \x92bedd00 | \x92bedd | \x00 | invalid byte sequence for encoding "MULE_INTERNAL": 0x00
- invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00
-(10 rows)
-
--- Test conversions from MULE_INTERNAL
-select description, inbytes, (test_conv(inbytes, 'mule_internal', 'koi8r')).* from mic_inputs;
- description | inbytes | result | errorat | error
----------------------------+------------------+----------+------------------+---------------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid (in KOI8R) | \x8bc68bcf8bcf | \xc6cfcf | |
- invalid,incomplete char | \x8bc68bcf8b | \xc6cf | \x8b | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b
- valid (in SHIFT_JIS) | \x92bedd | \x | \x92bedd | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R"
- invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe
- valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R"
- invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3
- invalid, NUL byte | \x9200bedd | \x | \x9200bedd | character with byte sequence 0x92 0x00 0xbe in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R"
- invalid, NUL byte | \x92bedd00 | \x | \x92bedd00 | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R"
- invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | character with byte sequence 0x8b 0x00 in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R"
-(10 rows)
-
-select description, inbytes, (test_conv(inbytes, 'mule_internal', 'iso8859-5')).* from mic_inputs;
- description | inbytes | result | errorat | error
----------------------------+------------------+----------+------------------+--------------------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid (in KOI8R) | \x8bc68bcf8bcf | \xe4dede | |
- invalid,incomplete char | \x8bc68bcf8b | \xe4de | \x8b | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b
- valid (in SHIFT_JIS) | \x92bedd | \x | \x92bedd | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5"
- invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe
- valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5"
- invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3
- invalid, NUL byte | \x9200bedd | \x | \x9200bedd | character with byte sequence 0x92 0x00 0xbe in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5"
- invalid, NUL byte | \x92bedd00 | \x | \x92bedd00 | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5"
- invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | character with byte sequence 0x8b 0x00 in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5"
-(10 rows)
-
-select description, inbytes, (test_conv(inbytes, 'mule_internal', 'sjis')).* from mic_inputs;
- description | inbytes | result | errorat | error
----------------------------+------------------+----------+------------------+--------------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid (in KOI8R) | \x8bc68bcf8bcf | \x | \x8bc68bcf8bcf | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "SJIS"
- invalid,incomplete char | \x8bc68bcf8b | \x | \x8bc68bcf8b | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "SJIS"
- valid (in SHIFT_JIS) | \x92bedd | \x8fdb | |
- invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe
- valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "SJIS"
- invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3
- invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe
- invalid, NUL byte | \x92bedd00 | \x8fdb | \x00 | invalid byte sequence for encoding "MULE_INTERNAL": 0x00
- invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00
-(10 rows)
-
-select description, inbytes, (test_conv(inbytes, 'mule_internal', 'big5')).* from mic_inputs;
- description | inbytes | result | errorat | error
----------------------------+------------------+--------------+------------------+--------------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid (in KOI8R) | \x8bc68bcf8bcf | \x | \x8bc68bcf8bcf | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5"
- invalid,incomplete char | \x8bc68bcf8b | \x | \x8bc68bcf8b | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5"
- valid (in SHIFT_JIS) | \x92bedd | \x | \x92bedd | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5"
- invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe
- valid (in Big5) | \x666f6f95a3c1 | \x666f6fa2a1 | |
- invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3
- invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe
- invalid, NUL byte | \x92bedd00 | \x | \x92bedd00 | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5"
- invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00
-(10 rows)
-
-select description, inbytes, (test_conv(inbytes, 'mule_internal', 'euc_jp')).* from mic_inputs;
- description | inbytes | result | errorat | error
----------------------------+------------------+----------+------------------+----------------------------------------------------------------------------------------------------------------
- valid, pure ASCII | \x666f6f | \x666f6f | |
- valid (in KOI8R) | \x8bc68bcf8bcf | \x | \x8bc68bcf8bcf | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "EUC_JP"
- invalid,incomplete char | \x8bc68bcf8b | \x | \x8bc68bcf8b | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "EUC_JP"
- valid (in SHIFT_JIS) | \x92bedd | \xbedd | |
- invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe
- valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "EUC_JP"
- invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3
- invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe
- invalid, NUL byte | \x92bedd00 | \xbedd | \x00 | invalid byte sequence for encoding "MULE_INTERNAL": 0x00
- invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00
-(10 rows)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/truncate.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/truncate.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/truncate.out 2024-11-15 02:50:52.514009551 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/truncate.out 2024-11-15 02:59:18.173116956 +0000
@@ -1,594 +1,2 @@
--- Test basic TRUNCATE functionality.
-CREATE TABLE truncate_a (col1 integer primary key);
-INSERT INTO truncate_a VALUES (1);
-INSERT INTO truncate_a VALUES (2);
-SELECT * FROM truncate_a;
- col1
-------
- 1
- 2
-(2 rows)
-
--- Roll truncate back
-BEGIN;
-TRUNCATE truncate_a;
-ROLLBACK;
-SELECT * FROM truncate_a;
- col1
-------
- 1
- 2
-(2 rows)
-
--- Commit the truncate this time
-BEGIN;
-TRUNCATE truncate_a;
-COMMIT;
-SELECT * FROM truncate_a;
- col1
-------
-(0 rows)
-
--- Test foreign-key checks
-CREATE TABLE trunc_b (a int REFERENCES truncate_a);
-CREATE TABLE trunc_c (a serial PRIMARY KEY);
-CREATE TABLE trunc_d (a int REFERENCES trunc_c);
-CREATE TABLE trunc_e (a int REFERENCES truncate_a, b int REFERENCES trunc_c);
-TRUNCATE TABLE truncate_a; -- fail
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_b" references "truncate_a".
-HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE truncate_a,trunc_b; -- fail
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_e" references "truncate_a".
-HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE truncate_a,trunc_b,trunc_e; -- ok
-TRUNCATE TABLE truncate_a,trunc_e; -- fail
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_b" references "truncate_a".
-HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE trunc_c; -- fail
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_d" references "trunc_c".
-HINT: Truncate table "trunc_d" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE trunc_c,trunc_d; -- fail
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_e" references "trunc_c".
-HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE trunc_c,trunc_d,trunc_e; -- ok
-TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a; -- fail
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_b" references "truncate_a".
-HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a,trunc_b; -- ok
-TRUNCATE TABLE truncate_a RESTRICT; -- fail
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_b" references "truncate_a".
-HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE truncate_a CASCADE; -- ok
-NOTICE: truncate cascades to table "trunc_b"
-NOTICE: truncate cascades to table "trunc_e"
--- circular references
-ALTER TABLE truncate_a ADD FOREIGN KEY (col1) REFERENCES trunc_c;
--- Add some data to verify that truncating actually works ...
-INSERT INTO trunc_c VALUES (1);
-INSERT INTO truncate_a VALUES (1);
-INSERT INTO trunc_b VALUES (1);
-INSERT INTO trunc_d VALUES (1);
-INSERT INTO trunc_e VALUES (1,1);
-TRUNCATE TABLE trunc_c;
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "truncate_a" references "trunc_c".
-HINT: Truncate table "truncate_a" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE trunc_c,truncate_a;
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_d" references "trunc_c".
-HINT: Truncate table "trunc_d" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE trunc_c,truncate_a,trunc_d;
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_e" references "trunc_c".
-HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e;
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "trunc_b" references "truncate_a".
-HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
-TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e,trunc_b;
--- Verify that truncating did actually work
-SELECT * FROM truncate_a
- UNION ALL
- SELECT * FROM trunc_c
- UNION ALL
- SELECT * FROM trunc_b
- UNION ALL
- SELECT * FROM trunc_d;
- col1
-------
-(0 rows)
-
-SELECT * FROM trunc_e;
- a | b
----+---
-(0 rows)
-
--- Add data again to test TRUNCATE ... CASCADE
-INSERT INTO trunc_c VALUES (1);
-INSERT INTO truncate_a VALUES (1);
-INSERT INTO trunc_b VALUES (1);
-INSERT INTO trunc_d VALUES (1);
-INSERT INTO trunc_e VALUES (1,1);
-TRUNCATE TABLE trunc_c CASCADE; -- ok
-NOTICE: truncate cascades to table "truncate_a"
-NOTICE: truncate cascades to table "trunc_d"
-NOTICE: truncate cascades to table "trunc_e"
-NOTICE: truncate cascades to table "trunc_b"
-SELECT * FROM truncate_a
- UNION ALL
- SELECT * FROM trunc_c
- UNION ALL
- SELECT * FROM trunc_b
- UNION ALL
- SELECT * FROM trunc_d;
- col1
-------
-(0 rows)
-
-SELECT * FROM trunc_e;
- a | b
----+---
-(0 rows)
-
-DROP TABLE truncate_a,trunc_c,trunc_b,trunc_d,trunc_e CASCADE;
--- Test TRUNCATE with inheritance
-CREATE TABLE trunc_f (col1 integer primary key);
-INSERT INTO trunc_f VALUES (1);
-INSERT INTO trunc_f VALUES (2);
-CREATE TABLE trunc_fa (col2a text) INHERITS (trunc_f);
-INSERT INTO trunc_fa VALUES (3, 'three');
-CREATE TABLE trunc_fb (col2b int) INHERITS (trunc_f);
-INSERT INTO trunc_fb VALUES (4, 444);
-CREATE TABLE trunc_faa (col3 text) INHERITS (trunc_fa);
-INSERT INTO trunc_faa VALUES (5, 'five', 'FIVE');
-BEGIN;
-SELECT * FROM trunc_f;
- col1
-------
- 1
- 2
- 3
- 4
- 5
-(5 rows)
-
-TRUNCATE trunc_f;
-SELECT * FROM trunc_f;
- col1
-------
-(0 rows)
-
-ROLLBACK;
-BEGIN;
-SELECT * FROM trunc_f;
- col1
-------
- 1
- 2
- 3
- 4
- 5
-(5 rows)
-
-TRUNCATE ONLY trunc_f;
-SELECT * FROM trunc_f;
- col1
-------
- 3
- 4
- 5
-(3 rows)
-
-ROLLBACK;
-BEGIN;
-SELECT * FROM trunc_f;
- col1
-------
- 1
- 2
- 3
- 4
- 5
-(5 rows)
-
-SELECT * FROM trunc_fa;
- col1 | col2a
-------+-------
- 3 | three
- 5 | five
-(2 rows)
-
-SELECT * FROM trunc_faa;
- col1 | col2a | col3
-------+-------+------
- 5 | five | FIVE
-(1 row)
-
-TRUNCATE ONLY trunc_fb, ONLY trunc_fa;
-SELECT * FROM trunc_f;
- col1
-------
- 1
- 2
- 5
-(3 rows)
-
-SELECT * FROM trunc_fa;
- col1 | col2a
-------+-------
- 5 | five
-(1 row)
-
-SELECT * FROM trunc_faa;
- col1 | col2a | col3
-------+-------+------
- 5 | five | FIVE
-(1 row)
-
-ROLLBACK;
-BEGIN;
-SELECT * FROM trunc_f;
- col1
-------
- 1
- 2
- 3
- 4
- 5
-(5 rows)
-
-SELECT * FROM trunc_fa;
- col1 | col2a
-------+-------
- 3 | three
- 5 | five
-(2 rows)
-
-SELECT * FROM trunc_faa;
- col1 | col2a | col3
-------+-------+------
- 5 | five | FIVE
-(1 row)
-
-TRUNCATE ONLY trunc_fb, trunc_fa;
-SELECT * FROM trunc_f;
- col1
-------
- 1
- 2
-(2 rows)
-
-SELECT * FROM trunc_fa;
- col1 | col2a
-------+-------
-(0 rows)
-
-SELECT * FROM trunc_faa;
- col1 | col2a | col3
-------+-------+------
-(0 rows)
-
-ROLLBACK;
-DROP TABLE trunc_f CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table trunc_fa
-drop cascades to table trunc_faa
-drop cascades to table trunc_fb
--- Test ON TRUNCATE triggers
-CREATE TABLE trunc_trigger_test (f1 int, f2 text, f3 text);
-CREATE TABLE trunc_trigger_log (tgop text, tglevel text, tgwhen text,
- tgargv text, tgtable name, rowcount bigint);
-CREATE FUNCTION trunctrigger() RETURNS trigger as $$
-declare c bigint;
-begin
- execute 'select count(*) from ' || quote_ident(tg_table_name) into c;
- insert into trunc_trigger_log values
- (TG_OP, TG_LEVEL, TG_WHEN, TG_ARGV[0], tg_table_name, c);
- return null;
-end;
-$$ LANGUAGE plpgsql;
--- basic before trigger
-INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux');
-CREATE TRIGGER t
-BEFORE TRUNCATE ON trunc_trigger_test
-FOR EACH STATEMENT
-EXECUTE PROCEDURE trunctrigger('before trigger truncate');
-SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
- Row count in test table
--------------------------
- 2
-(1 row)
-
-SELECT * FROM trunc_trigger_log;
- tgop | tglevel | tgwhen | tgargv | tgtable | rowcount
-------+---------+--------+--------+---------+----------
-(0 rows)
-
-TRUNCATE trunc_trigger_test;
-SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
- Row count in test table
--------------------------
- 0
-(1 row)
-
-SELECT * FROM trunc_trigger_log;
- tgop | tglevel | tgwhen | tgargv | tgtable | rowcount
-----------+-----------+--------+-------------------------+--------------------+----------
- TRUNCATE | STATEMENT | BEFORE | before trigger truncate | trunc_trigger_test | 2
-(1 row)
-
-DROP TRIGGER t ON trunc_trigger_test;
-truncate trunc_trigger_log;
--- same test with an after trigger
-INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux');
-CREATE TRIGGER tt
-AFTER TRUNCATE ON trunc_trigger_test
-FOR EACH STATEMENT
-EXECUTE PROCEDURE trunctrigger('after trigger truncate');
-SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
- Row count in test table
--------------------------
- 2
-(1 row)
-
-SELECT * FROM trunc_trigger_log;
- tgop | tglevel | tgwhen | tgargv | tgtable | rowcount
-------+---------+--------+--------+---------+----------
-(0 rows)
-
-TRUNCATE trunc_trigger_test;
-SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
- Row count in test table
--------------------------
- 0
-(1 row)
-
-SELECT * FROM trunc_trigger_log;
- tgop | tglevel | tgwhen | tgargv | tgtable | rowcount
-----------+-----------+--------+------------------------+--------------------+----------
- TRUNCATE | STATEMENT | AFTER | after trigger truncate | trunc_trigger_test | 0
-(1 row)
-
-DROP TABLE trunc_trigger_test;
-DROP TABLE trunc_trigger_log;
-DROP FUNCTION trunctrigger();
--- test TRUNCATE ... RESTART IDENTITY
-CREATE SEQUENCE truncate_a_id1 START WITH 33;
-CREATE TABLE truncate_a (id serial,
- id1 integer default nextval('truncate_a_id1'));
-ALTER SEQUENCE truncate_a_id1 OWNED BY truncate_a.id1;
-INSERT INTO truncate_a DEFAULT VALUES;
-INSERT INTO truncate_a DEFAULT VALUES;
-SELECT * FROM truncate_a;
- id | id1
-----+-----
- 1 | 33
- 2 | 34
-(2 rows)
-
-TRUNCATE truncate_a;
-INSERT INTO truncate_a DEFAULT VALUES;
-INSERT INTO truncate_a DEFAULT VALUES;
-SELECT * FROM truncate_a;
- id | id1
-----+-----
- 3 | 35
- 4 | 36
-(2 rows)
-
-TRUNCATE truncate_a RESTART IDENTITY;
-INSERT INTO truncate_a DEFAULT VALUES;
-INSERT INTO truncate_a DEFAULT VALUES;
-SELECT * FROM truncate_a;
- id | id1
-----+-----
- 1 | 33
- 2 | 34
-(2 rows)
-
-CREATE TABLE truncate_b (id int GENERATED ALWAYS AS IDENTITY (START WITH 44));
-INSERT INTO truncate_b DEFAULT VALUES;
-INSERT INTO truncate_b DEFAULT VALUES;
-SELECT * FROM truncate_b;
- id
-----
- 44
- 45
-(2 rows)
-
-TRUNCATE truncate_b;
-INSERT INTO truncate_b DEFAULT VALUES;
-INSERT INTO truncate_b DEFAULT VALUES;
-SELECT * FROM truncate_b;
- id
-----
- 46
- 47
-(2 rows)
-
-TRUNCATE truncate_b RESTART IDENTITY;
-INSERT INTO truncate_b DEFAULT VALUES;
-INSERT INTO truncate_b DEFAULT VALUES;
-SELECT * FROM truncate_b;
- id
-----
- 44
- 45
-(2 rows)
-
--- check rollback of a RESTART IDENTITY operation
-BEGIN;
-TRUNCATE truncate_a RESTART IDENTITY;
-INSERT INTO truncate_a DEFAULT VALUES;
-SELECT * FROM truncate_a;
- id | id1
-----+-----
- 1 | 33
-(1 row)
-
-ROLLBACK;
-INSERT INTO truncate_a DEFAULT VALUES;
-INSERT INTO truncate_a DEFAULT VALUES;
-SELECT * FROM truncate_a;
- id | id1
-----+-----
- 1 | 33
- 2 | 34
- 3 | 35
- 4 | 36
-(4 rows)
-
-DROP TABLE truncate_a;
-SELECT nextval('truncate_a_id1'); -- fail, seq should have been dropped
-ERROR: relation "truncate_a_id1" does not exist
-LINE 1: SELECT nextval('truncate_a_id1');
- ^
--- partitioned table
-CREATE TABLE truncparted (a int, b char) PARTITION BY LIST (a);
--- error, can't truncate a partitioned table
-TRUNCATE ONLY truncparted;
-ERROR: cannot truncate only a partitioned table
-HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly.
-CREATE TABLE truncparted1 PARTITION OF truncparted FOR VALUES IN (1);
-INSERT INTO truncparted VALUES (1, 'a');
--- error, must truncate partitions
-TRUNCATE ONLY truncparted;
-ERROR: cannot truncate only a partitioned table
-HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly.
-TRUNCATE truncparted;
-DROP TABLE truncparted;
--- foreign key on partitioned table: partition key is referencing column.
--- Make sure truncate did execute on all tables
-CREATE FUNCTION tp_ins_data() RETURNS void LANGUAGE plpgsql AS $$
- BEGIN
- INSERT INTO truncprim VALUES (1), (100), (150);
- INSERT INTO truncpart VALUES (1), (100), (150);
- END
-$$;
-CREATE FUNCTION tp_chk_data(OUT pktb regclass, OUT pkval int, OUT fktb regclass, OUT fkval int)
- RETURNS SETOF record LANGUAGE plpgsql AS $$
- BEGIN
- RETURN QUERY SELECT
- pk.tableoid::regclass, pk.a, fk.tableoid::regclass, fk.a
- FROM truncprim pk FULL JOIN truncpart fk USING (a)
- ORDER BY 2, 4;
- END
-$$;
-CREATE TABLE truncprim (a int PRIMARY KEY);
-CREATE TABLE truncpart (a int REFERENCES truncprim)
- PARTITION BY RANGE (a);
-CREATE TABLE truncpart_1 PARTITION OF truncpart FOR VALUES FROM (0) TO (100);
-CREATE TABLE truncpart_2 PARTITION OF truncpart FOR VALUES FROM (100) TO (200)
- PARTITION BY RANGE (a);
-CREATE TABLE truncpart_2_1 PARTITION OF truncpart_2 FOR VALUES FROM (100) TO (150);
-CREATE TABLE truncpart_2_d PARTITION OF truncpart_2 DEFAULT;
-TRUNCATE TABLE truncprim; -- should fail
-ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "truncpart" references "truncprim".
-HINT: Truncate table "truncpart" at the same time, or use TRUNCATE ... CASCADE.
-select tp_ins_data();
- tp_ins_data
--------------
-
-(1 row)
-
--- should truncate everything
-TRUNCATE TABLE truncprim, truncpart;
-select * from tp_chk_data();
- pktb | pkval | fktb | fkval
-------+-------+------+-------
-(0 rows)
-
-select tp_ins_data();
- tp_ins_data
--------------
-
-(1 row)
-
--- should truncate everything
-TRUNCATE TABLE truncprim CASCADE;
-NOTICE: truncate cascades to table "truncpart"
-NOTICE: truncate cascades to table "truncpart_1"
-NOTICE: truncate cascades to table "truncpart_2"
-NOTICE: truncate cascades to table "truncpart_2_1"
-NOTICE: truncate cascades to table "truncpart_2_d"
-SELECT * FROM tp_chk_data();
- pktb | pkval | fktb | fkval
-------+-------+------+-------
-(0 rows)
-
-SELECT tp_ins_data();
- tp_ins_data
--------------
-
-(1 row)
-
--- should truncate all partitions
-TRUNCATE TABLE truncpart;
-SELECT * FROM tp_chk_data();
- pktb | pkval | fktb | fkval
------------+-------+------+-------
- truncprim | 1 | |
- truncprim | 100 | |
- truncprim | 150 | |
-(3 rows)
-
-DROP TABLE truncprim, truncpart;
-DROP FUNCTION tp_ins_data(), tp_chk_data();
--- test cascade when referencing a partitioned table
-CREATE TABLE trunc_a (a INT PRIMARY KEY) PARTITION BY RANGE (a);
-CREATE TABLE trunc_a1 PARTITION OF trunc_a FOR VALUES FROM (0) TO (10);
-CREATE TABLE trunc_a2 PARTITION OF trunc_a FOR VALUES FROM (10) TO (20)
- PARTITION BY RANGE (a);
-CREATE TABLE trunc_a21 PARTITION OF trunc_a2 FOR VALUES FROM (10) TO (12);
-CREATE TABLE trunc_a22 PARTITION OF trunc_a2 FOR VALUES FROM (12) TO (16);
-CREATE TABLE trunc_a2d PARTITION OF trunc_a2 DEFAULT;
-CREATE TABLE trunc_a3 PARTITION OF trunc_a FOR VALUES FROM (20) TO (30);
-INSERT INTO trunc_a VALUES (0), (5), (10), (15), (20), (25);
--- truncate a partition cascading to a table
-CREATE TABLE ref_b (
- b INT PRIMARY KEY,
- a INT REFERENCES trunc_a(a) ON DELETE CASCADE
-);
-INSERT INTO ref_b VALUES (10, 0), (50, 5), (100, 10), (150, 15);
-TRUNCATE TABLE trunc_a1 CASCADE;
-NOTICE: truncate cascades to table "ref_b"
-SELECT a FROM ref_b;
- a
----
-(0 rows)
-
-DROP TABLE ref_b;
--- truncate a partition cascading to a partitioned table
-CREATE TABLE ref_c (
- c INT PRIMARY KEY,
- a INT REFERENCES trunc_a(a) ON DELETE CASCADE
-) PARTITION BY RANGE (c);
-CREATE TABLE ref_c1 PARTITION OF ref_c FOR VALUES FROM (100) TO (200);
-CREATE TABLE ref_c2 PARTITION OF ref_c FOR VALUES FROM (200) TO (300);
-INSERT INTO ref_c VALUES (100, 10), (150, 15), (200, 20), (250, 25);
-TRUNCATE TABLE trunc_a21 CASCADE;
-NOTICE: truncate cascades to table "ref_c"
-NOTICE: truncate cascades to table "ref_c1"
-NOTICE: truncate cascades to table "ref_c2"
-SELECT a as "from table ref_c" FROM ref_c;
- from table ref_c
-------------------
-(0 rows)
-
-SELECT a as "from table trunc_a" FROM trunc_a ORDER BY a;
- from table trunc_a
---------------------
- 15
- 20
- 25
-(3 rows)
-
-DROP TABLE trunc_a, ref_c;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/alter_table.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/alter_table.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/alter_table.out 2024-11-15 02:50:52.418167543 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/alter_table.out 2024-11-15 02:59:18.177116960 +0000
@@ -1,4733 +1,2 @@
---
--- ALTER_TABLE
---
--- Clean up in case a prior regression run failed
-SET client_min_messages TO 'warning';
-DROP ROLE IF EXISTS regress_alter_table_user1;
-RESET client_min_messages;
-CREATE USER regress_alter_table_user1;
---
--- add attribute
---
-CREATE TABLE attmp (initial int4);
-COMMENT ON TABLE attmp_wrong IS 'table comment';
-ERROR: relation "attmp_wrong" does not exist
-COMMENT ON TABLE attmp IS 'table comment';
-COMMENT ON TABLE attmp IS NULL;
-ALTER TABLE attmp ADD COLUMN xmin integer; -- fails
-ERROR: column name "xmin" conflicts with a system column name
-ALTER TABLE attmp ADD COLUMN a int4 default 3;
-ALTER TABLE attmp ADD COLUMN b name;
-ALTER TABLE attmp ADD COLUMN c text;
-ALTER TABLE attmp ADD COLUMN d float8;
-ALTER TABLE attmp ADD COLUMN e float4;
-ALTER TABLE attmp ADD COLUMN f int2;
-ALTER TABLE attmp ADD COLUMN g polygon;
-ALTER TABLE attmp ADD COLUMN i char;
-ALTER TABLE attmp ADD COLUMN k int4;
-ALTER TABLE attmp ADD COLUMN l tid;
-ALTER TABLE attmp ADD COLUMN m xid;
-ALTER TABLE attmp ADD COLUMN n oidvector;
---ALTER TABLE attmp ADD COLUMN o lock;
-ALTER TABLE attmp ADD COLUMN p boolean;
-ALTER TABLE attmp ADD COLUMN q point;
-ALTER TABLE attmp ADD COLUMN r lseg;
-ALTER TABLE attmp ADD COLUMN s path;
-ALTER TABLE attmp ADD COLUMN t box;
-ALTER TABLE attmp ADD COLUMN v timestamp;
-ALTER TABLE attmp ADD COLUMN w interval;
-ALTER TABLE attmp ADD COLUMN x float8[];
-ALTER TABLE attmp ADD COLUMN y float4[];
-ALTER TABLE attmp ADD COLUMN z int2[];
-INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t,
- v, w, x, y, z)
- VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
- 'c',
- 314159, '(1,1)', '512',
- '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
- '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)',
- 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}');
-SELECT * FROM attmp;
- initial | a | b | c | d | e | f | g | i | k | l | m | n | p | q | r | s | t | v | w | x | y | z
----------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+-----------
- | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | t | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4}
-(1 row)
-
-DROP TABLE attmp;
--- the wolf bug - schema mods caused inconsistent row descriptors
-CREATE TABLE attmp (
- initial int4
-);
-ALTER TABLE attmp ADD COLUMN a int4;
-ALTER TABLE attmp ADD COLUMN b name;
-ALTER TABLE attmp ADD COLUMN c text;
-ALTER TABLE attmp ADD COLUMN d float8;
-ALTER TABLE attmp ADD COLUMN e float4;
-ALTER TABLE attmp ADD COLUMN f int2;
-ALTER TABLE attmp ADD COLUMN g polygon;
-ALTER TABLE attmp ADD COLUMN i char;
-ALTER TABLE attmp ADD COLUMN k int4;
-ALTER TABLE attmp ADD COLUMN l tid;
-ALTER TABLE attmp ADD COLUMN m xid;
-ALTER TABLE attmp ADD COLUMN n oidvector;
---ALTER TABLE attmp ADD COLUMN o lock;
-ALTER TABLE attmp ADD COLUMN p boolean;
-ALTER TABLE attmp ADD COLUMN q point;
-ALTER TABLE attmp ADD COLUMN r lseg;
-ALTER TABLE attmp ADD COLUMN s path;
-ALTER TABLE attmp ADD COLUMN t box;
-ALTER TABLE attmp ADD COLUMN v timestamp;
-ALTER TABLE attmp ADD COLUMN w interval;
-ALTER TABLE attmp ADD COLUMN x float8[];
-ALTER TABLE attmp ADD COLUMN y float4[];
-ALTER TABLE attmp ADD COLUMN z int2[];
-INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t,
- v, w, x, y, z)
- VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
- 'c',
- 314159, '(1,1)', '512',
- '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
- '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)',
- 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}');
-SELECT * FROM attmp;
- initial | a | b | c | d | e | f | g | i | k | l | m | n | p | q | r | s | t | v | w | x | y | z
----------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+-----------
- | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | t | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4}
-(1 row)
-
-CREATE INDEX attmp_idx ON attmp (a, (d + e), b);
-ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000;
-ERROR: column number must be in range from 1 to 32767
-LINE 1: ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000;
- ^
-ALTER INDEX attmp_idx ALTER COLUMN 1 SET STATISTICS 1000;
-ERROR: cannot alter statistics on non-expression column "a" of index "attmp_idx"
-HINT: Alter statistics on table column instead.
-ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS 1000;
-\d+ attmp_idx
- Index "public.attmp_idx"
- Column | Type | Key? | Definition | Storage | Stats target
---------+------------------+------+------------+---------+--------------
- a | integer | yes | a | plain |
- expr | double precision | yes | (d + e) | plain | 1000
- b | cstring | yes | b | plain |
-btree, for table "public.attmp"
-
-ALTER INDEX attmp_idx ALTER COLUMN 3 SET STATISTICS 1000;
-ERROR: cannot alter statistics on non-expression column "b" of index "attmp_idx"
-HINT: Alter statistics on table column instead.
-ALTER INDEX attmp_idx ALTER COLUMN 4 SET STATISTICS 1000;
-ERROR: column number 4 of relation "attmp_idx" does not exist
-ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS -1;
-DROP TABLE attmp;
---
--- rename - check on both non-temp and temp tables
---
-CREATE TABLE attmp (regtable int);
-CREATE TEMP TABLE attmp (attmptable int);
-ALTER TABLE attmp RENAME TO attmp_new;
-SELECT * FROM attmp;
- regtable
-----------
-(0 rows)
-
-SELECT * FROM attmp_new;
- attmptable
-------------
-(0 rows)
-
-ALTER TABLE attmp RENAME TO attmp_new2;
-SELECT * FROM attmp; -- should fail
-ERROR: relation "attmp" does not exist
-LINE 1: SELECT * FROM attmp;
- ^
-SELECT * FROM attmp_new;
- attmptable
-------------
-(0 rows)
-
-SELECT * FROM attmp_new2;
- regtable
-----------
-(0 rows)
-
-DROP TABLE attmp_new;
-DROP TABLE attmp_new2;
--- check rename of partitioned tables and indexes also
-CREATE TABLE part_attmp (a int primary key) partition by range (a);
-CREATE TABLE part_attmp1 PARTITION OF part_attmp FOR VALUES FROM (0) TO (100);
-ALTER INDEX part_attmp_pkey RENAME TO part_attmp_index;
-ALTER INDEX part_attmp1_pkey RENAME TO part_attmp1_index;
-ALTER TABLE part_attmp RENAME TO part_at2tmp;
-ALTER TABLE part_attmp1 RENAME TO part_at2tmp1;
-SET ROLE regress_alter_table_user1;
-ALTER INDEX part_attmp_index RENAME TO fail;
-ERROR: must be owner of index part_attmp_index
-ALTER INDEX part_attmp1_index RENAME TO fail;
-ERROR: must be owner of index part_attmp1_index
-ALTER TABLE part_at2tmp RENAME TO fail;
-ERROR: must be owner of table part_at2tmp
-ALTER TABLE part_at2tmp1 RENAME TO fail;
-ERROR: must be owner of table part_at2tmp1
-RESET ROLE;
-DROP TABLE part_at2tmp;
---
--- check renaming to a table's array type's autogenerated name
--- (the array type's name should get out of the way)
---
-CREATE TABLE attmp_array (id int);
-CREATE TABLE attmp_array2 (id int);
-SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype;
- typname
---------------
- _attmp_array
-(1 row)
-
-SELECT typname FROM pg_type WHERE oid = 'attmp_array2[]'::regtype;
- typname
----------------
- _attmp_array2
-(1 row)
-
-ALTER TABLE attmp_array2 RENAME TO _attmp_array;
-SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype;
- typname
----------------
- __attmp_array
-(1 row)
-
-SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype;
- typname
------------------
- __attmp_array_1
-(1 row)
-
-DROP TABLE _attmp_array;
-DROP TABLE attmp_array;
--- renaming to table's own array type's name is an interesting corner case
-CREATE TABLE attmp_array (id int);
-SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype;
- typname
---------------
- _attmp_array
-(1 row)
-
-ALTER TABLE attmp_array RENAME TO _attmp_array;
-SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype;
- typname
----------------
- __attmp_array
-(1 row)
-
-DROP TABLE _attmp_array;
--- ALTER TABLE ... RENAME on non-table relations
--- renaming indexes (FIXME: this should probably test the index's functionality)
-ALTER INDEX IF EXISTS __onek_unique1 RENAME TO attmp_onek_unique1;
-NOTICE: relation "__onek_unique1" does not exist, skipping
-ALTER INDEX IF EXISTS __attmp_onek_unique1 RENAME TO onek_unique1;
-NOTICE: relation "__attmp_onek_unique1" does not exist, skipping
-ALTER INDEX onek_unique1 RENAME TO attmp_onek_unique1;
-ALTER INDEX attmp_onek_unique1 RENAME TO onek_unique1;
-SET ROLE regress_alter_table_user1;
-ALTER INDEX onek_unique1 RENAME TO fail; -- permission denied
-ERROR: must be owner of index onek_unique1
-RESET ROLE;
--- rename statements with mismatching statement and object types
-CREATE TABLE alter_idx_rename_test (a INT);
-CREATE INDEX alter_idx_rename_test_idx ON alter_idx_rename_test (a);
-CREATE TABLE alter_idx_rename_test_parted (a INT) PARTITION BY LIST (a);
-CREATE INDEX alter_idx_rename_test_parted_idx ON alter_idx_rename_test_parted (a);
-BEGIN;
-ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2;
-ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2;
-SELECT relation::regclass, mode FROM pg_locks
-WHERE pid = pg_backend_pid() AND locktype = 'relation'
- AND relation::regclass::text LIKE 'alter\_idx%'
-ORDER BY relation::regclass::text COLLATE "C";
- relation | mode
---------------------------------+---------------------
- alter_idx_rename_test_2 | AccessExclusiveLock
- alter_idx_rename_test_parted_2 | AccessExclusiveLock
-(2 rows)
-
-COMMIT;
-BEGIN;
-ALTER INDEX alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2;
-ALTER INDEX alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2;
-SELECT relation::regclass, mode FROM pg_locks
-WHERE pid = pg_backend_pid() AND locktype = 'relation'
- AND relation::regclass::text LIKE 'alter\_idx%'
-ORDER BY relation::regclass::text COLLATE "C";
- relation | mode
-------------------------------------+--------------------------
- alter_idx_rename_test_idx_2 | ShareUpdateExclusiveLock
- alter_idx_rename_test_parted_idx_2 | ShareUpdateExclusiveLock
-(2 rows)
-
-COMMIT;
-BEGIN;
-ALTER TABLE alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3;
-ALTER TABLE alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3;
-SELECT relation::regclass, mode FROM pg_locks
-WHERE pid = pg_backend_pid() AND locktype = 'relation'
- AND relation::regclass::text LIKE 'alter\_idx%'
-ORDER BY relation::regclass::text COLLATE "C";
- relation | mode
-------------------------------------+---------------------
- alter_idx_rename_test_idx_3 | AccessExclusiveLock
- alter_idx_rename_test_parted_idx_3 | AccessExclusiveLock
-(2 rows)
-
-COMMIT;
-DROP TABLE alter_idx_rename_test_2;
--- renaming views
-CREATE VIEW attmp_view (unique1) AS SELECT unique1 FROM tenk1;
-ALTER TABLE attmp_view RENAME TO attmp_view_new;
-SET ROLE regress_alter_table_user1;
-ALTER VIEW attmp_view_new RENAME TO fail; -- permission denied
-ERROR: must be owner of view attmp_view_new
-RESET ROLE;
--- hack to ensure we get an indexscan here
-set enable_seqscan to off;
-set enable_bitmapscan to off;
--- 5 values, sorted
-SELECT unique1 FROM tenk1 WHERE unique1 < 5;
- unique1
----------
- 0
- 1
- 2
- 3
- 4
-(5 rows)
-
-reset enable_seqscan;
-reset enable_bitmapscan;
-DROP VIEW attmp_view_new;
--- toast-like relation name
-alter table stud_emp rename to pg_toast_stud_emp;
-alter table pg_toast_stud_emp rename to stud_emp;
--- renaming index should rename constraint as well
-ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1);
-ALTER INDEX onek_unique1_constraint RENAME TO onek_unique1_constraint_foo;
-ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo;
--- renaming constraint
-ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0);
-ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo;
-ALTER TABLE onek DROP CONSTRAINT onek_check_constraint_foo;
--- renaming constraint should rename index as well
-ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1);
-DROP INDEX onek_unique1_constraint; -- to see whether it's there
-ERROR: cannot drop index onek_unique1_constraint because constraint onek_unique1_constraint on table onek requires it
-HINT: You can drop constraint onek_unique1_constraint on table onek instead.
-ALTER TABLE onek RENAME CONSTRAINT onek_unique1_constraint TO onek_unique1_constraint_foo;
-DROP INDEX onek_unique1_constraint_foo; -- to see whether it's there
-ERROR: cannot drop index onek_unique1_constraint_foo because constraint onek_unique1_constraint_foo on table onek requires it
-HINT: You can drop constraint onek_unique1_constraint_foo on table onek instead.
-ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo;
--- renaming constraints vs. inheritance
-CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int);
-\d constraint_rename_test
- Table "public.constraint_rename_test"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | integer | | |
-Check constraints:
- "con1" CHECK (a > 0)
-
-CREATE TABLE constraint_rename_test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) INHERITS (constraint_rename_test);
-NOTICE: merging column "a" with inherited definition
-NOTICE: merging constraint "con1" with inherited definition
-\d constraint_rename_test2
- Table "public.constraint_rename_test2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | integer | | |
- d | integer | | |
-Check constraints:
- "con1" CHECK (a > 0)
-Inherits: constraint_rename_test
-
-ALTER TABLE constraint_rename_test2 RENAME CONSTRAINT con1 TO con1foo; -- fail
-ERROR: cannot rename inherited constraint "con1"
-ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- fail
-ERROR: inherited constraint "con1" must be renamed in child tables too
-ALTER TABLE constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- ok
-\d constraint_rename_test
- Table "public.constraint_rename_test"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | integer | | |
-Check constraints:
- "con1foo" CHECK (a > 0)
-Number of child tables: 1 (Use \d+ to list them.)
-
-\d constraint_rename_test2
- Table "public.constraint_rename_test2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | integer | | |
- d | integer | | |
-Check constraints:
- "con1foo" CHECK (a > 0)
-Inherits: constraint_rename_test
-
-ALTER TABLE constraint_rename_test ADD CONSTRAINT con2 CHECK (b > 0) NO INHERIT;
-ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con2 TO con2foo; -- ok
-ALTER TABLE constraint_rename_test RENAME CONSTRAINT con2foo TO con2bar; -- ok
-\d constraint_rename_test
- Table "public.constraint_rename_test"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | integer | | |
-Check constraints:
- "con1foo" CHECK (a > 0)
- "con2bar" CHECK (b > 0) NO INHERIT
-Number of child tables: 1 (Use \d+ to list them.)
-
-\d constraint_rename_test2
- Table "public.constraint_rename_test2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
- c | integer | | |
- d | integer | | |
-Check constraints:
- "con1foo" CHECK (a > 0)
-Inherits: constraint_rename_test
-
-ALTER TABLE constraint_rename_test ADD CONSTRAINT con3 PRIMARY KEY (a);
-ALTER TABLE constraint_rename_test RENAME CONSTRAINT con3 TO con3foo; -- ok
-\d constraint_rename_test
- Table "public.constraint_rename_test"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | not null |
- b | integer | | |
- c | integer | | |
-Indexes:
- "con3foo" PRIMARY KEY, btree (a)
-Check constraints:
- "con1foo" CHECK (a > 0)
- "con2bar" CHECK (b > 0) NO INHERIT
-Number of child tables: 1 (Use \d+ to list them.)
-
-\d constraint_rename_test2
- Table "public.constraint_rename_test2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | not null |
- b | integer | | |
- c | integer | | |
- d | integer | | |
-Check constraints:
- "con1foo" CHECK (a > 0)
-Inherits: constraint_rename_test
-
-DROP TABLE constraint_rename_test2;
-DROP TABLE constraint_rename_test;
-ALTER TABLE IF EXISTS constraint_not_exist RENAME CONSTRAINT con3 TO con3foo; -- ok
-NOTICE: relation "constraint_not_exist" does not exist, skipping
-ALTER TABLE IF EXISTS constraint_rename_test ADD CONSTRAINT con4 UNIQUE (a);
-NOTICE: relation "constraint_rename_test" does not exist, skipping
--- renaming constraints with cache reset of target relation
-CREATE TABLE constraint_rename_cache (a int,
- CONSTRAINT chk_a CHECK (a > 0),
- PRIMARY KEY (a));
-ALTER TABLE constraint_rename_cache
- RENAME CONSTRAINT chk_a TO chk_a_new;
-ALTER TABLE constraint_rename_cache
- RENAME CONSTRAINT constraint_rename_cache_pkey TO constraint_rename_pkey_new;
-CREATE TABLE like_constraint_rename_cache
- (LIKE constraint_rename_cache INCLUDING ALL);
-\d like_constraint_rename_cache
- Table "public.like_constraint_rename_cache"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | not null |
-Indexes:
- "like_constraint_rename_cache_pkey" PRIMARY KEY, btree (a)
-Check constraints:
- "chk_a_new" CHECK (a > 0)
-
-DROP TABLE constraint_rename_cache;
-DROP TABLE like_constraint_rename_cache;
--- FOREIGN KEY CONSTRAINT adding TEST
-CREATE TABLE attmp2 (a int primary key);
-CREATE TABLE attmp3 (a int, b int);
-CREATE TABLE attmp4 (a int, b int, unique(a,b));
-CREATE TABLE attmp5 (a int, b int);
--- Insert rows into attmp2 (pktable)
-INSERT INTO attmp2 values (1);
-INSERT INTO attmp2 values (2);
-INSERT INTO attmp2 values (3);
-INSERT INTO attmp2 values (4);
--- Insert rows into attmp3
-INSERT INTO attmp3 values (1,10);
-INSERT INTO attmp3 values (1,20);
-INSERT INTO attmp3 values (5,50);
--- Try (and fail) to add constraint due to invalid source columns
-ALTER TABLE attmp3 add constraint attmpconstr foreign key(c) references attmp2 match full;
-ERROR: column "c" referenced in foreign key constraint does not exist
--- Try (and fail) to add constraint due to invalid destination columns explicitly given
-ALTER TABLE attmp3 add constraint attmpconstr foreign key(a) references attmp2(b) match full;
-ERROR: column "b" referenced in foreign key constraint does not exist
--- Try (and fail) to add constraint due to invalid data
-ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full;
-ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr"
-DETAIL: Key (a)=(5) is not present in table "attmp2".
--- Delete failing row
-DELETE FROM attmp3 where a=5;
--- Try (and succeed)
-ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full;
-ALTER TABLE attmp3 drop constraint attmpconstr;
-INSERT INTO attmp3 values (5,50);
--- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate
-ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full NOT VALID;
-ALTER TABLE attmp3 validate constraint attmpconstr;
-ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr"
-DETAIL: Key (a)=(5) is not present in table "attmp2".
--- Delete failing row
-DELETE FROM attmp3 where a=5;
--- Try (and succeed) and repeat to show it works on already valid constraint
-ALTER TABLE attmp3 validate constraint attmpconstr;
-ALTER TABLE attmp3 validate constraint attmpconstr;
--- Try a non-verified CHECK constraint
-ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail
-ERROR: check constraint "b_greater_than_ten" of relation "attmp3" is violated by some row
-ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds
-ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails
-ERROR: check constraint "b_greater_than_ten" of relation "attmp3" is violated by some row
-DELETE FROM attmp3 WHERE NOT b > 10;
-ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds
-ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds
--- Test inherited NOT VALID CHECK constraints
-select * from attmp3;
- a | b
----+----
- 1 | 20
-(1 row)
-
-CREATE TABLE attmp6 () INHERITS (attmp3);
-CREATE TABLE attmp7 () INHERITS (attmp3);
-INSERT INTO attmp6 VALUES (6, 30), (7, 16);
-ALTER TABLE attmp3 ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID;
-ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- fails
-ERROR: check constraint "b_le_20" of relation "attmp6" is violated by some row
-DELETE FROM attmp6 WHERE b > 20;
-ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- succeeds
--- An already validated constraint must not be revalidated
-CREATE FUNCTION boo(int) RETURNS int IMMUTABLE STRICT LANGUAGE plpgsql AS $$ BEGIN RAISE NOTICE 'boo: %', $1; RETURN $1; END; $$;
-INSERT INTO attmp7 VALUES (8, 18);
-ALTER TABLE attmp7 ADD CONSTRAINT identity CHECK (b = boo(b));
-NOTICE: boo: 18
-ALTER TABLE attmp3 ADD CONSTRAINT IDENTITY check (b = boo(b)) NOT VALID;
-NOTICE: merging constraint "identity" with inherited definition
-ALTER TABLE attmp3 VALIDATE CONSTRAINT identity;
-NOTICE: boo: 20
-NOTICE: boo: 16
--- A NO INHERIT constraint should not be looked for in children during VALIDATE CONSTRAINT
-create table parent_noinh_convalid (a int);
-create table child_noinh_convalid () inherits (parent_noinh_convalid);
-insert into parent_noinh_convalid values (1);
-insert into child_noinh_convalid values (1);
-alter table parent_noinh_convalid add constraint check_a_is_2 check (a = 2) no inherit not valid;
--- fail, because of the row in parent
-alter table parent_noinh_convalid validate constraint check_a_is_2;
-ERROR: check constraint "check_a_is_2" of relation "parent_noinh_convalid" is violated by some row
-delete from only parent_noinh_convalid;
--- ok (parent itself contains no violating rows)
-alter table parent_noinh_convalid validate constraint check_a_is_2;
-select convalidated from pg_constraint where conrelid = 'parent_noinh_convalid'::regclass and conname = 'check_a_is_2';
- convalidated
---------------
- t
-(1 row)
-
--- cleanup
-drop table parent_noinh_convalid, child_noinh_convalid;
--- Try (and fail) to create constraint from attmp5(a) to attmp4(a) - unique constraint on
--- attmp4 is a,b
-ALTER TABLE attmp5 add constraint attmpconstr foreign key(a) references attmp4(a) match full;
-ERROR: there is no unique constraint matching given keys for referenced table "attmp4"
-DROP TABLE attmp7;
-DROP TABLE attmp6;
-DROP TABLE attmp5;
-DROP TABLE attmp4;
-DROP TABLE attmp3;
-DROP TABLE attmp2;
--- NOT VALID with plan invalidation -- ensure we don't use a constraint for
--- exclusion until validated
-set constraint_exclusion TO 'partition';
-create table nv_parent (d date, check (false) no inherit not valid);
--- not valid constraint added at creation time should automatically become valid
-\d nv_parent
- Table "public.nv_parent"
- Column | Type | Collation | Nullable | Default
---------+------+-----------+----------+---------
- d | date | | |
-Check constraints:
- "nv_parent_check" CHECK (false) NO INHERIT
-
-create table nv_child_2010 () inherits (nv_parent);
-create table nv_child_2011 () inherits (nv_parent);
-alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid;
-alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid;
-explain (costs off) select * from nv_parent where d between '2011-08-01' and '2011-08-31';
- QUERY PLAN
----------------------------------------------------------------------------
- Append
- -> Seq Scan on nv_parent nv_parent_1
- Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
- -> Seq Scan on nv_child_2010 nv_parent_2
- Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
- -> Seq Scan on nv_child_2011 nv_parent_3
- Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
-(7 rows)
-
-create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent);
-explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date;
- QUERY PLAN
----------------------------------------------------------------------------
- Append
- -> Seq Scan on nv_parent nv_parent_1
- Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
- -> Seq Scan on nv_child_2010 nv_parent_2
- Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
- -> Seq Scan on nv_child_2011 nv_parent_3
- Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
-(7 rows)
-
-explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
- QUERY PLAN
----------------------------------------------------------------------------
- Append
- -> Seq Scan on nv_parent nv_parent_1
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
- -> Seq Scan on nv_child_2010 nv_parent_2
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
- -> Seq Scan on nv_child_2011 nv_parent_3
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
- -> Seq Scan on nv_child_2009 nv_parent_4
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
-(9 rows)
-
--- after validation, the constraint should be used
-alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check;
-explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
- QUERY PLAN
----------------------------------------------------------------------------
- Append
- -> Seq Scan on nv_parent nv_parent_1
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
- -> Seq Scan on nv_child_2010 nv_parent_2
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
- -> Seq Scan on nv_child_2009 nv_parent_3
- Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
-(7 rows)
-
--- add an inherited NOT VALID constraint
-alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid;
-\d nv_child_2009
- Table "public.nv_child_2009"
- Column | Type | Collation | Nullable | Default
---------+------+-----------+----------+---------
- d | date | | |
-Check constraints:
- "nv_child_2009_d_check" CHECK (d >= '01-01-2009'::date AND d <= '12-31-2009'::date)
- "nv_parent_d_check" CHECK (d >= '01-01-2001'::date AND d <= '12-31-2099'::date) NOT VALID
-Inherits: nv_parent
-
--- we leave nv_parent and children around to help test pg_dump logic
--- Foreign key adding test with mixed types
--- Note: these tables are TEMP to avoid name conflicts when this test
--- is run in parallel with foreign_key.sql.
-CREATE TEMP TABLE PKTABLE (ptest1 int PRIMARY KEY);
-INSERT INTO PKTABLE VALUES(42);
-CREATE TEMP TABLE FKTABLE (ftest1 inet);
--- This next should fail, because int=inet does not exist
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
-ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
--- This should also fail for the same reason, but here we
--- give the column name
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1);
-ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
-DROP TABLE FKTABLE;
--- This should succeed, even though they are different types,
--- because int=int8 exists and is a member of the integer opfamily
-CREATE TEMP TABLE FKTABLE (ftest1 int8);
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
--- Check it actually works
-INSERT INTO FKTABLE VALUES(42); -- should succeed
-INSERT INTO FKTABLE VALUES(43); -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-DROP TABLE FKTABLE;
--- This should fail, because we'd have to cast numeric to int which is
--- not an implicit coercion (or use numeric=numeric, but that's not part
--- of the integer opfamily)
-CREATE TEMP TABLE FKTABLE (ftest1 numeric);
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
-ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: numeric and integer.
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- On the other hand, this should work because int implicitly promotes to
--- numeric, and we allow promotion on the FK side
-CREATE TEMP TABLE PKTABLE (ptest1 numeric PRIMARY KEY);
-INSERT INTO PKTABLE VALUES(42);
-CREATE TEMP TABLE FKTABLE (ftest1 int);
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
--- Check it actually works
-INSERT INTO FKTABLE VALUES(42); -- should succeed
-INSERT INTO FKTABLE VALUES(43); -- should fail
-ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
-DETAIL: Key (ftest1)=(43) is not present in table "pktable".
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
-CREATE TEMP TABLE PKTABLE (ptest1 int, ptest2 inet,
- PRIMARY KEY(ptest1, ptest2));
--- This should fail, because we just chose really odd types
-CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp);
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) references pktable;
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: cidr and integer.
-DROP TABLE FKTABLE;
--- Again, so should this...
-CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp);
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
- references pktable(ptest1, ptest2);
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest1" of the referenced table are of incompatible types: cidr and integer.
-DROP TABLE FKTABLE;
--- This fails because we mixed up the column ordering
-CREATE TEMP TABLE FKTABLE (ftest1 int, ftest2 inet);
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
- references pktable(ptest2, ptest1);
-ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
-DETAIL: Key columns "ftest1" of the referencing table and "ptest2" of the referenced table are of incompatible types: integer and inet.
--- As does this...
-ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest2, ftest1)
- references pktable(ptest1, ptest2);
-ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
-DETAIL: Key columns "ftest2" of the referencing table and "ptest1" of the referenced table are of incompatible types: inet and integer.
-DROP TABLE FKTABLE;
-DROP TABLE PKTABLE;
--- Test that ALTER CONSTRAINT updates trigger deferrability properly
-CREATE TEMP TABLE PKTABLE (ptest1 int primary key);
-CREATE TEMP TABLE FKTABLE (ftest1 int);
-ALTER TABLE FKTABLE ADD CONSTRAINT fknd FOREIGN KEY(ftest1) REFERENCES pktable
- ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE;
-ALTER TABLE FKTABLE ADD CONSTRAINT fkdd FOREIGN KEY(ftest1) REFERENCES pktable
- ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE FKTABLE ADD CONSTRAINT fkdi FOREIGN KEY(ftest1) REFERENCES pktable
- ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY IMMEDIATE;
-ALTER TABLE FKTABLE ADD CONSTRAINT fknd2 FOREIGN KEY(ftest1) REFERENCES pktable
- ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE FKTABLE ALTER CONSTRAINT fknd2 NOT DEFERRABLE;
-ALTER TABLE FKTABLE ADD CONSTRAINT fkdd2 FOREIGN KEY(ftest1) REFERENCES pktable
- ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE;
-ALTER TABLE FKTABLE ALTER CONSTRAINT fkdd2 DEFERRABLE INITIALLY DEFERRED;
-ALTER TABLE FKTABLE ADD CONSTRAINT fkdi2 FOREIGN KEY(ftest1) REFERENCES pktable
- ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE;
-ALTER TABLE FKTABLE ALTER CONSTRAINT fkdi2 DEFERRABLE INITIALLY IMMEDIATE;
-SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred
-FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint
-WHERE tgrelid = 'pktable'::regclass
-ORDER BY 1,2,3;
- conname | tgfoid | tgtype | tgdeferrable | tginitdeferred
----------+------------------------+--------+--------------+----------------
- fkdd | "RI_FKey_cascade_del" | 9 | f | f
- fkdd | "RI_FKey_noaction_upd" | 17 | t | t
- fkdd2 | "RI_FKey_cascade_del" | 9 | f | f
- fkdd2 | "RI_FKey_noaction_upd" | 17 | t | t
- fkdi | "RI_FKey_cascade_del" | 9 | f | f
- fkdi | "RI_FKey_noaction_upd" | 17 | t | f
- fkdi2 | "RI_FKey_cascade_del" | 9 | f | f
- fkdi2 | "RI_FKey_noaction_upd" | 17 | t | f
- fknd | "RI_FKey_cascade_del" | 9 | f | f
- fknd | "RI_FKey_noaction_upd" | 17 | f | f
- fknd2 | "RI_FKey_cascade_del" | 9 | f | f
- fknd2 | "RI_FKey_noaction_upd" | 17 | f | f
-(12 rows)
-
-SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred
-FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint
-WHERE tgrelid = 'fktable'::regclass
-ORDER BY 1,2,3;
- conname | tgfoid | tgtype | tgdeferrable | tginitdeferred
----------+---------------------+--------+--------------+----------------
- fkdd | "RI_FKey_check_ins" | 5 | t | t
- fkdd | "RI_FKey_check_upd" | 17 | t | t
- fkdd2 | "RI_FKey_check_ins" | 5 | t | t
- fkdd2 | "RI_FKey_check_upd" | 17 | t | t
- fkdi | "RI_FKey_check_ins" | 5 | t | f
- fkdi | "RI_FKey_check_upd" | 17 | t | f
- fkdi2 | "RI_FKey_check_ins" | 5 | t | f
- fkdi2 | "RI_FKey_check_upd" | 17 | t | f
- fknd | "RI_FKey_check_ins" | 5 | f | f
- fknd | "RI_FKey_check_upd" | 17 | f | f
- fknd2 | "RI_FKey_check_ins" | 5 | f | f
- fknd2 | "RI_FKey_check_upd" | 17 | f | f
-(12 rows)
-
--- temp tables should go away by themselves, need not drop them.
--- test check constraint adding
-create table atacc1 ( test int );
--- add a check constraint
-alter table atacc1 add constraint atacc_test1 check (test>3);
--- should fail
-insert into atacc1 (test) values (2);
-ERROR: new row for relation "atacc1" violates check constraint "atacc_test1"
-DETAIL: Failing row contains (2).
--- should succeed
-insert into atacc1 (test) values (4);
-drop table atacc1;
--- let's do one where the check fails when added
-create table atacc1 ( test int );
--- insert a soon to be failing row
-insert into atacc1 (test) values (2);
--- add a check constraint (fails)
-alter table atacc1 add constraint atacc_test1 check (test>3);
-ERROR: check constraint "atacc_test1" of relation "atacc1" is violated by some row
-insert into atacc1 (test) values (4);
-drop table atacc1;
--- let's do one where the check fails because the column doesn't exist
-create table atacc1 ( test int );
--- add a check constraint (fails)
-alter table atacc1 add constraint atacc_test1 check (test1>3);
-ERROR: column "test1" does not exist
-HINT: Perhaps you meant to reference the column "atacc1.test".
-drop table atacc1;
--- something a little more complicated
-create table atacc1 ( test int, test2 int, test3 int);
--- add a check constraint (fails)
-alter table atacc1 add constraint atacc_test1 check (test+test23), test2 int);
-alter table atacc1 add check (test2>test);
--- should fail for $2
-insert into atacc1 (test2, test) values (3, 4);
-ERROR: new row for relation "atacc1" violates check constraint "atacc1_check"
-DETAIL: Failing row contains (4, 3).
-drop table atacc1;
--- inheritance related tests
-create table atacc1 (test int);
-create table atacc2 (test2 int);
-create table atacc3 (test3 int) inherits (atacc1, atacc2);
-alter table atacc2 add constraint foo check (test2>0);
--- fail and then succeed on atacc2
-insert into atacc2 (test2) values (-3);
-ERROR: new row for relation "atacc2" violates check constraint "foo"
-DETAIL: Failing row contains (-3).
-insert into atacc2 (test2) values (3);
--- fail and then succeed on atacc3
-insert into atacc3 (test2) values (-3);
-ERROR: new row for relation "atacc3" violates check constraint "foo"
-DETAIL: Failing row contains (null, -3, null).
-insert into atacc3 (test2) values (3);
-drop table atacc3;
-drop table atacc2;
-drop table atacc1;
--- same things with one created with INHERIT
-create table atacc1 (test int);
-create table atacc2 (test2 int);
-create table atacc3 (test3 int) inherits (atacc1, atacc2);
-alter table atacc3 no inherit atacc2;
--- fail
-alter table atacc3 no inherit atacc2;
-ERROR: relation "atacc2" is not a parent of relation "atacc3"
--- make sure it really isn't a child
-insert into atacc3 (test2) values (3);
-select test2 from atacc2;
- test2
--------
-(0 rows)
-
--- fail due to missing constraint
-alter table atacc2 add constraint foo check (test2>0);
-alter table atacc3 inherit atacc2;
-ERROR: child table is missing constraint "foo"
--- fail due to missing column
-alter table atacc3 rename test2 to testx;
-alter table atacc3 inherit atacc2;
-ERROR: child table is missing column "test2"
--- fail due to mismatched data type
-alter table atacc3 add test2 bool;
-alter table atacc3 inherit atacc2;
-ERROR: child table "atacc3" has different type for column "test2"
-alter table atacc3 drop test2;
--- succeed
-alter table atacc3 add test2 int;
-update atacc3 set test2 = 4 where test2 is null;
-alter table atacc3 add constraint foo check (test2>0);
-alter table atacc3 inherit atacc2;
--- fail due to duplicates and circular inheritance
-alter table atacc3 inherit atacc2;
-ERROR: relation "atacc2" would be inherited from more than once
-alter table atacc2 inherit atacc3;
-ERROR: circular inheritance not allowed
-DETAIL: "atacc3" is already a child of "atacc2".
-alter table atacc2 inherit atacc2;
-ERROR: circular inheritance not allowed
-DETAIL: "atacc2" is already a child of "atacc2".
--- test that we really are a child now (should see 4 not 3 and cascade should go through)
-select test2 from atacc2;
- test2
--------
- 4
-(1 row)
-
-drop table atacc2 cascade;
-NOTICE: drop cascades to table atacc3
-drop table atacc1;
--- adding only to a parent is allowed as of 9.2
-create table atacc1 (test int);
-create table atacc2 (test2 int) inherits (atacc1);
--- ok:
-alter table atacc1 add constraint foo check (test>0) no inherit;
--- check constraint is not there on child
-insert into atacc2 (test) values (-3);
--- check constraint is there on parent
-insert into atacc1 (test) values (-3);
-ERROR: new row for relation "atacc1" violates check constraint "foo"
-DETAIL: Failing row contains (-3).
-insert into atacc1 (test) values (3);
--- fail, violating row:
-alter table atacc2 add constraint foo check (test>0) no inherit;
-ERROR: check constraint "foo" of relation "atacc2" is violated by some row
-drop table atacc2;
-drop table atacc1;
--- test unique constraint adding
-create table atacc1 ( test int ) ;
--- add a unique constraint
-alter table atacc1 add constraint atacc_test1 unique (test);
--- insert first value
-insert into atacc1 (test) values (2);
--- should fail
-insert into atacc1 (test) values (2);
-ERROR: duplicate key value violates unique constraint "atacc_test1"
-DETAIL: Key (test)=(2) already exists.
--- should succeed
-insert into atacc1 (test) values (4);
--- try to create duplicates via alter table using - should fail
-alter table atacc1 alter column test type integer using 0;
-ERROR: could not create unique index "atacc_test1"
-DETAIL: Key (test)=(0) is duplicated.
-drop table atacc1;
--- let's do one where the unique constraint fails when added
-create table atacc1 ( test int );
--- insert soon to be failing rows
-insert into atacc1 (test) values (2);
-insert into atacc1 (test) values (2);
--- add a unique constraint (fails)
-alter table atacc1 add constraint atacc_test1 unique (test);
-ERROR: could not create unique index "atacc_test1"
-DETAIL: Key (test)=(2) is duplicated.
-insert into atacc1 (test) values (3);
-drop table atacc1;
--- let's do one where the unique constraint fails
--- because the column doesn't exist
-create table atacc1 ( test int );
--- add a unique constraint (fails)
-alter table atacc1 add constraint atacc_test1 unique (test1);
-ERROR: column "test1" named in key does not exist
-drop table atacc1;
--- something a little more complicated
-create table atacc1 ( test int, test2 int);
--- add a unique constraint
-alter table atacc1 add constraint atacc_test1 unique (test, test2);
--- insert initial value
-insert into atacc1 (test,test2) values (4,4);
--- should fail
-insert into atacc1 (test,test2) values (4,4);
-ERROR: duplicate key value violates unique constraint "atacc_test1"
-DETAIL: Key (test, test2)=(4, 4) already exists.
--- should all succeed
-insert into atacc1 (test,test2) values (4,5);
-insert into atacc1 (test,test2) values (5,4);
-insert into atacc1 (test,test2) values (5,5);
-drop table atacc1;
--- lets do some naming tests
-create table atacc1 (test int, test2 int, unique(test));
-alter table atacc1 add unique (test2);
--- should fail for @@ second one @@
-insert into atacc1 (test2, test) values (3, 3);
-insert into atacc1 (test2, test) values (2, 3);
-ERROR: duplicate key value violates unique constraint "atacc1_test_key"
-DETAIL: Key (test)=(3) already exists.
-drop table atacc1;
--- test primary key constraint adding
-create table atacc1 ( id serial, test int) ;
--- add a primary key constraint
-alter table atacc1 add constraint atacc_test1 primary key (test);
--- insert first value
-insert into atacc1 (test) values (2);
--- should fail
-insert into atacc1 (test) values (2);
-ERROR: duplicate key value violates unique constraint "atacc_test1"
-DETAIL: Key (test)=(2) already exists.
--- should succeed
-insert into atacc1 (test) values (4);
--- inserting NULL should fail
-insert into atacc1 (test) values(NULL);
-ERROR: null value in column "test" of relation "atacc1" violates not-null constraint
-DETAIL: Failing row contains (4, null).
--- try adding a second primary key (should fail)
-alter table atacc1 add constraint atacc_oid1 primary key(id);
-ERROR: multiple primary keys for table "atacc1" are not allowed
--- drop first primary key constraint
-alter table atacc1 drop constraint atacc_test1 restrict;
--- try adding a primary key on oid (should succeed)
-alter table atacc1 add constraint atacc_oid1 primary key(id);
-drop table atacc1;
--- let's do one where the primary key constraint fails when added
-create table atacc1 ( test int );
--- insert soon to be failing rows
-insert into atacc1 (test) values (2);
-insert into atacc1 (test) values (2);
--- add a primary key (fails)
-alter table atacc1 add constraint atacc_test1 primary key (test);
-ERROR: could not create unique index "atacc_test1"
-DETAIL: Key (test)=(2) is duplicated.
-insert into atacc1 (test) values (3);
-drop table atacc1;
--- let's do another one where the primary key constraint fails when added
-create table atacc1 ( test int );
--- insert soon to be failing row
-insert into atacc1 (test) values (NULL);
--- add a primary key (fails)
-alter table atacc1 add constraint atacc_test1 primary key (test);
-ERROR: column "test" of relation "atacc1" contains null values
-insert into atacc1 (test) values (3);
-drop table atacc1;
--- let's do one where the primary key constraint fails
--- because the column doesn't exist
-create table atacc1 ( test int );
--- add a primary key constraint (fails)
-alter table atacc1 add constraint atacc_test1 primary key (test1);
-ERROR: column "test1" of relation "atacc1" does not exist
-drop table atacc1;
--- adding a new column as primary key to a non-empty table.
--- should fail unless the column has a non-null default value.
-create table atacc1 ( test int );
-insert into atacc1 (test) values (0);
--- add a primary key column without a default (fails).
-alter table atacc1 add column test2 int primary key;
-ERROR: column "test2" of relation "atacc1" contains null values
--- now add a primary key column with a default (succeeds).
-alter table atacc1 add column test2 int default 0 primary key;
-drop table atacc1;
--- this combination used to have order-of-execution problems (bug #15580)
-create table atacc1 (a int);
-insert into atacc1 values(1);
-alter table atacc1
- add column b float8 not null default random(),
- add primary key(a);
-drop table atacc1;
--- additionally, we've seen issues with foreign key validation not being
--- properly delayed until after a table rewrite. Check that works ok.
-create table atacc1 (a int primary key);
-alter table atacc1 add constraint atacc1_fkey foreign key (a) references atacc1 (a) not valid;
-alter table atacc1 validate constraint atacc1_fkey, alter a type bigint;
-drop table atacc1;
--- we've also seen issues with check constraints being validated at the wrong
--- time when there's a pending table rewrite.
-create table atacc1 (a bigint, b int);
-insert into atacc1 values(1,1);
-alter table atacc1 add constraint atacc1_chk check(b = 1) not valid;
-alter table atacc1 validate constraint atacc1_chk, alter a type int;
-drop table atacc1;
--- same as above, but ensure the constraint violation is detected
-create table atacc1 (a bigint, b int);
-insert into atacc1 values(1,2);
-alter table atacc1 add constraint atacc1_chk check(b = 1) not valid;
-alter table atacc1 validate constraint atacc1_chk, alter a type int;
-ERROR: check constraint "atacc1_chk" of relation "atacc1" is violated by some row
-drop table atacc1;
--- something a little more complicated
-create table atacc1 ( test int, test2 int);
--- add a primary key constraint
-alter table atacc1 add constraint atacc_test1 primary key (test, test2);
--- try adding a second primary key - should fail
-alter table atacc1 add constraint atacc_test2 primary key (test);
-ERROR: multiple primary keys for table "atacc1" are not allowed
--- insert initial value
-insert into atacc1 (test,test2) values (4,4);
--- should fail
-insert into atacc1 (test,test2) values (4,4);
-ERROR: duplicate key value violates unique constraint "atacc_test1"
-DETAIL: Key (test, test2)=(4, 4) already exists.
-insert into atacc1 (test,test2) values (NULL,3);
-ERROR: null value in column "test" of relation "atacc1" violates not-null constraint
-DETAIL: Failing row contains (null, 3).
-insert into atacc1 (test,test2) values (3, NULL);
-ERROR: null value in column "test2" of relation "atacc1" violates not-null constraint
-DETAIL: Failing row contains (3, null).
-insert into atacc1 (test,test2) values (NULL,NULL);
-ERROR: null value in column "test" of relation "atacc1" violates not-null constraint
-DETAIL: Failing row contains (null, null).
--- should all succeed
-insert into atacc1 (test,test2) values (4,5);
-insert into atacc1 (test,test2) values (5,4);
-insert into atacc1 (test,test2) values (5,5);
-drop table atacc1;
--- lets do some naming tests
-create table atacc1 (test int, test2 int, primary key(test));
--- only first should succeed
-insert into atacc1 (test2, test) values (3, 3);
-insert into atacc1 (test2, test) values (2, 3);
-ERROR: duplicate key value violates unique constraint "atacc1_pkey"
-DETAIL: Key (test)=(3) already exists.
-insert into atacc1 (test2, test) values (1, NULL);
-ERROR: null value in column "test" of relation "atacc1" violates not-null constraint
-DETAIL: Failing row contains (null, 1).
-drop table atacc1;
--- alter table / alter column [set/drop] not null tests
--- try altering system catalogs, should fail
-alter table pg_class alter column relname drop not null;
-ERROR: permission denied: "pg_class" is a system catalog
-alter table pg_class alter relname set not null;
-ERROR: permission denied: "pg_class" is a system catalog
--- try altering non-existent table, should fail
-alter table non_existent alter column bar set not null;
-ERROR: relation "non_existent" does not exist
-alter table non_existent alter column bar drop not null;
-ERROR: relation "non_existent" does not exist
--- test setting columns to null and not null and vice versa
--- test checking for null values and primary key
-create table atacc1 (test int not null);
-alter table atacc1 add constraint "atacc1_pkey" primary key (test);
-\d atacc1
- Table "public.atacc1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- test | integer | | not null |
-Indexes:
- "atacc1_pkey" PRIMARY KEY, btree (test)
-
-alter table atacc1 alter column test drop not null;
-ERROR: column "test" is in a primary key
-\d atacc1
- Table "public.atacc1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- test | integer | | not null |
-Indexes:
- "atacc1_pkey" PRIMARY KEY, btree (test)
-
-alter table atacc1 drop constraint "atacc1_pkey";
-alter table atacc1 alter column test drop not null;
-\d atacc1
- Table "public.atacc1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- test | integer | | |
-
-insert into atacc1 values (null);
-alter table atacc1 alter test set not null;
-ERROR: column "test" of relation "atacc1" contains null values
-delete from atacc1;
-alter table atacc1 alter test set not null;
--- try altering a non-existent column, should fail
-alter table atacc1 alter bar set not null;
-ERROR: column "bar" of relation "atacc1" does not exist
-alter table atacc1 alter bar drop not null;
-ERROR: column "bar" of relation "atacc1" does not exist
--- try creating a view and altering that, should fail
-create view myview as select * from atacc1;
-alter table myview alter column test drop not null;
-ERROR: ALTER action ALTER COLUMN ... DROP NOT NULL cannot be performed on relation "myview"
-DETAIL: This operation is not supported for views.
-alter table myview alter column test set not null;
-ERROR: ALTER action ALTER COLUMN ... SET NOT NULL cannot be performed on relation "myview"
-DETAIL: This operation is not supported for views.
-drop view myview;
-drop table atacc1;
--- set not null verified by constraints
-create table atacc1 (test_a int, test_b int);
-insert into atacc1 values (null, 1);
--- constraint not cover all values, should fail
-alter table atacc1 add constraint atacc1_constr_or check(test_a is not null or test_b < 10);
-alter table atacc1 alter test_a set not null;
-ERROR: column "test_a" of relation "atacc1" contains null values
-alter table atacc1 drop constraint atacc1_constr_or;
--- not valid constraint, should fail
-alter table atacc1 add constraint atacc1_constr_invalid check(test_a is not null) not valid;
-alter table atacc1 alter test_a set not null;
-ERROR: column "test_a" of relation "atacc1" contains null values
-alter table atacc1 drop constraint atacc1_constr_invalid;
--- with valid constraint
-update atacc1 set test_a = 1;
-alter table atacc1 add constraint atacc1_constr_a_valid check(test_a is not null);
-alter table atacc1 alter test_a set not null;
-delete from atacc1;
-insert into atacc1 values (2, null);
-alter table atacc1 alter test_a drop not null;
--- test multiple set not null at same time
--- test_a checked by atacc1_constr_a_valid, test_b should fail by table scan
-alter table atacc1 alter test_a set not null, alter test_b set not null;
-ERROR: column "test_b" of relation "atacc1" contains null values
--- commands order has no importance
-alter table atacc1 alter test_b set not null, alter test_a set not null;
-ERROR: column "test_b" of relation "atacc1" contains null values
--- valid one by table scan, one by check constraints
-update atacc1 set test_b = 1;
-alter table atacc1 alter test_b set not null, alter test_a set not null;
-alter table atacc1 alter test_a drop not null, alter test_b drop not null;
--- both column has check constraints
-alter table atacc1 add constraint atacc1_constr_b_valid check(test_b is not null);
-alter table atacc1 alter test_b set not null, alter test_a set not null;
-drop table atacc1;
--- test inheritance
-create table parent (a int);
-create table child (b varchar(255)) inherits (parent);
-alter table parent alter a set not null;
-insert into parent values (NULL);
-ERROR: null value in column "a" of relation "parent" violates not-null constraint
-DETAIL: Failing row contains (null).
-insert into child (a, b) values (NULL, 'foo');
-ERROR: null value in column "a" of relation "child" violates not-null constraint
-DETAIL: Failing row contains (null, foo).
-alter table parent alter a drop not null;
-insert into parent values (NULL);
-insert into child (a, b) values (NULL, 'foo');
-alter table only parent alter a set not null;
-ERROR: column "a" of relation "parent" contains null values
-alter table child alter a set not null;
-ERROR: column "a" of relation "child" contains null values
-drop table child;
-drop table parent;
--- test setting and removing default values
-create table def_test (
- c1 int4 default 5,
- c2 text default 'initial_default'
-);
-insert into def_test default values;
-alter table def_test alter column c1 drop default;
-insert into def_test default values;
-alter table def_test alter column c2 drop default;
-insert into def_test default values;
-alter table def_test alter column c1 set default 10;
-alter table def_test alter column c2 set default 'new_default';
-insert into def_test default values;
-select * from def_test;
- c1 | c2
-----+-----------------
- 5 | initial_default
- | initial_default
- |
- 10 | new_default
-(4 rows)
-
--- set defaults to an incorrect type: this should fail
-alter table def_test alter column c1 set default 'wrong_datatype';
-ERROR: invalid input syntax for type integer: "wrong_datatype"
-alter table def_test alter column c2 set default 20;
--- set defaults on a non-existent column: this should fail
-alter table def_test alter column c3 set default 30;
-ERROR: column "c3" of relation "def_test" does not exist
--- set defaults on views: we need to create a view, add a rule
--- to allow insertions into it, and then alter the view to add
--- a default
-create view def_view_test as select * from def_test;
-create rule def_view_test_ins as
- on insert to def_view_test
- do instead insert into def_test select new.*;
-insert into def_view_test default values;
-alter table def_view_test alter column c1 set default 45;
-insert into def_view_test default values;
-alter table def_view_test alter column c2 set default 'view_default';
-insert into def_view_test default values;
-select * from def_view_test;
- c1 | c2
-----+-----------------
- 5 | initial_default
- | initial_default
- |
- 10 | new_default
- |
- 45 |
- 45 | view_default
-(7 rows)
-
-drop rule def_view_test_ins on def_view_test;
-drop view def_view_test;
-drop table def_test;
--- alter table / drop column tests
--- try altering system catalogs, should fail
-alter table pg_class drop column relname;
-ERROR: permission denied: "pg_class" is a system catalog
--- try altering non-existent table, should fail
-alter table nosuchtable drop column bar;
-ERROR: relation "nosuchtable" does not exist
--- test dropping columns
-create table atacc1 (a int4 not null, b int4, c int4 not null, d int4);
-insert into atacc1 values (1, 2, 3, 4);
-alter table atacc1 drop a;
-alter table atacc1 drop a;
-ERROR: column "a" of relation "atacc1" does not exist
--- SELECTs
-select * from atacc1;
- b | c | d
----+---+---
- 2 | 3 | 4
-(1 row)
-
-select * from atacc1 order by a;
-ERROR: column "a" does not exist
-LINE 1: select * from atacc1 order by a;
- ^
-select * from atacc1 order by "........pg.dropped.1........";
-ERROR: column "........pg.dropped.1........" does not exist
-LINE 1: select * from atacc1 order by "........pg.dropped.1........"...
- ^
-select * from atacc1 group by a;
-ERROR: column "a" does not exist
-LINE 1: select * from atacc1 group by a;
- ^
-select * from atacc1 group by "........pg.dropped.1........";
-ERROR: column "........pg.dropped.1........" does not exist
-LINE 1: select * from atacc1 group by "........pg.dropped.1........"...
- ^
-select atacc1.* from atacc1;
- b | c | d
----+---+---
- 2 | 3 | 4
-(1 row)
-
-select a from atacc1;
-ERROR: column "a" does not exist
-LINE 1: select a from atacc1;
- ^
-select atacc1.a from atacc1;
-ERROR: column atacc1.a does not exist
-LINE 1: select atacc1.a from atacc1;
- ^
-select b,c,d from atacc1;
- b | c | d
----+---+---
- 2 | 3 | 4
-(1 row)
-
-select a,b,c,d from atacc1;
-ERROR: column "a" does not exist
-LINE 1: select a,b,c,d from atacc1;
- ^
-select * from atacc1 where a = 1;
-ERROR: column "a" does not exist
-LINE 1: select * from atacc1 where a = 1;
- ^
-select "........pg.dropped.1........" from atacc1;
-ERROR: column "........pg.dropped.1........" does not exist
-LINE 1: select "........pg.dropped.1........" from atacc1;
- ^
-select atacc1."........pg.dropped.1........" from atacc1;
-ERROR: column atacc1.........pg.dropped.1........ does not exist
-LINE 1: select atacc1."........pg.dropped.1........" from atacc1;
- ^
-select "........pg.dropped.1........",b,c,d from atacc1;
-ERROR: column "........pg.dropped.1........" does not exist
-LINE 1: select "........pg.dropped.1........",b,c,d from atacc1;
- ^
-select * from atacc1 where "........pg.dropped.1........" = 1;
-ERROR: column "........pg.dropped.1........" does not exist
-LINE 1: select * from atacc1 where "........pg.dropped.1........" = ...
- ^
--- UPDATEs
-update atacc1 set a = 3;
-ERROR: column "a" of relation "atacc1" does not exist
-LINE 1: update atacc1 set a = 3;
- ^
-update atacc1 set b = 2 where a = 3;
-ERROR: column "a" does not exist
-LINE 1: update atacc1 set b = 2 where a = 3;
- ^
-update atacc1 set "........pg.dropped.1........" = 3;
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-LINE 1: update atacc1 set "........pg.dropped.1........" = 3;
- ^
-update atacc1 set b = 2 where "........pg.dropped.1........" = 3;
-ERROR: column "........pg.dropped.1........" does not exist
-LINE 1: update atacc1 set b = 2 where "........pg.dropped.1........"...
- ^
--- INSERTs
-insert into atacc1 values (10, 11, 12, 13);
-ERROR: INSERT has more expressions than target columns
-LINE 1: insert into atacc1 values (10, 11, 12, 13);
- ^
-insert into atacc1 values (default, 11, 12, 13);
-ERROR: INSERT has more expressions than target columns
-LINE 1: insert into atacc1 values (default, 11, 12, 13);
- ^
-insert into atacc1 values (11, 12, 13);
-insert into atacc1 (a) values (10);
-ERROR: column "a" of relation "atacc1" does not exist
-LINE 1: insert into atacc1 (a) values (10);
- ^
-insert into atacc1 (a) values (default);
-ERROR: column "a" of relation "atacc1" does not exist
-LINE 1: insert into atacc1 (a) values (default);
- ^
-insert into atacc1 (a,b,c,d) values (10,11,12,13);
-ERROR: column "a" of relation "atacc1" does not exist
-LINE 1: insert into atacc1 (a,b,c,d) values (10,11,12,13);
- ^
-insert into atacc1 (a,b,c,d) values (default,11,12,13);
-ERROR: column "a" of relation "atacc1" does not exist
-LINE 1: insert into atacc1 (a,b,c,d) values (default,11,12,13);
- ^
-insert into atacc1 (b,c,d) values (11,12,13);
-insert into atacc1 ("........pg.dropped.1........") values (10);
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-LINE 1: insert into atacc1 ("........pg.dropped.1........") values (...
- ^
-insert into atacc1 ("........pg.dropped.1........") values (default);
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-LINE 1: insert into atacc1 ("........pg.dropped.1........") values (...
- ^
-insert into atacc1 ("........pg.dropped.1........",b,c,d) values (10,11,12,13);
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-LINE 1: insert into atacc1 ("........pg.dropped.1........",b,c,d) va...
- ^
-insert into atacc1 ("........pg.dropped.1........",b,c,d) values (default,11,12,13);
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-LINE 1: insert into atacc1 ("........pg.dropped.1........",b,c,d) va...
- ^
--- DELETEs
-delete from atacc1 where a = 3;
-ERROR: column "a" does not exist
-LINE 1: delete from atacc1 where a = 3;
- ^
-delete from atacc1 where "........pg.dropped.1........" = 3;
-ERROR: column "........pg.dropped.1........" does not exist
-LINE 1: delete from atacc1 where "........pg.dropped.1........" = 3;
- ^
-delete from atacc1;
--- try dropping a non-existent column, should fail
-alter table atacc1 drop bar;
-ERROR: column "bar" of relation "atacc1" does not exist
--- try removing an oid column, should succeed (as it's nonexistent)
-alter table atacc1 SET WITHOUT OIDS;
--- try adding an oid column, should fail (not supported)
-alter table atacc1 SET WITH OIDS;
-ERROR: syntax error at or near "WITH"
-LINE 1: alter table atacc1 SET WITH OIDS;
- ^
--- try dropping the xmin column, should fail
-alter table atacc1 drop xmin;
-ERROR: cannot drop system column "xmin"
--- try creating a view and altering that, should fail
-create view myview as select * from atacc1;
-select * from myview;
- b | c | d
----+---+---
-(0 rows)
-
-alter table myview drop d;
-ERROR: ALTER action DROP COLUMN cannot be performed on relation "myview"
-DETAIL: This operation is not supported for views.
-drop view myview;
--- test some commands to make sure they fail on the dropped column
-analyze atacc1(a);
-ERROR: column "a" of relation "atacc1" does not exist
-analyze atacc1("........pg.dropped.1........");
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-vacuum analyze atacc1(a);
-ERROR: column "a" of relation "atacc1" does not exist
-vacuum analyze atacc1("........pg.dropped.1........");
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-comment on column atacc1.a is 'testing';
-ERROR: column "a" of relation "atacc1" does not exist
-comment on column atacc1."........pg.dropped.1........" is 'testing';
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-alter table atacc1 alter a set storage plain;
-ERROR: column "a" of relation "atacc1" does not exist
-alter table atacc1 alter "........pg.dropped.1........" set storage plain;
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-alter table atacc1 alter a set statistics 0;
-ERROR: column "a" of relation "atacc1" does not exist
-alter table atacc1 alter "........pg.dropped.1........" set statistics 0;
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-alter table atacc1 alter a set default 3;
-ERROR: column "a" of relation "atacc1" does not exist
-alter table atacc1 alter "........pg.dropped.1........" set default 3;
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-alter table atacc1 alter a drop default;
-ERROR: column "a" of relation "atacc1" does not exist
-alter table atacc1 alter "........pg.dropped.1........" drop default;
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-alter table atacc1 alter a set not null;
-ERROR: column "a" of relation "atacc1" does not exist
-alter table atacc1 alter "........pg.dropped.1........" set not null;
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-alter table atacc1 alter a drop not null;
-ERROR: column "a" of relation "atacc1" does not exist
-alter table atacc1 alter "........pg.dropped.1........" drop not null;
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-alter table atacc1 rename a to x;
-ERROR: column "a" does not exist
-alter table atacc1 rename "........pg.dropped.1........" to x;
-ERROR: column "........pg.dropped.1........" does not exist
-alter table atacc1 add primary key(a);
-ERROR: column "a" of relation "atacc1" does not exist
-alter table atacc1 add primary key("........pg.dropped.1........");
-ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
-alter table atacc1 add unique(a);
-ERROR: column "a" named in key does not exist
-alter table atacc1 add unique("........pg.dropped.1........");
-ERROR: column "........pg.dropped.1........" named in key does not exist
-alter table atacc1 add check (a > 3);
-ERROR: column "a" does not exist
-alter table atacc1 add check ("........pg.dropped.1........" > 3);
-ERROR: column "........pg.dropped.1........" does not exist
-create table atacc2 (id int4 unique);
-alter table atacc1 add foreign key (a) references atacc2(id);
-ERROR: column "a" referenced in foreign key constraint does not exist
-alter table atacc1 add foreign key ("........pg.dropped.1........") references atacc2(id);
-ERROR: column "........pg.dropped.1........" referenced in foreign key constraint does not exist
-alter table atacc2 add foreign key (id) references atacc1(a);
-ERROR: column "a" referenced in foreign key constraint does not exist
-alter table atacc2 add foreign key (id) references atacc1("........pg.dropped.1........");
-ERROR: column "........pg.dropped.1........" referenced in foreign key constraint does not exist
-drop table atacc2;
-create index "testing_idx" on atacc1(a);
-ERROR: column "a" does not exist
-create index "testing_idx" on atacc1("........pg.dropped.1........");
-ERROR: column "........pg.dropped.1........" does not exist
--- test create as and select into
-insert into atacc1 values (21, 22, 23);
-create table attest1 as select * from atacc1;
-select * from attest1;
- b | c | d
-----+----+----
- 21 | 22 | 23
-(1 row)
-
-drop table attest1;
-select * into attest2 from atacc1;
-select * from attest2;
- b | c | d
-----+----+----
- 21 | 22 | 23
-(1 row)
-
-drop table attest2;
--- try dropping all columns
-alter table atacc1 drop c;
-alter table atacc1 drop d;
-alter table atacc1 drop b;
-select * from atacc1;
---
-(1 row)
-
-drop table atacc1;
--- test constraint error reporting in presence of dropped columns
-create table atacc1 (id serial primary key, value int check (value < 10));
-insert into atacc1(value) values (100);
-ERROR: new row for relation "atacc1" violates check constraint "atacc1_value_check"
-DETAIL: Failing row contains (1, 100).
-alter table atacc1 drop column value;
-alter table atacc1 add column value int check (value < 10);
-insert into atacc1(value) values (100);
-ERROR: new row for relation "atacc1" violates check constraint "atacc1_value_check"
-DETAIL: Failing row contains (2, 100).
-insert into atacc1(id, value) values (null, 0);
-ERROR: null value in column "id" of relation "atacc1" violates not-null constraint
-DETAIL: Failing row contains (null, 0).
-drop table atacc1;
--- test inheritance
-create table parent (a int, b int, c int);
-insert into parent values (1, 2, 3);
-alter table parent drop a;
-create table child (d varchar(255)) inherits (parent);
-insert into child values (12, 13, 'testing');
-select * from parent;
- b | c
-----+----
- 2 | 3
- 12 | 13
-(2 rows)
-
-select * from child;
- b | c | d
-----+----+---------
- 12 | 13 | testing
-(1 row)
-
-alter table parent drop c;
-select * from parent;
- b
-----
- 2
- 12
-(2 rows)
-
-select * from child;
- b | d
-----+---------
- 12 | testing
-(1 row)
-
-drop table child;
-drop table parent;
--- check error cases for inheritance column merging
-create table parent (a float8, b numeric(10,4), c text collate "C");
-create table child (a float4) inherits (parent); -- fail
-NOTICE: merging column "a" with inherited definition
-ERROR: column "a" has a type conflict
-DETAIL: double precision versus real
-create table child (b decimal(10,7)) inherits (parent); -- fail
-NOTICE: moving and merging column "b" with inherited definition
-DETAIL: User-specified column moved to the position of the inherited column.
-ERROR: column "b" has a type conflict
-DETAIL: numeric(10,4) versus numeric(10,7)
-create table child (c text collate "POSIX") inherits (parent); -- fail
-NOTICE: moving and merging column "c" with inherited definition
-DETAIL: User-specified column moved to the position of the inherited column.
-ERROR: column "c" has a collation conflict
-DETAIL: "C" versus "POSIX"
-create table child (a double precision, b decimal(10,4)) inherits (parent);
-NOTICE: merging column "a" with inherited definition
-NOTICE: merging column "b" with inherited definition
-drop table child;
-drop table parent;
--- test copy in/out
-create table attest (a int4, b int4, c int4);
-insert into attest values (1,2,3);
-alter table attest drop a;
-copy attest to stdout;
-2 3
-copy attest(a) to stdout;
-ERROR: column "a" of relation "attest" does not exist
-copy attest("........pg.dropped.1........") to stdout;
-ERROR: column "........pg.dropped.1........" of relation "attest" does not exist
-copy attest from stdin;
-ERROR: extra data after last expected column
-CONTEXT: COPY attest, line 1: "10 11 12"
-select * from attest;
- b | c
----+---
- 2 | 3
-(1 row)
-
-copy attest from stdin;
-select * from attest;
- b | c
-----+----
- 2 | 3
- 21 | 22
-(2 rows)
-
-copy attest(a) from stdin;
-ERROR: column "a" of relation "attest" does not exist
-copy attest("........pg.dropped.1........") from stdin;
-ERROR: column "........pg.dropped.1........" of relation "attest" does not exist
-copy attest(b,c) from stdin;
-select * from attest;
- b | c
-----+----
- 2 | 3
- 21 | 22
- 31 | 32
-(3 rows)
-
-drop table attest;
--- test inheritance
-create table dropColumn (a int, b int, e int);
-create table dropColumnChild (c int) inherits (dropColumn);
-create table dropColumnAnother (d int) inherits (dropColumnChild);
--- these two should fail
-alter table dropColumnchild drop column a;
-ERROR: cannot drop inherited column "a"
-alter table only dropColumnChild drop column b;
-ERROR: cannot drop inherited column "b"
--- these three should work
-alter table only dropColumn drop column e;
-alter table dropColumnChild drop column c;
-alter table dropColumn drop column a;
-create table renameColumn (a int);
-create table renameColumnChild (b int) inherits (renameColumn);
-create table renameColumnAnother (c int) inherits (renameColumnChild);
--- these three should fail
-alter table renameColumnChild rename column a to d;
-ERROR: cannot rename inherited column "a"
-alter table only renameColumnChild rename column a to d;
-ERROR: inherited column "a" must be renamed in child tables too
-alter table only renameColumn rename column a to d;
-ERROR: inherited column "a" must be renamed in child tables too
--- these should work
-alter table renameColumn rename column a to d;
-alter table renameColumnChild rename column b to a;
--- these should work
-alter table if exists doesnt_exist_tab rename column a to d;
-NOTICE: relation "doesnt_exist_tab" does not exist, skipping
-alter table if exists doesnt_exist_tab rename column b to a;
-NOTICE: relation "doesnt_exist_tab" does not exist, skipping
--- this should work
-alter table renameColumn add column w int;
--- this should fail
-alter table only renameColumn add column x int;
-ERROR: column must be added to child tables too
--- Test corner cases in dropping of inherited columns
-create table p1 (f1 int, f2 int);
-create table c1 (f1 int not null) inherits(p1);
-NOTICE: merging column "f1" with inherited definition
--- should be rejected since c1.f1 is inherited
-alter table c1 drop column f1;
-ERROR: cannot drop inherited column "f1"
--- should work
-alter table p1 drop column f1;
--- c1.f1 is still there, but no longer inherited
-select f1 from c1;
- f1
-----
-(0 rows)
-
-alter table c1 drop column f1;
-select f1 from c1;
-ERROR: column "f1" does not exist
-LINE 1: select f1 from c1;
- ^
-HINT: Perhaps you meant to reference the column "c1.f2".
-drop table p1 cascade;
-NOTICE: drop cascades to table c1
-create table p1 (f1 int, f2 int);
-create table c1 () inherits(p1);
--- should be rejected since c1.f1 is inherited
-alter table c1 drop column f1;
-ERROR: cannot drop inherited column "f1"
-alter table p1 drop column f1;
--- c1.f1 is dropped now, since there is no local definition for it
-select f1 from c1;
-ERROR: column "f1" does not exist
-LINE 1: select f1 from c1;
- ^
-HINT: Perhaps you meant to reference the column "c1.f2".
-drop table p1 cascade;
-NOTICE: drop cascades to table c1
-create table p1 (f1 int, f2 int);
-create table c1 () inherits(p1);
--- should be rejected since c1.f1 is inherited
-alter table c1 drop column f1;
-ERROR: cannot drop inherited column "f1"
-alter table only p1 drop column f1;
--- c1.f1 is NOT dropped, but must now be considered non-inherited
-alter table c1 drop column f1;
-drop table p1 cascade;
-NOTICE: drop cascades to table c1
-create table p1 (f1 int, f2 int);
-create table c1 (f1 int not null) inherits(p1);
-NOTICE: merging column "f1" with inherited definition
--- should be rejected since c1.f1 is inherited
-alter table c1 drop column f1;
-ERROR: cannot drop inherited column "f1"
-alter table only p1 drop column f1;
--- c1.f1 is still there, but no longer inherited
-alter table c1 drop column f1;
-drop table p1 cascade;
-NOTICE: drop cascades to table c1
-create table p1(id int, name text);
-create table p2(id2 int, name text, height int);
-create table c1(age int) inherits(p1,p2);
-NOTICE: merging multiple inherited definitions of column "name"
-create table gc1() inherits (c1);
-select relname, attname, attinhcount, attislocal
-from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid)
-where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped
-order by relname, attnum;
- relname | attname | attinhcount | attislocal
----------+---------+-------------+------------
- c1 | id | 1 | f
- c1 | name | 2 | f
- c1 | id2 | 1 | f
- c1 | height | 1 | f
- c1 | age | 0 | t
- gc1 | id | 1 | f
- gc1 | name | 1 | f
- gc1 | id2 | 1 | f
- gc1 | height | 1 | f
- gc1 | age | 1 | f
- p1 | id | 0 | t
- p1 | name | 0 | t
- p2 | id2 | 0 | t
- p2 | name | 0 | t
- p2 | height | 0 | t
-(15 rows)
-
--- should work
-alter table only p1 drop column name;
--- should work. Now c1.name is local and inhcount is 0.
-alter table p2 drop column name;
--- should be rejected since its inherited
-alter table gc1 drop column name;
-ERROR: cannot drop inherited column "name"
--- should work, and drop gc1.name along
-alter table c1 drop column name;
--- should fail: column does not exist
-alter table gc1 drop column name;
-ERROR: column "name" of relation "gc1" does not exist
--- should work and drop the attribute in all tables
-alter table p2 drop column height;
--- IF EXISTS test
-create table dropColumnExists ();
-alter table dropColumnExists drop column non_existing; --fail
-ERROR: column "non_existing" of relation "dropcolumnexists" does not exist
-alter table dropColumnExists drop column if exists non_existing; --succeed
-NOTICE: column "non_existing" of relation "dropcolumnexists" does not exist, skipping
-select relname, attname, attinhcount, attislocal
-from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid)
-where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped
-order by relname, attnum;
- relname | attname | attinhcount | attislocal
----------+---------+-------------+------------
- c1 | id | 1 | f
- c1 | id2 | 1 | f
- c1 | age | 0 | t
- gc1 | id | 1 | f
- gc1 | id2 | 1 | f
- gc1 | age | 1 | f
- p1 | id | 0 | t
- p2 | id2 | 0 | t
-(8 rows)
-
-drop table p1, p2 cascade;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table c1
-drop cascades to table gc1
--- test attinhcount tracking with merged columns
-create table depth0();
-create table depth1(c text) inherits (depth0);
-create table depth2() inherits (depth1);
-alter table depth0 add c text;
-NOTICE: merging definition of column "c" for child "depth1"
-select attrelid::regclass, attname, attinhcount, attislocal
-from pg_attribute
-where attnum > 0 and attrelid::regclass in ('depth0', 'depth1', 'depth2')
-order by attrelid::regclass::text, attnum;
- attrelid | attname | attinhcount | attislocal
-----------+---------+-------------+------------
- depth0 | c | 0 | t
- depth1 | c | 1 | t
- depth2 | c | 1 | f
-(3 rows)
-
--- test renumbering of child-table columns in inherited operations
-create table p1 (f1 int);
-create table c1 (f2 text, f3 int) inherits (p1);
-alter table p1 add column a1 int check (a1 > 0);
-alter table p1 add column f2 text;
-NOTICE: merging definition of column "f2" for child "c1"
-insert into p1 values (1,2,'abc');
-insert into c1 values(11,'xyz',33,0); -- should fail
-ERROR: new row for relation "c1" violates check constraint "p1_a1_check"
-DETAIL: Failing row contains (11, xyz, 33, 0).
-insert into c1 values(11,'xyz',33,22);
-select * from p1;
- f1 | a1 | f2
-----+----+-----
- 1 | 2 | abc
- 11 | 22 | xyz
-(2 rows)
-
-update p1 set a1 = a1 + 1, f2 = upper(f2);
-select * from p1;
- f1 | a1 | f2
-----+----+-----
- 1 | 3 | ABC
- 11 | 23 | XYZ
-(2 rows)
-
-drop table p1 cascade;
-NOTICE: drop cascades to table c1
--- test that operations with a dropped column do not try to reference
--- its datatype
-create domain mytype as text;
-create temp table foo (f1 text, f2 mytype, f3 text);
-insert into foo values('bb','cc','dd');
-select * from foo;
- f1 | f2 | f3
-----+----+----
- bb | cc | dd
-(1 row)
-
-drop domain mytype cascade;
-NOTICE: drop cascades to column f2 of table foo
-select * from foo;
- f1 | f3
-----+----
- bb | dd
-(1 row)
-
-insert into foo values('qq','rr');
-select * from foo;
- f1 | f3
-----+----
- bb | dd
- qq | rr
-(2 rows)
-
-update foo set f3 = 'zz';
-select * from foo;
- f1 | f3
-----+----
- bb | zz
- qq | zz
-(2 rows)
-
-select f3,max(f1) from foo group by f3;
- f3 | max
-----+-----
- zz | qq
-(1 row)
-
--- Simple tests for alter table column type
-alter table foo alter f1 TYPE integer; -- fails
-ERROR: column "f1" cannot be cast automatically to type integer
-HINT: You might need to specify "USING f1::integer".
-alter table foo alter f1 TYPE varchar(10);
-create table anothertab (atcol1 serial8, atcol2 boolean,
- constraint anothertab_chk check (atcol1 <= 3));
-insert into anothertab (atcol1, atcol2) values (default, true);
-insert into anothertab (atcol1, atcol2) values (default, false);
-select * from anothertab;
- atcol1 | atcol2
---------+--------
- 1 | t
- 2 | f
-(2 rows)
-
-alter table anothertab alter column atcol1 type boolean; -- fails
-ERROR: column "atcol1" cannot be cast automatically to type boolean
-HINT: You might need to specify "USING atcol1::boolean".
-alter table anothertab alter column atcol1 type boolean using atcol1::int; -- fails
-ERROR: result of USING clause for column "atcol1" cannot be cast automatically to type boolean
-HINT: You might need to add an explicit cast.
-alter table anothertab alter column atcol1 type integer;
-select * from anothertab;
- atcol1 | atcol2
---------+--------
- 1 | t
- 2 | f
-(2 rows)
-
-insert into anothertab (atcol1, atcol2) values (45, null); -- fails
-ERROR: new row for relation "anothertab" violates check constraint "anothertab_chk"
-DETAIL: Failing row contains (45, null).
-insert into anothertab (atcol1, atcol2) values (default, null);
-select * from anothertab;
- atcol1 | atcol2
---------+--------
- 1 | t
- 2 | f
- 3 |
-(3 rows)
-
-alter table anothertab alter column atcol2 type text
- using case when atcol2 is true then 'IT WAS TRUE'
- when atcol2 is false then 'IT WAS FALSE'
- else 'IT WAS NULL!' end;
-select * from anothertab;
- atcol1 | atcol2
---------+--------------
- 1 | IT WAS TRUE
- 2 | IT WAS FALSE
- 3 | IT WAS NULL!
-(3 rows)
-
-alter table anothertab alter column atcol1 type boolean
- using case when atcol1 % 2 = 0 then true else false end; -- fails
-ERROR: default for column "atcol1" cannot be cast automatically to type boolean
-alter table anothertab alter column atcol1 drop default;
-alter table anothertab alter column atcol1 type boolean
- using case when atcol1 % 2 = 0 then true else false end; -- fails
-ERROR: operator does not exist: boolean <= integer
-HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
-alter table anothertab drop constraint anothertab_chk;
-alter table anothertab drop constraint anothertab_chk; -- fails
-ERROR: constraint "anothertab_chk" of relation "anothertab" does not exist
-alter table anothertab drop constraint IF EXISTS anothertab_chk; -- succeeds
-NOTICE: constraint "anothertab_chk" of relation "anothertab" does not exist, skipping
-alter table anothertab alter column atcol1 type boolean
- using case when atcol1 % 2 = 0 then true else false end;
-select * from anothertab;
- atcol1 | atcol2
---------+--------------
- f | IT WAS TRUE
- t | IT WAS FALSE
- f | IT WAS NULL!
-(3 rows)
-
-drop table anothertab;
--- Test index handling in alter table column type (cf. bugs #15835, #15865)
-create table anothertab(f1 int primary key, f2 int unique,
- f3 int, f4 int, f5 int);
-alter table anothertab
- add exclude using btree (f3 with =);
-alter table anothertab
- add exclude using btree (f4 with =) where (f4 is not null);
-alter table anothertab
- add exclude using btree (f4 with =) where (f5 > 0);
-alter table anothertab
- add unique(f1,f4);
-create index on anothertab(f2,f3);
-create unique index on anothertab(f4);
-\d anothertab
- Table "public.anothertab"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- f1 | integer | | not null |
- f2 | integer | | |
- f3 | integer | | |
- f4 | integer | | |
- f5 | integer | | |
-Indexes:
- "anothertab_pkey" PRIMARY KEY, btree (f1)
- "anothertab_f1_f4_key" UNIQUE CONSTRAINT, btree (f1, f4)
- "anothertab_f2_f3_idx" btree (f2, f3)
- "anothertab_f2_key" UNIQUE CONSTRAINT, btree (f2)
- "anothertab_f3_excl" EXCLUDE USING btree (f3 WITH =)
- "anothertab_f4_excl" EXCLUDE USING btree (f4 WITH =) WHERE (f4 IS NOT NULL)
- "anothertab_f4_excl1" EXCLUDE USING btree (f4 WITH =) WHERE (f5 > 0)
- "anothertab_f4_idx" UNIQUE, btree (f4)
-
-alter table anothertab alter column f1 type bigint;
-alter table anothertab
- alter column f2 type bigint,
- alter column f3 type bigint,
- alter column f4 type bigint;
-alter table anothertab alter column f5 type bigint;
-\d anothertab
- Table "public.anothertab"
- Column | Type | Collation | Nullable | Default
---------+--------+-----------+----------+---------
- f1 | bigint | | not null |
- f2 | bigint | | |
- f3 | bigint | | |
- f4 | bigint | | |
- f5 | bigint | | |
-Indexes:
- "anothertab_pkey" PRIMARY KEY, btree (f1)
- "anothertab_f1_f4_key" UNIQUE CONSTRAINT, btree (f1, f4)
- "anothertab_f2_f3_idx" btree (f2, f3)
- "anothertab_f2_key" UNIQUE CONSTRAINT, btree (f2)
- "anothertab_f3_excl" EXCLUDE USING btree (f3 WITH =)
- "anothertab_f4_excl" EXCLUDE USING btree (f4 WITH =) WHERE (f4 IS NOT NULL)
- "anothertab_f4_excl1" EXCLUDE USING btree (f4 WITH =) WHERE (f5 > 0)
- "anothertab_f4_idx" UNIQUE, btree (f4)
-
-drop table anothertab;
--- test that USING expressions are parsed before column alter type / drop steps
-create table another (f1 int, f2 text, f3 text);
-insert into another values(1, 'one', 'uno');
-insert into another values(2, 'two', 'due');
-insert into another values(3, 'three', 'tre');
-select * from another;
- f1 | f2 | f3
-----+-------+-----
- 1 | one | uno
- 2 | two | due
- 3 | three | tre
-(3 rows)
-
-alter table another
- alter f1 type text using f2 || ' and ' || f3 || ' more',
- alter f2 type bigint using f1 * 10,
- drop column f3;
-select * from another;
- f1 | f2
---------------------+----
- one and uno more | 10
- two and due more | 20
- three and tre more | 30
-(3 rows)
-
-drop table another;
--- Create an index that skips WAL, then perform a SET DATA TYPE that skips
--- rewriting the index.
-begin;
-create table skip_wal_skip_rewrite_index (c varchar(10) primary key);
-alter table skip_wal_skip_rewrite_index alter c type varchar(20);
-commit;
--- We disallow changing table's row type if it's used for storage
-create table at_tab1 (a int, b text);
-create table at_tab2 (x int, y at_tab1);
-alter table at_tab1 alter column b type varchar; -- fails
-ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type
-drop table at_tab2;
--- Use of row type in an expression is defended differently
-create table at_tab2 (x int, y text, check((x,y)::at_tab1 = (1,'42')::at_tab1));
-alter table at_tab1 alter column b type varchar; -- allowed, but ...
-insert into at_tab2 values(1,'42'); -- ... this will fail
-ERROR: ROW() column has type text instead of type character varying
-drop table at_tab1, at_tab2;
--- Check it for a partitioned table, too
-create table at_tab1 (a int, b text) partition by list(a);
-create table at_tab2 (x int, y at_tab1);
-alter table at_tab1 alter column b type varchar; -- fails
-ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type
-drop table at_tab1, at_tab2;
--- Alter column type that's part of a partitioned index
-create table at_partitioned (a int, b text) partition by range (a);
-create table at_part_1 partition of at_partitioned for values from (0) to (1000);
-insert into at_partitioned values (512, '0.123');
-create table at_part_2 (b text, a int);
-insert into at_part_2 values ('1.234', 1024);
-create index on at_partitioned (b);
-create index on at_partitioned (a);
-\d at_part_1
- Table "public.at_part_1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | text | | |
-Partition of: at_partitioned FOR VALUES FROM (0) TO (1000)
-Indexes:
- "at_part_1_a_idx" btree (a)
- "at_part_1_b_idx" btree (b)
-
-\d at_part_2
- Table "public.at_part_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- b | text | | |
- a | integer | | |
-
-alter table at_partitioned attach partition at_part_2 for values from (1000) to (2000);
-\d at_part_2
- Table "public.at_part_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- b | text | | |
- a | integer | | |
-Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000)
-Indexes:
- "at_part_2_a_idx" btree (a)
- "at_part_2_b_idx" btree (b)
-
-alter table at_partitioned alter column b type numeric using b::numeric;
-\d at_part_1
- Table "public.at_part_1"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | numeric | | |
-Partition of: at_partitioned FOR VALUES FROM (0) TO (1000)
-Indexes:
- "at_part_1_a_idx" btree (a)
- "at_part_1_b_idx" btree (b)
-
-\d at_part_2
- Table "public.at_part_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- b | numeric | | |
- a | integer | | |
-Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000)
-Indexes:
- "at_part_2_a_idx" btree (a)
- "at_part_2_b_idx" btree (b)
-
-drop table at_partitioned;
--- Alter column type when no table rewrite is required
--- Also check that comments are preserved
-create table at_partitioned(id int, name varchar(64), unique (id, name))
- partition by hash(id);
-comment on constraint at_partitioned_id_name_key on at_partitioned is 'parent constraint';
-comment on index at_partitioned_id_name_key is 'parent index';
-create table at_partitioned_0 partition of at_partitioned
- for values with (modulus 2, remainder 0);
-comment on constraint at_partitioned_0_id_name_key on at_partitioned_0 is 'child 0 constraint';
-comment on index at_partitioned_0_id_name_key is 'child 0 index';
-create table at_partitioned_1 partition of at_partitioned
- for values with (modulus 2, remainder 1);
-comment on constraint at_partitioned_1_id_name_key on at_partitioned_1 is 'child 1 constraint';
-comment on index at_partitioned_1_id_name_key is 'child 1 index';
-insert into at_partitioned values(1, 'foo');
-insert into at_partitioned values(3, 'bar');
-create temp table old_oids as
- select relname, oid as oldoid, relfilenode as oldfilenode
- from pg_class where relname like 'at_partitioned%';
-select relname,
- c.oid = oldoid as orig_oid,
- case relfilenode
- when 0 then 'none'
- when c.oid then 'own'
- when oldfilenode then 'orig'
- else 'OTHER'
- end as storage,
- obj_description(c.oid, 'pg_class') as desc
- from pg_class c left join old_oids using (relname)
- where relname like 'at_partitioned%'
- order by relname;
- relname | orig_oid | storage | desc
-------------------------------+----------+---------+---------------
- at_partitioned | t | none |
- at_partitioned_0 | t | own |
- at_partitioned_0_id_name_key | t | own | child 0 index
- at_partitioned_1 | t | own |
- at_partitioned_1_id_name_key | t | own | child 1 index
- at_partitioned_id_name_key | t | none | parent index
-(6 rows)
-
-select conname, obj_description(oid, 'pg_constraint') as desc
- from pg_constraint where conname like 'at_partitioned%'
- order by conname;
- conname | desc
-------------------------------+--------------------
- at_partitioned_0_id_name_key | child 0 constraint
- at_partitioned_1_id_name_key | child 1 constraint
- at_partitioned_id_name_key | parent constraint
-(3 rows)
-
-alter table at_partitioned alter column name type varchar(127);
-select relname,
- c.oid = oldoid as orig_oid,
- case relfilenode
- when 0 then 'none'
- when c.oid then 'own'
- when oldfilenode then 'orig'
- else 'OTHER'
- end as storage,
- obj_description(c.oid, 'pg_class') as desc
- from pg_class c left join old_oids using (relname)
- where relname like 'at_partitioned%'
- order by relname;
- relname | orig_oid | storage | desc
-------------------------------+----------+---------+--------------
- at_partitioned | t | none |
- at_partitioned_0 | t | own |
- at_partitioned_0_id_name_key | f | own |
- at_partitioned_1 | t | own |
- at_partitioned_1_id_name_key | f | own |
- at_partitioned_id_name_key | f | none | parent index
-(6 rows)
-
-select conname, obj_description(oid, 'pg_constraint') as desc
- from pg_constraint where conname like 'at_partitioned%'
- order by conname;
- conname | desc
-------------------------------+-------------------
- at_partitioned_0_id_name_key |
- at_partitioned_1_id_name_key |
- at_partitioned_id_name_key | parent constraint
-(3 rows)
-
--- Don't remove this DROP, it exposes bug #15672
-drop table at_partitioned;
--- disallow recursive containment of row types
-create temp table recur1 (f1 int);
-alter table recur1 add column f2 recur1; -- fails
-ERROR: composite type recur1 cannot be made a member of itself
-alter table recur1 add column f2 recur1[]; -- fails
-ERROR: composite type recur1 cannot be made a member of itself
-create domain array_of_recur1 as recur1[];
-alter table recur1 add column f2 array_of_recur1; -- fails
-ERROR: composite type recur1 cannot be made a member of itself
-create temp table recur2 (f1 int, f2 recur1);
-alter table recur1 add column f2 recur2; -- fails
-ERROR: composite type recur1 cannot be made a member of itself
-alter table recur1 add column f2 int;
-alter table recur1 alter column f2 type recur2; -- fails
-ERROR: composite type recur1 cannot be made a member of itself
--- SET STORAGE may need to add a TOAST table
-create table test_storage (a text, c text storage plain);
-select reltoastrelid <> 0 as has_toast_table
- from pg_class where oid = 'test_storage'::regclass;
- has_toast_table
------------------
- t
-(1 row)
-
-alter table test_storage alter a set storage plain;
--- rewrite table to remove its TOAST table; need a non-constant column default
-alter table test_storage add b int default random()::int;
-select reltoastrelid <> 0 as has_toast_table
- from pg_class where oid = 'test_storage'::regclass;
- has_toast_table
------------------
- f
-(1 row)
-
-alter table test_storage alter a set storage default; -- re-add TOAST table
-select reltoastrelid <> 0 as has_toast_table
- from pg_class where oid = 'test_storage'::regclass;
- has_toast_table
------------------
- t
-(1 row)
-
--- check STORAGE correctness
-create table test_storage_failed (a text, b int storage extended);
-ERROR: column data type integer can only have storage PLAIN
--- test that SET STORAGE propagates to index correctly
-create index test_storage_idx on test_storage (b, a);
-alter table test_storage alter column a set storage external;
-\d+ test_storage
- Table "public.test_storage"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+-------------------+----------+--------------+-------------
- a | text | | | | external | |
- c | text | | | | plain | |
- b | integer | | | random()::integer | plain | |
-Indexes:
- "test_storage_idx" btree (b, a)
-
-\d+ test_storage_idx
- Index "public.test_storage_idx"
- Column | Type | Key? | Definition | Storage | Stats target
---------+---------+------+------------+----------+--------------
- b | integer | yes | b | plain |
- a | text | yes | a | external |
-btree, for table "public.test_storage"
-
--- ALTER COLUMN TYPE with a check constraint and a child table (bug #13779)
-CREATE TABLE test_inh_check (a float check (a > 10.2), b float);
-CREATE TABLE test_inh_check_child() INHERITS(test_inh_check);
-\d test_inh_check
- Table "public.test_inh_check"
- Column | Type | Collation | Nullable | Default
---------+------------------+-----------+----------+---------
- a | double precision | | |
- b | double precision | | |
-Check constraints:
- "test_inh_check_a_check" CHECK (a > 10.2::double precision)
-Number of child tables: 1 (Use \d+ to list them.)
-
-\d test_inh_check_child
- Table "public.test_inh_check_child"
- Column | Type | Collation | Nullable | Default
---------+------------------+-----------+----------+---------
- a | double precision | | |
- b | double precision | | |
-Check constraints:
- "test_inh_check_a_check" CHECK (a > 10.2::double precision)
-Inherits: test_inh_check
-
-select relname, conname, coninhcount, conislocal, connoinherit
- from pg_constraint c, pg_class r
- where relname like 'test_inh_check%' and c.conrelid = r.oid
- order by 1, 2;
- relname | conname | coninhcount | conislocal | connoinherit
-----------------------+------------------------+-------------+------------+--------------
- test_inh_check | test_inh_check_a_check | 0 | t | f
- test_inh_check_child | test_inh_check_a_check | 1 | f | f
-(2 rows)
-
-ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric;
-\d test_inh_check
- Table "public.test_inh_check"
- Column | Type | Collation | Nullable | Default
---------+------------------+-----------+----------+---------
- a | numeric | | |
- b | double precision | | |
-Check constraints:
- "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
-Number of child tables: 1 (Use \d+ to list them.)
-
-\d test_inh_check_child
- Table "public.test_inh_check_child"
- Column | Type | Collation | Nullable | Default
---------+------------------+-----------+----------+---------
- a | numeric | | |
- b | double precision | | |
-Check constraints:
- "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
-Inherits: test_inh_check
-
-select relname, conname, coninhcount, conislocal, connoinherit
- from pg_constraint c, pg_class r
- where relname like 'test_inh_check%' and c.conrelid = r.oid
- order by 1, 2;
- relname | conname | coninhcount | conislocal | connoinherit
-----------------------+------------------------+-------------+------------+--------------
- test_inh_check | test_inh_check_a_check | 0 | t | f
- test_inh_check_child | test_inh_check_a_check | 1 | f | f
-(2 rows)
-
--- also try noinherit, local, and local+inherited cases
-ALTER TABLE test_inh_check ADD CONSTRAINT bnoinherit CHECK (b > 100) NO INHERIT;
-ALTER TABLE test_inh_check_child ADD CONSTRAINT blocal CHECK (b < 1000);
-ALTER TABLE test_inh_check_child ADD CONSTRAINT bmerged CHECK (b > 1);
-ALTER TABLE test_inh_check ADD CONSTRAINT bmerged CHECK (b > 1);
-NOTICE: merging constraint "bmerged" with inherited definition
-\d test_inh_check
- Table "public.test_inh_check"
- Column | Type | Collation | Nullable | Default
---------+------------------+-----------+----------+---------
- a | numeric | | |
- b | double precision | | |
-Check constraints:
- "bmerged" CHECK (b > 1::double precision)
- "bnoinherit" CHECK (b > 100::double precision) NO INHERIT
- "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
-Number of child tables: 1 (Use \d+ to list them.)
-
-\d test_inh_check_child
- Table "public.test_inh_check_child"
- Column | Type | Collation | Nullable | Default
---------+------------------+-----------+----------+---------
- a | numeric | | |
- b | double precision | | |
-Check constraints:
- "blocal" CHECK (b < 1000::double precision)
- "bmerged" CHECK (b > 1::double precision)
- "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
-Inherits: test_inh_check
-
-select relname, conname, coninhcount, conislocal, connoinherit
- from pg_constraint c, pg_class r
- where relname like 'test_inh_check%' and c.conrelid = r.oid
- order by 1, 2;
- relname | conname | coninhcount | conislocal | connoinherit
-----------------------+------------------------+-------------+------------+--------------
- test_inh_check | bmerged | 0 | t | f
- test_inh_check | bnoinherit | 0 | t | t
- test_inh_check | test_inh_check_a_check | 0 | t | f
- test_inh_check_child | blocal | 0 | t | f
- test_inh_check_child | bmerged | 1 | t | f
- test_inh_check_child | test_inh_check_a_check | 1 | f | f
-(6 rows)
-
-ALTER TABLE test_inh_check ALTER COLUMN b TYPE numeric;
-NOTICE: merging constraint "bmerged" with inherited definition
-\d test_inh_check
- Table "public.test_inh_check"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | numeric | | |
- b | numeric | | |
-Check constraints:
- "bmerged" CHECK (b::double precision > 1::double precision)
- "bnoinherit" CHECK (b::double precision > 100::double precision) NO INHERIT
- "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
-Number of child tables: 1 (Use \d+ to list them.)
-
-\d test_inh_check_child
- Table "public.test_inh_check_child"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | numeric | | |
- b | numeric | | |
-Check constraints:
- "blocal" CHECK (b::double precision < 1000::double precision)
- "bmerged" CHECK (b::double precision > 1::double precision)
- "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
-Inherits: test_inh_check
-
-select relname, conname, coninhcount, conislocal, connoinherit
- from pg_constraint c, pg_class r
- where relname like 'test_inh_check%' and c.conrelid = r.oid
- order by 1, 2;
- relname | conname | coninhcount | conislocal | connoinherit
-----------------------+------------------------+-------------+------------+--------------
- test_inh_check | bmerged | 0 | t | f
- test_inh_check | bnoinherit | 0 | t | t
- test_inh_check | test_inh_check_a_check | 0 | t | f
- test_inh_check_child | blocal | 0 | t | f
- test_inh_check_child | bmerged | 1 | t | f
- test_inh_check_child | test_inh_check_a_check | 1 | f | f
-(6 rows)
-
--- ALTER COLUMN TYPE with different schema in children
--- Bug at https://postgr.es/m/20170102225618.GA10071@telsasoft.com
-CREATE TABLE test_type_diff (f1 int);
-CREATE TABLE test_type_diff_c (extra smallint) INHERITS (test_type_diff);
-ALTER TABLE test_type_diff ADD COLUMN f2 int;
-INSERT INTO test_type_diff_c VALUES (1, 2, 3);
-ALTER TABLE test_type_diff ALTER COLUMN f2 TYPE bigint USING f2::bigint;
-CREATE TABLE test_type_diff2 (int_two int2, int_four int4, int_eight int8);
-CREATE TABLE test_type_diff2_c1 (int_four int4, int_eight int8, int_two int2);
-CREATE TABLE test_type_diff2_c2 (int_eight int8, int_two int2, int_four int4);
-CREATE TABLE test_type_diff2_c3 (int_two int2, int_four int4, int_eight int8);
-ALTER TABLE test_type_diff2_c1 INHERIT test_type_diff2;
-ALTER TABLE test_type_diff2_c2 INHERIT test_type_diff2;
-ALTER TABLE test_type_diff2_c3 INHERIT test_type_diff2;
-INSERT INTO test_type_diff2_c1 VALUES (1, 2, 3);
-INSERT INTO test_type_diff2_c2 VALUES (4, 5, 6);
-INSERT INTO test_type_diff2_c3 VALUES (7, 8, 9);
-ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int8 USING int_four::int8;
--- whole-row references are disallowed
-ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int4 USING (pg_column_size(test_type_diff2));
-ERROR: cannot convert whole-row table reference
-DETAIL: USING expression contains a whole-row table reference.
--- check for rollback of ANALYZE corrupting table property flags (bug #11638)
-CREATE TABLE check_fk_presence_1 (id int PRIMARY KEY, t text);
-CREATE TABLE check_fk_presence_2 (id int REFERENCES check_fk_presence_1, t text);
-BEGIN;
-ALTER TABLE check_fk_presence_2 DROP CONSTRAINT check_fk_presence_2_id_fkey;
-ANALYZE check_fk_presence_2;
-ROLLBACK;
-\d check_fk_presence_2
- Table "public.check_fk_presence_2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- id | integer | | |
- t | text | | |
-Foreign-key constraints:
- "check_fk_presence_2_id_fkey" FOREIGN KEY (id) REFERENCES check_fk_presence_1(id)
-
-DROP TABLE check_fk_presence_1, check_fk_presence_2;
--- check column addition within a view (bug #14876)
-create table at_base_table(id int, stuff text);
-insert into at_base_table values (23, 'skidoo');
-create view at_view_1 as select * from at_base_table bt;
-create view at_view_2 as select *, to_json(v1) as j from at_view_1 v1;
-\d+ at_view_1
- View "public.at_view_1"
- Column | Type | Collation | Nullable | Default | Storage | Description
---------+---------+-----------+----------+---------+----------+-------------
- id | integer | | | | plain |
- stuff | text | | | | extended |
-View definition:
- SELECT id,
- stuff
- FROM at_base_table bt;
-
-\d+ at_view_2
- View "public.at_view_2"
- Column | Type | Collation | Nullable | Default | Storage | Description
---------+---------+-----------+----------+---------+----------+-------------
- id | integer | | | | plain |
- stuff | text | | | | extended |
- j | json | | | | extended |
-View definition:
- SELECT id,
- stuff,
- to_json(v1.*) AS j
- FROM at_view_1 v1;
-
-explain (verbose, costs off) select * from at_view_2;
- QUERY PLAN
-----------------------------------------------------------
- Seq Scan on public.at_base_table bt
- Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff))
-(2 rows)
-
-select * from at_view_2;
- id | stuff | j
-----+--------+----------------------------
- 23 | skidoo | {"id":23,"stuff":"skidoo"}
-(1 row)
-
-create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt;
-\d+ at_view_1
- View "public.at_view_1"
- Column | Type | Collation | Nullable | Default | Storage | Description
---------+---------+-----------+----------+---------+----------+-------------
- id | integer | | | | plain |
- stuff | text | | | | extended |
- more | integer | | | | plain |
-View definition:
- SELECT id,
- stuff,
- 2 + 2 AS more
- FROM at_base_table bt;
-
-\d+ at_view_2
- View "public.at_view_2"
- Column | Type | Collation | Nullable | Default | Storage | Description
---------+---------+-----------+----------+---------+----------+-------------
- id | integer | | | | plain |
- stuff | text | | | | extended |
- j | json | | | | extended |
-View definition:
- SELECT id,
- stuff,
- to_json(v1.*) AS j
- FROM at_view_1 v1;
-
-explain (verbose, costs off) select * from at_view_2;
- QUERY PLAN
--------------------------------------------------------------
- Seq Scan on public.at_base_table bt
- Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, 4))
-(2 rows)
-
-select * from at_view_2;
- id | stuff | j
-----+--------+-------------------------------------
- 23 | skidoo | {"id":23,"stuff":"skidoo","more":4}
-(1 row)
-
-drop view at_view_2;
-drop view at_view_1;
-drop table at_base_table;
--- related case (bug #17811)
-begin;
-create temp table t1 as select * from int8_tbl;
-create temp view v1 as select 1::int8 as q1;
-create temp view v2 as select * from v1;
-create or replace temp view v1 with (security_barrier = true)
- as select * from t1;
-create temp table log (q1 int8, q2 int8);
-create rule v1_upd_rule as on update to v1
- do also insert into log values (new.*);
-update v2 set q1 = q1 + 1 where q1 = 123;
-select * from t1;
- q1 | q2
-------------------+-------------------
- 4567890123456789 | 123
- 4567890123456789 | 4567890123456789
- 4567890123456789 | -4567890123456789
- 124 | 456
- 124 | 4567890123456789
-(5 rows)
-
-select * from log;
- q1 | q2
------+------------------
- 124 | 456
- 124 | 4567890123456789
-(2 rows)
-
-rollback;
--- check adding a column not itself requiring a rewrite, together with
--- a column requiring a default (bug #16038)
--- ensure that rewrites aren't silently optimized away, removing the
--- value of the test
-CREATE FUNCTION check_ddl_rewrite(p_tablename regclass, p_ddl text)
-RETURNS boolean
-LANGUAGE plpgsql AS $$
-DECLARE
- v_relfilenode oid;
-BEGIN
- v_relfilenode := relfilenode FROM pg_class WHERE oid = p_tablename;
-
- EXECUTE p_ddl;
-
- RETURN v_relfilenode <> (SELECT relfilenode FROM pg_class WHERE oid = p_tablename);
-END;
-$$;
-CREATE TABLE rewrite_test(col text);
-INSERT INTO rewrite_test VALUES ('something');
-INSERT INTO rewrite_test VALUES (NULL);
--- empty[12] don't need rewrite, but notempty[12]_rewrite will force one
-SELECT check_ddl_rewrite('rewrite_test', $$
- ALTER TABLE rewrite_test
- ADD COLUMN empty1 text,
- ADD COLUMN notempty1_rewrite serial;
-$$);
- check_ddl_rewrite
--------------------
- t
-(1 row)
-
-SELECT check_ddl_rewrite('rewrite_test', $$
- ALTER TABLE rewrite_test
- ADD COLUMN notempty2_rewrite serial,
- ADD COLUMN empty2 text;
-$$);
- check_ddl_rewrite
--------------------
- t
-(1 row)
-
--- also check that fast defaults cause no problem, first without rewrite
-SELECT check_ddl_rewrite('rewrite_test', $$
- ALTER TABLE rewrite_test
- ADD COLUMN empty3 text,
- ADD COLUMN notempty3_norewrite int default 42;
-$$);
- check_ddl_rewrite
--------------------
- f
-(1 row)
-
-SELECT check_ddl_rewrite('rewrite_test', $$
- ALTER TABLE rewrite_test
- ADD COLUMN notempty4_norewrite int default 42,
- ADD COLUMN empty4 text;
-$$);
- check_ddl_rewrite
--------------------
- f
-(1 row)
-
--- then with rewrite
-SELECT check_ddl_rewrite('rewrite_test', $$
- ALTER TABLE rewrite_test
- ADD COLUMN empty5 text,
- ADD COLUMN notempty5_norewrite int default 42,
- ADD COLUMN notempty5_rewrite serial;
-$$);
- check_ddl_rewrite
--------------------
- t
-(1 row)
-
-SELECT check_ddl_rewrite('rewrite_test', $$
- ALTER TABLE rewrite_test
- ADD COLUMN notempty6_rewrite serial,
- ADD COLUMN empty6 text,
- ADD COLUMN notempty6_norewrite int default 42;
-$$);
- check_ddl_rewrite
--------------------
- t
-(1 row)
-
--- cleanup
-DROP FUNCTION check_ddl_rewrite(regclass, text);
-DROP TABLE rewrite_test;
---
--- lock levels
---
-drop type lockmodes;
-ERROR: type "lockmodes" does not exist
-create type lockmodes as enum (
- 'SIReadLock'
-,'AccessShareLock'
-,'RowShareLock'
-,'RowExclusiveLock'
-,'ShareUpdateExclusiveLock'
-,'ShareLock'
-,'ShareRowExclusiveLock'
-,'ExclusiveLock'
-,'AccessExclusiveLock'
-);
-drop view my_locks;
-ERROR: view "my_locks" does not exist
-create or replace view my_locks as
-select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode
-from pg_locks l join pg_class c on l.relation = c.oid
-where virtualtransaction = (
- select virtualtransaction
- from pg_locks
- where transactionid = pg_current_xact_id()::xid)
-and locktype = 'relation'
-and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog')
-and c.relname != 'my_locks'
-group by c.relname;
-create table alterlock (f1 int primary key, f2 text);
-insert into alterlock values (1, 'foo');
-create table alterlock2 (f3 int primary key, f1 int);
-insert into alterlock2 values (1, 1);
-begin; alter table alterlock alter column f2 set statistics 150;
-select * from my_locks order by 1;
- relname | max_lockmode
------------+--------------------------
- alterlock | ShareUpdateExclusiveLock
-(1 row)
-
-rollback;
-begin; alter table alterlock cluster on alterlock_pkey;
-select * from my_locks order by 1;
- relname | max_lockmode
-----------------+--------------------------
- alterlock | ShareUpdateExclusiveLock
- alterlock_pkey | ShareUpdateExclusiveLock
-(2 rows)
-
-commit;
-begin; alter table alterlock set without cluster;
-select * from my_locks order by 1;
- relname | max_lockmode
------------+--------------------------
- alterlock | ShareUpdateExclusiveLock
-(1 row)
-
-commit;
-begin; alter table alterlock set (fillfactor = 100);
-select * from my_locks order by 1;
- relname | max_lockmode
------------+--------------------------
- alterlock | ShareUpdateExclusiveLock
- pg_toast | ShareUpdateExclusiveLock
-(2 rows)
-
-commit;
-begin; alter table alterlock reset (fillfactor);
-select * from my_locks order by 1;
- relname | max_lockmode
------------+--------------------------
- alterlock | ShareUpdateExclusiveLock
- pg_toast | ShareUpdateExclusiveLock
-(2 rows)
-
-commit;
-begin; alter table alterlock set (toast.autovacuum_enabled = off);
-select * from my_locks order by 1;
- relname | max_lockmode
------------+--------------------------
- alterlock | ShareUpdateExclusiveLock
- pg_toast | ShareUpdateExclusiveLock
-(2 rows)
-
-commit;
-begin; alter table alterlock set (autovacuum_enabled = off);
-select * from my_locks order by 1;
- relname | max_lockmode
------------+--------------------------
- alterlock | ShareUpdateExclusiveLock
- pg_toast | ShareUpdateExclusiveLock
-(2 rows)
-
-commit;
-begin; alter table alterlock alter column f2 set (n_distinct = 1);
-select * from my_locks order by 1;
- relname | max_lockmode
------------+--------------------------
- alterlock | ShareUpdateExclusiveLock
-(1 row)
-
-rollback;
--- test that mixing options with different lock levels works as expected
-begin; alter table alterlock set (autovacuum_enabled = off, fillfactor = 80);
-select * from my_locks order by 1;
- relname | max_lockmode
------------+--------------------------
- alterlock | ShareUpdateExclusiveLock
- pg_toast | ShareUpdateExclusiveLock
-(2 rows)
-
-commit;
-begin; alter table alterlock alter column f2 set storage extended;
-select * from my_locks order by 1;
- relname | max_lockmode
------------+---------------------
- alterlock | AccessExclusiveLock
-(1 row)
-
-rollback;
-begin; alter table alterlock alter column f2 set default 'x';
-select * from my_locks order by 1;
- relname | max_lockmode
------------+---------------------
- alterlock | AccessExclusiveLock
-(1 row)
-
-rollback;
-begin;
-create trigger ttdummy
- before delete or update on alterlock
- for each row
- execute procedure
- ttdummy (1, 1);
-select * from my_locks order by 1;
- relname | max_lockmode
------------+-----------------------
- alterlock | ShareRowExclusiveLock
-(1 row)
-
-rollback;
-begin;
-select * from my_locks order by 1;
- relname | max_lockmode
----------+--------------
-(0 rows)
-
-alter table alterlock2 add foreign key (f1) references alterlock (f1);
-select * from my_locks order by 1;
- relname | max_lockmode
------------------+-----------------------
- alterlock | ShareRowExclusiveLock
- alterlock2 | ShareRowExclusiveLock
- alterlock2_pkey | AccessShareLock
- alterlock_pkey | AccessShareLock
-(4 rows)
-
-rollback;
-begin;
-alter table alterlock2
-add constraint alterlock2nv foreign key (f1) references alterlock (f1) NOT VALID;
-select * from my_locks order by 1;
- relname | max_lockmode
-------------+-----------------------
- alterlock | ShareRowExclusiveLock
- alterlock2 | ShareRowExclusiveLock
-(2 rows)
-
-commit;
-begin;
-alter table alterlock2 validate constraint alterlock2nv;
-select * from my_locks order by 1;
- relname | max_lockmode
------------------+--------------------------
- alterlock | RowShareLock
- alterlock2 | ShareUpdateExclusiveLock
- alterlock2_pkey | AccessShareLock
- alterlock_pkey | AccessShareLock
-(4 rows)
-
-rollback;
-create or replace view my_locks as
-select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode
-from pg_locks l join pg_class c on l.relation = c.oid
-where virtualtransaction = (
- select virtualtransaction
- from pg_locks
- where transactionid = pg_current_xact_id()::xid)
-and locktype = 'relation'
-and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog')
-and c.relname = 'my_locks'
-group by c.relname;
--- raise exception
-alter table my_locks set (autovacuum_enabled = false);
-ERROR: unrecognized parameter "autovacuum_enabled"
-alter view my_locks set (autovacuum_enabled = false);
-ERROR: unrecognized parameter "autovacuum_enabled"
-alter table my_locks reset (autovacuum_enabled);
-alter view my_locks reset (autovacuum_enabled);
-begin;
-alter view my_locks set (security_barrier=off);
-select * from my_locks order by 1;
- relname | max_lockmode
-----------+---------------------
- my_locks | AccessExclusiveLock
-(1 row)
-
-alter view my_locks reset (security_barrier);
-rollback;
--- this test intentionally applies the ALTER TABLE command against a view, but
--- uses a view option so we expect this to succeed. This form of SQL is
--- accepted for historical reasons, as shown in the docs for ALTER VIEW
-begin;
-alter table my_locks set (security_barrier=off);
-select * from my_locks order by 1;
- relname | max_lockmode
-----------+---------------------
- my_locks | AccessExclusiveLock
-(1 row)
-
-alter table my_locks reset (security_barrier);
-rollback;
--- cleanup
-drop table alterlock2;
-drop table alterlock;
-drop view my_locks;
-drop type lockmodes;
---
--- alter function
---
-create function test_strict(text) returns text as
- 'select coalesce($1, ''got passed a null'');'
- language sql returns null on null input;
-select test_strict(NULL);
- test_strict
--------------
-
-(1 row)
-
-alter function test_strict(text) called on null input;
-select test_strict(NULL);
- test_strict
--------------------
- got passed a null
-(1 row)
-
-create function non_strict(text) returns text as
- 'select coalesce($1, ''got passed a null'');'
- language sql called on null input;
-select non_strict(NULL);
- non_strict
--------------------
- got passed a null
-(1 row)
-
-alter function non_strict(text) returns null on null input;
-select non_strict(NULL);
- non_strict
-------------
-
-(1 row)
-
---
--- alter object set schema
---
-create schema alter1;
-create schema alter2;
-create table alter1.t1(f1 serial primary key, f2 int check (f2 > 0));
-create view alter1.v1 as select * from alter1.t1;
-create function alter1.plus1(int) returns int as 'select $1+1' language sql;
-create domain alter1.posint integer check (value > 0);
-create type alter1.ctype as (f1 int, f2 text);
-create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql
-as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2';
-create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype);
-create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as
- operator 1 alter1.=(alter1.ctype, alter1.ctype);
-create conversion alter1.latin1_to_utf8 for 'latin1' to 'utf8' from iso8859_1_to_utf8;
-create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype);
-create text search configuration alter1.cfg(parser = alter1.prs);
-create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize);
-create text search dictionary alter1.dict(template = alter1.tmpl);
-insert into alter1.t1(f2) values(11);
-insert into alter1.t1(f2) values(12);
-alter table alter1.t1 set schema alter1; -- no-op, same schema
-alter table alter1.t1 set schema alter2;
-alter table alter1.v1 set schema alter2;
-alter function alter1.plus1(int) set schema alter2;
-alter domain alter1.posint set schema alter2;
-alter operator class alter1.ctype_hash_ops using hash set schema alter2;
-alter operator family alter1.ctype_hash_ops using hash set schema alter2;
-alter operator alter1.=(alter1.ctype, alter1.ctype) set schema alter2;
-alter function alter1.same(alter1.ctype, alter1.ctype) set schema alter2;
-alter type alter1.ctype set schema alter1; -- no-op, same schema
-alter type alter1.ctype set schema alter2;
-alter conversion alter1.latin1_to_utf8 set schema alter2;
-alter text search parser alter1.prs set schema alter2;
-alter text search configuration alter1.cfg set schema alter2;
-alter text search template alter1.tmpl set schema alter2;
-alter text search dictionary alter1.dict set schema alter2;
--- this should succeed because nothing is left in alter1
-drop schema alter1;
-insert into alter2.t1(f2) values(13);
-insert into alter2.t1(f2) values(14);
-select * from alter2.t1;
- f1 | f2
-----+----
- 1 | 11
- 2 | 12
- 3 | 13
- 4 | 14
-(4 rows)
-
-select * from alter2.v1;
- f1 | f2
-----+----
- 1 | 11
- 2 | 12
- 3 | 13
- 4 | 14
-(4 rows)
-
-select alter2.plus1(41);
- plus1
--------
- 42
-(1 row)
-
--- clean up
-drop schema alter2 cascade;
-NOTICE: drop cascades to 13 other objects
-DETAIL: drop cascades to table alter2.t1
-drop cascades to view alter2.v1
-drop cascades to function alter2.plus1(integer)
-drop cascades to type alter2.posint
-drop cascades to type alter2.ctype
-drop cascades to function alter2.same(alter2.ctype,alter2.ctype)
-drop cascades to operator alter2.=(alter2.ctype,alter2.ctype)
-drop cascades to operator family alter2.ctype_hash_ops for access method hash
-drop cascades to conversion alter2.latin1_to_utf8
-drop cascades to text search parser alter2.prs
-drop cascades to text search configuration alter2.cfg
-drop cascades to text search template alter2.tmpl
-drop cascades to text search dictionary alter2.dict
---
--- composite types
---
-CREATE TYPE test_type AS (a int);
-\d test_type
- Composite type "public.test_type"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-
-ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails
-ERROR: relation "nosuchtype" does not exist
-ALTER TYPE test_type ADD ATTRIBUTE b text;
-\d test_type
- Composite type "public.test_type"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | text | | |
-
-ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails
-ERROR: column "b" of relation "test_type" already exists
-ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar;
-\d test_type
- Composite type "public.test_type"
- Column | Type | Collation | Nullable | Default
---------+-------------------+-----------+----------+---------
- a | integer | | |
- b | character varying | | |
-
-ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer;
-\d test_type
- Composite type "public.test_type"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | integer | | |
-
-ALTER TYPE test_type DROP ATTRIBUTE b;
-\d test_type
- Composite type "public.test_type"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-
-ALTER TYPE test_type DROP ATTRIBUTE c; -- fails
-ERROR: column "c" of relation "test_type" does not exist
-ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c;
-NOTICE: column "c" of relation "test_type" does not exist, skipping
-ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean;
-\d test_type
- Composite type "public.test_type"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- d | boolean | | |
-
-ALTER TYPE test_type RENAME ATTRIBUTE a TO aa;
-ERROR: column "a" does not exist
-ALTER TYPE test_type RENAME ATTRIBUTE d TO dd;
-\d test_type
- Composite type "public.test_type"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- dd | boolean | | |
-
-DROP TYPE test_type;
-CREATE TYPE test_type1 AS (a int, b text);
-CREATE TABLE test_tbl1 (x int, y test_type1);
-ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails
-ERROR: cannot alter type "test_type1" because column "test_tbl1.y" uses it
-DROP TABLE test_tbl1;
-CREATE TABLE test_tbl1 (x int, y text);
-CREATE INDEX test_tbl1_idx ON test_tbl1((row(x,y)::test_type1));
-ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails
-ERROR: cannot alter type "test_type1" because column "test_tbl1_idx.row" uses it
-DROP TABLE test_tbl1;
-DROP TYPE test_type1;
-CREATE TYPE test_type2 AS (a int, b text);
-CREATE TABLE test_tbl2 OF test_type2;
-CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2);
-\d test_type2
- Composite type "public.test_type2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | text | | |
-
-\d test_tbl2
- Table "public.test_tbl2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | text | | |
-Number of child tables: 1 (Use \d+ to list them.)
-Typed table of type: test_type2
-
-ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails
-ERROR: cannot alter type "test_type2" because it is the type of a typed table
-HINT: Use ALTER ... CASCADE to alter the typed tables too.
-ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE;
-\d test_type2
- Composite type "public.test_type2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | text | | |
- c | text | | |
-
-\d test_tbl2
- Table "public.test_tbl2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- b | text | | |
- c | text | | |
-Number of child tables: 1 (Use \d+ to list them.)
-Typed table of type: test_type2
-
-ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails
-ERROR: cannot alter type "test_type2" because it is the type of a typed table
-HINT: Use ALTER ... CASCADE to alter the typed tables too.
-ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE;
-\d test_type2
- Composite type "public.test_type2"
- Column | Type | Collation | Nullable | Default
---------+-------------------+-----------+----------+---------
- a | integer | | |
- b | character varying | | |
- c | text | | |
-
-\d test_tbl2
- Table "public.test_tbl2"
- Column | Type | Collation | Nullable | Default
---------+-------------------+-----------+----------+---------
- a | integer | | |
- b | character varying | | |
- c | text | | |
-Number of child tables: 1 (Use \d+ to list them.)
-Typed table of type: test_type2
-
-ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails
-ERROR: cannot alter type "test_type2" because it is the type of a typed table
-HINT: Use ALTER ... CASCADE to alter the typed tables too.
-ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE;
-\d test_type2
- Composite type "public.test_type2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- c | text | | |
-
-\d test_tbl2
- Table "public.test_tbl2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- c | text | | |
-Number of child tables: 1 (Use \d+ to list them.)
-Typed table of type: test_type2
-
-ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails
-ERROR: cannot alter type "test_type2" because it is the type of a typed table
-HINT: Use ALTER ... CASCADE to alter the typed tables too.
-ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE;
-\d test_type2
- Composite type "public.test_type2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- aa | integer | | |
- c | text | | |
-
-\d test_tbl2
- Table "public.test_tbl2"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- aa | integer | | |
- c | text | | |
-Number of child tables: 1 (Use \d+ to list them.)
-Typed table of type: test_type2
-
-\d test_tbl2_subclass
- Table "public.test_tbl2_subclass"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- aa | integer | | |
- c | text | | |
-Inherits: test_tbl2
-
-DROP TABLE test_tbl2_subclass, test_tbl2;
-DROP TYPE test_type2;
-CREATE TYPE test_typex AS (a int, b text);
-CREATE TABLE test_tblx (x int, y test_typex check ((y).a > 0));
-ALTER TYPE test_typex DROP ATTRIBUTE a; -- fails
-ERROR: cannot drop column a of composite type test_typex because other objects depend on it
-DETAIL: constraint test_tblx_y_check on table test_tblx depends on column a of composite type test_typex
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE;
-NOTICE: drop cascades to constraint test_tblx_y_check on table test_tblx
-\d test_tblx
- Table "public.test_tblx"
- Column | Type | Collation | Nullable | Default
---------+------------+-----------+----------+---------
- x | integer | | |
- y | test_typex | | |
-
-DROP TABLE test_tblx;
-DROP TYPE test_typex;
--- This test isn't that interesting on its own, but the purpose is to leave
--- behind a table to test pg_upgrade with. The table has a composite type
--- column in it, and the composite type has a dropped attribute.
-CREATE TYPE test_type3 AS (a int);
-CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3;
-ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int;
-CREATE TYPE test_type_empty AS ();
-DROP TYPE test_type_empty;
---
--- typed tables: OF / NOT OF
---
-CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2));
-ALTER TYPE tt_t0 DROP ATTRIBUTE z;
-CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK
-CREATE TABLE tt1 (x int, y bigint); -- wrong base type
-CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod
-CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order
-CREATE TABLE tt4 (x int); -- too few columns
-CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too few columns
-CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent
-CREATE TABLE tt7 (x int, q text, y numeric(8,2));
-ALTER TABLE tt7 DROP q; -- OK
-ALTER TABLE tt0 OF tt_t0;
-ALTER TABLE tt1 OF tt_t0;
-ERROR: table "tt1" has different type for column "y"
-ALTER TABLE tt2 OF tt_t0;
-ERROR: table "tt2" has different type for column "y"
-ALTER TABLE tt3 OF tt_t0;
-ERROR: table has column "y" where type requires "x"
-ALTER TABLE tt4 OF tt_t0;
-ERROR: table is missing column "y"
-ALTER TABLE tt5 OF tt_t0;
-ERROR: table has extra column "z"
-ALTER TABLE tt6 OF tt_t0;
-ERROR: typed tables cannot inherit
-ALTER TABLE tt7 OF tt_t0;
-CREATE TYPE tt_t1 AS (x int, y numeric(8,2));
-ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table
-ALTER TABLE tt7 NOT OF;
-\d tt7
- Table "public.tt7"
- Column | Type | Collation | Nullable | Default
---------+--------------+-----------+----------+---------
- x | integer | | |
- y | numeric(8,2) | | |
-
--- make sure we can drop a constraint on the parent but it remains on the child
-CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL));
-CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent);
-ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check";
--- should fail
-INSERT INTO test_drop_constr_child (c) VALUES (NULL);
-ERROR: new row for relation "test_drop_constr_child" violates check constraint "test_drop_constr_parent_c_check"
-DETAIL: Failing row contains (null).
-DROP TABLE test_drop_constr_parent CASCADE;
-NOTICE: drop cascades to table test_drop_constr_child
---
--- IF EXISTS test
---
-ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
-NOTICE: relation "tt8" does not exist, skipping
-ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
-NOTICE: relation "tt8" does not exist, skipping
-ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
-NOTICE: relation "tt8" does not exist, skipping
-ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
-NOTICE: relation "tt8" does not exist, skipping
-ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
-NOTICE: relation "tt8" does not exist, skipping
-ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
-NOTICE: relation "tt8" does not exist, skipping
-CREATE TABLE tt8(a int);
-CREATE SCHEMA alter2;
-ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
-ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
-ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
-ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
-ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
-ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
-\d alter2.tt8
- Table "alter2.tt8"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
- f1 | integer | | not null | 0
-Indexes:
- "xxx" PRIMARY KEY, btree (f1)
-Check constraints:
- "tt8_f_check" CHECK (f1 >= 0 AND f1 <= 10)
-
-DROP TABLE alter2.tt8;
-DROP SCHEMA alter2;
---
--- Check conflicts between index and CHECK constraint names
---
-CREATE TABLE tt9(c integer);
-ALTER TABLE tt9 ADD CHECK(c > 1);
-ALTER TABLE tt9 ADD CHECK(c > 2); -- picks nonconflicting name
-ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 3);
-ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 4); -- fail, dup name
-ERROR: constraint "foo" for relation "tt9" already exists
-ALTER TABLE tt9 ADD UNIQUE(c);
-ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name
-ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key UNIQUE(c); -- fail, dup name
-ERROR: relation "tt9_c_key" already exists
-ALTER TABLE tt9 ADD CONSTRAINT foo UNIQUE(c); -- fail, dup name
-ERROR: constraint "foo" for relation "tt9" already exists
-ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key CHECK(c > 5); -- fail, dup name
-ERROR: constraint "tt9_c_key" for relation "tt9" already exists
-ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key2 CHECK(c > 6);
-ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name
-\d tt9
- Table "public.tt9"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c | integer | | |
-Indexes:
- "tt9_c_key" UNIQUE CONSTRAINT, btree (c)
- "tt9_c_key1" UNIQUE CONSTRAINT, btree (c)
- "tt9_c_key3" UNIQUE CONSTRAINT, btree (c)
-Check constraints:
- "foo" CHECK (c > 3)
- "tt9_c_check" CHECK (c > 1)
- "tt9_c_check1" CHECK (c > 2)
- "tt9_c_key2" CHECK (c > 6)
-
-DROP TABLE tt9;
--- Check that comments on constraints and indexes are not lost at ALTER TABLE.
-CREATE TABLE comment_test (
- id int,
- constraint id_notnull_constraint not null id,
- positive_col int CHECK (positive_col > 0),
- indexed_col int,
- CONSTRAINT comment_test_pk PRIMARY KEY (id));
-CREATE INDEX comment_test_index ON comment_test(indexed_col);
-COMMENT ON COLUMN comment_test.id IS 'Column ''id'' on comment_test';
-COMMENT ON INDEX comment_test_index IS 'Simple index on comment_test';
-COMMENT ON CONSTRAINT comment_test_positive_col_check ON comment_test IS 'CHECK constraint on comment_test.positive_col';
-COMMENT ON CONSTRAINT comment_test_pk ON comment_test IS 'PRIMARY KEY constraint of comment_test';
-COMMENT ON CONSTRAINT id_notnull_constraint ON comment_test IS 'NOT NULL constraint of comment_test';
-COMMENT ON INDEX comment_test_pk IS 'Index backing the PRIMARY KEY of comment_test';
-SELECT col_description('comment_test'::regclass, 1) as comment;
- comment
------------------------------
- Column 'id' on comment_test
-(1 row)
-
-SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2;
- index | comment
---------------------+-----------------------------------------------
- comment_test_index | Simple index on comment_test
- comment_test_pk | Index backing the PRIMARY KEY of comment_test
-(2 rows)
-
-SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2;
- constraint | comment
----------------------------------+-----------------------------------------------
- comment_test_pk | PRIMARY KEY constraint of comment_test
- comment_test_positive_col_check | CHECK constraint on comment_test.positive_col
- id_notnull_constraint | NOT NULL constraint of comment_test
-(3 rows)
-
--- Change the datatype of all the columns. ALTER TABLE is optimized to not
--- rebuild an index if the new data type is binary compatible with the old
--- one. Check do a dummy ALTER TABLE that doesn't change the datatype
--- first, to test that no-op codepath, and another one that does.
-ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE int;
-ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE text;
-ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int;
-ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text;
-ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE int;
-ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE bigint;
--- Check that the comments are intact.
-SELECT col_description('comment_test'::regclass, 1) as comment;
- comment
------------------------------
- Column 'id' on comment_test
-(1 row)
-
-SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2;
- index | comment
---------------------+-----------------------------------------------
- comment_test_index | Simple index on comment_test
- comment_test_pk | Index backing the PRIMARY KEY of comment_test
-(2 rows)
-
-SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2;
- constraint | comment
----------------------------------+-----------------------------------------------
- comment_test_pk | PRIMARY KEY constraint of comment_test
- comment_test_positive_col_check | CHECK constraint on comment_test.positive_col
- id_notnull_constraint | NOT NULL constraint of comment_test
-(3 rows)
-
--- Check compatibility for foreign keys and comments. This is done
--- separately as rebuilding the column type of the parent leads
--- to an error and would reduce the test scope.
-CREATE TABLE comment_test_child (
- id text CONSTRAINT comment_test_child_fk REFERENCES comment_test);
-CREATE INDEX comment_test_child_fk ON comment_test_child(id);
-COMMENT ON COLUMN comment_test_child.id IS 'Column ''id'' on comment_test_child';
-COMMENT ON INDEX comment_test_child_fk IS 'Index backing the FOREIGN KEY of comment_test_child';
-COMMENT ON CONSTRAINT comment_test_child_fk ON comment_test_child IS 'FOREIGN KEY constraint of comment_test_child';
--- Change column type of parent
-ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text;
-ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int USING id::integer;
-ERROR: foreign key constraint "comment_test_child_fk" cannot be implemented
-DETAIL: Key columns "id" of the referencing table and "id" of the referenced table are of incompatible types: text and integer.
--- Comments should be intact
-SELECT col_description('comment_test_child'::regclass, 1) as comment;
- comment
------------------------------------
- Column 'id' on comment_test_child
-(1 row)
-
-SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test_child'::regclass ORDER BY 1, 2;
- index | comment
------------------------+-----------------------------------------------------
- comment_test_child_fk | Index backing the FOREIGN KEY of comment_test_child
-(1 row)
-
-SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test_child'::regclass ORDER BY 1, 2;
- constraint | comment
------------------------+----------------------------------------------
- comment_test_child_fk | FOREIGN KEY constraint of comment_test_child
-(1 row)
-
--- Check that we map relation oids to filenodes and back correctly. Only
--- display bad mappings so the test output doesn't change all the time. A
--- filenode function call can return NULL for a relation dropped concurrently
--- with the call's surrounding query, so ignore a NULL mapped_oid for
--- relations that no longer exist after all calls finish.
-CREATE TEMP TABLE filenode_mapping AS
-SELECT
- oid, mapped_oid, reltablespace, relfilenode, relname
-FROM pg_class,
- pg_filenode_relation(reltablespace, pg_relation_filenode(oid)) AS mapped_oid
-WHERE relkind IN ('r', 'i', 'S', 't', 'm') AND mapped_oid IS DISTINCT FROM oid;
-SELECT m.* FROM filenode_mapping m LEFT JOIN pg_class c ON c.oid = m.oid
-WHERE c.oid IS NOT NULL OR m.mapped_oid IS NOT NULL;
- oid | mapped_oid | reltablespace | relfilenode | relname
------+------------+---------------+-------------+---------
-(0 rows)
-
--- Checks on creating and manipulation of user defined relations in
--- pg_catalog.
-SHOW allow_system_table_mods;
- allow_system_table_mods
--------------------------
- off
-(1 row)
-
--- disallowed because of search_path issues with pg_dump
-CREATE TABLE pg_catalog.new_system_table();
-ERROR: permission denied to create "pg_catalog.new_system_table"
-DETAIL: System catalog modifications are currently disallowed.
--- instead create in public first, move to catalog
-CREATE TABLE new_system_table(id serial primary key, othercol text);
-ALTER TABLE new_system_table SET SCHEMA pg_catalog;
-ALTER TABLE new_system_table SET SCHEMA public;
-ALTER TABLE new_system_table SET SCHEMA pg_catalog;
--- will be ignored -- already there:
-ALTER TABLE new_system_table SET SCHEMA pg_catalog;
-ALTER TABLE new_system_table RENAME TO old_system_table;
-CREATE INDEX old_system_table__othercol ON old_system_table (othercol);
-INSERT INTO old_system_table(othercol) VALUES ('somedata'), ('otherdata');
-UPDATE old_system_table SET id = -id;
-DELETE FROM old_system_table WHERE othercol = 'somedata';
-TRUNCATE old_system_table;
-ALTER TABLE old_system_table DROP CONSTRAINT new_system_table_pkey;
-ALTER TABLE old_system_table DROP COLUMN othercol;
-DROP TABLE old_system_table;
--- set logged
-CREATE UNLOGGED TABLE unlogged1(f1 SERIAL PRIMARY KEY, f2 TEXT); -- has sequence, toast
--- check relpersistence of an unlogged table
-SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
-UNION ALL
-SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
-UNION ALL
-SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1'
-ORDER BY relname;
- relname | relkind | relpersistence
------------------------+---------+----------------
- unlogged1 | r | u
- unlogged1 toast index | i | u
- unlogged1 toast table | t | u
- unlogged1_f1_seq | S | u
- unlogged1_pkey | i | u
-(5 rows)
-
-CREATE UNLOGGED TABLE unlogged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged1); -- foreign key
-CREATE UNLOGGED TABLE unlogged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged3); -- self-referencing foreign key
-ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key
-ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists
-ERROR: could not change table "unlogged2" to logged because it references unlogged table "unlogged1"
-ALTER TABLE unlogged1 SET LOGGED;
--- check relpersistence of an unlogged table after changing to permanent
-SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
-UNION ALL
-SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
-UNION ALL
-SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1'
-ORDER BY relname;
- relname | relkind | relpersistence
------------------------+---------+----------------
- unlogged1 | r | p
- unlogged1 toast index | i | p
- unlogged1 toast table | t | p
- unlogged1_f1_seq | S | p
- unlogged1_pkey | i | p
-(5 rows)
-
-ALTER TABLE unlogged1 SET LOGGED; -- silently do nothing
-DROP TABLE unlogged3;
-DROP TABLE unlogged2;
-DROP TABLE unlogged1;
--- set unlogged
-CREATE TABLE logged1(f1 SERIAL PRIMARY KEY, f2 TEXT); -- has sequence, toast
--- check relpersistence of a permanent table
-SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1'
-UNION ALL
-SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1'
-UNION ALL
-SELECT r.relname ||' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1'
-ORDER BY relname;
- relname | relkind | relpersistence
----------------------+---------+----------------
- logged1 | r | p
- logged1 toast index | i | p
- logged1 toast table | t | p
- logged1_f1_seq | S | p
- logged1_pkey | i | p
-(5 rows)
-
-CREATE TABLE logged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged1); -- foreign key
-CREATE TABLE logged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged3); -- self-referencing foreign key
-ALTER TABLE logged1 SET UNLOGGED; -- fails because a foreign key from a permanent table exists
-ERROR: could not change table "logged1" to unlogged because it references logged table "logged2"
-ALTER TABLE logged3 SET UNLOGGED; -- skip self-referencing foreign key
-ALTER TABLE logged2 SET UNLOGGED;
-ALTER TABLE logged1 SET UNLOGGED;
--- check relpersistence of a permanent table after changing to unlogged
-SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1'
-UNION ALL
-SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1'
-UNION ALL
-SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1'
-ORDER BY relname;
- relname | relkind | relpersistence
----------------------+---------+----------------
- logged1 | r | u
- logged1 toast index | i | u
- logged1 toast table | t | u
- logged1_f1_seq | S | u
- logged1_pkey | i | u
-(5 rows)
-
-ALTER TABLE logged1 SET UNLOGGED; -- silently do nothing
-DROP TABLE logged3;
-DROP TABLE logged2;
-DROP TABLE logged1;
--- test ADD COLUMN IF NOT EXISTS
-CREATE TABLE test_add_column(c1 integer);
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c1 | integer | | |
-
-ALTER TABLE test_add_column
- ADD COLUMN c2 integer;
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c1 | integer | | |
- c2 | integer | | |
-
-ALTER TABLE test_add_column
- ADD COLUMN c2 integer; -- fail because c2 already exists
-ERROR: column "c2" of relation "test_add_column" already exists
-ALTER TABLE ONLY test_add_column
- ADD COLUMN c2 integer; -- fail because c2 already exists
-ERROR: column "c2" of relation "test_add_column" already exists
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c1 | integer | | |
- c2 | integer | | |
-
-ALTER TABLE test_add_column
- ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
-NOTICE: column "c2" of relation "test_add_column" already exists, skipping
-ALTER TABLE ONLY test_add_column
- ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
-NOTICE: column "c2" of relation "test_add_column" already exists, skipping
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c1 | integer | | |
- c2 | integer | | |
-
-ALTER TABLE test_add_column
- ADD COLUMN c2 integer, -- fail because c2 already exists
- ADD COLUMN c3 integer primary key;
-ERROR: column "c2" of relation "test_add_column" already exists
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c1 | integer | | |
- c2 | integer | | |
-
-ALTER TABLE test_add_column
- ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
- ADD COLUMN c3 integer primary key;
-NOTICE: column "c2" of relation "test_add_column" already exists, skipping
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c1 | integer | | |
- c2 | integer | | |
- c3 | integer | | not null |
-Indexes:
- "test_add_column_pkey" PRIMARY KEY, btree (c3)
-
-ALTER TABLE test_add_column
- ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
- ADD COLUMN IF NOT EXISTS c3 integer primary key; -- skipping because c3 already exists
-NOTICE: column "c2" of relation "test_add_column" already exists, skipping
-NOTICE: column "c3" of relation "test_add_column" already exists, skipping
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c1 | integer | | |
- c2 | integer | | |
- c3 | integer | | not null |
-Indexes:
- "test_add_column_pkey" PRIMARY KEY, btree (c3)
-
-ALTER TABLE test_add_column
- ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
- ADD COLUMN IF NOT EXISTS c3 integer, -- skipping because c3 already exists
- ADD COLUMN c4 integer REFERENCES test_add_column;
-NOTICE: column "c2" of relation "test_add_column" already exists, skipping
-NOTICE: column "c3" of relation "test_add_column" already exists, skipping
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c1 | integer | | |
- c2 | integer | | |
- c3 | integer | | not null |
- c4 | integer | | |
-Indexes:
- "test_add_column_pkey" PRIMARY KEY, btree (c3)
-Foreign-key constraints:
- "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
-Referenced by:
- TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
-
-ALTER TABLE test_add_column
- ADD COLUMN IF NOT EXISTS c4 integer REFERENCES test_add_column;
-NOTICE: column "c4" of relation "test_add_column" already exists, skipping
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- c1 | integer | | |
- c2 | integer | | |
- c3 | integer | | not null |
- c4 | integer | | |
-Indexes:
- "test_add_column_pkey" PRIMARY KEY, btree (c3)
-Foreign-key constraints:
- "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
-Referenced by:
- TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
-
-ALTER TABLE test_add_column
- ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 8);
-\d test_add_column
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------------------------------------------
- c1 | integer | | |
- c2 | integer | | |
- c3 | integer | | not null |
- c4 | integer | | |
- c5 | integer | | not null | nextval('test_add_column_c5_seq'::regclass)
-Indexes:
- "test_add_column_pkey" PRIMARY KEY, btree (c3)
-Check constraints:
- "test_add_column_c5_check" CHECK (c5 > 8)
-Foreign-key constraints:
- "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
-Referenced by:
- TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
-
-ALTER TABLE test_add_column
- ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 10);
-NOTICE: column "c5" of relation "test_add_column" already exists, skipping
-\d test_add_column*
- Table "public.test_add_column"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------------------------------------------
- c1 | integer | | |
- c2 | integer | | |
- c3 | integer | | not null |
- c4 | integer | | |
- c5 | integer | | not null | nextval('test_add_column_c5_seq'::regclass)
-Indexes:
- "test_add_column_pkey" PRIMARY KEY, btree (c3)
-Check constraints:
- "test_add_column_c5_check" CHECK (c5 > 8)
-Foreign-key constraints:
- "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
-Referenced by:
- TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
-
- Sequence "public.test_add_column_c5_seq"
- Type | Start | Minimum | Maximum | Increment | Cycles? | Cache
----------+-------+---------+------------+-----------+---------+-------
- integer | 1 | 1 | 2147483647 | 1 | no | 1
-Owned by: public.test_add_column.c5
-
- Index "public.test_add_column_pkey"
- Column | Type | Key? | Definition
---------+---------+------+------------
- c3 | integer | yes | c3
-primary key, btree, for table "public.test_add_column"
-
-DROP TABLE test_add_column;
-\d test_add_column*
--- assorted cases with multiple ALTER TABLE steps
-CREATE TABLE ataddindex(f1 INT);
-INSERT INTO ataddindex VALUES (42), (43);
-CREATE UNIQUE INDEX ataddindexi0 ON ataddindex(f1);
-ALTER TABLE ataddindex
- ADD PRIMARY KEY USING INDEX ataddindexi0,
- ALTER f1 TYPE BIGINT;
-\d ataddindex
- Table "public.ataddindex"
- Column | Type | Collation | Nullable | Default
---------+--------+-----------+----------+---------
- f1 | bigint | | not null |
-Indexes:
- "ataddindexi0" PRIMARY KEY, btree (f1)
-
-DROP TABLE ataddindex;
-CREATE TABLE ataddindex(f1 VARCHAR(10));
-INSERT INTO ataddindex(f1) VALUES ('foo'), ('a');
-ALTER TABLE ataddindex
- ALTER f1 SET DATA TYPE TEXT,
- ADD EXCLUDE ((f1 LIKE 'a') WITH =);
-\d ataddindex
- Table "public.ataddindex"
- Column | Type | Collation | Nullable | Default
---------+------+-----------+----------+---------
- f1 | text | | |
-Indexes:
- "ataddindex_expr_excl" EXCLUDE USING btree ((f1 ~~ 'a'::text) WITH =)
-
-DROP TABLE ataddindex;
-CREATE TABLE ataddindex(id int, ref_id int);
-ALTER TABLE ataddindex
- ADD PRIMARY KEY (id),
- ADD FOREIGN KEY (ref_id) REFERENCES ataddindex;
-\d ataddindex
- Table "public.ataddindex"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- id | integer | | not null |
- ref_id | integer | | |
-Indexes:
- "ataddindex_pkey" PRIMARY KEY, btree (id)
-Foreign-key constraints:
- "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id)
-Referenced by:
- TABLE "ataddindex" CONSTRAINT "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id)
-
-DROP TABLE ataddindex;
-CREATE TABLE ataddindex(id int, ref_id int);
-ALTER TABLE ataddindex
- ADD UNIQUE (id),
- ADD FOREIGN KEY (ref_id) REFERENCES ataddindex (id);
-\d ataddindex
- Table "public.ataddindex"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- id | integer | | |
- ref_id | integer | | |
-Indexes:
- "ataddindex_id_key" UNIQUE CONSTRAINT, btree (id)
-Foreign-key constraints:
- "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id)
-Referenced by:
- TABLE "ataddindex" CONSTRAINT "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id)
-
-DROP TABLE ataddindex;
-CREATE TABLE atnotnull1 ();
-ALTER TABLE atnotnull1
- ADD COLUMN a INT,
- ALTER a SET NOT NULL;
-ALTER TABLE atnotnull1
- ADD COLUMN b INT,
- ADD NOT NULL b;
-ALTER TABLE atnotnull1
- ADD COLUMN c INT,
- ADD PRIMARY KEY (c);
-\d+ atnotnull1
- Table "public.atnotnull1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a | integer | | not null | | plain | |
- b | integer | | not null | | plain | |
- c | integer | | not null | | plain | |
-Indexes:
- "atnotnull1_pkey" PRIMARY KEY, btree (c)
-Not-null constraints:
- "atnotnull1_a_not_null" NOT NULL "a"
- "atnotnull1_b_not_null" NOT NULL "b"
- "atnotnull1_c_not_null" NOT NULL "c"
-
--- cannot drop column that is part of the partition key
-CREATE TABLE partitioned (
- a int,
- b int
-) PARTITION BY RANGE (a, (a+b+1));
-ALTER TABLE partitioned DROP COLUMN a;
-ERROR: cannot drop column "a" because it is part of the partition key of relation "partitioned"
-ALTER TABLE partitioned ALTER COLUMN a TYPE char(5);
-ERROR: cannot alter column "a" because it is part of the partition key of relation "partitioned"
-ALTER TABLE partitioned DROP COLUMN b;
-ERROR: cannot drop column "b" because it is part of the partition key of relation "partitioned"
-ALTER TABLE partitioned ALTER COLUMN b TYPE char(5);
-ERROR: cannot alter column "b" because it is part of the partition key of relation "partitioned"
--- specifying storage parameters for partitioned tables is not supported
-ALTER TABLE partitioned SET (fillfactor=100);
-ERROR: cannot specify storage parameters for a partitioned table
-HINT: Specify storage parameters for its leaf partitions instead.
--- partitioned table cannot participate in regular inheritance
-CREATE TABLE nonpartitioned (
- a int,
- b int
-);
-ALTER TABLE partitioned INHERIT nonpartitioned;
-ERROR: cannot change inheritance of partitioned table
-ALTER TABLE nonpartitioned INHERIT partitioned;
-ERROR: cannot inherit from partitioned table "partitioned"
--- cannot add NO INHERIT constraint to partitioned tables
-ALTER TABLE partitioned ADD CONSTRAINT chk_a CHECK (a > 0) NO INHERIT;
-ERROR: cannot add NO INHERIT constraint to partitioned table "partitioned"
-DROP TABLE partitioned, nonpartitioned;
---
--- ATTACH PARTITION
---
--- check that target table is partitioned
-CREATE TABLE unparted (
- a int
-);
-CREATE TABLE fail_part (like unparted);
-ALTER TABLE unparted ATTACH PARTITION fail_part FOR VALUES IN ('a');
-ERROR: ALTER action ATTACH PARTITION cannot be performed on relation "unparted"
-DETAIL: This operation is not supported for tables.
-DROP TABLE unparted, fail_part;
--- check that partition bound is compatible
-CREATE TABLE list_parted (
- a int NOT NULL,
- b char(2) COLLATE "C",
- CONSTRAINT check_a CHECK (a > 0)
-) PARTITION BY LIST (a);
-CREATE TABLE fail_part (LIKE list_parted);
-ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES FROM (1) TO (10);
-ERROR: invalid bound specification for a list partition
-LINE 1: ...list_parted ATTACH PARTITION fail_part FOR VALUES FROM (1) T...
- ^
-DROP TABLE fail_part;
--- check that the table being attached exists
-ALTER TABLE list_parted ATTACH PARTITION nonexistent FOR VALUES IN (1);
-ERROR: relation "nonexistent" does not exist
--- check ownership of the source table
-CREATE ROLE regress_test_me;
-CREATE ROLE regress_test_not_me;
-CREATE TABLE not_owned_by_me (LIKE list_parted);
-ALTER TABLE not_owned_by_me OWNER TO regress_test_not_me;
-SET SESSION AUTHORIZATION regress_test_me;
-CREATE TABLE owned_by_me (
- a int
-) PARTITION BY LIST (a);
-ALTER TABLE owned_by_me ATTACH PARTITION not_owned_by_me FOR VALUES IN (1);
-ERROR: must be owner of table not_owned_by_me
-RESET SESSION AUTHORIZATION;
-DROP TABLE owned_by_me, not_owned_by_me;
-DROP ROLE regress_test_not_me;
-DROP ROLE regress_test_me;
--- check that the table being attached is not part of regular inheritance
-CREATE TABLE parent (LIKE list_parted);
-CREATE TABLE child () INHERITS (parent);
-ALTER TABLE list_parted ATTACH PARTITION child FOR VALUES IN (1);
-ERROR: cannot attach inheritance child as partition
-ALTER TABLE list_parted ATTACH PARTITION parent FOR VALUES IN (1);
-ERROR: cannot attach inheritance parent as partition
-DROP TABLE child;
--- now it should work, with a little tweak
-ALTER TABLE parent ADD CONSTRAINT check_a CHECK (a > 0);
-ALTER TABLE list_parted ATTACH PARTITION parent FOR VALUES IN (1);
--- test insert/update, per bug #18550
-INSERT INTO parent VALUES (1);
-UPDATE parent SET a = 2 WHERE a = 1;
-ERROR: new row for relation "parent" violates partition constraint
-DETAIL: Failing row contains (2, null).
-DROP TABLE parent CASCADE;
--- check any TEMP-ness
-CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a);
-CREATE TABLE perm_part (a int);
-ALTER TABLE temp_parted ATTACH PARTITION perm_part FOR VALUES IN (1);
-ERROR: cannot attach a permanent relation as partition of temporary relation "temp_parted"
-DROP TABLE temp_parted, perm_part;
--- check that the table being attached is not a typed table
-CREATE TYPE mytype AS (a int);
-CREATE TABLE fail_part OF mytype;
-ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
-ERROR: cannot attach a typed table as partition
-DROP TYPE mytype CASCADE;
-NOTICE: drop cascades to table fail_part
--- check that the table being attached has only columns present in the parent
-CREATE TABLE fail_part (like list_parted, c int);
-ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
-ERROR: table "fail_part" contains column "c" not found in parent "list_parted"
-DETAIL: The new partition may contain only the columns present in parent.
-DROP TABLE fail_part;
--- check that the table being attached has every column of the parent
-CREATE TABLE fail_part (a int NOT NULL);
-ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
-ERROR: child table is missing column "b"
-DROP TABLE fail_part;
--- check that columns match in type, collation and NOT NULL status
-CREATE TABLE fail_part (
- b char(3),
- a int NOT NULL
-);
-ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
-ERROR: child table "fail_part" has different type for column "b"
-ALTER TABLE fail_part ALTER b TYPE char (2) COLLATE "POSIX";
-ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
-ERROR: child table "fail_part" has different collation for column "b"
-DROP TABLE fail_part;
--- check that the table being attached has all constraints of the parent
-CREATE TABLE fail_part (
- b char(2) COLLATE "C",
- a int NOT NULL
-);
-ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
-ERROR: child table is missing constraint "check_a"
--- check that the constraint matches in definition with parent's constraint
-ALTER TABLE fail_part ADD CONSTRAINT check_a CHECK (a >= 0);
-ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
-ERROR: child table "fail_part" has different definition for check constraint "check_a"
-DROP TABLE fail_part;
--- check the attributes and constraints after partition is attached
-CREATE TABLE part_1 (
- a int NOT NULL,
- b char(2) COLLATE "C",
- CONSTRAINT check_a CHECK (a > 0)
-);
-ALTER TABLE list_parted ATTACH PARTITION part_1 FOR VALUES IN (1);
--- attislocal and conislocal are always false for merged attributes and constraints respectively.
-SELECT attislocal, attinhcount FROM pg_attribute WHERE attrelid = 'part_1'::regclass AND attnum > 0;
- attislocal | attinhcount
-------------+-------------
- f | 1
- f | 1
-(2 rows)
-
-SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_1'::regclass AND conname = 'check_a';
- conislocal | coninhcount
-------------+-------------
- f | 1
-(1 row)
-
--- check that NOT NULL NO INHERIT cannot be merged to a normal NOT NULL
-CREATE TABLE part_fail (a int NOT NULL NO INHERIT,
- b char(2) COLLATE "C",
- CONSTRAINT check_a CHECK (a > 0)
-);
-ALTER TABLE list_parted ATTACH PARTITION part_fail FOR VALUES IN (2);
-ERROR: constraint "part_fail_a_not_null" conflicts with non-inherited constraint on child table "part_fail"
-DROP TABLE part_fail;
--- check that the new partition won't overlap with an existing partition
-CREATE TABLE fail_part (LIKE part_1 INCLUDING CONSTRAINTS);
-ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
-ERROR: partition "fail_part" would overlap partition "part_1"
-LINE 1: ...LE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
- ^
-DROP TABLE fail_part;
--- check that an existing table can be attached as a default partition
-CREATE TABLE def_part (LIKE list_parted INCLUDING CONSTRAINTS);
-ALTER TABLE list_parted ATTACH PARTITION def_part DEFAULT;
--- check attaching default partition fails if a default partition already
--- exists
-CREATE TABLE fail_def_part (LIKE part_1 INCLUDING CONSTRAINTS);
-ALTER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT;
-ERROR: partition "fail_def_part" conflicts with existing default partition "def_part"
-LINE 1: ...ER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT;
- ^
--- check validation when attaching list partitions
-CREATE TABLE list_parted2 (
- a int,
- b char
-) PARTITION BY LIST (a);
--- check that violating rows are correctly reported
-CREATE TABLE part_2 (LIKE list_parted2);
-INSERT INTO part_2 VALUES (3, 'a');
-ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
-ERROR: partition constraint of relation "part_2" is violated by some row
--- should be ok after deleting the bad row
-DELETE FROM part_2;
-ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
--- check partition cannot be attached if default has some row for its values
-CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT;
-INSERT INTO list_parted2_def VALUES (11, 'z');
-CREATE TABLE part_3 (LIKE list_parted2);
-ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11);
-ERROR: updated partition constraint for default partition "list_parted2_def" would be violated by some row
--- should be ok after deleting the bad row
-DELETE FROM list_parted2_def WHERE a = 11;
-ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11);
--- adding constraints that describe the desired partition constraint
--- (or more restrictive) will help skip the validation scan
-CREATE TABLE part_3_4 (
- LIKE list_parted2,
- CONSTRAINT check_a CHECK (a IN (3))
-);
--- however, if a list partition does not accept nulls, there should be
--- an explicit NOT NULL constraint on the partition key column for the
--- validation scan to be skipped;
-ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);
--- adding a NOT NULL constraint will cause the scan to be skipped
-ALTER TABLE list_parted2 DETACH PARTITION part_3_4;
-ALTER TABLE part_3_4 ALTER a SET NOT NULL;
-ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);
--- check if default partition scan skipped
-ALTER TABLE list_parted2_def ADD CONSTRAINT check_a CHECK (a IN (5, 6));
-CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66);
--- check validation when attaching range partitions
-CREATE TABLE range_parted (
- a int,
- b int
-) PARTITION BY RANGE (a, b);
--- check that violating rows are correctly reported
-CREATE TABLE part1 (
- a int NOT NULL CHECK (a = 1),
- b int NOT NULL CHECK (b >= 1 AND b <= 10)
-);
-INSERT INTO part1 VALUES (1, 10);
--- Remember the TO bound is exclusive
-ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10);
-ERROR: partition constraint of relation "part1" is violated by some row
--- should be ok after deleting the bad row
-DELETE FROM part1;
-ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10);
--- adding constraints that describe the desired partition constraint
--- (or more restrictive) will help skip the validation scan
-CREATE TABLE part2 (
- a int NOT NULL CHECK (a = 1),
- b int NOT NULL CHECK (b >= 10 AND b < 18)
-);
-ALTER TABLE range_parted ATTACH PARTITION part2 FOR VALUES FROM (1, 10) TO (1, 20);
--- Create default partition
-CREATE TABLE partr_def1 PARTITION OF range_parted DEFAULT;
--- Only one default partition is allowed, hence, following should give error
-CREATE TABLE partr_def2 (LIKE part1 INCLUDING CONSTRAINTS);
-ALTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT;
-ERROR: partition "partr_def2" conflicts with existing default partition "partr_def1"
-LINE 1: ...LTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT;
- ^
--- Overlapping partitions cannot be attached, hence, following should give error
-INSERT INTO partr_def1 VALUES (2, 10);
-CREATE TABLE part3 (LIKE range_parted);
-ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (2, 10) TO (2, 20);
-ERROR: updated partition constraint for default partition "partr_def1" would be violated by some row
--- Attaching partitions should be successful when there are no overlapping rows
-ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (3, 10) TO (3, 20);
--- check that leaf partitions are scanned when attaching a partitioned
--- table
-CREATE TABLE part_5 (
- LIKE list_parted2
-) PARTITION BY LIST (b);
--- check that violating rows are correctly reported
-CREATE TABLE part_5_a PARTITION OF part_5 FOR VALUES IN ('a');
-INSERT INTO part_5_a (a, b) VALUES (6, 'a');
-ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
-ERROR: partition constraint of relation "part_5_a" is violated by some row
--- delete the faulting row and also add a constraint to skip the scan
-DELETE FROM part_5_a WHERE a NOT IN (3);
-ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 5);
-ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
-ALTER TABLE list_parted2 DETACH PARTITION part_5;
-ALTER TABLE part_5 DROP CONSTRAINT check_a;
--- scan should again be skipped, even though NOT NULL is now a column property
-ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IN (5)), ALTER a SET NOT NULL;
-ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
--- Check the case where attnos of the partitioning columns in the table being
--- attached differs from the parent. It should not affect the constraint-
--- checking logic that allows to skip the scan.
-CREATE TABLE part_6 (
- c int,
- LIKE list_parted2,
- CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 6)
-);
-ALTER TABLE part_6 DROP c;
-ALTER TABLE list_parted2 ATTACH PARTITION part_6 FOR VALUES IN (6);
--- Similar to above, but the table being attached is a partitioned table
--- whose partition has still different attnos for the root partitioning
--- columns.
-CREATE TABLE part_7 (
- LIKE list_parted2,
- CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7)
-) PARTITION BY LIST (b);
-CREATE TABLE part_7_a_null (
- c int,
- d int,
- e int,
- LIKE list_parted2, -- 'a' will have attnum = 4
- CONSTRAINT check_b CHECK (b IS NULL OR b = 'a'),
- CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7)
-);
-ALTER TABLE part_7_a_null DROP c, DROP d, DROP e;
-ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN ('a', null);
-ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);
--- Same example, but check this time that the constraint correctly detects
--- violating rows
-ALTER TABLE list_parted2 DETACH PARTITION part_7;
-ALTER TABLE part_7 DROP CONSTRAINT check_a; -- thusly, scan won't be skipped
-INSERT INTO part_7 (a, b) VALUES (8, null), (9, 'a');
-SELECT tableoid::regclass, a, b FROM part_7 order by a;
- tableoid | a | b
----------------+---+---
- part_7_a_null | 8 |
- part_7_a_null | 9 | a
-(2 rows)
-
-ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);
-ERROR: partition constraint of relation "part_7_a_null" is violated by some row
--- check that leaf partitions of default partition are scanned when
--- attaching a partitioned table.
-ALTER TABLE part_5 DROP CONSTRAINT check_a;
-CREATE TABLE part5_def PARTITION OF part_5 DEFAULT PARTITION BY LIST(a);
-CREATE TABLE part5_def_p1 PARTITION OF part5_def FOR VALUES IN (5);
-INSERT INTO part5_def_p1 VALUES (5, 'y');
-CREATE TABLE part5_p1 (LIKE part_5);
-ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y');
-ERROR: updated partition constraint for default partition "part5_def_p1" would be violated by some row
--- should be ok after deleting the bad row
-DELETE FROM part5_def_p1 WHERE b = 'y';
-ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y');
--- check that the table being attached is not already a partition
-ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
-ERROR: "part_2" is already a partition
--- check that circular inheritance is not allowed
-ALTER TABLE part_5 ATTACH PARTITION list_parted2 FOR VALUES IN ('b');
-ERROR: circular inheritance not allowed
-DETAIL: "part_5" is already a child of "list_parted2".
-ALTER TABLE list_parted2 ATTACH PARTITION list_parted2 FOR VALUES IN (0);
-ERROR: circular inheritance not allowed
-DETAIL: "list_parted2" is already a child of "list_parted2".
--- If a partitioned table being created or an existing table being attached
--- as a partition does not have a constraint that would allow validation scan
--- to be skipped, but an individual partition does, then the partition's
--- validation scan is skipped.
-CREATE TABLE quuux (a int, b text) PARTITION BY LIST (a);
-CREATE TABLE quuux_default PARTITION OF quuux DEFAULT PARTITION BY LIST (b);
-CREATE TABLE quuux_default1 PARTITION OF quuux_default (
- CONSTRAINT check_1 CHECK (a IS NOT NULL AND a = 1)
-) FOR VALUES IN ('b');
-CREATE TABLE quuux1 (a int, b text);
-ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1); -- validate!
-CREATE TABLE quuux2 (a int, b text);
-ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2); -- skip validation
-DROP TABLE quuux1, quuux2;
--- should validate for quuux1, but not for quuux2
-CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1);
-CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2);
-DROP TABLE quuux;
--- check validation when attaching hash partitions
--- Use hand-rolled hash functions and operator class to get predictable result
--- on different machines. part_test_int4_ops is defined in test_setup.sql.
--- check that the new partition won't overlap with an existing partition
-CREATE TABLE hash_parted (
- a int,
- b int
-) PARTITION BY HASH (a part_test_int4_ops);
-CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 4, REMAINDER 0);
-CREATE TABLE fail_part (LIKE hpart_1);
-ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 4);
-ERROR: partition "fail_part" would overlap partition "hpart_1"
-LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU...
- ^
-ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 0);
-ERROR: partition "fail_part" would overlap partition "hpart_1"
-LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU...
- ^
-DROP TABLE fail_part;
--- check validation when attaching hash partitions
--- check that violating rows are correctly reported
-CREATE TABLE hpart_2 (LIKE hash_parted);
-INSERT INTO hpart_2 VALUES (3, 0);
-ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1);
-ERROR: partition constraint of relation "hpart_2" is violated by some row
--- should be ok after deleting the bad row
-DELETE FROM hpart_2;
-ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1);
--- check that leaf partitions are scanned when attaching a partitioned
--- table
-CREATE TABLE hpart_5 (
- LIKE hash_parted
-) PARTITION BY LIST (b);
--- check that violating rows are correctly reported
-CREATE TABLE hpart_5_a PARTITION OF hpart_5 FOR VALUES IN ('1', '2', '3');
-INSERT INTO hpart_5_a (a, b) VALUES (7, 1);
-ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2);
-ERROR: partition constraint of relation "hpart_5_a" is violated by some row
--- should be ok after deleting the bad row
-DELETE FROM hpart_5_a;
-ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2);
--- check that the table being attach is with valid modulus and remainder value
-CREATE TABLE fail_part(LIKE hash_parted);
-ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 0, REMAINDER 1);
-ERROR: modulus for hash partition must be an integer value greater than zero
-ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 8);
-ERROR: remainder for hash partition must be less than modulus
-ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 3, REMAINDER 2);
-ERROR: every hash partition modulus must be a factor of the next larger modulus
-DETAIL: The new modulus 3 is not a factor of 4, the modulus of existing partition "hpart_1".
-DROP TABLE fail_part;
---
--- DETACH PARTITION
---
--- check that the table is partitioned at all
-CREATE TABLE regular_table (a int);
-ALTER TABLE regular_table DETACH PARTITION any_name;
-ERROR: ALTER action DETACH PARTITION cannot be performed on relation "regular_table"
-DETAIL: This operation is not supported for tables.
-ALTER TABLE regular_table DETACH PARTITION any_name CONCURRENTLY;
-ERROR: ALTER action DETACH PARTITION cannot be performed on relation "regular_table"
-DETAIL: This operation is not supported for tables.
-ALTER TABLE regular_table DETACH PARTITION any_name FINALIZE;
-ERROR: ALTER action DETACH PARTITION ... FINALIZE cannot be performed on relation "regular_table"
-DETAIL: This operation is not supported for tables.
-DROP TABLE regular_table;
--- check that the partition being detached exists at all
-ALTER TABLE list_parted2 DETACH PARTITION part_4;
-ERROR: relation "part_4" does not exist
-ALTER TABLE hash_parted DETACH PARTITION hpart_4;
-ERROR: relation "hpart_4" does not exist
--- check that the partition being detached is actually a partition of the parent
-CREATE TABLE not_a_part (a int);
-ALTER TABLE list_parted2 DETACH PARTITION not_a_part;
-ERROR: relation "not_a_part" is not a partition of relation "list_parted2"
-ALTER TABLE list_parted2 DETACH PARTITION part_1;
-ERROR: relation "part_1" is not a partition of relation "list_parted2"
-ALTER TABLE hash_parted DETACH PARTITION not_a_part;
-ERROR: relation "not_a_part" is not a partition of relation "hash_parted"
-DROP TABLE not_a_part;
--- check that, after being detached, attinhcount/coninhcount is dropped to 0 and
--- attislocal/conislocal is set to true
-ALTER TABLE list_parted2 DETACH PARTITION part_3_4;
-SELECT attinhcount, attislocal FROM pg_attribute WHERE attrelid = 'part_3_4'::regclass AND attnum > 0;
- attinhcount | attislocal
--------------+------------
- 0 | t
- 0 | t
-(2 rows)
-
-SELECT coninhcount, conislocal FROM pg_constraint WHERE conrelid = 'part_3_4'::regclass AND conname = 'check_a';
- coninhcount | conislocal
--------------+------------
- 0 | t
-(1 row)
-
-DROP TABLE part_3_4;
--- check that a detached partition is not dropped on dropping a partitioned table
-CREATE TABLE range_parted2 (
- a int
-) PARTITION BY RANGE(a);
-CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100);
-ALTER TABLE range_parted2 DETACH PARTITION part_rp;
-DROP TABLE range_parted2;
-SELECT * from part_rp;
- a
----
-(0 rows)
-
-DROP TABLE part_rp;
--- concurrent detach
-CREATE TABLE range_parted2 (
- a int
-) PARTITION BY RANGE(a);
-CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100);
-BEGIN;
--- doesn't work in a partition block
-ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
-ERROR: ALTER TABLE ... DETACH CONCURRENTLY cannot run inside a transaction block
-COMMIT;
-CREATE TABLE part_rpd PARTITION OF range_parted2 DEFAULT;
--- doesn't work if there's a default partition
-ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
-ERROR: cannot detach partitions concurrently when a default partition exists
--- doesn't work for the default partition
-ALTER TABLE range_parted2 DETACH PARTITION part_rpd CONCURRENTLY;
-ERROR: cannot detach partitions concurrently when a default partition exists
-DROP TABLE part_rpd;
--- works fine
-ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
-\d+ range_parted2
- Partitioned table "public.range_parted2"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a | integer | | | | plain | |
-Partition key: RANGE (a)
-Number of partitions: 0
-
--- constraint should be created
-\d part_rp
- Table "public.part_rp"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Check constraints:
- "part_rp_a_check" CHECK (a IS NOT NULL AND a >= 0 AND a < 100)
-
-CREATE TABLE part_rp100 PARTITION OF range_parted2 (CHECK (a>=123 AND a<133 AND a IS NOT NULL)) FOR VALUES FROM (100) to (200);
-ALTER TABLE range_parted2 DETACH PARTITION part_rp100 CONCURRENTLY;
--- redundant constraint should not be created
-\d part_rp100
- Table "public.part_rp100"
- Column | Type | Collation | Nullable | Default
---------+---------+-----------+----------+---------
- a | integer | | |
-Check constraints:
- "part_rp100_a_check" CHECK (a >= 123 AND a < 133 AND a IS NOT NULL)
-
-DROP TABLE range_parted2;
--- Check ALTER TABLE commands for partitioned tables and partitions
--- cannot add/drop column to/from *only* the parent
-ALTER TABLE ONLY list_parted2 ADD COLUMN c int;
-ERROR: column must be added to child tables too
-ALTER TABLE ONLY list_parted2 DROP COLUMN b;
-ERROR: cannot drop column from only the partitioned table when partitions exist
-HINT: Do not specify the ONLY keyword.
--- cannot add a column to partition or drop an inherited one
-ALTER TABLE part_2 ADD COLUMN c text;
-ERROR: cannot add column to a partition
-ALTER TABLE part_2 DROP COLUMN b;
-ERROR: cannot drop inherited column "b"
--- Nor rename, alter type
-ALTER TABLE part_2 RENAME COLUMN b to c;
-ERROR: cannot rename inherited column "b"
-ALTER TABLE part_2 ALTER COLUMN b TYPE text;
-ERROR: cannot alter inherited column "b"
--- cannot add NOT NULL or check constraints to *only* the parent, when
--- partitions exist
-ALTER TABLE ONLY list_parted2 ALTER b SET NOT NULL;
-ERROR: constraint must be added to child tables too
-HINT: Do not specify the ONLY keyword.
-ALTER TABLE ONLY list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz');
-ERROR: constraint must be added to child tables too
--- dropping them is ok though
-ALTER TABLE list_parted2 ALTER b SET NOT NULL;
-ALTER TABLE ONLY list_parted2 ALTER b DROP NOT NULL;
-ALTER TABLE list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz');
-ALTER TABLE ONLY list_parted2 DROP CONSTRAINT check_b;
--- ... and the partitions should still have both
-\d+ part_2
- Table "public.part_2"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+--------------+-----------+----------+---------+----------+--------------+-------------
- a | integer | | | | plain | |
- b | character(1) | | not null | | extended | |
-Partition of: list_parted2 FOR VALUES IN (2)
-Partition constraint: ((a IS NOT NULL) AND (a = 2))
-Check constraints:
- "check_b" CHECK (b <> 'zz'::bpchar)
-Not-null constraints:
- "list_parted2_b_not_null" NOT NULL "b"
-
--- It's alright though, if no partitions are yet created
-CREATE TABLE parted_no_parts (a int) PARTITION BY LIST (a);
-ALTER TABLE ONLY parted_no_parts ALTER a SET NOT NULL;
-ALTER TABLE ONLY parted_no_parts ADD CONSTRAINT check_a CHECK (a > 0);
-DROP TABLE parted_no_parts;
--- cannot drop inherited NOT NULL or check constraints from partition
-ALTER TABLE list_parted2 ALTER b SET NOT NULL, ADD CONSTRAINT check_a2 CHECK (a > 0);
-ALTER TABLE part_2 ALTER b DROP NOT NULL;
-ERROR: column "b" is marked NOT NULL in parent table
-ALTER TABLE part_2 DROP CONSTRAINT check_a2;
-ERROR: cannot drop inherited constraint "check_a2" of relation "part_2"
--- can't drop NOT NULL from under an invalid PK
-CREATE TABLE list_parted3 (a int NOT NULL) PARTITION BY LIST (a);
-CREATE TABLE list_parted3_1 PARTITION OF list_parted3 FOR VALUES IN (1);
-ALTER TABLE ONLY list_parted3 ADD PRIMARY KEY (a);
-ALTER TABLE ONLY list_parted3 DROP CONSTRAINT list_parted3_a_not_null;
-ERROR: column "a" is in a primary key
--- Doesn't make sense to add NO INHERIT constraints on partitioned tables
-ALTER TABLE list_parted2 add constraint check_b2 check (b <> 'zz') NO INHERIT;
-ERROR: cannot add NO INHERIT constraint to partitioned table "list_parted2"
--- check that a partition cannot participate in regular inheritance
-CREATE TABLE inh_test () INHERITS (part_2);
-ERROR: cannot inherit from partition "part_2"
-CREATE TABLE inh_test (LIKE part_2);
-ALTER TABLE inh_test INHERIT part_2;
-ERROR: cannot inherit from a partition
-ALTER TABLE part_2 INHERIT inh_test;
-ERROR: cannot change inheritance of a partition
--- cannot drop or alter type of partition key columns of lower level
--- partitioned tables; for example, part_5, which is list_parted2's
--- partition, is partitioned on b;
-ALTER TABLE list_parted2 DROP COLUMN b;
-ERROR: cannot drop column "b" because it is part of the partition key of relation "part_5"
-ALTER TABLE list_parted2 ALTER COLUMN b TYPE text;
-ERROR: cannot alter column "b" because it is part of the partition key of relation "part_5"
--- dropping non-partition key columns should be allowed on the parent table.
-ALTER TABLE list_parted DROP COLUMN b;
-SELECT * FROM list_parted;
- a
----
-(0 rows)
-
--- cleanup
-DROP TABLE list_parted, list_parted2, range_parted, list_parted3;
-DROP TABLE fail_def_part;
-DROP TABLE hash_parted;
--- more tests for certain multi-level partitioning scenarios
-create table p (a int, b int) partition by range (a, b);
-create table p1 (b int, a int not null) partition by range (b);
-create table p11 (like p1);
-alter table p11 drop a;
-alter table p11 add a int;
-alter table p11 drop a;
-alter table p11 add a int not null;
--- attnum for key attribute 'a' is different in p, p1, and p11
-select attrelid::regclass, attname, attnum
-from pg_attribute
-where attname = 'a'
- and (attrelid = 'p'::regclass
- or attrelid = 'p1'::regclass
- or attrelid = 'p11'::regclass)
-order by attrelid::regclass::text;
- attrelid | attname | attnum
-----------+---------+--------
- p | a | 1
- p1 | a | 2
- p11 | a | 4
-(3 rows)
-
-alter table p1 attach partition p11 for values from (2) to (5);
-insert into p1 (a, b) values (2, 3);
--- check that partition validation scan correctly detects violating rows
-alter table p attach partition p1 for values from (1, 2) to (1, 10);
-ERROR: partition constraint of relation "p11" is violated by some row
--- cleanup
-drop table p;
-drop table p1;
--- validate constraint on partitioned tables should only scan leaf partitions
-create table parted_validate_test (a int) partition by list (a);
-create table parted_validate_test_1 partition of parted_validate_test for values in (0, 1);
-alter table parted_validate_test add constraint parted_validate_test_chka check (a > 0) not valid;
-alter table parted_validate_test validate constraint parted_validate_test_chka;
-drop table parted_validate_test;
--- test alter column options
-CREATE TABLE attmp(i integer);
-INSERT INTO attmp VALUES (1);
-ALTER TABLE attmp ALTER COLUMN i SET (n_distinct = 1, n_distinct_inherited = 2);
-ALTER TABLE attmp ALTER COLUMN i RESET (n_distinct_inherited);
-ANALYZE attmp;
-DROP TABLE attmp;
-DROP USER regress_alter_table_user1;
--- check that violating rows are correctly reported when attaching as the
--- default partition
-create table defpart_attach_test (a int) partition by list (a);
-create table defpart_attach_test1 partition of defpart_attach_test for values in (1);
-create table defpart_attach_test_d (b int, a int);
-alter table defpart_attach_test_d drop b;
-insert into defpart_attach_test_d values (1), (2);
--- error because its constraint as the default partition would be violated
--- by the row containing 1
-alter table defpart_attach_test attach partition defpart_attach_test_d default;
-ERROR: partition constraint of relation "defpart_attach_test_d" is violated by some row
-delete from defpart_attach_test_d where a = 1;
-alter table defpart_attach_test_d add check (a > 1);
--- should be attached successfully and without needing to be scanned
-alter table defpart_attach_test attach partition defpart_attach_test_d default;
--- check that attaching a partition correctly reports any rows in the default
--- partition that should not be there for the new partition to be attached
--- successfully
-create table defpart_attach_test_2 (like defpart_attach_test_d);
-alter table defpart_attach_test attach partition defpart_attach_test_2 for values in (2);
-ERROR: updated partition constraint for default partition "defpart_attach_test_d" would be violated by some row
-drop table defpart_attach_test;
--- check combinations of temporary and permanent relations when attaching
--- partitions.
-create table perm_part_parent (a int) partition by list (a);
-create temp table temp_part_parent (a int) partition by list (a);
-create table perm_part_child (a int);
-create temp table temp_part_child (a int);
-alter table temp_part_parent attach partition perm_part_child default; -- error
-ERROR: cannot attach a permanent relation as partition of temporary relation "temp_part_parent"
-alter table perm_part_parent attach partition temp_part_child default; -- error
-ERROR: cannot attach a temporary relation as partition of permanent relation "perm_part_parent"
-alter table temp_part_parent attach partition temp_part_child default; -- ok
-drop table perm_part_parent cascade;
-drop table temp_part_parent cascade;
--- check that attaching partitions to a table while it is being used is
--- prevented
-create table tab_part_attach (a int) partition by list (a);
-create or replace function func_part_attach() returns trigger
- language plpgsql as $$
- begin
- execute 'create table tab_part_attach_1 (a int)';
- execute 'alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)';
- return null;
- end $$;
-create trigger trig_part_attach before insert on tab_part_attach
- for each statement execute procedure func_part_attach();
-insert into tab_part_attach values (1);
-ERROR: cannot ALTER TABLE "tab_part_attach" because it is being used by active queries in this session
-CONTEXT: SQL statement "alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)"
-PL/pgSQL function func_part_attach() line 4 at EXECUTE
-drop table tab_part_attach;
-drop function func_part_attach();
--- test case where the partitioning operator is a SQL function whose
--- evaluation results in the table's relcache being rebuilt partway through
--- the execution of an ATTACH PARTITION command
-create function at_test_sql_partop (int4, int4) returns int language sql
-as $$ select case when $1 = $2 then 0 when $1 > $2 then 1 else -1 end; $$;
-create operator class at_test_sql_partop for type int4 using btree as
- operator 1 < (int4, int4), operator 2 <= (int4, int4),
- operator 3 = (int4, int4), operator 4 >= (int4, int4),
- operator 5 > (int4, int4), function 1 at_test_sql_partop(int4, int4);
-create table at_test_sql_partop (a int) partition by range (a at_test_sql_partop);
-create table at_test_sql_partop_1 (a int);
-alter table at_test_sql_partop attach partition at_test_sql_partop_1 for values from (0) to (10);
-drop table at_test_sql_partop;
-drop operator class at_test_sql_partop using btree;
-drop function at_test_sql_partop;
-/* Test case for bug #16242 */
--- We create a parent and child where the child has missing
--- non-null attribute values, and arrange to pass them through
--- tuple conversion from the child to the parent tupdesc
-create table bar1 (a integer, b integer not null default 1)
- partition by range (a);
-create table bar2 (a integer);
-insert into bar2 values (1);
-alter table bar2 add column b integer not null default 1;
--- (at this point bar2 contains tuple with natts=1)
-alter table bar1 attach partition bar2 default;
--- this works:
-select * from bar1;
- a | b
----+---
- 1 | 1
-(1 row)
-
--- this exercises tuple conversion:
-create function xtrig()
- returns trigger language plpgsql
-as $$
- declare
- r record;
- begin
- for r in select * from old loop
- raise info 'a=%, b=%', r.a, r.b;
- end loop;
- return NULL;
- end;
-$$;
-create trigger xtrig
- after update on bar1
- referencing old table as old
- for each statement execute procedure xtrig();
-update bar1 set a = a + 1;
-INFO: a=1, b=1
-/* End test case for bug #16242 */
-/* Test case for bug #17409 */
-create table attbl (p1 int constraint pk_attbl primary key);
-create table atref (c1 int references attbl(p1));
-cluster attbl using pk_attbl;
-alter table attbl alter column p1 set data type bigint;
-alter table atref alter column c1 set data type bigint;
-drop table attbl, atref;
-create table attbl (p1 int constraint pk_attbl primary key);
-alter table attbl replica identity using index pk_attbl;
-create table atref (c1 int references attbl(p1));
-alter table attbl alter column p1 set data type bigint;
-alter table atref alter column c1 set data type bigint;
-drop table attbl, atref;
-/* End test case for bug #17409 */
--- Test that ALTER TABLE rewrite preserves a clustered index
--- for normal indexes and indexes on constraints.
-create table alttype_cluster (a int);
-alter table alttype_cluster add primary key (a);
-create index alttype_cluster_ind on alttype_cluster (a);
-alter table alttype_cluster cluster on alttype_cluster_ind;
--- Normal index remains clustered.
-select indexrelid::regclass, indisclustered from pg_index
- where indrelid = 'alttype_cluster'::regclass
- order by indexrelid::regclass::text;
- indexrelid | indisclustered
-----------------------+----------------
- alttype_cluster_ind | t
- alttype_cluster_pkey | f
-(2 rows)
-
-alter table alttype_cluster alter a type bigint;
-select indexrelid::regclass, indisclustered from pg_index
- where indrelid = 'alttype_cluster'::regclass
- order by indexrelid::regclass::text;
- indexrelid | indisclustered
-----------------------+----------------
- alttype_cluster_ind | t
- alttype_cluster_pkey | f
-(2 rows)
-
--- Constraint index remains clustered.
-alter table alttype_cluster cluster on alttype_cluster_pkey;
-select indexrelid::regclass, indisclustered from pg_index
- where indrelid = 'alttype_cluster'::regclass
- order by indexrelid::regclass::text;
- indexrelid | indisclustered
-----------------------+----------------
- alttype_cluster_ind | f
- alttype_cluster_pkey | t
-(2 rows)
-
-alter table alttype_cluster alter a type int;
-select indexrelid::regclass, indisclustered from pg_index
- where indrelid = 'alttype_cluster'::regclass
- order by indexrelid::regclass::text;
- indexrelid | indisclustered
-----------------------+----------------
- alttype_cluster_ind | f
- alttype_cluster_pkey | t
-(2 rows)
-
-drop table alttype_cluster;
---
--- Check that attaching or detaching a partitioned partition correctly leads
--- to its partitions' constraint being updated to reflect the parent's
--- newly added/removed constraint
-create table target_parted (a int, b int) partition by list (a);
-create table attach_parted (a int, b int) partition by list (b);
-create table attach_parted_part1 partition of attach_parted for values in (1);
--- insert a row directly into the leaf partition so that its partition
--- constraint is built and stored in the relcache
-insert into attach_parted_part1 values (1, 1);
--- the following better invalidate the partition constraint of the leaf
--- partition too...
-alter table target_parted attach partition attach_parted for values in (1);
--- ...such that the following insert fails
-insert into attach_parted_part1 values (2, 1);
-ERROR: new row for relation "attach_parted_part1" violates partition constraint
-DETAIL: Failing row contains (2, 1).
--- ...and doesn't when the partition is detached along with its own partition
-alter table target_parted detach partition attach_parted;
-insert into attach_parted_part1 values (2, 1);
--- Test altering table having publication
-create schema alter1;
-create schema alter2;
-create table alter1.t1 (a int);
-set client_min_messages = 'ERROR';
-create publication pub1 for table alter1.t1, tables in schema alter2;
-reset client_min_messages;
-alter table alter1.t1 set schema alter2;
-\d+ alter2.t1
- Table "alter2.t1"
- Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------+---------+-----------+----------+---------+---------+--------------+-------------
- a | integer | | | | plain | |
-Publications:
- "pub1"
-
-drop publication pub1;
-drop schema alter1 cascade;
-drop schema alter2 cascade;
-NOTICE: drop cascades to table alter2.t1
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/sequence.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/sequence.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/sequence.out 2024-11-15 02:50:52.502029300 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/sequence.out 2024-11-15 02:59:18.185116971 +0000
@@ -1,849 +1,2 @@
---
--- CREATE SEQUENCE
---
--- various error cases
-CREATE SEQUENCE sequence_testx INCREMENT BY 0;
-ERROR: INCREMENT must not be zero
-CREATE SEQUENCE sequence_testx INCREMENT BY -1 MINVALUE 20;
-ERROR: MINVALUE (20) must be less than MAXVALUE (-1)
-CREATE SEQUENCE sequence_testx INCREMENT BY 1 MAXVALUE -20;
-ERROR: MINVALUE (1) must be less than MAXVALUE (-20)
-CREATE SEQUENCE sequence_testx INCREMENT BY -1 START 10;
-ERROR: START value (10) cannot be greater than MAXVALUE (-1)
-CREATE SEQUENCE sequence_testx INCREMENT BY 1 START -10;
-ERROR: START value (-10) cannot be less than MINVALUE (1)
-CREATE SEQUENCE sequence_testx CACHE 0;
-ERROR: CACHE (0) must be greater than zero
--- OWNED BY errors
-CREATE SEQUENCE sequence_testx OWNED BY nobody; -- nonsense word
-ERROR: invalid OWNED BY option
-HINT: Specify OWNED BY table.column or OWNED BY NONE.
-CREATE SEQUENCE sequence_testx OWNED BY pg_class_oid_index.oid; -- not a table
-ERROR: sequence cannot be owned by relation "pg_class_oid_index"
-DETAIL: This operation is not supported for indexes.
-CREATE SEQUENCE sequence_testx OWNED BY pg_class.relname; -- not same schema
-ERROR: sequence must be in same schema as table it is linked to
-CREATE TABLE sequence_test_table (a int);
-CREATE SEQUENCE sequence_testx OWNED BY sequence_test_table.b; -- wrong column
-ERROR: column "b" of relation "sequence_test_table" does not exist
-DROP TABLE sequence_test_table;
--- sequence data types
-CREATE SEQUENCE sequence_test5 AS integer;
-CREATE SEQUENCE sequence_test6 AS smallint;
-CREATE SEQUENCE sequence_test7 AS bigint;
-CREATE SEQUENCE sequence_test8 AS integer MAXVALUE 100000;
-CREATE SEQUENCE sequence_test9 AS integer INCREMENT BY -1;
-CREATE SEQUENCE sequence_test10 AS integer MINVALUE -100000 START 1;
-CREATE SEQUENCE sequence_test11 AS smallint;
-CREATE SEQUENCE sequence_test12 AS smallint INCREMENT -1;
-CREATE SEQUENCE sequence_test13 AS smallint MINVALUE -32768;
-CREATE SEQUENCE sequence_test14 AS smallint MAXVALUE 32767 INCREMENT -1;
-CREATE SEQUENCE sequence_testx AS text;
-ERROR: sequence type must be smallint, integer, or bigint
-CREATE SEQUENCE sequence_testx AS nosuchtype;
-ERROR: type "nosuchtype" does not exist
-LINE 1: CREATE SEQUENCE sequence_testx AS nosuchtype;
- ^
-CREATE SEQUENCE sequence_testx AS smallint MAXVALUE 100000;
-ERROR: MAXVALUE (100000) is out of range for sequence data type smallint
-CREATE SEQUENCE sequence_testx AS smallint MINVALUE -100000;
-ERROR: MINVALUE (-100000) is out of range for sequence data type smallint
-ALTER SEQUENCE sequence_test5 AS smallint; -- success, max will be adjusted
-ALTER SEQUENCE sequence_test8 AS smallint; -- fail, max has to be adjusted
-ERROR: MAXVALUE (100000) is out of range for sequence data type smallint
-ALTER SEQUENCE sequence_test8 AS smallint MAXVALUE 20000; -- ok now
-ALTER SEQUENCE sequence_test9 AS smallint; -- success, min will be adjusted
-ALTER SEQUENCE sequence_test10 AS smallint; -- fail, min has to be adjusted
-ERROR: MINVALUE (-100000) is out of range for sequence data type smallint
-ALTER SEQUENCE sequence_test10 AS smallint MINVALUE -20000; -- ok now
-ALTER SEQUENCE sequence_test11 AS int; -- max will be adjusted
-ALTER SEQUENCE sequence_test12 AS int; -- min will be adjusted
-ALTER SEQUENCE sequence_test13 AS int; -- min and max will be adjusted
-ALTER SEQUENCE sequence_test14 AS int; -- min and max will be adjusted
----
---- test creation of SERIAL column
----
-CREATE TABLE serialTest1 (f1 text, f2 serial);
-INSERT INTO serialTest1 VALUES ('foo');
-INSERT INTO serialTest1 VALUES ('bar');
-INSERT INTO serialTest1 VALUES ('force', 100);
-INSERT INTO serialTest1 VALUES ('wrong', NULL);
-ERROR: null value in column "f2" of relation "serialtest1" violates not-null constraint
-DETAIL: Failing row contains (wrong, null).
-SELECT * FROM serialTest1;
- f1 | f2
--------+-----
- foo | 1
- bar | 2
- force | 100
-(3 rows)
-
-SELECT pg_get_serial_sequence('serialTest1', 'f2');
- pg_get_serial_sequence
----------------------------
- public.serialtest1_f2_seq
-(1 row)
-
--- test smallserial / bigserial
-CREATE TABLE serialTest2 (f1 text, f2 serial, f3 smallserial, f4 serial2,
- f5 bigserial, f6 serial8);
-INSERT INTO serialTest2 (f1)
- VALUES ('test_defaults');
-INSERT INTO serialTest2 (f1, f2, f3, f4, f5, f6)
- VALUES ('test_max_vals', 2147483647, 32767, 32767, 9223372036854775807,
- 9223372036854775807),
- ('test_min_vals', -2147483648, -32768, -32768, -9223372036854775808,
- -9223372036854775808);
--- All these INSERTs should fail:
-INSERT INTO serialTest2 (f1, f3)
- VALUES ('bogus', -32769);
-ERROR: smallint out of range
-INSERT INTO serialTest2 (f1, f4)
- VALUES ('bogus', -32769);
-ERROR: smallint out of range
-INSERT INTO serialTest2 (f1, f3)
- VALUES ('bogus', 32768);
-ERROR: smallint out of range
-INSERT INTO serialTest2 (f1, f4)
- VALUES ('bogus', 32768);
-ERROR: smallint out of range
-INSERT INTO serialTest2 (f1, f5)
- VALUES ('bogus', -9223372036854775809);
-ERROR: bigint out of range
-INSERT INTO serialTest2 (f1, f6)
- VALUES ('bogus', -9223372036854775809);
-ERROR: bigint out of range
-INSERT INTO serialTest2 (f1, f5)
- VALUES ('bogus', 9223372036854775808);
-ERROR: bigint out of range
-INSERT INTO serialTest2 (f1, f6)
- VALUES ('bogus', 9223372036854775808);
-ERROR: bigint out of range
-SELECT * FROM serialTest2 ORDER BY f2 ASC;
- f1 | f2 | f3 | f4 | f5 | f6
----------------+-------------+--------+--------+----------------------+----------------------
- test_min_vals | -2147483648 | -32768 | -32768 | -9223372036854775808 | -9223372036854775808
- test_defaults | 1 | 1 | 1 | 1 | 1
- test_max_vals | 2147483647 | 32767 | 32767 | 9223372036854775807 | 9223372036854775807
-(3 rows)
-
-SELECT nextval('serialTest2_f2_seq');
- nextval
----------
- 2
-(1 row)
-
-SELECT nextval('serialTest2_f3_seq');
- nextval
----------
- 2
-(1 row)
-
-SELECT nextval('serialTest2_f4_seq');
- nextval
----------
- 2
-(1 row)
-
-SELECT nextval('serialTest2_f5_seq');
- nextval
----------
- 2
-(1 row)
-
-SELECT nextval('serialTest2_f6_seq');
- nextval
----------
- 2
-(1 row)
-
--- basic sequence operations using both text and oid references
-CREATE SEQUENCE sequence_test;
-CREATE SEQUENCE IF NOT EXISTS sequence_test;
-NOTICE: relation "sequence_test" already exists, skipping
-SELECT nextval('sequence_test'::text);
- nextval
----------
- 1
-(1 row)
-
-SELECT nextval('sequence_test'::regclass);
- nextval
----------
- 2
-(1 row)
-
-SELECT currval('sequence_test'::text);
- currval
----------
- 2
-(1 row)
-
-SELECT currval('sequence_test'::regclass);
- currval
----------
- 2
-(1 row)
-
-SELECT setval('sequence_test'::text, 32);
- setval
---------
- 32
-(1 row)
-
-SELECT nextval('sequence_test'::regclass);
- nextval
----------
- 33
-(1 row)
-
-SELECT setval('sequence_test'::text, 99, false);
- setval
---------
- 99
-(1 row)
-
-SELECT nextval('sequence_test'::regclass);
- nextval
----------
- 99
-(1 row)
-
-SELECT setval('sequence_test'::regclass, 32);
- setval
---------
- 32
-(1 row)
-
-SELECT nextval('sequence_test'::text);
- nextval
----------
- 33
-(1 row)
-
-SELECT setval('sequence_test'::regclass, 99, false);
- setval
---------
- 99
-(1 row)
-
-SELECT nextval('sequence_test'::text);
- nextval
----------
- 99
-(1 row)
-
-DISCARD SEQUENCES;
-SELECT currval('sequence_test'::regclass);
-ERROR: currval of sequence "sequence_test" is not yet defined in this session
-DROP SEQUENCE sequence_test;
--- renaming sequences
-CREATE SEQUENCE foo_seq;
-ALTER TABLE foo_seq RENAME TO foo_seq_new;
-SELECT * FROM foo_seq_new;
- last_value | log_cnt | is_called
-------------+---------+-----------
- 1 | 0 | f
-(1 row)
-
-SELECT nextval('foo_seq_new');
- nextval
----------
- 1
-(1 row)
-
-SELECT nextval('foo_seq_new');
- nextval
----------
- 2
-(1 row)
-
--- log_cnt can be higher if there is a checkpoint just at the right
--- time, so just test for the expected range
-SELECT last_value, log_cnt IN (31, 32) AS log_cnt_ok, is_called FROM foo_seq_new;
- last_value | log_cnt_ok | is_called
-------------+------------+-----------
- 2 | t | t
-(1 row)
-
-DROP SEQUENCE foo_seq_new;
--- renaming serial sequences
-ALTER TABLE serialtest1_f2_seq RENAME TO serialtest1_f2_foo;
-INSERT INTO serialTest1 VALUES ('more');
-SELECT * FROM serialTest1;
- f1 | f2
--------+-----
- foo | 1
- bar | 2
- force | 100
- more | 3
-(4 rows)
-
---
--- Check dependencies of serial and ordinary sequences
---
-CREATE TEMP SEQUENCE myseq2;
-CREATE TEMP SEQUENCE myseq3;
-CREATE TEMP TABLE t1 (
- f1 serial,
- f2 int DEFAULT nextval('myseq2'),
- f3 int DEFAULT nextval('myseq3'::text)
-);
--- Both drops should fail, but with different error messages:
-DROP SEQUENCE t1_f1_seq;
-ERROR: cannot drop sequence t1_f1_seq because other objects depend on it
-DETAIL: default value for column f1 of table t1 depends on sequence t1_f1_seq
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP SEQUENCE myseq2;
-ERROR: cannot drop sequence myseq2 because other objects depend on it
-DETAIL: default value for column f2 of table t1 depends on sequence myseq2
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
--- This however will work:
-DROP SEQUENCE myseq3;
-DROP TABLE t1;
--- Fails because no longer existent:
-DROP SEQUENCE t1_f1_seq;
-ERROR: sequence "t1_f1_seq" does not exist
--- Now OK:
-DROP SEQUENCE myseq2;
---
--- Alter sequence
---
-ALTER SEQUENCE IF EXISTS sequence_test2 RESTART WITH 24
- INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE;
-NOTICE: relation "sequence_test2" does not exist, skipping
-ALTER SEQUENCE serialTest1 CYCLE; -- error, not a sequence
-ERROR: cannot open relation "serialtest1"
-DETAIL: This operation is not supported for tables.
-CREATE SEQUENCE sequence_test2 START WITH 32;
-CREATE SEQUENCE sequence_test4 INCREMENT BY -1;
-SELECT nextval('sequence_test2');
- nextval
----------
- 32
-(1 row)
-
-SELECT nextval('sequence_test4');
- nextval
----------
- -1
-(1 row)
-
-ALTER SEQUENCE sequence_test2 RESTART;
-SELECT nextval('sequence_test2');
- nextval
----------
- 32
-(1 row)
-
-ALTER SEQUENCE sequence_test2 RESTART WITH 0; -- error
-ERROR: RESTART value (0) cannot be less than MINVALUE (1)
-ALTER SEQUENCE sequence_test4 RESTART WITH 40; -- error
-ERROR: RESTART value (40) cannot be greater than MAXVALUE (-1)
--- test CYCLE and NO CYCLE
-ALTER SEQUENCE sequence_test2 RESTART WITH 24
- INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE;
-SELECT nextval('sequence_test2');
- nextval
----------
- 24
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- 28
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- 32
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- 36
-(1 row)
-
-SELECT nextval('sequence_test2'); -- cycled
- nextval
----------
- 5
-(1 row)
-
-ALTER SEQUENCE sequence_test2 RESTART WITH 24
- NO CYCLE;
-SELECT nextval('sequence_test2');
- nextval
----------
- 24
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- 28
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- 32
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- 36
-(1 row)
-
-SELECT nextval('sequence_test2'); -- error
-ERROR: nextval: reached maximum value of sequence "sequence_test2" (36)
-ALTER SEQUENCE sequence_test2 RESTART WITH -24 START WITH -24
- INCREMENT BY -4 MINVALUE -36 MAXVALUE -5 CYCLE;
-SELECT nextval('sequence_test2');
- nextval
----------
- -24
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- -28
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- -32
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- -36
-(1 row)
-
-SELECT nextval('sequence_test2'); -- cycled
- nextval
----------
- -5
-(1 row)
-
-ALTER SEQUENCE sequence_test2 RESTART WITH -24
- NO CYCLE;
-SELECT nextval('sequence_test2');
- nextval
----------
- -24
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- -28
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- -32
-(1 row)
-
-SELECT nextval('sequence_test2');
- nextval
----------
- -36
-(1 row)
-
-SELECT nextval('sequence_test2'); -- error
-ERROR: nextval: reached minimum value of sequence "sequence_test2" (-36)
--- reset
-ALTER SEQUENCE IF EXISTS sequence_test2 RESTART WITH 32 START WITH 32
- INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE;
-SELECT setval('sequence_test2', -100); -- error
-ERROR: setval: value -100 is out of bounds for sequence "sequence_test2" (5..36)
-SELECT setval('sequence_test2', 100); -- error
-ERROR: setval: value 100 is out of bounds for sequence "sequence_test2" (5..36)
-SELECT setval('sequence_test2', 5);
- setval
---------
- 5
-(1 row)
-
-CREATE SEQUENCE sequence_test3; -- not read from, to test is_called
--- Information schema
-SELECT * FROM information_schema.sequences
- WHERE sequence_name ~ ANY(ARRAY['sequence_test', 'serialtest'])
- ORDER BY sequence_name ASC;
- sequence_catalog | sequence_schema | sequence_name | data_type | numeric_precision | numeric_precision_radix | numeric_scale | start_value | minimum_value | maximum_value | increment | cycle_option
-------------------+-----------------+--------------------+-----------+-------------------+-------------------------+---------------+-------------+----------------------+---------------------+-----------+--------------
- regression | public | sequence_test10 | smallint | 16 | 2 | 0 | 1 | -20000 | 32767 | 1 | NO
- regression | public | sequence_test11 | integer | 32 | 2 | 0 | 1 | 1 | 2147483647 | 1 | NO
- regression | public | sequence_test12 | integer | 32 | 2 | 0 | -1 | -2147483648 | -1 | -1 | NO
- regression | public | sequence_test13 | integer | 32 | 2 | 0 | -32768 | -2147483648 | 2147483647 | 1 | NO
- regression | public | sequence_test14 | integer | 32 | 2 | 0 | 32767 | -2147483648 | 2147483647 | -1 | NO
- regression | public | sequence_test2 | bigint | 64 | 2 | 0 | 32 | 5 | 36 | 4 | YES
- regression | public | sequence_test3 | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO
- regression | public | sequence_test4 | bigint | 64 | 2 | 0 | -1 | -9223372036854775808 | -1 | -1 | NO
- regression | public | sequence_test5 | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO
- regression | public | sequence_test6 | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO
- regression | public | sequence_test7 | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO
- regression | public | sequence_test8 | smallint | 16 | 2 | 0 | 1 | 1 | 20000 | 1 | NO
- regression | public | sequence_test9 | smallint | 16 | 2 | 0 | -1 | -32768 | -1 | -1 | NO
- regression | public | serialtest1_f2_foo | integer | 32 | 2 | 0 | 1 | 1 | 2147483647 | 1 | NO
- regression | public | serialtest2_f2_seq | integer | 32 | 2 | 0 | 1 | 1 | 2147483647 | 1 | NO
- regression | public | serialtest2_f3_seq | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO
- regression | public | serialtest2_f4_seq | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO
- regression | public | serialtest2_f5_seq | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO
- regression | public | serialtest2_f6_seq | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO
-(19 rows)
-
-SELECT schemaname, sequencename, start_value, min_value, max_value, increment_by, cycle, cache_size, last_value
-FROM pg_sequences
-WHERE sequencename ~ ANY(ARRAY['sequence_test', 'serialtest'])
- ORDER BY sequencename ASC;
- schemaname | sequencename | start_value | min_value | max_value | increment_by | cycle | cache_size | last_value
-------------+--------------------+-------------+----------------------+---------------------+--------------+-------+------------+------------
- public | sequence_test10 | 1 | -20000 | 32767 | 1 | f | 1 |
- public | sequence_test11 | 1 | 1 | 2147483647 | 1 | f | 1 |
- public | sequence_test12 | -1 | -2147483648 | -1 | -1 | f | 1 |
- public | sequence_test13 | -32768 | -2147483648 | 2147483647 | 1 | f | 1 |
- public | sequence_test14 | 32767 | -2147483648 | 2147483647 | -1 | f | 1 |
- public | sequence_test2 | 32 | 5 | 36 | 4 | t | 1 | 5
- public | sequence_test3 | 1 | 1 | 9223372036854775807 | 1 | f | 1 |
- public | sequence_test4 | -1 | -9223372036854775808 | -1 | -1 | f | 1 | -1
- public | sequence_test5 | 1 | 1 | 32767 | 1 | f | 1 |
- public | sequence_test6 | 1 | 1 | 32767 | 1 | f | 1 |
- public | sequence_test7 | 1 | 1 | 9223372036854775807 | 1 | f | 1 |
- public | sequence_test8 | 1 | 1 | 20000 | 1 | f | 1 |
- public | sequence_test9 | -1 | -32768 | -1 | -1 | f | 1 |
- public | serialtest1_f2_foo | 1 | 1 | 2147483647 | 1 | f | 1 | 3
- public | serialtest2_f2_seq | 1 | 1 | 2147483647 | 1 | f | 1 | 2
- public | serialtest2_f3_seq | 1 | 1 | 32767 | 1 | f | 1 | 2
- public | serialtest2_f4_seq | 1 | 1 | 32767 | 1 | f | 1 | 2
- public | serialtest2_f5_seq | 1 | 1 | 9223372036854775807 | 1 | f | 1 | 2
- public | serialtest2_f6_seq | 1 | 1 | 9223372036854775807 | 1 | f | 1 | 2
-(19 rows)
-
-SELECT * FROM pg_sequence_parameters('sequence_test4'::regclass);
- start_value | minimum_value | maximum_value | increment | cycle_option | cache_size | data_type
--------------+----------------------+---------------+-----------+--------------+------------+-----------
- -1 | -9223372036854775808 | -1 | -1 | f | 1 | 20
-(1 row)
-
-\d sequence_test4
- Sequence "public.sequence_test4"
- Type | Start | Minimum | Maximum | Increment | Cycles? | Cache
---------+-------+----------------------+---------+-----------+---------+-------
- bigint | -1 | -9223372036854775808 | -1 | -1 | no | 1
-
-\d serialtest2_f2_seq
- Sequence "public.serialtest2_f2_seq"
- Type | Start | Minimum | Maximum | Increment | Cycles? | Cache
----------+-------+---------+------------+-----------+---------+-------
- integer | 1 | 1 | 2147483647 | 1 | no | 1
-Owned by: public.serialtest2.f2
-
--- Test comments
-COMMENT ON SEQUENCE asdf IS 'won''t work';
-ERROR: relation "asdf" does not exist
-COMMENT ON SEQUENCE sequence_test2 IS 'will work';
-COMMENT ON SEQUENCE sequence_test2 IS NULL;
--- Test lastval()
-CREATE SEQUENCE seq;
-SELECT nextval('seq');
- nextval
----------
- 1
-(1 row)
-
-SELECT lastval();
- lastval
----------
- 1
-(1 row)
-
-SELECT setval('seq', 99);
- setval
---------
- 99
-(1 row)
-
-SELECT lastval();
- lastval
----------
- 99
-(1 row)
-
-DISCARD SEQUENCES;
-SELECT lastval();
-ERROR: lastval is not yet defined in this session
-CREATE SEQUENCE seq2;
-SELECT nextval('seq2');
- nextval
----------
- 1
-(1 row)
-
-SELECT lastval();
- lastval
----------
- 1
-(1 row)
-
-DROP SEQUENCE seq2;
--- should fail
-SELECT lastval();
-ERROR: lastval is not yet defined in this session
--- unlogged sequences
--- (more tests in src/test/recovery/)
-CREATE UNLOGGED SEQUENCE sequence_test_unlogged;
-ALTER SEQUENCE sequence_test_unlogged SET LOGGED;
-\d sequence_test_unlogged
- Sequence "public.sequence_test_unlogged"
- Type | Start | Minimum | Maximum | Increment | Cycles? | Cache
---------+-------+---------+---------------------+-----------+---------+-------
- bigint | 1 | 1 | 9223372036854775807 | 1 | no | 1
-
-ALTER SEQUENCE sequence_test_unlogged SET UNLOGGED;
-\d sequence_test_unlogged
- Unlogged sequence "public.sequence_test_unlogged"
- Type | Start | Minimum | Maximum | Increment | Cycles? | Cache
---------+-------+---------+---------------------+-----------+---------+-------
- bigint | 1 | 1 | 9223372036854775807 | 1 | no | 1
-
-DROP SEQUENCE sequence_test_unlogged;
--- Test sequences in read-only transactions
-CREATE TEMPORARY SEQUENCE sequence_test_temp1;
-START TRANSACTION READ ONLY;
-SELECT nextval('sequence_test_temp1'); -- ok
- nextval
----------
- 1
-(1 row)
-
-SELECT nextval('sequence_test2'); -- error
-ERROR: cannot execute nextval() in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT setval('sequence_test_temp1', 1); -- ok
- setval
---------
- 1
-(1 row)
-
-SELECT setval('sequence_test2', 1); -- error
-ERROR: cannot execute setval() in a read-only transaction
-ROLLBACK;
--- privileges tests
-CREATE USER regress_seq_user;
--- nextval
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-REVOKE ALL ON seq3 FROM regress_seq_user;
-GRANT SELECT ON seq3 TO regress_seq_user;
-SELECT nextval('seq3');
-ERROR: permission denied for sequence seq3
-ROLLBACK;
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-REVOKE ALL ON seq3 FROM regress_seq_user;
-GRANT UPDATE ON seq3 TO regress_seq_user;
-SELECT nextval('seq3');
- nextval
----------
- 1
-(1 row)
-
-ROLLBACK;
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-REVOKE ALL ON seq3 FROM regress_seq_user;
-GRANT USAGE ON seq3 TO regress_seq_user;
-SELECT nextval('seq3');
- nextval
----------
- 1
-(1 row)
-
-ROLLBACK;
--- currval
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-SELECT nextval('seq3');
- nextval
----------
- 1
-(1 row)
-
-REVOKE ALL ON seq3 FROM regress_seq_user;
-GRANT SELECT ON seq3 TO regress_seq_user;
-SELECT currval('seq3');
- currval
----------
- 1
-(1 row)
-
-ROLLBACK;
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-SELECT nextval('seq3');
- nextval
----------
- 1
-(1 row)
-
-REVOKE ALL ON seq3 FROM regress_seq_user;
-GRANT UPDATE ON seq3 TO regress_seq_user;
-SELECT currval('seq3');
-ERROR: permission denied for sequence seq3
-ROLLBACK;
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-SELECT nextval('seq3');
- nextval
----------
- 1
-(1 row)
-
-REVOKE ALL ON seq3 FROM regress_seq_user;
-GRANT USAGE ON seq3 TO regress_seq_user;
-SELECT currval('seq3');
- currval
----------
- 1
-(1 row)
-
-ROLLBACK;
--- lastval
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-SELECT nextval('seq3');
- nextval
----------
- 1
-(1 row)
-
-REVOKE ALL ON seq3 FROM regress_seq_user;
-GRANT SELECT ON seq3 TO regress_seq_user;
-SELECT lastval();
- lastval
----------
- 1
-(1 row)
-
-ROLLBACK;
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-SELECT nextval('seq3');
- nextval
----------
- 1
-(1 row)
-
-REVOKE ALL ON seq3 FROM regress_seq_user;
-GRANT UPDATE ON seq3 TO regress_seq_user;
-SELECT lastval();
-ERROR: permission denied for sequence seq3
-ROLLBACK;
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-SELECT nextval('seq3');
- nextval
----------
- 1
-(1 row)
-
-REVOKE ALL ON seq3 FROM regress_seq_user;
-GRANT USAGE ON seq3 TO regress_seq_user;
-SELECT lastval();
- lastval
----------
- 1
-(1 row)
-
-ROLLBACK;
--- setval
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-CREATE SEQUENCE seq3;
-REVOKE ALL ON seq3 FROM regress_seq_user;
-SAVEPOINT save;
-SELECT setval('seq3', 5);
-ERROR: permission denied for sequence seq3
-ROLLBACK TO save;
-GRANT UPDATE ON seq3 TO regress_seq_user;
-SELECT setval('seq3', 5);
- setval
---------
- 5
-(1 row)
-
-SELECT nextval('seq3');
- nextval
----------
- 6
-(1 row)
-
-ROLLBACK;
--- ALTER SEQUENCE
-BEGIN;
-SET LOCAL SESSION AUTHORIZATION regress_seq_user;
-ALTER SEQUENCE sequence_test2 START WITH 1;
-ERROR: must be owner of sequence sequence_test2
-ROLLBACK;
--- Sequences should get wiped out as well:
-DROP TABLE serialTest1, serialTest2;
--- Make sure sequences are gone:
-SELECT * FROM information_schema.sequences WHERE sequence_name IN
- ('sequence_test2', 'serialtest2_f2_seq', 'serialtest2_f3_seq',
- 'serialtest2_f4_seq', 'serialtest2_f5_seq', 'serialtest2_f6_seq')
- ORDER BY sequence_name ASC;
- sequence_catalog | sequence_schema | sequence_name | data_type | numeric_precision | numeric_precision_radix | numeric_scale | start_value | minimum_value | maximum_value | increment | cycle_option
-------------------+-----------------+----------------+-----------+-------------------+-------------------------+---------------+-------------+---------------+---------------+-----------+--------------
- regression | public | sequence_test2 | bigint | 64 | 2 | 0 | 32 | 5 | 36 | 4 | YES
-(1 row)
-
-DROP USER regress_seq_user;
-DROP SEQUENCE seq;
--- cache tests
-CREATE SEQUENCE test_seq1 CACHE 10;
-SELECT nextval('test_seq1');
- nextval
----------
- 1
-(1 row)
-
-SELECT nextval('test_seq1');
- nextval
----------
- 2
-(1 row)
-
-SELECT nextval('test_seq1');
- nextval
----------
- 3
-(1 row)
-
--- pg_get_sequence_data
-SELECT * FROM pg_get_sequence_data('test_seq1');
- last_value | is_called
-------------+-----------
- 10 | t
-(1 row)
-
-DROP SEQUENCE test_seq1;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/polymorphism.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/polymorphism.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/polymorphism.out 2024-11-15 02:50:52.486055632 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/polymorphism.out 2024-11-15 02:59:18.173116956 +0000
@@ -1,2110 +1,2 @@
---
--- Tests for polymorphic SQL functions and aggregates based on them.
--- Tests for other features related to function-calling have snuck in, too.
---
-create function polyf(x anyelement) returns anyelement as $$
- select x + 1
-$$ language sql;
-select polyf(42) as int, polyf(4.5) as num;
- int | num
------+-----
- 43 | 5.5
-(1 row)
-
-select polyf(point(3,4)); -- fail for lack of + operator
-ERROR: operator does not exist: point + integer
-LINE 2: select x + 1
- ^
-HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
-QUERY:
- select x + 1
-
-CONTEXT: SQL function "polyf" during inlining
-drop function polyf(x anyelement);
-create function polyf(x anyelement) returns anyarray as $$
- select array[x + 1, x + 2]
-$$ language sql;
-select polyf(42) as int, polyf(4.5) as num;
- int | num
----------+-----------
- {43,44} | {5.5,6.5}
-(1 row)
-
-drop function polyf(x anyelement);
-create function polyf(x anyarray) returns anyelement as $$
- select x[1]
-$$ language sql;
-select polyf(array[2,4]) as int, polyf(array[4.5, 7.7]) as num;
- int | num
------+-----
- 2 | 4.5
-(1 row)
-
-select polyf(stavalues1) from pg_statistic; -- fail, can't infer element type
-ERROR: cannot determine element type of "anyarray" argument
-drop function polyf(x anyarray);
-create function polyf(x anyarray) returns anyarray as $$
- select x
-$$ language sql;
-select polyf(array[2,4]) as int, polyf(array[4.5, 7.7]) as num;
- int | num
--------+-----------
- {2,4} | {4.5,7.7}
-(1 row)
-
-select polyf(stavalues1) from pg_statistic; -- fail, can't infer element type
-ERROR: return type anyarray is not supported for SQL functions
-CONTEXT: SQL function "polyf" during inlining
-drop function polyf(x anyarray);
--- fail, can't infer type:
-create function polyf(x anyelement) returns anyrange as $$
- select array[x + 1, x + 2]
-$$ language sql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange.
-create function polyf(x anyrange) returns anyarray as $$
- select array[lower(x), upper(x)]
-$$ language sql;
-select polyf(int4range(42, 49)) as int, polyf(float8range(4.5, 7.8)) as num;
- int | num
----------+-----------
- {42,49} | {4.5,7.8}
-(1 row)
-
-drop function polyf(x anyrange);
-create function polyf(x anycompatible, y anycompatible) returns anycompatiblearray as $$
- select array[x, y]
-$$ language sql;
-select polyf(2, 4) as int, polyf(2, 4.5) as num;
- int | num
--------+---------
- {2,4} | {2,4.5}
-(1 row)
-
-drop function polyf(x anycompatible, y anycompatible);
-create function polyf(x anycompatiblerange, y anycompatible, z anycompatible) returns anycompatiblearray as $$
- select array[lower(x), upper(x), y, z]
-$$ language sql;
-select polyf(int4range(42, 49), 11, 2::smallint) as int, polyf(float8range(4.5, 7.8), 7.8, 11::real) as num;
- int | num
---------------+------------------
- {42,49,11,2} | {4.5,7.8,7.8,11}
-(1 row)
-
-select polyf(int4range(42, 49), 11, 4.5) as fail; -- range type doesn't fit
-ERROR: function polyf(int4range, integer, numeric) does not exist
-LINE 1: select polyf(int4range(42, 49), 11, 4.5) as fail;
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function polyf(x anycompatiblerange, y anycompatible, z anycompatible);
-create function polyf(x anycompatiblemultirange, y anycompatible, z anycompatible) returns anycompatiblearray as $$
- select array[lower(x), upper(x), y, z]
-$$ language sql;
-select polyf(multirange(int4range(42, 49)), 11, 2::smallint) as int, polyf(multirange(float8range(4.5, 7.8)), 7.8, 11::real) as num;
- int | num
---------------+------------------
- {42,49,11,2} | {4.5,7.8,7.8,11}
-(1 row)
-
-select polyf(multirange(int4range(42, 49)), 11, 4.5) as fail; -- range type doesn't fit
-ERROR: function polyf(int4multirange, integer, numeric) does not exist
-LINE 1: select polyf(multirange(int4range(42, 49)), 11, 4.5) as fail...
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function polyf(x anycompatiblemultirange, y anycompatible, z anycompatible);
--- fail, can't infer type:
-create function polyf(x anycompatible) returns anycompatiblerange as $$
- select array[x + 1, x + 2]
-$$ language sql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange.
-create function polyf(x anycompatiblerange, y anycompatiblearray) returns anycompatiblerange as $$
- select x
-$$ language sql;
-select polyf(int4range(42, 49), array[11]) as int, polyf(float8range(4.5, 7.8), array[7]) as num;
- int | num
----------+-----------
- [42,49) | [4.5,7.8)
-(1 row)
-
-drop function polyf(x anycompatiblerange, y anycompatiblearray);
--- fail, can't infer type:
-create function polyf(x anycompatible) returns anycompatiblemultirange as $$
- select array[x + 1, x + 2]
-$$ language sql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anycompatiblemultirange requires at least one input of type anycompatiblerange or anycompatiblemultirange.
-create function polyf(x anycompatiblemultirange, y anycompatiblearray) returns anycompatiblemultirange as $$
- select x
-$$ language sql;
-select polyf(multirange(int4range(42, 49)), array[11]) as int, polyf(multirange(float8range(4.5, 7.8)), array[7]) as num;
- int | num
------------+-------------
- {[42,49)} | {[4.5,7.8)}
-(1 row)
-
-drop function polyf(x anycompatiblemultirange, y anycompatiblearray);
-create function polyf(a anyelement, b anyarray,
- c anycompatible, d anycompatible,
- OUT x anyarray, OUT y anycompatiblearray)
-as $$
- select a || b, array[c, d]
-$$ language sql;
-select x, pg_typeof(x), y, pg_typeof(y)
- from polyf(11, array[1, 2], 42, 34.5);
- x | pg_typeof | y | pg_typeof
-----------+-----------+-----------+-----------
- {11,1,2} | integer[] | {42,34.5} | numeric[]
-(1 row)
-
-select x, pg_typeof(x), y, pg_typeof(y)
- from polyf(11, array[1, 2], point(1,2), point(3,4));
- x | pg_typeof | y | pg_typeof
-----------+-----------+-------------------+-----------
- {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[]
-(1 row)
-
-select x, pg_typeof(x), y, pg_typeof(y)
- from polyf(11, '{1,2}', point(1,2), '(3,4)');
- x | pg_typeof | y | pg_typeof
-----------+-----------+-------------------+-----------
- {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[]
-(1 row)
-
-select x, pg_typeof(x), y, pg_typeof(y)
- from polyf(11, array[1, 2.2], 42, 34.5); -- fail
-ERROR: function polyf(integer, numeric[], integer, numeric) does not exist
-LINE 2: from polyf(11, array[1, 2.2], 42, 34.5);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function polyf(a anyelement, b anyarray,
- c anycompatible, d anycompatible);
-create function polyf(anyrange) returns anymultirange
-as 'select multirange($1);' language sql;
-select polyf(int4range(1,10));
- polyf
-----------
- {[1,10)}
-(1 row)
-
-select polyf(null);
-ERROR: could not determine polymorphic type because input has type unknown
-drop function polyf(anyrange);
-create function polyf(anymultirange) returns anyelement
-as 'select lower($1);' language sql;
-select polyf(int4multirange(int4range(1,10), int4range(20,30)));
- polyf
--------
- 1
-(1 row)
-
-select polyf(null);
-ERROR: could not determine polymorphic type because input has type unknown
-drop function polyf(anymultirange);
-create function polyf(anycompatiblerange) returns anycompatiblemultirange
-as 'select multirange($1);' language sql;
-select polyf(int4range(1,10));
- polyf
-----------
- {[1,10)}
-(1 row)
-
-select polyf(null);
-ERROR: could not determine polymorphic type anycompatiblerange because input has type unknown
-drop function polyf(anycompatiblerange);
-create function polyf(anymultirange) returns anyrange
-as 'select range_merge($1);' language sql;
-select polyf(int4multirange(int4range(1,10), int4range(20,30)));
- polyf
---------
- [1,30)
-(1 row)
-
-select polyf(null);
-ERROR: could not determine polymorphic type because input has type unknown
-drop function polyf(anymultirange);
-create function polyf(anycompatiblemultirange) returns anycompatiblerange
-as 'select range_merge($1);' language sql;
-select polyf(int4multirange(int4range(1,10), int4range(20,30)));
- polyf
---------
- [1,30)
-(1 row)
-
-select polyf(null);
-ERROR: could not determine polymorphic type anycompatiblerange because input has type unknown
-drop function polyf(anycompatiblemultirange);
-create function polyf(anycompatiblemultirange) returns anycompatible
-as 'select lower($1);' language sql;
-select polyf(int4multirange(int4range(1,10), int4range(20,30)));
- polyf
--------
- 1
-(1 row)
-
-select polyf(null);
-ERROR: could not determine polymorphic type anycompatiblemultirange because input has type unknown
-drop function polyf(anycompatiblemultirange);
---
--- Polymorphic aggregate tests
---
--- Legend:
------------
--- A = type is ANY
--- P = type is polymorphic
--- N = type is non-polymorphic
--- B = aggregate base type
--- S = aggregate state type
--- R = aggregate return type
--- 1 = arg1 of a function
--- 2 = arg2 of a function
--- ag = aggregate
--- tf = trans (state) function
--- ff = final function
--- rt = return type of a function
--- -> = implies
--- => = allowed
--- !> = not allowed
--- E = exists
--- NE = not-exists
---
--- Possible states:
--- ----------------
--- B = (A || P || N)
--- when (B = A) -> (tf2 = NE)
--- S = (P || N)
--- ff = (E || NE)
--- tf1 = (P || N)
--- tf2 = (NE || P || N)
--- R = (P || N)
--- create functions for use as tf and ff with the needed combinations of
--- argument polymorphism, but within the constraints of valid aggregate
--- functions, i.e. tf arg1 and tf return type must match
--- polymorphic single arg transfn
-CREATE FUNCTION stfp(anyarray) RETURNS anyarray AS
-'select $1' LANGUAGE SQL;
--- non-polymorphic single arg transfn
-CREATE FUNCTION stfnp(int[]) RETURNS int[] AS
-'select $1' LANGUAGE SQL;
--- dual polymorphic transfn
-CREATE FUNCTION tfp(anyarray,anyelement) RETURNS anyarray AS
-'select $1 || $2' LANGUAGE SQL;
--- dual non-polymorphic transfn
-CREATE FUNCTION tfnp(int[],int) RETURNS int[] AS
-'select $1 || $2' LANGUAGE SQL;
--- arg1 only polymorphic transfn
-CREATE FUNCTION tf1p(anyarray,int) RETURNS anyarray AS
-'select $1' LANGUAGE SQL;
--- arg2 only polymorphic transfn
-CREATE FUNCTION tf2p(int[],anyelement) RETURNS int[] AS
-'select $1' LANGUAGE SQL;
--- multi-arg polymorphic
-CREATE FUNCTION sum3(anyelement,anyelement,anyelement) returns anyelement AS
-'select $1+$2+$3' language sql strict;
--- finalfn polymorphic
-CREATE FUNCTION ffp(anyarray) RETURNS anyarray AS
-'select $1' LANGUAGE SQL;
--- finalfn non-polymorphic
-CREATE FUNCTION ffnp(int[]) returns int[] as
-'select $1' LANGUAGE SQL;
--- Try to cover all the possible states:
---
--- Note: in Cases 1 & 2, we are trying to return P. Therefore, if the transfn
--- is stfnp, tfnp, or tf2p, we must use ffp as finalfn, because stfnp, tfnp,
--- and tf2p do not return P. Conversely, in Cases 3 & 4, we are trying to
--- return N. Therefore, if the transfn is stfp, tfp, or tf1p, we must use ffnp
--- as finalfn, because stfp, tfp, and tf1p do not return N.
---
--- Case1 (R = P) && (B = A)
--- ------------------------
--- S tf1
--- -------
--- N N
--- should CREATE
-CREATE AGGREGATE myaggp01a(*) (SFUNC = stfnp, STYPE = int4[],
- FINALFUNC = ffp, INITCOND = '{}');
--- P N
--- should ERROR: stfnp(anyarray) not matched by stfnp(int[])
-CREATE AGGREGATE myaggp02a(*) (SFUNC = stfnp, STYPE = anyarray,
- FINALFUNC = ffp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- N P
--- should CREATE
-CREATE AGGREGATE myaggp03a(*) (SFUNC = stfp, STYPE = int4[],
- FINALFUNC = ffp, INITCOND = '{}');
-CREATE AGGREGATE myaggp03b(*) (SFUNC = stfp, STYPE = int4[],
- INITCOND = '{}');
--- P P
--- should ERROR: we have no way to resolve S
-CREATE AGGREGATE myaggp04a(*) (SFUNC = stfp, STYPE = anyarray,
- FINALFUNC = ffp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
-CREATE AGGREGATE myaggp04b(*) (SFUNC = stfp, STYPE = anyarray,
- INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- Case2 (R = P) && ((B = P) || (B = N))
--- -------------------------------------
--- S tf1 B tf2
--- -----------------------
--- N N N N
--- should CREATE
-CREATE AGGREGATE myaggp05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[],
- FINALFUNC = ffp, INITCOND = '{}');
--- N N N P
--- should CREATE
-CREATE AGGREGATE myaggp06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[],
- FINALFUNC = ffp, INITCOND = '{}');
--- N N P N
--- should ERROR: tfnp(int[], anyelement) not matched by tfnp(int[], int)
-CREATE AGGREGATE myaggp07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[],
- FINALFUNC = ffp, INITCOND = '{}');
-ERROR: function tfnp(integer[], anyelement) does not exist
--- N N P P
--- should CREATE
-CREATE AGGREGATE myaggp08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[],
- FINALFUNC = ffp, INITCOND = '{}');
--- N P N N
--- should CREATE
-CREATE AGGREGATE myaggp09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[],
- FINALFUNC = ffp, INITCOND = '{}');
-CREATE AGGREGATE myaggp09b(BASETYPE = int, SFUNC = tf1p, STYPE = int[],
- INITCOND = '{}');
--- N P N P
--- should CREATE
-CREATE AGGREGATE myaggp10a(BASETYPE = int, SFUNC = tfp, STYPE = int[],
- FINALFUNC = ffp, INITCOND = '{}');
-CREATE AGGREGATE myaggp10b(BASETYPE = int, SFUNC = tfp, STYPE = int[],
- INITCOND = '{}');
--- N P P N
--- should ERROR: tf1p(int[],anyelement) not matched by tf1p(anyarray,int)
-CREATE AGGREGATE myaggp11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[],
- FINALFUNC = ffp, INITCOND = '{}');
-ERROR: function tf1p(integer[], anyelement) does not exist
-CREATE AGGREGATE myaggp11b(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[],
- INITCOND = '{}');
-ERROR: function tf1p(integer[], anyelement) does not exist
--- N P P P
--- should ERROR: tfp(int[],anyelement) not matched by tfp(anyarray,anyelement)
-CREATE AGGREGATE myaggp12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[],
- FINALFUNC = ffp, INITCOND = '{}');
-ERROR: function tfp(integer[], anyelement) does not exist
-CREATE AGGREGATE myaggp12b(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[],
- INITCOND = '{}');
-ERROR: function tfp(integer[], anyelement) does not exist
--- P N N N
--- should ERROR: tfnp(anyarray, int) not matched by tfnp(int[],int)
-CREATE AGGREGATE myaggp13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray,
- FINALFUNC = ffp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- P N N P
--- should ERROR: tf2p(anyarray, int) not matched by tf2p(int[],anyelement)
-CREATE AGGREGATE myaggp14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray,
- FINALFUNC = ffp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- P N P N
--- should ERROR: tfnp(anyarray, anyelement) not matched by tfnp(int[],int)
-CREATE AGGREGATE myaggp15a(BASETYPE = anyelement, SFUNC = tfnp,
- STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}');
-ERROR: function tfnp(anyarray, anyelement) does not exist
--- P N P P
--- should ERROR: tf2p(anyarray, anyelement) not matched by tf2p(int[],anyelement)
-CREATE AGGREGATE myaggp16a(BASETYPE = anyelement, SFUNC = tf2p,
- STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}');
-ERROR: function tf2p(anyarray, anyelement) does not exist
--- P P N N
--- should ERROR: we have no way to resolve S
-CREATE AGGREGATE myaggp17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray,
- FINALFUNC = ffp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
-CREATE AGGREGATE myaggp17b(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray,
- INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- P P N P
--- should ERROR: tfp(anyarray, int) not matched by tfp(anyarray, anyelement)
-CREATE AGGREGATE myaggp18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray,
- FINALFUNC = ffp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
-CREATE AGGREGATE myaggp18b(BASETYPE = int, SFUNC = tfp, STYPE = anyarray,
- INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- P P P N
--- should ERROR: tf1p(anyarray, anyelement) not matched by tf1p(anyarray, int)
-CREATE AGGREGATE myaggp19a(BASETYPE = anyelement, SFUNC = tf1p,
- STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}');
-ERROR: function tf1p(anyarray, anyelement) does not exist
-CREATE AGGREGATE myaggp19b(BASETYPE = anyelement, SFUNC = tf1p,
- STYPE = anyarray, INITCOND = '{}');
-ERROR: function tf1p(anyarray, anyelement) does not exist
--- P P P P
--- should CREATE
-CREATE AGGREGATE myaggp20a(BASETYPE = anyelement, SFUNC = tfp,
- STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}');
-CREATE AGGREGATE myaggp20b(BASETYPE = anyelement, SFUNC = tfp,
- STYPE = anyarray, INITCOND = '{}');
--- Case3 (R = N) && (B = A)
--- ------------------------
--- S tf1
--- -------
--- N N
--- should CREATE
-CREATE AGGREGATE myaggn01a(*) (SFUNC = stfnp, STYPE = int4[],
- FINALFUNC = ffnp, INITCOND = '{}');
-CREATE AGGREGATE myaggn01b(*) (SFUNC = stfnp, STYPE = int4[],
- INITCOND = '{}');
--- P N
--- should ERROR: stfnp(anyarray) not matched by stfnp(int[])
-CREATE AGGREGATE myaggn02a(*) (SFUNC = stfnp, STYPE = anyarray,
- FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
-CREATE AGGREGATE myaggn02b(*) (SFUNC = stfnp, STYPE = anyarray,
- INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- N P
--- should CREATE
-CREATE AGGREGATE myaggn03a(*) (SFUNC = stfp, STYPE = int4[],
- FINALFUNC = ffnp, INITCOND = '{}');
--- P P
--- should ERROR: ffnp(anyarray) not matched by ffnp(int[])
-CREATE AGGREGATE myaggn04a(*) (SFUNC = stfp, STYPE = anyarray,
- FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- Case4 (R = N) && ((B = P) || (B = N))
--- -------------------------------------
--- S tf1 B tf2
--- -----------------------
--- N N N N
--- should CREATE
-CREATE AGGREGATE myaggn05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[],
- FINALFUNC = ffnp, INITCOND = '{}');
-CREATE AGGREGATE myaggn05b(BASETYPE = int, SFUNC = tfnp, STYPE = int[],
- INITCOND = '{}');
--- N N N P
--- should CREATE
-CREATE AGGREGATE myaggn06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[],
- FINALFUNC = ffnp, INITCOND = '{}');
-CREATE AGGREGATE myaggn06b(BASETYPE = int, SFUNC = tf2p, STYPE = int[],
- INITCOND = '{}');
--- N N P N
--- should ERROR: tfnp(int[], anyelement) not matched by tfnp(int[], int)
-CREATE AGGREGATE myaggn07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[],
- FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: function tfnp(integer[], anyelement) does not exist
-CREATE AGGREGATE myaggn07b(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[],
- INITCOND = '{}');
-ERROR: function tfnp(integer[], anyelement) does not exist
--- N N P P
--- should CREATE
-CREATE AGGREGATE myaggn08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[],
- FINALFUNC = ffnp, INITCOND = '{}');
-CREATE AGGREGATE myaggn08b(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[],
- INITCOND = '{}');
--- N P N N
--- should CREATE
-CREATE AGGREGATE myaggn09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[],
- FINALFUNC = ffnp, INITCOND = '{}');
--- N P N P
--- should CREATE
-CREATE AGGREGATE myaggn10a(BASETYPE = int, SFUNC = tfp, STYPE = int[],
- FINALFUNC = ffnp, INITCOND = '{}');
--- N P P N
--- should ERROR: tf1p(int[],anyelement) not matched by tf1p(anyarray,int)
-CREATE AGGREGATE myaggn11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[],
- FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: function tf1p(integer[], anyelement) does not exist
--- N P P P
--- should ERROR: tfp(int[],anyelement) not matched by tfp(anyarray,anyelement)
-CREATE AGGREGATE myaggn12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[],
- FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: function tfp(integer[], anyelement) does not exist
--- P N N N
--- should ERROR: tfnp(anyarray, int) not matched by tfnp(int[],int)
-CREATE AGGREGATE myaggn13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray,
- FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
-CREATE AGGREGATE myaggn13b(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray,
- INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- P N N P
--- should ERROR: tf2p(anyarray, int) not matched by tf2p(int[],anyelement)
-CREATE AGGREGATE myaggn14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray,
- FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
-CREATE AGGREGATE myaggn14b(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray,
- INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- P N P N
--- should ERROR: tfnp(anyarray, anyelement) not matched by tfnp(int[],int)
-CREATE AGGREGATE myaggn15a(BASETYPE = anyelement, SFUNC = tfnp,
- STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: function tfnp(anyarray, anyelement) does not exist
-CREATE AGGREGATE myaggn15b(BASETYPE = anyelement, SFUNC = tfnp,
- STYPE = anyarray, INITCOND = '{}');
-ERROR: function tfnp(anyarray, anyelement) does not exist
--- P N P P
--- should ERROR: tf2p(anyarray, anyelement) not matched by tf2p(int[],anyelement)
-CREATE AGGREGATE myaggn16a(BASETYPE = anyelement, SFUNC = tf2p,
- STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: function tf2p(anyarray, anyelement) does not exist
-CREATE AGGREGATE myaggn16b(BASETYPE = anyelement, SFUNC = tf2p,
- STYPE = anyarray, INITCOND = '{}');
-ERROR: function tf2p(anyarray, anyelement) does not exist
--- P P N N
--- should ERROR: ffnp(anyarray) not matched by ffnp(int[])
-CREATE AGGREGATE myaggn17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray,
- FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- P P N P
--- should ERROR: tfp(anyarray, int) not matched by tfp(anyarray, anyelement)
-CREATE AGGREGATE myaggn18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray,
- FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: cannot determine transition data type
-DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange.
--- P P P N
--- should ERROR: tf1p(anyarray, anyelement) not matched by tf1p(anyarray, int)
-CREATE AGGREGATE myaggn19a(BASETYPE = anyelement, SFUNC = tf1p,
- STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: function tf1p(anyarray, anyelement) does not exist
--- P P P P
--- should ERROR: ffnp(anyarray) not matched by ffnp(int[])
-CREATE AGGREGATE myaggn20a(BASETYPE = anyelement, SFUNC = tfp,
- STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}');
-ERROR: function ffnp(anyarray) does not exist
--- multi-arg polymorphic
-CREATE AGGREGATE mysum2(anyelement,anyelement) (SFUNC = sum3,
- STYPE = anyelement, INITCOND = '0');
--- create test data for polymorphic aggregates
-create temp table t(f1 int, f2 int[], f3 text);
-insert into t values(1,array[1],'a');
-insert into t values(1,array[11],'b');
-insert into t values(1,array[111],'c');
-insert into t values(2,array[2],'a');
-insert into t values(2,array[22],'b');
-insert into t values(2,array[222],'c');
-insert into t values(3,array[3],'a');
-insert into t values(3,array[3],'b');
--- test the successfully created polymorphic aggregates
-select f3, myaggp01a(*) from t group by f3 order by f3;
- f3 | myaggp01a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggp03a(*) from t group by f3 order by f3;
- f3 | myaggp03a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggp03b(*) from t group by f3 order by f3;
- f3 | myaggp03b
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggp05a(f1) from t group by f3 order by f3;
- f3 | myaggp05a
-----+-----------
- a | {1,2,3}
- b | {1,2,3}
- c | {1,2}
-(3 rows)
-
-select f3, myaggp06a(f1) from t group by f3 order by f3;
- f3 | myaggp06a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggp08a(f1) from t group by f3 order by f3;
- f3 | myaggp08a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggp09a(f1) from t group by f3 order by f3;
- f3 | myaggp09a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggp09b(f1) from t group by f3 order by f3;
- f3 | myaggp09b
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggp10a(f1) from t group by f3 order by f3;
- f3 | myaggp10a
-----+-----------
- a | {1,2,3}
- b | {1,2,3}
- c | {1,2}
-(3 rows)
-
-select f3, myaggp10b(f1) from t group by f3 order by f3;
- f3 | myaggp10b
-----+-----------
- a | {1,2,3}
- b | {1,2,3}
- c | {1,2}
-(3 rows)
-
-select f3, myaggp20a(f1) from t group by f3 order by f3;
- f3 | myaggp20a
-----+-----------
- a | {1,2,3}
- b | {1,2,3}
- c | {1,2}
-(3 rows)
-
-select f3, myaggp20b(f1) from t group by f3 order by f3;
- f3 | myaggp20b
-----+-----------
- a | {1,2,3}
- b | {1,2,3}
- c | {1,2}
-(3 rows)
-
-select f3, myaggn01a(*) from t group by f3 order by f3;
- f3 | myaggn01a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggn01b(*) from t group by f3 order by f3;
- f3 | myaggn01b
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggn03a(*) from t group by f3 order by f3;
- f3 | myaggn03a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggn05a(f1) from t group by f3 order by f3;
- f3 | myaggn05a
-----+-----------
- a | {1,2,3}
- b | {1,2,3}
- c | {1,2}
-(3 rows)
-
-select f3, myaggn05b(f1) from t group by f3 order by f3;
- f3 | myaggn05b
-----+-----------
- a | {1,2,3}
- b | {1,2,3}
- c | {1,2}
-(3 rows)
-
-select f3, myaggn06a(f1) from t group by f3 order by f3;
- f3 | myaggn06a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggn06b(f1) from t group by f3 order by f3;
- f3 | myaggn06b
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggn08a(f1) from t group by f3 order by f3;
- f3 | myaggn08a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggn08b(f1) from t group by f3 order by f3;
- f3 | myaggn08b
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggn09a(f1) from t group by f3 order by f3;
- f3 | myaggn09a
-----+-----------
- a | {}
- b | {}
- c | {}
-(3 rows)
-
-select f3, myaggn10a(f1) from t group by f3 order by f3;
- f3 | myaggn10a
-----+-----------
- a | {1,2,3}
- b | {1,2,3}
- c | {1,2}
-(3 rows)
-
-select mysum2(f1, f1 + 1) from t;
- mysum2
---------
- 38
-(1 row)
-
--- test inlining of polymorphic SQL functions
-create function bleat(int) returns int as $$
-begin
- raise notice 'bleat %', $1;
- return $1;
-end$$ language plpgsql;
-create function sql_if(bool, anyelement, anyelement) returns anyelement as $$
-select case when $1 then $2 else $3 end $$ language sql;
--- Note this would fail with integer overflow, never mind wrong bleat() output,
--- if the CASE expression were not successfully inlined
-select f1, sql_if(f1 > 0, bleat(f1), bleat(f1 + 1)) from int4_tbl;
-NOTICE: bleat 1
-NOTICE: bleat 123456
-NOTICE: bleat -123455
-NOTICE: bleat 2147483647
-NOTICE: bleat -2147483646
- f1 | sql_if
--------------+-------------
- 0 | 1
- 123456 | 123456
- -123456 | -123455
- 2147483647 | 2147483647
- -2147483647 | -2147483646
-(5 rows)
-
-select q2, sql_if(q2 > 0, q2, q2 + 1) from int8_tbl;
- q2 | sql_if
--------------------+-------------------
- 456 | 456
- 4567890123456789 | 4567890123456789
- 123 | 123
- 4567890123456789 | 4567890123456789
- -4567890123456789 | -4567890123456788
-(5 rows)
-
--- another sort of polymorphic aggregate
-CREATE AGGREGATE array_larger_accum (anyarray)
-(
- sfunc = array_larger,
- stype = anyarray,
- initcond = '{}'
-);
-SELECT array_larger_accum(i)
-FROM (VALUES (ARRAY[1,2]), (ARRAY[3,4])) as t(i);
- array_larger_accum
---------------------
- {3,4}
-(1 row)
-
-SELECT array_larger_accum(i)
-FROM (VALUES (ARRAY[row(1,2),row(3,4)]), (ARRAY[row(5,6),row(7,8)])) as t(i);
- array_larger_accum
---------------------
- {"(5,6)","(7,8)"}
-(1 row)
-
--- another kind of polymorphic aggregate
-create function add_group(grp anyarray, ad anyelement, size integer)
- returns anyarray
- as $$
-begin
- if grp is null then
- return array[ad];
- end if;
- if array_upper(grp, 1) < size then
- return grp || ad;
- end if;
- return grp;
-end;
-$$
- language plpgsql immutable;
-create aggregate build_group(anyelement, integer) (
- SFUNC = add_group,
- STYPE = anyarray
-);
-select build_group(q1,3) from int8_tbl;
- build_group
-----------------------------
- {123,123,4567890123456789}
-(1 row)
-
--- this should fail because stype isn't compatible with arg
-create aggregate build_group(int8, integer) (
- SFUNC = add_group,
- STYPE = int2[]
-);
-ERROR: function add_group(smallint[], bigint, integer) does not exist
--- but we can make a non-poly agg from a poly sfunc if types are OK
-create aggregate build_group(int8, integer) (
- SFUNC = add_group,
- STYPE = int8[]
-);
--- check proper resolution of data types for polymorphic transfn/finalfn
-create function first_el_transfn(anyarray, anyelement) returns anyarray as
-'select $1 || $2' language sql immutable;
-create function first_el(anyarray) returns anyelement as
-'select $1[1]' language sql strict immutable;
-create aggregate first_el_agg_f8(float8) (
- SFUNC = array_append,
- STYPE = float8[],
- FINALFUNC = first_el
-);
-create aggregate first_el_agg_any(anyelement) (
- SFUNC = first_el_transfn,
- STYPE = anyarray,
- FINALFUNC = first_el
-);
-select first_el_agg_f8(x::float8) from generate_series(1,10) x;
- first_el_agg_f8
------------------
- 1
-(1 row)
-
-select first_el_agg_any(x) from generate_series(1,10) x;
- first_el_agg_any
-------------------
- 1
-(1 row)
-
-select first_el_agg_f8(x::float8) over(order by x) from generate_series(1,10) x;
- first_el_agg_f8
------------------
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
-(10 rows)
-
-select first_el_agg_any(x) over(order by x) from generate_series(1,10) x;
- first_el_agg_any
-------------------
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
-(10 rows)
-
--- check that we can apply functions taking ANYARRAY to pg_stats
-select distinct array_ndims(histogram_bounds) from pg_stats
-where histogram_bounds is not null;
- array_ndims
--------------
- 1
-(1 row)
-
--- such functions must protect themselves if varying element type isn't OK
--- (WHERE clause here is to avoid possibly getting a collation error instead)
-select max(histogram_bounds) from pg_stats where tablename = 'pg_am';
-ERROR: cannot compare arrays of different element types
--- another corner case is the input functions for polymorphic pseudotypes
-select array_in('{1,2,3}','int4'::regtype,-1); -- this has historically worked
- array_in
-----------
- {1,2,3}
-(1 row)
-
-select * from array_in('{1,2,3}','int4'::regtype,-1); -- this not
-ERROR: function "array_in" in FROM has unsupported return type anyarray
-LINE 1: select * from array_in('{1,2,3}','int4'::regtype,-1);
- ^
-select anyrange_in('[10,20)','int4range'::regtype,-1);
-ERROR: cannot accept a value of type anyrange
--- test variadic polymorphic functions
-create function myleast(variadic anyarray) returns anyelement as $$
- select min($1[i]) from generate_subscripts($1,1) g(i)
-$$ language sql immutable strict;
-select myleast(10, 1, 20, 33);
- myleast
----------
- 1
-(1 row)
-
-select myleast(1.1, 0.22, 0.55);
- myleast
----------
- 0.22
-(1 row)
-
-select myleast('z'::text);
- myleast
----------
- z
-(1 row)
-
-select myleast(); -- fail
-ERROR: function myleast() does not exist
-LINE 1: select myleast();
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
--- test with variadic call parameter
-select myleast(variadic array[1,2,3,4,-1]);
- myleast
----------
- -1
-(1 row)
-
-select myleast(variadic array[1.1, -5.5]);
- myleast
----------
- -5.5
-(1 row)
-
---test with empty variadic call parameter
-select myleast(variadic array[]::int[]);
- myleast
----------
-
-(1 row)
-
--- an example with some ordinary arguments too
-create function concat(text, variadic anyarray) returns text as $$
- select array_to_string($2, $1);
-$$ language sql immutable strict;
-select concat('%', 1, 2, 3, 4, 5);
- concat
------------
- 1%2%3%4%5
-(1 row)
-
-select concat('|', 'a'::text, 'b', 'c');
- concat
---------
- a|b|c
-(1 row)
-
-select concat('|', variadic array[1,2,33]);
- concat
---------
- 1|2|33
-(1 row)
-
-select concat('|', variadic array[]::int[]);
- concat
---------
-
-(1 row)
-
-drop function concat(text, anyarray);
--- mix variadic with anyelement
-create function formarray(anyelement, variadic anyarray) returns anyarray as $$
- select array_prepend($1, $2);
-$$ language sql immutable strict;
-select formarray(1,2,3,4,5);
- formarray
--------------
- {1,2,3,4,5}
-(1 row)
-
-select formarray(1.1, variadic array[1.2,55.5]);
- formarray
-----------------
- {1.1,1.2,55.5}
-(1 row)
-
-select formarray(1.1, array[1.2,55.5]); -- fail without variadic
-ERROR: function formarray(numeric, numeric[]) does not exist
-LINE 1: select formarray(1.1, array[1.2,55.5]);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select formarray(1, 'x'::text); -- fail, type mismatch
-ERROR: function formarray(integer, text) does not exist
-LINE 1: select formarray(1, 'x'::text);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select formarray(1, variadic array['x'::text]); -- fail, type mismatch
-ERROR: function formarray(integer, text[]) does not exist
-LINE 1: select formarray(1, variadic array['x'::text]);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function formarray(anyelement, variadic anyarray);
--- test pg_typeof() function
-select pg_typeof(null); -- unknown
- pg_typeof
------------
- unknown
-(1 row)
-
-select pg_typeof(0); -- integer
- pg_typeof
------------
- integer
-(1 row)
-
-select pg_typeof(0.0); -- numeric
- pg_typeof
------------
- numeric
-(1 row)
-
-select pg_typeof(1+1 = 2); -- boolean
- pg_typeof
------------
- boolean
-(1 row)
-
-select pg_typeof('x'); -- unknown
- pg_typeof
------------
- unknown
-(1 row)
-
-select pg_typeof('' || ''); -- text
- pg_typeof
------------
- text
-(1 row)
-
-select pg_typeof(pg_typeof(0)); -- regtype
- pg_typeof
------------
- regtype
-(1 row)
-
-select pg_typeof(array[1.2,55.5]); -- numeric[]
- pg_typeof
------------
- numeric[]
-(1 row)
-
-select pg_typeof(myleast(10, 1, 20, 33)); -- polymorphic input
- pg_typeof
------------
- integer
-(1 row)
-
--- test functions with default parameters
--- test basic functionality
-create function dfunc(a int = 1, int = 2) returns int as $$
- select $1 + $2;
-$$ language sql;
-select dfunc();
- dfunc
--------
- 3
-(1 row)
-
-select dfunc(10);
- dfunc
--------
- 12
-(1 row)
-
-select dfunc(10, 20);
- dfunc
--------
- 30
-(1 row)
-
-select dfunc(10, 20, 30); -- fail
-ERROR: function dfunc(integer, integer, integer) does not exist
-LINE 1: select dfunc(10, 20, 30);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function dfunc(); -- fail
-ERROR: function dfunc() does not exist
-drop function dfunc(int); -- fail
-ERROR: function dfunc(integer) does not exist
-drop function dfunc(int, int); -- ok
--- fail: defaults must be at end of argument list
-create function dfunc(a int = 1, b int) returns int as $$
- select $1 + $2;
-$$ language sql;
-ERROR: input parameters after one with a default value must also have defaults
-LINE 1: create function dfunc(a int = 1, b int) returns int as $$
- ^
--- however, this should work:
-create function dfunc(a int = 1, out sum int, b int = 2) as $$
- select $1 + $2;
-$$ language sql;
-select dfunc();
- dfunc
--------
- 3
-(1 row)
-
--- verify it lists properly
-\df dfunc
- List of functions
- Schema | Name | Result data type | Argument data types | Type
---------+-------+------------------+-----------------------------------------------------------+------
- public | dfunc | integer | a integer DEFAULT 1, OUT sum integer, b integer DEFAULT 2 | func
-(1 row)
-
-drop function dfunc(int, int);
--- check implicit coercion
-create function dfunc(a int DEFAULT 1.0, int DEFAULT '-1') returns int as $$
- select $1 + $2;
-$$ language sql;
-select dfunc();
- dfunc
--------
- 0
-(1 row)
-
-create function dfunc(a text DEFAULT 'Hello', b text DEFAULT 'World') returns text as $$
- select $1 || ', ' || $2;
-$$ language sql;
-select dfunc(); -- fail: which dfunc should be called? int or text
-ERROR: function dfunc() is not unique
-LINE 1: select dfunc();
- ^
-HINT: Could not choose a best candidate function. You might need to add explicit type casts.
-select dfunc('Hi'); -- ok
- dfunc
------------
- Hi, World
-(1 row)
-
-select dfunc('Hi', 'City'); -- ok
- dfunc
-----------
- Hi, City
-(1 row)
-
-select dfunc(0); -- ok
- dfunc
--------
- -1
-(1 row)
-
-select dfunc(10, 20); -- ok
- dfunc
--------
- 30
-(1 row)
-
-drop function dfunc(int, int);
-drop function dfunc(text, text);
-create function dfunc(int = 1, int = 2) returns int as $$
- select 2;
-$$ language sql;
-create function dfunc(int = 1, int = 2, int = 3, int = 4) returns int as $$
- select 4;
-$$ language sql;
--- Now, dfunc(nargs = 2) and dfunc(nargs = 4) are ambiguous when called
--- with 0 to 2 arguments.
-select dfunc(); -- fail
-ERROR: function dfunc() is not unique
-LINE 1: select dfunc();
- ^
-HINT: Could not choose a best candidate function. You might need to add explicit type casts.
-select dfunc(1); -- fail
-ERROR: function dfunc(integer) is not unique
-LINE 1: select dfunc(1);
- ^
-HINT: Could not choose a best candidate function. You might need to add explicit type casts.
-select dfunc(1, 2); -- fail
-ERROR: function dfunc(integer, integer) is not unique
-LINE 1: select dfunc(1, 2);
- ^
-HINT: Could not choose a best candidate function. You might need to add explicit type casts.
-select dfunc(1, 2, 3); -- ok
- dfunc
--------
- 4
-(1 row)
-
-select dfunc(1, 2, 3, 4); -- ok
- dfunc
--------
- 4
-(1 row)
-
-drop function dfunc(int, int);
-drop function dfunc(int, int, int, int);
--- default values are not allowed for output parameters
-create function dfunc(out int = 20) returns int as $$
- select 1;
-$$ language sql;
-ERROR: only input parameters can have default values
-LINE 1: create function dfunc(out int = 20) returns int as $$
- ^
--- polymorphic parameter test
-create function dfunc(anyelement = 'World'::text) returns text as $$
- select 'Hello, ' || $1::text;
-$$ language sql;
-select dfunc();
- dfunc
---------------
- Hello, World
-(1 row)
-
-select dfunc(0);
- dfunc
-----------
- Hello, 0
-(1 row)
-
-select dfunc(to_date('20081215','YYYYMMDD'));
- dfunc
--------------------
- Hello, 12-15-2008
-(1 row)
-
-select dfunc('City'::text);
- dfunc
--------------
- Hello, City
-(1 row)
-
-drop function dfunc(anyelement);
--- check defaults for variadics
-create function dfunc(a variadic int[]) returns int as
-$$ select array_upper($1, 1) $$ language sql;
-select dfunc(); -- fail
-ERROR: function dfunc() does not exist
-LINE 1: select dfunc();
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select dfunc(10);
- dfunc
--------
- 1
-(1 row)
-
-select dfunc(10,20);
- dfunc
--------
- 2
-(1 row)
-
-create or replace function dfunc(a variadic int[] default array[]::int[]) returns int as
-$$ select array_upper($1, 1) $$ language sql;
-select dfunc(); -- now ok
- dfunc
--------
-
-(1 row)
-
-select dfunc(10);
- dfunc
--------
- 1
-(1 row)
-
-select dfunc(10,20);
- dfunc
--------
- 2
-(1 row)
-
--- can't remove the default once it exists
-create or replace function dfunc(a variadic int[]) returns int as
-$$ select array_upper($1, 1) $$ language sql;
-ERROR: cannot remove parameter defaults from existing function
-HINT: Use DROP FUNCTION dfunc(integer[]) first.
-\df dfunc
- List of functions
- Schema | Name | Result data type | Argument data types | Type
---------+-------+------------------+-------------------------------------------------+------
- public | dfunc | integer | VARIADIC a integer[] DEFAULT ARRAY[]::integer[] | func
-(1 row)
-
-drop function dfunc(a variadic int[]);
--- Ambiguity should be reported only if there's not a better match available
-create function dfunc(int = 1, int = 2, int = 3) returns int as $$
- select 3;
-$$ language sql;
-create function dfunc(int = 1, int = 2) returns int as $$
- select 2;
-$$ language sql;
-create function dfunc(text) returns text as $$
- select $1;
-$$ language sql;
--- dfunc(narg=2) and dfunc(narg=3) are ambiguous
-select dfunc(1); -- fail
-ERROR: function dfunc(integer) is not unique
-LINE 1: select dfunc(1);
- ^
-HINT: Could not choose a best candidate function. You might need to add explicit type casts.
--- but this works since the ambiguous functions aren't preferred anyway
-select dfunc('Hi');
- dfunc
--------
- Hi
-(1 row)
-
-drop function dfunc(int, int, int);
-drop function dfunc(int, int);
-drop function dfunc(text);
---
--- Tests for named- and mixed-notation function calling
---
-create function dfunc(a int, b int, c int = 0, d int = 0)
- returns table (a int, b int, c int, d int) as $$
- select $1, $2, $3, $4;
-$$ language sql;
-select (dfunc(10,20,30)).*;
- a | b | c | d
-----+----+----+---
- 10 | 20 | 30 | 0
-(1 row)
-
-select (dfunc(a := 10, b := 20, c := 30)).*;
- a | b | c | d
-----+----+----+---
- 10 | 20 | 30 | 0
-(1 row)
-
-select * from dfunc(a := 10, b := 20);
- a | b | c | d
-----+----+---+---
- 10 | 20 | 0 | 0
-(1 row)
-
-select * from dfunc(b := 10, a := 20);
- a | b | c | d
-----+----+---+---
- 20 | 10 | 0 | 0
-(1 row)
-
-select * from dfunc(0); -- fail
-ERROR: function dfunc(integer) does not exist
-LINE 1: select * from dfunc(0);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select * from dfunc(1,2);
- a | b | c | d
----+---+---+---
- 1 | 2 | 0 | 0
-(1 row)
-
-select * from dfunc(1,2,c := 3);
- a | b | c | d
----+---+---+---
- 1 | 2 | 3 | 0
-(1 row)
-
-select * from dfunc(1,2,d := 3);
- a | b | c | d
----+---+---+---
- 1 | 2 | 0 | 3
-(1 row)
-
-select * from dfunc(x := 20, b := 10, x := 30); -- fail, duplicate name
-ERROR: argument name "x" used more than once
-LINE 1: select * from dfunc(x := 20, b := 10, x := 30);
- ^
-select * from dfunc(10, b := 20, 30); -- fail, named args must be last
-ERROR: positional argument cannot follow named argument
-LINE 1: select * from dfunc(10, b := 20, 30);
- ^
-select * from dfunc(x := 10, b := 20, c := 30); -- fail, unknown param
-ERROR: function dfunc(x => integer, b => integer, c => integer) does not exist
-LINE 1: select * from dfunc(x := 10, b := 20, c := 30);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select * from dfunc(10, 10, a := 20); -- fail, a overlaps positional parameter
-ERROR: function dfunc(integer, integer, a => integer) does not exist
-LINE 1: select * from dfunc(10, 10, a := 20);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select * from dfunc(1,c := 2,d := 3); -- fail, no value for b
-ERROR: function dfunc(integer, c => integer, d => integer) does not exist
-LINE 1: select * from dfunc(1,c := 2,d := 3);
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function dfunc(int, int, int, int);
--- test with different parameter types
-create function dfunc(a varchar, b numeric, c date = current_date)
- returns table (a varchar, b numeric, c date) as $$
- select $1, $2, $3;
-$$ language sql;
-select (dfunc('Hello World', 20, '2009-07-25'::date)).*;
- a | b | c
--------------+----+------------
- Hello World | 20 | 07-25-2009
-(1 row)
-
-select * from dfunc('Hello World', 20, '2009-07-25'::date);
- a | b | c
--------------+----+------------
- Hello World | 20 | 07-25-2009
-(1 row)
-
-select * from dfunc(c := '2009-07-25'::date, a := 'Hello World', b := 20);
- a | b | c
--------------+----+------------
- Hello World | 20 | 07-25-2009
-(1 row)
-
-select * from dfunc('Hello World', b := 20, c := '2009-07-25'::date);
- a | b | c
--------------+----+------------
- Hello World | 20 | 07-25-2009
-(1 row)
-
-select * from dfunc('Hello World', c := '2009-07-25'::date, b := 20);
- a | b | c
--------------+----+------------
- Hello World | 20 | 07-25-2009
-(1 row)
-
-select * from dfunc('Hello World', c := 20, b := '2009-07-25'::date); -- fail
-ERROR: function dfunc(unknown, c => integer, b => date) does not exist
-LINE 1: select * from dfunc('Hello World', c := 20, b := '2009-07-25...
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function dfunc(varchar, numeric, date);
--- test out parameters with named params
-create function dfunc(a varchar = 'def a', out _a varchar, c numeric = NULL, out _c numeric)
-returns record as $$
- select $1, $2;
-$$ language sql;
-select (dfunc()).*;
- _a | _c
--------+----
- def a |
-(1 row)
-
-select * from dfunc();
- _a | _c
--------+----
- def a |
-(1 row)
-
-select * from dfunc('Hello', 100);
- _a | _c
--------+-----
- Hello | 100
-(1 row)
-
-select * from dfunc(a := 'Hello', c := 100);
- _a | _c
--------+-----
- Hello | 100
-(1 row)
-
-select * from dfunc(c := 100, a := 'Hello');
- _a | _c
--------+-----
- Hello | 100
-(1 row)
-
-select * from dfunc('Hello');
- _a | _c
--------+----
- Hello |
-(1 row)
-
-select * from dfunc('Hello', c := 100);
- _a | _c
--------+-----
- Hello | 100
-(1 row)
-
-select * from dfunc(c := 100);
- _a | _c
--------+-----
- def a | 100
-(1 row)
-
--- fail, can no longer change an input parameter's name
-create or replace function dfunc(a varchar = 'def a', out _a varchar, x numeric = NULL, out _c numeric)
-returns record as $$
- select $1, $2;
-$$ language sql;
-ERROR: cannot change name of input parameter "c"
-HINT: Use DROP FUNCTION dfunc(character varying,numeric) first.
-create or replace function dfunc(a varchar = 'def a', out _a varchar, numeric = NULL, out _c numeric)
-returns record as $$
- select $1, $2;
-$$ language sql;
-ERROR: cannot change name of input parameter "c"
-HINT: Use DROP FUNCTION dfunc(character varying,numeric) first.
-drop function dfunc(varchar, numeric);
---fail, named parameters are not unique
-create function testpolym(a int, a int) returns int as $$ select 1;$$ language sql;
-ERROR: parameter name "a" used more than once
-LINE 1: create function testpolym(a int, a int) returns int as $$ se...
- ^
-create function testpolym(int, out a int, out a int) returns int as $$ select 1;$$ language sql;
-ERROR: parameter name "a" used more than once
-LINE 1: create function testpolym(int, out a int, out a int) returns...
- ^
-create function testpolym(out a int, inout a int) returns int as $$ select 1;$$ language sql;
-ERROR: parameter name "a" used more than once
-LINE 1: create function testpolym(out a int, inout a int) returns in...
- ^
-create function testpolym(a int, inout a int) returns int as $$ select 1;$$ language sql;
-ERROR: parameter name "a" used more than once
-LINE 1: create function testpolym(a int, inout a int) returns int as...
- ^
--- valid
-create function testpolym(a int, out a int) returns int as $$ select $1;$$ language sql;
-select testpolym(37);
- testpolym
------------
- 37
-(1 row)
-
-drop function testpolym(int);
-create function testpolym(a int) returns table(a int) as $$ select $1;$$ language sql;
-select * from testpolym(37);
- a
-----
- 37
-(1 row)
-
-drop function testpolym(int);
--- test polymorphic params and defaults
-create function dfunc(a anyelement, b anyelement = null, flag bool = true)
-returns anyelement as $$
- select case when $3 then $1 else $2 end;
-$$ language sql;
-select dfunc(1,2);
- dfunc
--------
- 1
-(1 row)
-
-select dfunc('a'::text, 'b'); -- positional notation with default
- dfunc
--------
- a
-(1 row)
-
-select dfunc(a := 1, b := 2);
- dfunc
--------
- 1
-(1 row)
-
-select dfunc(a := 'a'::text, b := 'b');
- dfunc
--------
- a
-(1 row)
-
-select dfunc(a := 'a'::text, b := 'b', flag := false); -- named notation
- dfunc
--------
- b
-(1 row)
-
-select dfunc(b := 'b'::text, a := 'a'); -- named notation with default
- dfunc
--------
- a
-(1 row)
-
-select dfunc(a := 'a'::text, flag := true); -- named notation with default
- dfunc
--------
- a
-(1 row)
-
-select dfunc(a := 'a'::text, flag := false); -- named notation with default
- dfunc
--------
-
-(1 row)
-
-select dfunc(b := 'b'::text, a := 'a', flag := true); -- named notation
- dfunc
--------
- a
-(1 row)
-
-select dfunc('a'::text, 'b', false); -- full positional notation
- dfunc
--------
- b
-(1 row)
-
-select dfunc('a'::text, 'b', flag := false); -- mixed notation
- dfunc
--------
- b
-(1 row)
-
-select dfunc('a'::text, 'b', true); -- full positional notation
- dfunc
--------
- a
-(1 row)
-
-select dfunc('a'::text, 'b', flag := true); -- mixed notation
- dfunc
--------
- a
-(1 row)
-
--- ansi/sql syntax
-select dfunc(a => 1, b => 2);
- dfunc
--------
- 1
-(1 row)
-
-select dfunc(a => 'a'::text, b => 'b');
- dfunc
--------
- a
-(1 row)
-
-select dfunc(a => 'a'::text, b => 'b', flag => false); -- named notation
- dfunc
--------
- b
-(1 row)
-
-select dfunc(b => 'b'::text, a => 'a'); -- named notation with default
- dfunc
--------
- a
-(1 row)
-
-select dfunc(a => 'a'::text, flag => true); -- named notation with default
- dfunc
--------
- a
-(1 row)
-
-select dfunc(a => 'a'::text, flag => false); -- named notation with default
- dfunc
--------
-
-(1 row)
-
-select dfunc(b => 'b'::text, a => 'a', flag => true); -- named notation
- dfunc
--------
- a
-(1 row)
-
-select dfunc('a'::text, 'b', false); -- full positional notation
- dfunc
--------
- b
-(1 row)
-
-select dfunc('a'::text, 'b', flag => false); -- mixed notation
- dfunc
--------
- b
-(1 row)
-
-select dfunc('a'::text, 'b', true); -- full positional notation
- dfunc
--------
- a
-(1 row)
-
-select dfunc('a'::text, 'b', flag => true); -- mixed notation
- dfunc
--------
- a
-(1 row)
-
--- this tests lexer edge cases around =>
-select dfunc(a =>-1);
- dfunc
--------
- -1
-(1 row)
-
-select dfunc(a =>+1);
- dfunc
--------
- 1
-(1 row)
-
-select dfunc(a =>/**/1);
- dfunc
--------
- 1
-(1 row)
-
-select dfunc(a =>--comment to be removed by psql
- 1);
- dfunc
--------
- 1
-(1 row)
-
--- need DO to protect the -- from psql
-do $$
- declare r integer;
- begin
- select dfunc(a=>-- comment
- 1) into r;
- raise info 'r = %', r;
- end;
-$$;
-INFO: r = 1
--- check reverse-listing of named-arg calls
-CREATE VIEW dfview AS
- SELECT q1, q2,
- dfunc(q1,q2, flag := q1>q2) as c3,
- dfunc(q1, flag := q1 q1 > q2) AS c3,
- dfunc(q1, flag => q1 < q2, b => q2) AS c4
- FROM int8_tbl;
-
-drop view dfview;
-drop function dfunc(anyelement, anyelement, bool);
---
--- Tests for ANYCOMPATIBLE polymorphism family
---
-create function anyctest(anycompatible, anycompatible)
-returns anycompatible as $$
- select greatest($1, $2)
-$$ language sql;
-select x, pg_typeof(x) from anyctest(11, 12) x;
- x | pg_typeof
-----+-----------
- 12 | integer
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, 12.3) x;
- x | pg_typeof
-------+-----------
- 12.3 | numeric
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, point(1,2)) x; -- fail
-ERROR: function anyctest(integer, point) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(11, point(1,2)) x;
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select x, pg_typeof(x) from anyctest('11', '12.3') x; -- defaults to text
- x | pg_typeof
-------+-----------
- 12.3 | text
-(1 row)
-
-drop function anyctest(anycompatible, anycompatible);
-create function anyctest(anycompatible, anycompatible)
-returns anycompatiblearray as $$
- select array[$1, $2]
-$$ language sql;
-select x, pg_typeof(x) from anyctest(11, 12) x;
- x | pg_typeof
----------+-----------
- {11,12} | integer[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, 12.3) x;
- x | pg_typeof
------------+-----------
- {11,12.3} | numeric[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, array[1,2]) x; -- fail
-ERROR: function anyctest(integer, integer[]) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(11, array[1,2]) x;
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function anyctest(anycompatible, anycompatible);
-create function anyctest(anycompatible, anycompatiblearray)
-returns anycompatiblearray as $$
- select array[$1] || $2
-$$ language sql;
-select x, pg_typeof(x) from anyctest(11, array[12]) x;
- x | pg_typeof
----------+-----------
- {11,12} | integer[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, array[12.3]) x;
- x | pg_typeof
------------+-----------
- {11,12.3} | numeric[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(12.3, array[13]) x;
- x | pg_typeof
------------+-----------
- {12.3,13} | numeric[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(12.3, '{13,14.4}') x;
- x | pg_typeof
-----------------+-----------
- {12.3,13,14.4} | numeric[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, array[point(1,2)]) x; -- fail
-ERROR: function anyctest(integer, point[]) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(11, array[point(1,2)]) ...
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select x, pg_typeof(x) from anyctest(11, 12) x; -- fail
-ERROR: function anyctest(integer, integer) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(11, 12) x;
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function anyctest(anycompatible, anycompatiblearray);
-create function anyctest(anycompatible, anycompatiblerange)
-returns anycompatiblerange as $$
- select $2
-$$ language sql;
-select x, pg_typeof(x) from anyctest(11, int4range(4,7)) x;
- x | pg_typeof
--------+-----------
- [4,7) | int4range
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, numrange(4,7)) x;
- x | pg_typeof
--------+-----------
- [4,7) | numrange
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, 12) x; -- fail
-ERROR: function anyctest(integer, integer) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(11, 12) x;
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select x, pg_typeof(x) from anyctest(11.2, int4range(4,7)) x; -- fail
-ERROR: function anyctest(numeric, int4range) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(11.2, int4range(4,7)) x...
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select x, pg_typeof(x) from anyctest(11.2, '[4,7)') x; -- fail
-ERROR: could not determine polymorphic type anycompatiblerange because input has type unknown
-drop function anyctest(anycompatible, anycompatiblerange);
-create function anyctest(anycompatiblerange, anycompatiblerange)
-returns anycompatible as $$
- select lower($1) + upper($2)
-$$ language sql;
-select x, pg_typeof(x) from anyctest(int4range(11,12), int4range(4,7)) x;
- x | pg_typeof
-----+-----------
- 18 | integer
-(1 row)
-
-select x, pg_typeof(x) from anyctest(int4range(11,12), numrange(4,7)) x; -- fail
-ERROR: function anyctest(int4range, numrange) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(int4range(11,12), numra...
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function anyctest(anycompatiblerange, anycompatiblerange);
--- fail, can't infer result type:
-create function anyctest(anycompatible)
-returns anycompatiblerange as $$
- select $1
-$$ language sql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange.
-create function anyctest(anycompatible, anycompatiblemultirange)
-returns anycompatiblemultirange as $$
- select $2
-$$ language sql;
-select x, pg_typeof(x) from anyctest(11, multirange(int4range(4,7))) x;
- x | pg_typeof
----------+----------------
- {[4,7)} | int4multirange
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, multirange(numrange(4,7))) x;
- x | pg_typeof
----------+---------------
- {[4,7)} | nummultirange
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, 12) x; -- fail
-ERROR: function anyctest(integer, integer) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(11, 12) x;
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select x, pg_typeof(x) from anyctest(11.2, multirange(int4range(4,7))) x; -- fail
-ERROR: function anyctest(numeric, int4multirange) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(11.2, multirange(int4ra...
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select x, pg_typeof(x) from anyctest(11.2, '{[4,7)}') x; -- fail
-ERROR: could not determine polymorphic type anycompatiblemultirange because input has type unknown
-drop function anyctest(anycompatible, anycompatiblemultirange);
-create function anyctest(anycompatiblemultirange, anycompatiblemultirange)
-returns anycompatible as $$
- select lower($1) + upper($2)
-$$ language sql;
-select x, pg_typeof(x) from anyctest(multirange(int4range(11,12)), multirange(int4range(4,7))) x;
- x | pg_typeof
-----+-----------
- 18 | integer
-(1 row)
-
-select x, pg_typeof(x) from anyctest(multirange(int4range(11,12)), multirange(numrange(4,7))) x; -- fail
-ERROR: function anyctest(int4multirange, nummultirange) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(multirange(int4range(11...
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function anyctest(anycompatiblemultirange, anycompatiblemultirange);
--- fail, can't infer result type:
-create function anyctest(anycompatible)
-returns anycompatiblemultirange as $$
- select $1
-$$ language sql;
-ERROR: cannot determine result data type
-DETAIL: A result of type anycompatiblemultirange requires at least one input of type anycompatiblerange or anycompatiblemultirange.
-create function anyctest(anycompatiblenonarray, anycompatiblenonarray)
-returns anycompatiblearray as $$
- select array[$1, $2]
-$$ language sql;
-select x, pg_typeof(x) from anyctest(11, 12) x;
- x | pg_typeof
----------+-----------
- {11,12} | integer[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, 12.3) x;
- x | pg_typeof
------------+-----------
- {11,12.3} | numeric[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(array[11], array[1,2]) x; -- fail
-ERROR: function anyctest(integer[], integer[]) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(array[11], array[1,2]) ...
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function anyctest(anycompatiblenonarray, anycompatiblenonarray);
-create function anyctest(a anyelement, b anyarray,
- c anycompatible, d anycompatible)
-returns anycompatiblearray as $$
- select array[c, d]
-$$ language sql;
-select x, pg_typeof(x) from anyctest(11, array[1, 2], 42, 34.5) x;
- x | pg_typeof
------------+-----------
- {42,34.5} | numeric[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, array[1, 2], point(1,2), point(3,4)) x;
- x | pg_typeof
--------------------+-----------
- {"(1,2)","(3,4)"} | point[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, '{1,2}', point(1,2), '(3,4)') x;
- x | pg_typeof
--------------------+-----------
- {"(1,2)","(3,4)"} | point[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, array[1, 2.2], 42, 34.5) x; -- fail
-ERROR: function anyctest(integer, numeric[], integer, numeric) does not exist
-LINE 1: select x, pg_typeof(x) from anyctest(11, array[1, 2.2], 42, ...
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-drop function anyctest(a anyelement, b anyarray,
- c anycompatible, d anycompatible);
-create function anyctest(variadic anycompatiblearray)
-returns anycompatiblearray as $$
- select $1
-$$ language sql;
-select x, pg_typeof(x) from anyctest(11, 12) x;
- x | pg_typeof
----------+-----------
- {11,12} | integer[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, 12.2) x;
- x | pg_typeof
------------+-----------
- {11,12.2} | numeric[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, '12') x;
- x | pg_typeof
----------+-----------
- {11,12} | integer[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(11, '12.2') x; -- fail
-ERROR: invalid input syntax for type integer: "12.2"
-LINE 1: select x, pg_typeof(x) from anyctest(11, '12.2') x;
- ^
-select x, pg_typeof(x) from anyctest(variadic array[11, 12]) x;
- x | pg_typeof
----------+-----------
- {11,12} | integer[]
-(1 row)
-
-select x, pg_typeof(x) from anyctest(variadic array[11, 12.2]) x;
- x | pg_typeof
------------+-----------
- {11,12.2} | numeric[]
-(1 row)
-
-drop function anyctest(variadic anycompatiblearray);
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/rowtypes.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/rowtypes.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/rowtypes.out 2024-11-15 02:50:52.494042466 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/rowtypes.out 2024-11-15 02:59:18.181116966 +0000
@@ -1,1396 +1,2 @@
---
--- ROWTYPES
---
--- Make both a standalone composite type and a table rowtype
-create type complex as (r float8, i float8);
-create temp table fullname (first text, last text);
--- Nested composite
-create type quad as (c1 complex, c2 complex);
--- Some simple tests of I/O conversions and row construction
-select (1.1,2.2)::complex, row((3.3,4.4),(5.5,null))::quad;
- row | row
------------+------------------------
- (1.1,2.2) | ("(3.3,4.4)","(5.5,)")
-(1 row)
-
-select row('Joe', 'Blow')::fullname, '(Joe,Blow)'::fullname;
- row | fullname
-------------+------------
- (Joe,Blow) | (Joe,Blow)
-(1 row)
-
-select '(Joe,von Blow)'::fullname, '(Joe,d''Blow)'::fullname;
- fullname | fullname
-------------------+--------------
- (Joe,"von Blow") | (Joe,d'Blow)
-(1 row)
-
-select '(Joe,"von""Blow")'::fullname, E'(Joe,d\\\\Blow)'::fullname;
- fullname | fullname
--------------------+-----------------
- (Joe,"von""Blow") | (Joe,"d\\Blow")
-(1 row)
-
-select '(Joe,"Blow,Jr")'::fullname;
- fullname
------------------
- (Joe,"Blow,Jr")
-(1 row)
-
-select '(Joe,)'::fullname; -- ok, null 2nd column
- fullname
-----------
- (Joe,)
-(1 row)
-
-select '(Joe)'::fullname; -- bad
-ERROR: malformed record literal: "(Joe)"
-LINE 1: select '(Joe)'::fullname;
- ^
-DETAIL: Too few columns.
-select '(Joe,,)'::fullname; -- bad
-ERROR: malformed record literal: "(Joe,,)"
-LINE 1: select '(Joe,,)'::fullname;
- ^
-DETAIL: Too many columns.
-select '[]'::fullname; -- bad
-ERROR: malformed record literal: "[]"
-LINE 1: select '[]'::fullname;
- ^
-DETAIL: Missing left parenthesis.
-select ' (Joe,Blow) '::fullname; -- ok, extra whitespace
- fullname
-------------
- (Joe,Blow)
-(1 row)
-
-select '(Joe,Blow) /'::fullname; -- bad
-ERROR: malformed record literal: "(Joe,Blow) /"
-LINE 1: select '(Joe,Blow) /'::fullname;
- ^
-DETAIL: Junk after right parenthesis.
--- test non-error-throwing API
-SELECT pg_input_is_valid('(1,2)', 'complex');
- pg_input_is_valid
--------------------
- t
-(1 row)
-
-SELECT pg_input_is_valid('(1,2', 'complex');
- pg_input_is_valid
--------------------
- f
-(1 row)
-
-SELECT pg_input_is_valid('(1,zed)', 'complex');
- pg_input_is_valid
--------------------
- f
-(1 row)
-
-SELECT * FROM pg_input_error_info('(1,zed)', 'complex');
- message | detail | hint | sql_error_code
--------------------------------------------------------+--------+------+----------------
- invalid input syntax for type double precision: "zed" | | | 22P02
-(1 row)
-
-SELECT * FROM pg_input_error_info('(1,1e400)', 'complex');
- message | detail | hint | sql_error_code
----------------------------------------------------+--------+------+----------------
- "1e400" is out of range for type double precision | | | 22003
-(1 row)
-
-create temp table quadtable(f1 int, q quad);
-insert into quadtable values (1, ((3.3,4.4),(5.5,6.6)));
-insert into quadtable values (2, ((null,4.4),(5.5,6.6)));
-select * from quadtable;
- f1 | q
-----+---------------------------
- 1 | ("(3.3,4.4)","(5.5,6.6)")
- 2 | ("(,4.4)","(5.5,6.6)")
-(2 rows)
-
-select f1, q.c1 from quadtable; -- fails, q is a table reference
-ERROR: missing FROM-clause entry for table "q"
-LINE 1: select f1, q.c1 from quadtable;
- ^
-select f1, (q).c1, (qq.q).c1.i from quadtable qq;
- f1 | c1 | i
-----+-----------+-----
- 1 | (3.3,4.4) | 4.4
- 2 | (,4.4) | 4.4
-(2 rows)
-
-create temp table people (fn fullname, bd date);
-insert into people values ('(Joe,Blow)', '1984-01-10');
-select * from people;
- fn | bd
-------------+------------
- (Joe,Blow) | 01-10-1984
-(1 row)
-
--- at the moment this will not work due to ALTER TABLE inadequacy:
-alter table fullname add column suffix text default '';
-ERROR: cannot alter table "fullname" because column "people.fn" uses its row type
--- but this should work:
-alter table fullname add column suffix text default null;
-select * from people;
- fn | bd
--------------+------------
- (Joe,Blow,) | 01-10-1984
-(1 row)
-
--- test insertion/updating of subfields
-update people set fn.suffix = 'Jr';
-select * from people;
- fn | bd
----------------+------------
- (Joe,Blow,Jr) | 01-10-1984
-(1 row)
-
-insert into quadtable (f1, q.c1.r, q.c2.i) values(44,55,66);
-update quadtable set q.c1.r = 12 where f1 = 2;
-update quadtable set q.c1 = 12; -- error, type mismatch
-ERROR: subfield "c1" is of type complex but expression is of type integer
-LINE 1: update quadtable set q.c1 = 12;
- ^
-HINT: You will need to rewrite or cast the expression.
-select * from quadtable;
- f1 | q
-----+---------------------------
- 1 | ("(3.3,4.4)","(5.5,6.6)")
- 44 | ("(55,)","(,66)")
- 2 | ("(12,4.4)","(5.5,6.6)")
-(3 rows)
-
--- The object here is to ensure that toasted references inside
--- composite values don't cause problems. The large f1 value will
--- be toasted inside pp, it must still work after being copied to people.
-create temp table pp (f1 text);
-insert into pp values (repeat('abcdefghijkl', 100000));
-insert into people select ('Jim', f1, null)::fullname, current_date from pp;
-select (fn).first, substr((fn).last, 1, 20), length((fn).last) from people;
- first | substr | length
--------+----------------------+---------
- Joe | Blow | 4
- Jim | abcdefghijklabcdefgh | 1200000
-(2 rows)
-
--- try an update on a toasted composite value, too
-update people set fn.first = 'Jack';
-select (fn).first, substr((fn).last, 1, 20), length((fn).last) from people;
- first | substr | length
--------+----------------------+---------
- Jack | Blow | 4
- Jack | abcdefghijklabcdefgh | 1200000
-(2 rows)
-
--- Test row comparison semantics. Prior to PG 8.2 we did this in a totally
--- non-spec-compliant way.
-select ROW(1,2) < ROW(1,3) as true;
- true
-------
- t
-(1 row)
-
-select ROW(1,2) < ROW(1,1) as false;
- false
--------
- f
-(1 row)
-
-select ROW(1,2) < ROW(1,NULL) as null;
- null
-------
-
-(1 row)
-
-select ROW(1,2,3) < ROW(1,3,NULL) as true; -- the NULL is not examined
- true
-------
- t
-(1 row)
-
-select ROW(11,'ABC') < ROW(11,'DEF') as true;
- true
-------
- t
-(1 row)
-
-select ROW(11,'ABC') > ROW(11,'DEF') as false;
- false
--------
- f
-(1 row)
-
-select ROW(12,'ABC') > ROW(11,'DEF') as true;
- true
-------
- t
-(1 row)
-
--- = and <> have different NULL-behavior than < etc
-select ROW(1,2,3) < ROW(1,NULL,4) as null;
- null
-------
-
-(1 row)
-
-select ROW(1,2,3) = ROW(1,NULL,4) as false;
- false
--------
- f
-(1 row)
-
-select ROW(1,2,3) <> ROW(1,NULL,4) as true;
- true
-------
- t
-(1 row)
-
--- We allow operators beyond the six standard ones, if they have btree
--- operator classes.
-select ROW('ABC','DEF') ~<=~ ROW('DEF','ABC') as true;
- true
-------
- t
-(1 row)
-
-select ROW('ABC','DEF') ~>=~ ROW('DEF','ABC') as false;
- false
--------
- f
-(1 row)
-
-select ROW('ABC','DEF') ~~ ROW('DEF','ABC') as fail;
-ERROR: could not determine interpretation of row comparison operator ~~
-LINE 1: select ROW('ABC','DEF') ~~ ROW('DEF','ABC') as fail;
- ^
-HINT: Row comparison operators must be associated with btree operator families.
--- Comparisons of ROW() expressions can cope with some type mismatches
-select ROW(1,2) = ROW(1,2::int8);
- ?column?
-----------
- t
-(1 row)
-
-select ROW(1,2) in (ROW(3,4), ROW(1,2));
- ?column?
-----------
- t
-(1 row)
-
-select ROW(1,2) in (ROW(3,4), ROW(1,2::int8));
- ?column?
-----------
- t
-(1 row)
-
--- Check row comparison with a subselect
-select unique1, unique2 from tenk1
-where (unique1, unique2) < any (select ten, ten from tenk1 where hundred < 3)
- and unique1 <= 20
-order by 1;
- unique1 | unique2
----------+---------
- 0 | 9998
- 1 | 2838
-(2 rows)
-
--- Also check row comparison with an indexable condition
-explain (costs off)
-select thousand, tenthous from tenk1
-where (thousand, tenthous) >= (997, 5000)
-order by thousand, tenthous;
- QUERY PLAN
------------------------------------------------------------
- Index Only Scan using tenk1_thous_tenthous on tenk1
- Index Cond: (ROW(thousand, tenthous) >= ROW(997, 5000))
-(2 rows)
-
-select thousand, tenthous from tenk1
-where (thousand, tenthous) >= (997, 5000)
-order by thousand, tenthous;
- thousand | tenthous
-----------+----------
- 997 | 5997
- 997 | 6997
- 997 | 7997
- 997 | 8997
- 997 | 9997
- 998 | 998
- 998 | 1998
- 998 | 2998
- 998 | 3998
- 998 | 4998
- 998 | 5998
- 998 | 6998
- 998 | 7998
- 998 | 8998
- 998 | 9998
- 999 | 999
- 999 | 1999
- 999 | 2999
- 999 | 3999
- 999 | 4999
- 999 | 5999
- 999 | 6999
- 999 | 7999
- 999 | 8999
- 999 | 9999
-(25 rows)
-
-explain (costs off)
-select thousand, tenthous, four from tenk1
-where (thousand, tenthous, four) > (998, 5000, 3)
-order by thousand, tenthous;
- QUERY PLAN
------------------------------------------------------------------------
- Sort
- Sort Key: thousand, tenthous
- -> Bitmap Heap Scan on tenk1
- Filter: (ROW(thousand, tenthous, four) > ROW(998, 5000, 3))
- -> Bitmap Index Scan on tenk1_thous_tenthous
- Index Cond: (ROW(thousand, tenthous) >= ROW(998, 5000))
-(6 rows)
-
-select thousand, tenthous, four from tenk1
-where (thousand, tenthous, four) > (998, 5000, 3)
-order by thousand, tenthous;
- thousand | tenthous | four
-----------+----------+------
- 998 | 5998 | 2
- 998 | 6998 | 2
- 998 | 7998 | 2
- 998 | 8998 | 2
- 998 | 9998 | 2
- 999 | 999 | 3
- 999 | 1999 | 3
- 999 | 2999 | 3
- 999 | 3999 | 3
- 999 | 4999 | 3
- 999 | 5999 | 3
- 999 | 6999 | 3
- 999 | 7999 | 3
- 999 | 8999 | 3
- 999 | 9999 | 3
-(15 rows)
-
-explain (costs off)
-select thousand, tenthous from tenk1
-where (998, 5000) < (thousand, tenthous)
-order by thousand, tenthous;
- QUERY PLAN
-----------------------------------------------------------
- Index Only Scan using tenk1_thous_tenthous on tenk1
- Index Cond: (ROW(thousand, tenthous) > ROW(998, 5000))
-(2 rows)
-
-select thousand, tenthous from tenk1
-where (998, 5000) < (thousand, tenthous)
-order by thousand, tenthous;
- thousand | tenthous
-----------+----------
- 998 | 5998
- 998 | 6998
- 998 | 7998
- 998 | 8998
- 998 | 9998
- 999 | 999
- 999 | 1999
- 999 | 2999
- 999 | 3999
- 999 | 4999
- 999 | 5999
- 999 | 6999
- 999 | 7999
- 999 | 8999
- 999 | 9999
-(15 rows)
-
-explain (costs off)
-select thousand, hundred from tenk1
-where (998, 5000) < (thousand, hundred)
-order by thousand, hundred;
- QUERY PLAN
------------------------------------------------------------
- Sort
- Sort Key: thousand, hundred
- -> Bitmap Heap Scan on tenk1
- Filter: (ROW(998, 5000) < ROW(thousand, hundred))
- -> Bitmap Index Scan on tenk1_thous_tenthous
- Index Cond: (thousand >= 998)
-(6 rows)
-
-select thousand, hundred from tenk1
-where (998, 5000) < (thousand, hundred)
-order by thousand, hundred;
- thousand | hundred
-----------+---------
- 999 | 99
- 999 | 99
- 999 | 99
- 999 | 99
- 999 | 99
- 999 | 99
- 999 | 99
- 999 | 99
- 999 | 99
- 999 | 99
-(10 rows)
-
--- Test case for bug #14010: indexed row comparisons fail with nulls
-create temp table test_table (a text, b text);
-insert into test_table values ('a', 'b');
-insert into test_table select 'a', null from generate_series(1,1000);
-insert into test_table values ('b', 'a');
-create index on test_table (a,b);
-set enable_sort = off;
-explain (costs off)
-select a,b from test_table where (a,b) > ('a','a') order by a,b;
- QUERY PLAN
---------------------------------------------------------
- Index Only Scan using test_table_a_b_idx on test_table
- Index Cond: (ROW(a, b) > ROW('a'::text, 'a'::text))
-(2 rows)
-
-select a,b from test_table where (a,b) > ('a','a') order by a,b;
- a | b
----+---
- a | b
- b | a
-(2 rows)
-
-reset enable_sort;
--- Check row comparisons with IN
-select * from int8_tbl i8 where i8 in (row(123,456)); -- fail, type mismatch
-ERROR: cannot compare dissimilar column types bigint and integer at record column 1
-explain (costs off)
-select * from int8_tbl i8
-where i8 in (row(123,456)::int8_tbl, '(4567890123456789,123)');
- QUERY PLAN
--------------------------------------------------------------------------------
- Seq Scan on int8_tbl i8
- Filter: (i8.* = ANY ('{"(123,456)","(4567890123456789,123)"}'::int8_tbl[]))
-(2 rows)
-
-select * from int8_tbl i8
-where i8 in (row(123,456)::int8_tbl, '(4567890123456789,123)');
- q1 | q2
-------------------+-----
- 123 | 456
- 4567890123456789 | 123
-(2 rows)
-
--- Check ability to select columns from an anonymous rowtype
-select (row(1, 2.0)).f1;
- f1
-----
- 1
-(1 row)
-
-select (row(1, 2.0)).f2;
- f2
------
- 2.0
-(1 row)
-
-select (row(1, 2.0)).nosuch; -- fail
-ERROR: could not identify column "nosuch" in record data type
-LINE 1: select (row(1, 2.0)).nosuch;
- ^
-select (row(1, 2.0)).*;
- f1 | f2
-----+-----
- 1 | 2.0
-(1 row)
-
-select (r).f1 from (select row(1, 2.0) as r) ss;
- f1
-----
- 1
-(1 row)
-
-select (r).f3 from (select row(1, 2.0) as r) ss; -- fail
-ERROR: could not identify column "f3" in record data type
-LINE 1: select (r).f3 from (select row(1, 2.0) as r) ss;
- ^
-select (r).* from (select row(1, 2.0) as r) ss;
- f1 | f2
-----+-----
- 1 | 2.0
-(1 row)
-
--- Check some corner cases involving empty rowtypes
-select ROW();
- row
------
- ()
-(1 row)
-
-select ROW() IS NULL;
- ?column?
-----------
- t
-(1 row)
-
-select ROW() = ROW();
-ERROR: cannot compare rows of zero length
-LINE 1: select ROW() = ROW();
- ^
--- Check ability to create arrays of anonymous rowtypes
-select array[ row(1,2), row(3,4), row(5,6) ];
- array
----------------------------
- {"(1,2)","(3,4)","(5,6)"}
-(1 row)
-
--- Check ability to compare an anonymous row to elements of an array
-select row(1,1.1) = any (array[ row(7,7.7), row(1,1.1), row(0,0.0) ]);
- ?column?
-----------
- t
-(1 row)
-
-select row(1,1.1) = any (array[ row(7,7.7), row(1,1.0), row(0,0.0) ]);
- ?column?
-----------
- f
-(1 row)
-
--- Check behavior with a non-comparable rowtype
-create type cantcompare as (p point, r float8);
-create temp table cc (f1 cantcompare);
-insert into cc values('("(1,2)",3)');
-insert into cc values('("(4,5)",6)');
-select * from cc order by f1; -- fail, but should complain about cantcompare
-ERROR: could not identify an ordering operator for type cantcompare
-LINE 1: select * from cc order by f1;
- ^
-HINT: Use an explicit ordering operator or modify the query.
---
--- Tests for record_{eq,cmp}
---
-create type testtype1 as (a int, b int);
--- all true
-select row(1, 2)::testtype1 < row(1, 3)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 2)::testtype1 <= row(1, 3)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 2)::testtype1 = row(1, 2)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 2)::testtype1 <> row(1, 3)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 3)::testtype1 >= row(1, 2)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 3)::testtype1 > row(1, 2)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
--- all false
-select row(1, -2)::testtype1 < row(1, -3)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -2)::testtype1 <= row(1, -3)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -2)::testtype1 = row(1, -3)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -2)::testtype1 <> row(1, -2)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -3)::testtype1 >= row(1, -2)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -3)::testtype1 > row(1, -2)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
--- true, but see *< below
-select row(1, -2)::testtype1 < row(1, 3)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
--- mismatches
-create type testtype3 as (a int, b text);
-select row(1, 2)::testtype1 < row(1, 'abc')::testtype3;
-ERROR: cannot compare dissimilar column types integer and text at record column 2
-select row(1, 2)::testtype1 <> row(1, 'abc')::testtype3;
-ERROR: cannot compare dissimilar column types integer and text at record column 2
-create type testtype5 as (a int);
-select row(1, 2)::testtype1 < row(1)::testtype5;
-ERROR: cannot compare record types with different numbers of columns
-select row(1, 2)::testtype1 <> row(1)::testtype5;
-ERROR: cannot compare record types with different numbers of columns
--- non-comparable types
-create type testtype6 as (a int, b point);
-select row(1, '(1,2)')::testtype6 < row(1, '(1,3)')::testtype6;
-ERROR: could not identify a comparison function for type point
-select row(1, '(1,2)')::testtype6 <> row(1, '(1,3)')::testtype6;
-ERROR: could not identify an equality operator for type point
-drop type testtype1, testtype3, testtype5, testtype6;
---
--- Tests for record_image_{eq,cmp}
---
-create type testtype1 as (a int, b int);
--- all true
-select row(1, 2)::testtype1 *< row(1, 3)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 2)::testtype1 *<= row(1, 3)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 2)::testtype1 *= row(1, 2)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 2)::testtype1 *<> row(1, 3)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 3)::testtype1 *>= row(1, 2)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 3)::testtype1 *> row(1, 2)::testtype1;
- ?column?
-----------
- t
-(1 row)
-
--- all false
-select row(1, -2)::testtype1 *< row(1, -3)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -2)::testtype1 *<= row(1, -3)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -2)::testtype1 *= row(1, -3)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -2)::testtype1 *<> row(1, -2)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -3)::testtype1 *>= row(1, -2)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, -3)::testtype1 *> row(1, -2)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
--- This returns the "wrong" order because record_image_cmp works on
--- unsigned datums without knowing about the actual data type.
-select row(1, -2)::testtype1 *< row(1, 3)::testtype1;
- ?column?
-----------
- f
-(1 row)
-
--- other types
-create type testtype2 as (a smallint, b bool); -- byval different sizes
-select row(1, true)::testtype2 *< row(2, true)::testtype2;
- ?column?
-----------
- t
-(1 row)
-
-select row(-2, true)::testtype2 *< row(-1, true)::testtype2;
- ?column?
-----------
- t
-(1 row)
-
-select row(0, false)::testtype2 *< row(0, true)::testtype2;
- ?column?
-----------
- t
-(1 row)
-
-select row(0, false)::testtype2 *<> row(0, true)::testtype2;
- ?column?
-----------
- t
-(1 row)
-
-create type testtype3 as (a int, b text); -- variable length
-select row(1, 'abc')::testtype3 *< row(1, 'abd')::testtype3;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 'abc')::testtype3 *< row(1, 'abcd')::testtype3;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, 'abc')::testtype3 *> row(1, 'abd')::testtype3;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, 'abc')::testtype3 *<> row(1, 'abd')::testtype3;
- ?column?
-----------
- t
-(1 row)
-
-create type testtype4 as (a int, b point); -- by ref, fixed length
-select row(1, '(1,2)')::testtype4 *< row(1, '(1,3)')::testtype4;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, '(1,2)')::testtype4 *<> row(1, '(1,3)')::testtype4;
- ?column?
-----------
- t
-(1 row)
-
--- mismatches
-select row(1, 2)::testtype1 *< row(1, 'abc')::testtype3;
-ERROR: cannot compare dissimilar column types integer and text at record column 2
-select row(1, 2)::testtype1 *<> row(1, 'abc')::testtype3;
-ERROR: cannot compare dissimilar column types integer and text at record column 2
-create type testtype5 as (a int);
-select row(1, 2)::testtype1 *< row(1)::testtype5;
-ERROR: cannot compare record types with different numbers of columns
-select row(1, 2)::testtype1 *<> row(1)::testtype5;
-ERROR: cannot compare record types with different numbers of columns
--- non-comparable types
-create type testtype6 as (a int, b point);
-select row(1, '(1,2)')::testtype6 *< row(1, '(1,3)')::testtype6;
- ?column?
-----------
- t
-(1 row)
-
-select row(1, '(1,2)')::testtype6 *>= row(1, '(1,3)')::testtype6;
- ?column?
-----------
- f
-(1 row)
-
-select row(1, '(1,2)')::testtype6 *<> row(1, '(1,3)')::testtype6;
- ?column?
-----------
- t
-(1 row)
-
--- anonymous rowtypes in coldeflists
-select q.a, q.b = row(2), q.c = array[row(3)], q.d = row(row(4)) from
- unnest(array[row(1, row(2), array[row(3)], row(row(4))),
- row(2, row(3), array[row(4)], row(row(5)))])
- as q(a int, b record, c record[], d record);
- a | ?column? | ?column? | ?column?
----+----------+----------+----------
- 1 | t | t | t
- 2 | f | f | f
-(2 rows)
-
-drop type testtype1, testtype2, testtype3, testtype4, testtype5, testtype6;
---
--- Test case derived from bug #5716: check multiple uses of a rowtype result
---
-BEGIN;
-CREATE TABLE price (
- id SERIAL PRIMARY KEY,
- active BOOLEAN NOT NULL,
- price NUMERIC
-);
-CREATE TYPE price_input AS (
- id INTEGER,
- price NUMERIC
-);
-CREATE TYPE price_key AS (
- id INTEGER
-);
-CREATE FUNCTION price_key_from_table(price) RETURNS price_key AS $$
- SELECT $1.id
-$$ LANGUAGE SQL;
-CREATE FUNCTION price_key_from_input(price_input) RETURNS price_key AS $$
- SELECT $1.id
-$$ LANGUAGE SQL;
-insert into price values (1,false,42), (10,false,100), (11,true,17.99);
-UPDATE price
- SET active = true, price = input_prices.price
- FROM unnest(ARRAY[(10, 123.00), (11, 99.99)]::price_input[]) input_prices
- WHERE price_key_from_table(price.*) = price_key_from_input(input_prices.*);
-select * from price;
- id | active | price
-----+--------+--------
- 1 | f | 42
- 10 | t | 123.00
- 11 | t | 99.99
-(3 rows)
-
-rollback;
---
--- Test case derived from bug #9085: check * qualification of composite
--- parameters for SQL functions
---
-create temp table compos (f1 int, f2 text);
-create function fcompos1(v compos) returns void as $$
-insert into compos values (v); -- fail
-$$ language sql;
-ERROR: column "f1" is of type integer but expression is of type compos
-LINE 2: insert into compos values (v); -- fail
- ^
-HINT: You will need to rewrite or cast the expression.
-create function fcompos1(v compos) returns void as $$
-insert into compos values (v.*);
-$$ language sql;
-create function fcompos2(v compos) returns void as $$
-select fcompos1(v);
-$$ language sql;
-create function fcompos3(v compos) returns void as $$
-select fcompos1(fcompos3.v.*);
-$$ language sql;
-select fcompos1(row(1,'one'));
- fcompos1
-----------
-
-(1 row)
-
-select fcompos2(row(2,'two'));
- fcompos2
-----------
-
-(1 row)
-
-select fcompos3(row(3,'three'));
- fcompos3
-----------
-
-(1 row)
-
-select * from compos;
- f1 | f2
-----+-------
- 1 | one
- 2 | two
- 3 | three
-(3 rows)
-
---
--- We allow I/O conversion casts from composite types to strings to be
--- invoked via cast syntax, but not functional syntax. This is because
--- the latter is too prone to be invoked unintentionally.
---
-select cast (fullname as text) from fullname;
- fullname
-----------
-(0 rows)
-
-select fullname::text from fullname;
- fullname
-----------
-(0 rows)
-
-select text(fullname) from fullname; -- error
-ERROR: function text(fullname) does not exist
-LINE 1: select text(fullname) from fullname;
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select fullname.text from fullname; -- error
-ERROR: column fullname.text does not exist
-LINE 1: select fullname.text from fullname;
- ^
--- same, but RECORD instead of named composite type:
-select cast (row('Jim', 'Beam') as text);
- row
-------------
- (Jim,Beam)
-(1 row)
-
-select (row('Jim', 'Beam'))::text;
- row
-------------
- (Jim,Beam)
-(1 row)
-
-select text(row('Jim', 'Beam')); -- error
-ERROR: function text(record) does not exist
-LINE 1: select text(row('Jim', 'Beam'));
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-select (row('Jim', 'Beam')).text; -- error
-ERROR: could not identify column "text" in record data type
-LINE 1: select (row('Jim', 'Beam')).text;
- ^
---
--- Check the equivalence of functional and column notation
---
-insert into fullname values ('Joe', 'Blow');
-select f.last from fullname f;
- last
-------
- Blow
-(1 row)
-
-select last(f) from fullname f;
- last
-------
- Blow
-(1 row)
-
-create function longname(fullname) returns text language sql
-as $$select $1.first || ' ' || $1.last$$;
-select f.longname from fullname f;
- longname
-----------
- Joe Blow
-(1 row)
-
-select longname(f) from fullname f;
- longname
-----------
- Joe Blow
-(1 row)
-
--- Starting in v11, the notational form does matter if there's ambiguity
-alter table fullname add column longname text;
-select f.longname from fullname f;
- longname
-----------
-
-(1 row)
-
-select longname(f) from fullname f;
- longname
-----------
- Joe Blow
-(1 row)
-
---
--- Test that composite values are seen to have the correct column names
--- (bug #11210 and other reports)
---
-select row_to_json(i) from int8_tbl i;
- row_to_json
-------------------------------------------------
- {"q1":123,"q2":456}
- {"q1":123,"q2":4567890123456789}
- {"q1":4567890123456789,"q2":123}
- {"q1":4567890123456789,"q2":4567890123456789}
- {"q1":4567890123456789,"q2":-4567890123456789}
-(5 rows)
-
--- since "i" is of type "int8_tbl", attaching aliases doesn't change anything:
-select row_to_json(i) from int8_tbl i(x,y);
- row_to_json
-------------------------------------------------
- {"q1":123,"q2":456}
- {"q1":123,"q2":4567890123456789}
- {"q1":4567890123456789,"q2":123}
- {"q1":4567890123456789,"q2":4567890123456789}
- {"q1":4567890123456789,"q2":-4567890123456789}
-(5 rows)
-
--- in these examples, we'll report the exposed column names of the subselect:
-select row_to_json(ss) from
- (select q1, q2 from int8_tbl) as ss;
- row_to_json
-------------------------------------------------
- {"q1":123,"q2":456}
- {"q1":123,"q2":4567890123456789}
- {"q1":4567890123456789,"q2":123}
- {"q1":4567890123456789,"q2":4567890123456789}
- {"q1":4567890123456789,"q2":-4567890123456789}
-(5 rows)
-
-select row_to_json(ss) from
- (select q1, q2 from int8_tbl offset 0) as ss;
- row_to_json
-------------------------------------------------
- {"q1":123,"q2":456}
- {"q1":123,"q2":4567890123456789}
- {"q1":4567890123456789,"q2":123}
- {"q1":4567890123456789,"q2":4567890123456789}
- {"q1":4567890123456789,"q2":-4567890123456789}
-(5 rows)
-
-select row_to_json(ss) from
- (select q1 as a, q2 as b from int8_tbl) as ss;
- row_to_json
-----------------------------------------------
- {"a":123,"b":456}
- {"a":123,"b":4567890123456789}
- {"a":4567890123456789,"b":123}
- {"a":4567890123456789,"b":4567890123456789}
- {"a":4567890123456789,"b":-4567890123456789}
-(5 rows)
-
-select row_to_json(ss) from
- (select q1 as a, q2 as b from int8_tbl offset 0) as ss;
- row_to_json
-----------------------------------------------
- {"a":123,"b":456}
- {"a":123,"b":4567890123456789}
- {"a":4567890123456789,"b":123}
- {"a":4567890123456789,"b":4567890123456789}
- {"a":4567890123456789,"b":-4567890123456789}
-(5 rows)
-
-select row_to_json(ss) from
- (select q1 as a, q2 as b from int8_tbl) as ss(x,y);
- row_to_json
-----------------------------------------------
- {"x":123,"y":456}
- {"x":123,"y":4567890123456789}
- {"x":4567890123456789,"y":123}
- {"x":4567890123456789,"y":4567890123456789}
- {"x":4567890123456789,"y":-4567890123456789}
-(5 rows)
-
-select row_to_json(ss) from
- (select q1 as a, q2 as b from int8_tbl offset 0) as ss(x,y);
- row_to_json
-----------------------------------------------
- {"x":123,"y":456}
- {"x":123,"y":4567890123456789}
- {"x":4567890123456789,"y":123}
- {"x":4567890123456789,"y":4567890123456789}
- {"x":4567890123456789,"y":-4567890123456789}
-(5 rows)
-
-explain (costs off)
-select row_to_json(q) from
- (select thousand, tenthous from tenk1
- where thousand = 42 and tenthous < 2000 offset 0) q;
- QUERY PLAN
--------------------------------------------------------------
- Subquery Scan on q
- -> Index Only Scan using tenk1_thous_tenthous on tenk1
- Index Cond: ((thousand = 42) AND (tenthous < 2000))
-(3 rows)
-
-select row_to_json(q) from
- (select thousand, tenthous from tenk1
- where thousand = 42 and tenthous < 2000 offset 0) q;
- row_to_json
----------------------------------
- {"thousand":42,"tenthous":42}
- {"thousand":42,"tenthous":1042}
-(2 rows)
-
-select row_to_json(q) from
- (select thousand as x, tenthous as y from tenk1
- where thousand = 42 and tenthous < 2000 offset 0) q;
- row_to_json
--------------------
- {"x":42,"y":42}
- {"x":42,"y":1042}
-(2 rows)
-
-select row_to_json(q) from
- (select thousand as x, tenthous as y from tenk1
- where thousand = 42 and tenthous < 2000 offset 0) q(a,b);
- row_to_json
--------------------
- {"a":42,"b":42}
- {"a":42,"b":1042}
-(2 rows)
-
-create temp table tt1 as select * from int8_tbl limit 2;
-create temp table tt2 () inherits(tt1);
-insert into tt2 values(0,0);
-select row_to_json(r) from (select q2,q1 from tt1 offset 0) r;
- row_to_json
-----------------------------------
- {"q2":456,"q1":123}
- {"q2":4567890123456789,"q1":123}
- {"q2":0,"q1":0}
-(3 rows)
-
--- check no-op rowtype conversions
-create temp table tt3 () inherits(tt2);
-insert into tt3 values(33,44);
-select row_to_json(tt3::tt2::tt1) from tt3;
- row_to_json
--------------------
- {"q1":33,"q2":44}
-(1 row)
-
---
--- IS [NOT] NULL should not recurse into nested composites (bug #14235)
---
-explain (verbose, costs off)
-select r, r is null as isnull, r is not null as isnotnull
-from (values (1,row(1,2)), (1,row(null,null)), (1,null),
- (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b);
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Values Scan on "*VALUES*"
- Output: ROW("*VALUES*".column1, "*VALUES*".column2), (("*VALUES*".column1 IS NULL) AND ("*VALUES*".column2 IS NOT DISTINCT FROM NULL)), (("*VALUES*".column1 IS NOT NULL) AND ("*VALUES*".column2 IS DISTINCT FROM NULL))
-(2 rows)
-
-select r, r is null as isnull, r is not null as isnotnull
-from (values (1,row(1,2)), (1,row(null,null)), (1,null),
- (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b);
- r | isnull | isnotnull
--------------+--------+-----------
- (1,"(1,2)") | f | t
- (1,"(,)") | f | t
- (1,) | f | f
- (,"(1,2)") | f | f
- (,"(,)") | f | f
- (,) | t | f
-(6 rows)
-
-explain (verbose, costs off)
-with r(a,b) as materialized
- (values (1,row(1,2)), (1,row(null,null)), (1,null),
- (null,row(1,2)), (null,row(null,null)), (null,null) )
-select r, r is null as isnull, r is not null as isnotnull from r;
- QUERY PLAN
-----------------------------------------------------------
- CTE Scan on r
- Output: r.*, (r.* IS NULL), (r.* IS NOT NULL)
- CTE r
- -> Values Scan on "*VALUES*"
- Output: "*VALUES*".column1, "*VALUES*".column2
-(5 rows)
-
-with r(a,b) as materialized
- (values (1,row(1,2)), (1,row(null,null)), (1,null),
- (null,row(1,2)), (null,row(null,null)), (null,null) )
-select r, r is null as isnull, r is not null as isnotnull from r;
- r | isnull | isnotnull
--------------+--------+-----------
- (1,"(1,2)") | f | t
- (1,"(,)") | f | t
- (1,) | f | f
- (,"(1,2)") | f | f
- (,"(,)") | f | f
- (,) | t | f
-(6 rows)
-
---
--- Check parsing of indirect references to composite values (bug #18077)
---
-explain (verbose, costs off)
-with cte(c) as materialized (select row(1, 2)),
- cte2(c) as (select * from cte)
-select * from cte2 as t
-where (select * from (select c as c1) s
- where (select (c1).f1 > 0)) is not null;
- QUERY PLAN
-----------------------------------------------
- CTE Scan on cte
- Output: cte.c
- Filter: ((SubPlan 3) IS NOT NULL)
- CTE cte
- -> Result
- Output: '(1,2)'::record
- SubPlan 3
- -> Result
- Output: cte.c
- One-Time Filter: (InitPlan 2).col1
- InitPlan 2
- -> Result
- Output: ((cte.c).f1 > 0)
-(13 rows)
-
-with cte(c) as materialized (select row(1, 2)),
- cte2(c) as (select * from cte)
-select * from cte2 as t
-where (select * from (select c as c1) s
- where (select (c1).f1 > 0)) is not null;
- c
--------
- (1,2)
-(1 row)
-
--- Also check deparsing of such cases
-create view composite_v as
-with cte(c) as materialized (select row(1, 2)),
- cte2(c) as (select * from cte)
-select 1 as one from cte2 as t
-where (select * from (select c as c1) s
- where (select (c1).f1 > 0)) is not null;
-select pg_get_viewdef('composite_v', true);
- pg_get_viewdef
---------------------------------------------------------
- WITH cte(c) AS MATERIALIZED ( +
- SELECT ROW(1, 2) AS "row" +
- ), cte2(c) AS ( +
- SELECT cte.c +
- FROM cte +
- ) +
- SELECT 1 AS one +
- FROM cte2 t +
- WHERE (( SELECT s.c1 +
- FROM ( SELECT t.c AS c1) s +
- WHERE ( SELECT (s.c1).f1 > 0))) IS NOT NULL;
-(1 row)
-
-drop view composite_v;
---
--- Check cases where the composite comes from a proven-dummy rel (bug #18576)
---
-explain (verbose, costs off)
-select (ss.a).x, (ss.a).n from
- (select information_schema._pg_expandarray(array[1,2]) AS a) ss;
- QUERY PLAN
-------------------------------------------------------------------------
- Subquery Scan on ss
- Output: (ss.a).x, (ss.a).n
- -> ProjectSet
- Output: information_schema._pg_expandarray('{1,2}'::integer[])
- -> Result
-(5 rows)
-
-explain (verbose, costs off)
-select (ss.a).x, (ss.a).n from
- (select information_schema._pg_expandarray(array[1,2]) AS a) ss
-where false;
- QUERY PLAN
---------------------------
- Result
- Output: (a).f1, (a).f2
- One-Time Filter: false
-(3 rows)
-
-explain (verbose, costs off)
-with cte(c) as materialized (select row(1, 2)),
- cte2(c) as (select * from cte)
-select (c).f1 from cte2 as t;
- QUERY PLAN
------------------------------------
- CTE Scan on cte
- Output: (cte.c).f1
- CTE cte
- -> Result
- Output: '(1,2)'::record
-(5 rows)
-
-explain (verbose, costs off)
-with cte(c) as materialized (select row(1, 2)),
- cte2(c) as (select * from cte)
-select (c).f1 from cte2 as t
-where false;
- QUERY PLAN
------------------------------------
- Result
- Output: (cte.c).f1
- One-Time Filter: false
- CTE cte
- -> Result
- Output: '(1,2)'::record
-(6 rows)
-
---
--- Tests for component access / FieldSelect
---
-CREATE TABLE compositetable(a text, b text);
-INSERT INTO compositetable(a, b) VALUES('fa', 'fb');
--- composite type columns can't directly be accessed (error)
-SELECT d.a FROM (SELECT compositetable AS d FROM compositetable) s;
-ERROR: missing FROM-clause entry for table "d"
-LINE 1: SELECT d.a FROM (SELECT compositetable AS d FROM compositeta...
- ^
--- but can be accessed with proper parens
-SELECT (d).a, (d).b FROM (SELECT compositetable AS d FROM compositetable) s;
- a | b
-----+----
- fa | fb
-(1 row)
-
--- system columns can't be accessed in composite types (error)
-SELECT (d).ctid FROM (SELECT compositetable AS d FROM compositetable) s;
-ERROR: column "ctid" not found in data type compositetable
-LINE 1: SELECT (d).ctid FROM (SELECT compositetable AS d FROM compos...
- ^
--- accessing non-existing column in NULL datum errors out
-SELECT (NULL::compositetable).nonexistent;
-ERROR: column "nonexistent" not found in data type compositetable
-LINE 1: SELECT (NULL::compositetable).nonexistent;
- ^
--- existing column in a NULL composite yield NULL
-SELECT (NULL::compositetable).a;
- a
----
-
-(1 row)
-
--- oids can't be accessed in composite types (error)
-SELECT (NULL::compositetable).oid;
-ERROR: column "oid" not found in data type compositetable
-LINE 1: SELECT (NULL::compositetable).oid;
- ^
-DROP TABLE compositetable;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/returning.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/returning.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/returning.out 2024-11-15 02:50:52.494042466 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/returning.out 2024-11-15 02:59:18.197116986 +0000
@@ -1,357 +1,2 @@
---
--- Test INSERT/UPDATE/DELETE RETURNING
---
--- Simple cases
-CREATE TEMP TABLE foo (f1 serial, f2 text, f3 int default 42);
-INSERT INTO foo (f2,f3)
- VALUES ('test', DEFAULT), ('More', 11), (upper('more'), 7+9)
- RETURNING *, f1+f3 AS sum;
- f1 | f2 | f3 | sum
-----+------+----+-----
- 1 | test | 42 | 43
- 2 | More | 11 | 13
- 3 | MORE | 16 | 19
-(3 rows)
-
-SELECT * FROM foo;
- f1 | f2 | f3
-----+------+----
- 1 | test | 42
- 2 | More | 11
- 3 | MORE | 16
-(3 rows)
-
-UPDATE foo SET f2 = lower(f2), f3 = DEFAULT RETURNING foo.*, f1+f3 AS sum13;
- f1 | f2 | f3 | sum13
-----+------+----+-------
- 1 | test | 42 | 43
- 2 | more | 42 | 44
- 3 | more | 42 | 45
-(3 rows)
-
-SELECT * FROM foo;
- f1 | f2 | f3
-----+------+----
- 1 | test | 42
- 2 | more | 42
- 3 | more | 42
-(3 rows)
-
-DELETE FROM foo WHERE f1 > 2 RETURNING f3, f2, f1, least(f1,f3);
- f3 | f2 | f1 | least
-----+------+----+-------
- 42 | more | 3 | 3
-(1 row)
-
-SELECT * FROM foo;
- f1 | f2 | f3
-----+------+----
- 1 | test | 42
- 2 | more | 42
-(2 rows)
-
--- Subplans and initplans in the RETURNING list
-INSERT INTO foo SELECT f1+10, f2, f3+99 FROM foo
- RETURNING *, f1+112 IN (SELECT q1 FROM int8_tbl) AS subplan,
- EXISTS(SELECT * FROM int4_tbl) AS initplan;
- f1 | f2 | f3 | subplan | initplan
-----+------+-----+---------+----------
- 11 | test | 141 | t | t
- 12 | more | 141 | f | t
-(2 rows)
-
-UPDATE foo SET f3 = f3 * 2
- WHERE f1 > 10
- RETURNING *, f1+112 IN (SELECT q1 FROM int8_tbl) AS subplan,
- EXISTS(SELECT * FROM int4_tbl) AS initplan;
- f1 | f2 | f3 | subplan | initplan
-----+------+-----+---------+----------
- 11 | test | 282 | t | t
- 12 | more | 282 | f | t
-(2 rows)
-
-DELETE FROM foo
- WHERE f1 > 10
- RETURNING *, f1+112 IN (SELECT q1 FROM int8_tbl) AS subplan,
- EXISTS(SELECT * FROM int4_tbl) AS initplan;
- f1 | f2 | f3 | subplan | initplan
-----+------+-----+---------+----------
- 11 | test | 282 | t | t
- 12 | more | 282 | f | t
-(2 rows)
-
--- Joins
-UPDATE foo SET f3 = f3*2
- FROM int4_tbl i
- WHERE foo.f1 + 123455 = i.f1
- RETURNING foo.*, i.f1 as "i.f1";
- f1 | f2 | f3 | i.f1
-----+------+----+--------
- 1 | test | 84 | 123456
-(1 row)
-
-SELECT * FROM foo;
- f1 | f2 | f3
-----+------+----
- 2 | more | 42
- 1 | test | 84
-(2 rows)
-
-DELETE FROM foo
- USING int4_tbl i
- WHERE foo.f1 + 123455 = i.f1
- RETURNING foo.*, i.f1 as "i.f1";
- f1 | f2 | f3 | i.f1
-----+------+----+--------
- 1 | test | 84 | 123456
-(1 row)
-
-SELECT * FROM foo;
- f1 | f2 | f3
-----+------+----
- 2 | more | 42
-(1 row)
-
--- Check inheritance cases
-CREATE TEMP TABLE foochild (fc int) INHERITS (foo);
-INSERT INTO foochild VALUES(123,'child',999,-123);
-ALTER TABLE foo ADD COLUMN f4 int8 DEFAULT 99;
-SELECT * FROM foo;
- f1 | f2 | f3 | f4
------+-------+-----+----
- 2 | more | 42 | 99
- 123 | child | 999 | 99
-(2 rows)
-
-SELECT * FROM foochild;
- f1 | f2 | f3 | fc | f4
------+-------+-----+------+----
- 123 | child | 999 | -123 | 99
-(1 row)
-
-UPDATE foo SET f4 = f4 + f3 WHERE f4 = 99 RETURNING *;
- f1 | f2 | f3 | f4
------+-------+-----+------
- 2 | more | 42 | 141
- 123 | child | 999 | 1098
-(2 rows)
-
-SELECT * FROM foo;
- f1 | f2 | f3 | f4
------+-------+-----+------
- 2 | more | 42 | 141
- 123 | child | 999 | 1098
-(2 rows)
-
-SELECT * FROM foochild;
- f1 | f2 | f3 | fc | f4
------+-------+-----+------+------
- 123 | child | 999 | -123 | 1098
-(1 row)
-
-UPDATE foo SET f3 = f3*2
- FROM int8_tbl i
- WHERE foo.f1 = i.q2
- RETURNING *;
- f1 | f2 | f3 | f4 | q1 | q2
------+-------+------+------+------------------+-----
- 123 | child | 1998 | 1098 | 4567890123456789 | 123
-(1 row)
-
-SELECT * FROM foo;
- f1 | f2 | f3 | f4
------+-------+------+------
- 2 | more | 42 | 141
- 123 | child | 1998 | 1098
-(2 rows)
-
-SELECT * FROM foochild;
- f1 | f2 | f3 | fc | f4
------+-------+------+------+------
- 123 | child | 1998 | -123 | 1098
-(1 row)
-
-DELETE FROM foo
- USING int8_tbl i
- WHERE foo.f1 = i.q2
- RETURNING *;
- f1 | f2 | f3 | f4 | q1 | q2
------+-------+------+------+------------------+-----
- 123 | child | 1998 | 1098 | 4567890123456789 | 123
-(1 row)
-
-SELECT * FROM foo;
- f1 | f2 | f3 | f4
-----+------+----+-----
- 2 | more | 42 | 141
-(1 row)
-
-SELECT * FROM foochild;
- f1 | f2 | f3 | fc | f4
-----+----+----+----+----
-(0 rows)
-
-DROP TABLE foochild;
--- Rules and views
-CREATE TEMP VIEW voo AS SELECT f1, f2 FROM foo;
-CREATE RULE voo_i AS ON INSERT TO voo DO INSTEAD
- INSERT INTO foo VALUES(new.*, 57);
-INSERT INTO voo VALUES(11,'zit');
--- fails:
-INSERT INTO voo VALUES(12,'zoo') RETURNING *, f1*2;
-ERROR: cannot perform INSERT RETURNING on relation "voo"
-HINT: You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause.
--- fails, incompatible list:
-CREATE OR REPLACE RULE voo_i AS ON INSERT TO voo DO INSTEAD
- INSERT INTO foo VALUES(new.*, 57) RETURNING *;
-ERROR: RETURNING list has too many entries
-CREATE OR REPLACE RULE voo_i AS ON INSERT TO voo DO INSTEAD
- INSERT INTO foo VALUES(new.*, 57) RETURNING f1, f2;
--- should still work
-INSERT INTO voo VALUES(13,'zit2');
--- works now
-INSERT INTO voo VALUES(14,'zoo2') RETURNING *;
- f1 | f2
-----+------
- 14 | zoo2
-(1 row)
-
-SELECT * FROM foo;
- f1 | f2 | f3 | f4
-----+------+----+-----
- 2 | more | 42 | 141
- 11 | zit | 57 | 99
- 13 | zit2 | 57 | 99
- 14 | zoo2 | 57 | 99
-(4 rows)
-
-SELECT * FROM voo;
- f1 | f2
-----+------
- 2 | more
- 11 | zit
- 13 | zit2
- 14 | zoo2
-(4 rows)
-
-CREATE OR REPLACE RULE voo_u AS ON UPDATE TO voo DO INSTEAD
- UPDATE foo SET f1 = new.f1, f2 = new.f2 WHERE f1 = old.f1
- RETURNING f1, f2;
-update voo set f1 = f1 + 1 where f2 = 'zoo2';
-update voo set f1 = f1 + 1 where f2 = 'zoo2' RETURNING *, f1*2;
- f1 | f2 | ?column?
-----+------+----------
- 16 | zoo2 | 32
-(1 row)
-
-SELECT * FROM foo;
- f1 | f2 | f3 | f4
-----+------+----+-----
- 2 | more | 42 | 141
- 11 | zit | 57 | 99
- 13 | zit2 | 57 | 99
- 16 | zoo2 | 57 | 99
-(4 rows)
-
-SELECT * FROM voo;
- f1 | f2
-----+------
- 2 | more
- 11 | zit
- 13 | zit2
- 16 | zoo2
-(4 rows)
-
-CREATE OR REPLACE RULE voo_d AS ON DELETE TO voo DO INSTEAD
- DELETE FROM foo WHERE f1 = old.f1
- RETURNING f1, f2;
-DELETE FROM foo WHERE f1 = 13;
-DELETE FROM foo WHERE f2 = 'zit' RETURNING *;
- f1 | f2 | f3 | f4
-----+-----+----+----
- 11 | zit | 57 | 99
-(1 row)
-
-SELECT * FROM foo;
- f1 | f2 | f3 | f4
-----+------+----+-----
- 2 | more | 42 | 141
- 16 | zoo2 | 57 | 99
-(2 rows)
-
-SELECT * FROM voo;
- f1 | f2
-----+------
- 2 | more
- 16 | zoo2
-(2 rows)
-
--- Try a join case
-CREATE TEMP TABLE joinme (f2j text, other int);
-INSERT INTO joinme VALUES('more', 12345);
-INSERT INTO joinme VALUES('zoo2', 54321);
-INSERT INTO joinme VALUES('other', 0);
-CREATE TEMP VIEW joinview AS
- SELECT foo.*, other FROM foo JOIN joinme ON (f2 = f2j);
-SELECT * FROM joinview;
- f1 | f2 | f3 | f4 | other
-----+------+----+-----+-------
- 2 | more | 42 | 141 | 12345
- 16 | zoo2 | 57 | 99 | 54321
-(2 rows)
-
-CREATE RULE joinview_u AS ON UPDATE TO joinview DO INSTEAD
- UPDATE foo SET f1 = new.f1, f3 = new.f3
- FROM joinme WHERE f2 = f2j AND f2 = old.f2
- RETURNING foo.*, other;
-UPDATE joinview SET f1 = f1 + 1 WHERE f3 = 57 RETURNING *, other + 1;
- f1 | f2 | f3 | f4 | other | ?column?
-----+------+----+----+-------+----------
- 17 | zoo2 | 57 | 99 | 54321 | 54322
-(1 row)
-
-SELECT * FROM joinview;
- f1 | f2 | f3 | f4 | other
-----+------+----+-----+-------
- 2 | more | 42 | 141 | 12345
- 17 | zoo2 | 57 | 99 | 54321
-(2 rows)
-
-SELECT * FROM foo;
- f1 | f2 | f3 | f4
-----+------+----+-----
- 2 | more | 42 | 141
- 17 | zoo2 | 57 | 99
-(2 rows)
-
-SELECT * FROM voo;
- f1 | f2
-----+------
- 2 | more
- 17 | zoo2
-(2 rows)
-
--- Check aliased target relation
-INSERT INTO foo AS bar DEFAULT VALUES RETURNING *; -- ok
- f1 | f2 | f3 | f4
-----+----+----+----
- 4 | | 42 | 99
-(1 row)
-
-INSERT INTO foo AS bar DEFAULT VALUES RETURNING foo.*; -- fails, wrong name
-ERROR: invalid reference to FROM-clause entry for table "foo"
-LINE 1: INSERT INTO foo AS bar DEFAULT VALUES RETURNING foo.*;
- ^
-HINT: Perhaps you meant to reference the table alias "bar".
-INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.*; -- ok
- f1 | f2 | f3 | f4
-----+----+----+----
- 5 | | 42 | 99
-(1 row)
-
-INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.f3; -- ok
- f3
-----
- 42
-(1 row)
-
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/largeobject.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/largeobject.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/largeobject.out 2024-11-15 02:50:52.462095130 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/largeobject.out 2024-11-15 02:59:18.189116977 +0000
@@ -1,563 +1,2 @@
---
--- Test large object support
---
--- directory paths are passed to us in environment variables
-\getenv abs_srcdir PG_ABS_SRCDIR
-\getenv abs_builddir PG_ABS_BUILDDIR
--- ensure consistent test output regardless of the default bytea format
-SET bytea_output TO escape;
--- Test ALTER LARGE OBJECT OWNER
-CREATE ROLE regress_lo_user;
-SELECT lo_create(42);
- lo_create
------------
- 42
-(1 row)
-
-ALTER LARGE OBJECT 42 OWNER TO regress_lo_user;
--- Test GRANT, COMMENT as non-superuser
-SET SESSION AUTHORIZATION regress_lo_user;
-GRANT SELECT ON LARGE OBJECT 42 TO public;
-COMMENT ON LARGE OBJECT 42 IS 'the ultimate answer';
-RESET SESSION AUTHORIZATION;
--- Test psql's \lo_list et al (we assume no other LOs exist yet)
-\lo_list
- Large objects
- ID | Owner | Description
-----+-----------------+---------------------
- 42 | regress_lo_user | the ultimate answer
-(1 row)
-
-\lo_list+
- Large objects
- ID | Owner | Access privileges | Description
-----+-----------------+------------------------------------+---------------------
- 42 | regress_lo_user | regress_lo_user=rw/regress_lo_user+| the ultimate answer
- | | =r/regress_lo_user |
-(1 row)
-
-\lo_unlink 42
-\dl
- Large objects
- ID | Owner | Description
-----+-------+-------------
-(0 rows)
-
--- Load a file
-CREATE TABLE lotest_stash_values (loid oid, fd integer);
--- lo_creat(mode integer) returns oid
--- The mode arg to lo_creat is unused, some vestigal holdover from ancient times
--- returns the large object id
-INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42);
--- NOTE: large objects require transactions
-BEGIN;
--- lo_open(lobjId oid, mode integer) returns integer
--- The mode parameter to lo_open uses two constants:
--- INV_WRITE = 0x20000
--- INV_READ = 0x40000
--- The return value is a file descriptor-like value which remains valid for the
--- transaction.
-UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer));
--- loread/lowrite names are wonky, different from other functions which are lo_*
--- lowrite(fd integer, data bytea) returns integer
--- the integer is the number of bytes written
-SELECT lowrite(fd, '
-I wandered lonely as a cloud
-That floats on high o''er vales and hills,
-When all at once I saw a crowd,
-A host, of golden daffodils;
-Beside the lake, beneath the trees,
-Fluttering and dancing in the breeze.
-
-Continuous as the stars that shine
-And twinkle on the milky way,
-They stretched in never-ending line
-Along the margin of a bay:
-Ten thousand saw I at a glance,
-Tossing their heads in sprightly dance.
-
-The waves beside them danced; but they
-Out-did the sparkling waves in glee:
-A poet could not but be gay,
-In such a jocund company:
-I gazed--and gazed--but little thought
-What wealth the show to me had brought:
-
-For oft, when on my couch I lie
-In vacant or in pensive mood,
-They flash upon that inward eye
-Which is the bliss of solitude;
-And then my heart with pleasure fills,
-And dances with the daffodils.
-
- -- William Wordsworth
-') FROM lotest_stash_values;
- lowrite
----------
- 848
-(1 row)
-
--- lo_close(fd integer) returns integer
--- return value is 0 for success, or <0 for error (actually only -1, but...)
-SELECT lo_close(fd) FROM lotest_stash_values;
- lo_close
-----------
- 0
-(1 row)
-
-END;
--- Copy to another large object.
--- Note: we intentionally don't remove the object created here;
--- it's left behind to help test pg_dump.
-SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values
-\gset
--- Add a comment to it, as well, for pg_dump/pg_upgrade testing.
-COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud';
--- Read out a portion
-BEGIN;
-UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer));
--- lo_lseek(fd integer, offset integer, whence integer) returns integer
--- offset is in bytes, whence is one of three values:
--- SEEK_SET (= 0) meaning relative to beginning
--- SEEK_CUR (= 1) meaning relative to current position
--- SEEK_END (= 2) meaning relative to end (offset better be negative)
--- returns current position in file
-SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values;
- lo_lseek
-----------
- 104
-(1 row)
-
--- loread/lowrite names are wonky, different from other functions which are lo_*
--- loread(fd integer, len integer) returns bytea
-SELECT loread(fd, 28) FROM lotest_stash_values;
- loread
-------------------------------
- A host, of golden daffodils;
-(1 row)
-
-SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values;
- lo_lseek
-----------
- 113
-(1 row)
-
-SELECT lowrite(fd, 'n') FROM lotest_stash_values;
- lowrite
----------
- 1
-(1 row)
-
-SELECT lo_tell(fd) FROM lotest_stash_values;
- lo_tell
----------
- 114
-(1 row)
-
-SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values;
- lo_lseek
-----------
- 104
-(1 row)
-
-SELECT loread(fd, 28) FROM lotest_stash_values;
- loread
-------------------------------
- A host, on golden daffodils;
-(1 row)
-
-SELECT lo_close(fd) FROM lotest_stash_values;
- lo_close
-----------
- 0
-(1 row)
-
-END;
--- Test resource management
-BEGIN;
-SELECT lo_open(loid, x'40000'::int) from lotest_stash_values;
- lo_open
----------
- 0
-(1 row)
-
-ABORT;
-\set filename :abs_builddir '/results/invalid/path'
-\set dobody 'DECLARE loid oid; BEGIN '
-\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; '
-\set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); '
-\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN '
-\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END'
-DO :'dobody';
-NOTICE: could not open file, as expected
--- Test truncation.
-BEGIN;
-UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer));
-SELECT lo_truncate(fd, 11) FROM lotest_stash_values;
- lo_truncate
--------------
- 0
-(1 row)
-
-SELECT loread(fd, 15) FROM lotest_stash_values;
- loread
-----------------
- \012I wandered
-(1 row)
-
-SELECT lo_truncate(fd, 10000) FROM lotest_stash_values;
- lo_truncate
--------------
- 0
-(1 row)
-
-SELECT loread(fd, 10) FROM lotest_stash_values;
- loread
-------------------------------------------
- \000\000\000\000\000\000\000\000\000\000
-(1 row)
-
-SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values;
- lo_lseek
-----------
- 10000
-(1 row)
-
-SELECT lo_tell(fd) FROM lotest_stash_values;
- lo_tell
----------
- 10000
-(1 row)
-
-SELECT lo_truncate(fd, 5000) FROM lotest_stash_values;
- lo_truncate
--------------
- 0
-(1 row)
-
-SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values;
- lo_lseek
-----------
- 5000
-(1 row)
-
-SELECT lo_tell(fd) FROM lotest_stash_values;
- lo_tell
----------
- 5000
-(1 row)
-
-SELECT lo_close(fd) FROM lotest_stash_values;
- lo_close
-----------
- 0
-(1 row)
-
-END;
--- Test 64-bit large object functions.
-BEGIN;
-UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer));
-SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values;
- lo_lseek64
-------------
- 4294967296
-(1 row)
-
-SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values;
- lowrite
----------
- 10
-(1 row)
-
-SELECT lo_tell64(fd) FROM lotest_stash_values;
- lo_tell64
-------------
- 4294967306
-(1 row)
-
-SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values;
- lo_lseek64
-------------
- 4294967296
-(1 row)
-
-SELECT lo_tell64(fd) FROM lotest_stash_values;
- lo_tell64
-------------
- 4294967296
-(1 row)
-
-SELECT loread(fd, 10) FROM lotest_stash_values;
- loread
-------------
- offset:4GB
-(1 row)
-
-SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values;
- lo_truncate64
----------------
- 0
-(1 row)
-
-SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values;
- lo_lseek64
-------------
- 5000000000
-(1 row)
-
-SELECT lo_tell64(fd) FROM lotest_stash_values;
- lo_tell64
-------------
- 5000000000
-(1 row)
-
-SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values;
- lo_truncate64
----------------
- 0
-(1 row)
-
-SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values;
- lo_lseek64
-------------
- 3000000000
-(1 row)
-
-SELECT lo_tell64(fd) FROM lotest_stash_values;
- lo_tell64
-------------
- 3000000000
-(1 row)
-
-SELECT lo_close(fd) FROM lotest_stash_values;
- lo_close
-----------
- 0
-(1 row)
-
-END;
--- lo_unlink(lobjId oid) returns integer
--- return value appears to always be 1
-SELECT lo_unlink(loid) from lotest_stash_values;
- lo_unlink
------------
- 1
-(1 row)
-
-TRUNCATE lotest_stash_values;
-\set filename :abs_srcdir '/data/tenk.data'
-INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename');
-BEGIN;
-UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer));
--- verify length of large object
-SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values;
- lo_lseek
-----------
- 670800
-(1 row)
-
--- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block
--- edge case
-SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values;
- lo_lseek
-----------
- 2030
-(1 row)
-
--- this should get half of the value from page 0 and half from page 1 of the
--- large object
-SELECT loread(fd, 36) FROM lotest_stash_values;
- loread
------------------------------------------------------------------
- AAA\011FBAAAA\011VVVVxx\0122513\01132\0111\0111\0113\01113\0111
-(1 row)
-
-SELECT lo_tell(fd) FROM lotest_stash_values;
- lo_tell
----------
- 2066
-(1 row)
-
-SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values;
- lo_lseek
-----------
- 2040
-(1 row)
-
-SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values;
- lowrite
----------
- 16
-(1 row)
-
-SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values;
- lo_lseek
-----------
- 2030
-(1 row)
-
-SELECT loread(fd, 36) FROM lotest_stash_values;
- loread
------------------------------------------------------
- AAA\011FBAAAAabcdefghijklmnop1\0111\0113\01113\0111
-(1 row)
-
-SELECT lo_close(fd) FROM lotest_stash_values;
- lo_close
-----------
- 0
-(1 row)
-
-END;
-\set filename :abs_builddir '/results/lotest.txt'
-SELECT lo_export(loid, :'filename') FROM lotest_stash_values;
- lo_export
------------
- 1
-(1 row)
-
-\lo_import :filename
-\set newloid :LASTOID
--- just make sure \lo_export does not barf
-\set filename :abs_builddir '/results/lotest2.txt'
-\lo_export :newloid :filename
--- This is a hack to test that export/import are reversible
--- This uses knowledge about the inner workings of large object mechanism
--- which should not be used outside it. This makes it a HACK
-SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values)
-EXCEPT
-SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid;
- pageno | data
---------+------
-(0 rows)
-
-SELECT lo_unlink(loid) FROM lotest_stash_values;
- lo_unlink
------------
- 1
-(1 row)
-
-TRUNCATE lotest_stash_values;
-\lo_unlink :newloid
-\set filename :abs_builddir '/results/lotest.txt'
-\lo_import :filename
-\set newloid_1 :LASTOID
-SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2
-\gset
-SELECT fipshash(lo_get(:newloid_1)) = fipshash(lo_get(:newloid_2));
- ?column?
-----------
- t
-(1 row)
-
-SELECT lo_get(:newloid_1, 0, 20);
- lo_get
--------------------------------------------
- 8800\0110\0110\0110\0110\0110\0110\011800
-(1 row)
-
-SELECT lo_get(:newloid_1, 10, 20);
- lo_get
--------------------------------------------
- \0110\0110\0110\011800\011800\0113800\011
-(1 row)
-
-SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex'));
- lo_put
---------
-
-(1 row)
-
-SELECT lo_get(:newloid_1, 0, 20);
- lo_get
--------------------------------------------------
- 8800\011\257\257\257\2570\0110\0110\0110\011800
-(1 row)
-
-SELECT lo_put(:newloid_1, 4294967310, 'foo');
- lo_put
---------
-
-(1 row)
-
-SELECT lo_get(:newloid_1);
-ERROR: large object read request is too large
-SELECT lo_get(:newloid_1, 4294967294, 100);
- lo_get
----------------------------------------------------------------------
- \000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000foo
-(1 row)
-
-\lo_unlink :newloid_1
-\lo_unlink :newloid_2
--- This object is left in the database for pg_dump test purposes
-SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid
-\gset
-SET bytea_output TO hex;
-SELECT lo_get(:newloid);
- lo_get
-------------
- \xdeadbeef
-(1 row)
-
--- Create one more object that we leave behind for testing pg_dump/pg_upgrade;
--- this one intentionally has an OID in the system range
-SELECT lo_create(2121);
- lo_create
------------
- 2121
-(1 row)
-
-COMMENT ON LARGE OBJECT 2121 IS 'testing comments';
--- Test writes on large objects in read-only transactions
-START TRANSACTION READ ONLY;
--- INV_READ ... ok
-SELECT lo_open(2121, x'40000'::int);
- lo_open
----------
- 0
-(1 row)
-
--- INV_WRITE ... error
-SELECT lo_open(2121, x'20000'::int);
-ERROR: cannot execute lo_open(INV_WRITE) in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT lo_create(42);
-ERROR: cannot execute lo_create() in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT lo_creat(42);
-ERROR: cannot execute lo_creat() in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT lo_unlink(42);
-ERROR: cannot execute lo_unlink() in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT lowrite(42, 'x');
-ERROR: cannot execute lowrite() in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT lo_import(:'filename');
-ERROR: cannot execute lo_import() in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT lo_truncate(42, 0);
-ERROR: cannot execute lo_truncate() in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT lo_truncate64(42, 0);
-ERROR: cannot execute lo_truncate64() in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT lo_from_bytea(0, 'x');
-ERROR: cannot execute lo_from_bytea() in a read-only transaction
-ROLLBACK;
-START TRANSACTION READ ONLY;
-SELECT lo_put(42, 0, 'x');
-ERROR: cannot execute lo_put() in a read-only transaction
-ROLLBACK;
--- Clean up
-DROP TABLE lotest_stash_values;
-DROP ROLE regress_lo_user;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/with.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/with.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/with.out 2024-11-15 02:50:52.521996385 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/with.out 2024-11-15 02:59:18.181116966 +0000
@@ -1,3635 +1,2 @@
---
--- Tests for common table expressions (WITH query, ... SELECT ...)
---
--- Basic WITH
-WITH q1(x,y) AS (SELECT 1,2)
-SELECT * FROM q1, q1 AS q2;
- x | y | x | y
----+---+---+---
- 1 | 2 | 1 | 2
-(1 row)
-
--- Multiple uses are evaluated only once
-SELECT count(*) FROM (
- WITH q1(x) AS (SELECT random() FROM generate_series(1, 5))
- SELECT * FROM q1
- UNION
- SELECT * FROM q1
-) ss;
- count
--------
- 5
-(1 row)
-
--- WITH RECURSIVE
--- sum of 1..100
-WITH RECURSIVE t(n) AS (
- VALUES (1)
-UNION ALL
- SELECT n+1 FROM t WHERE n < 100
-)
-SELECT sum(n) FROM t;
- sum
-------
- 5050
-(1 row)
-
-WITH RECURSIVE t(n) AS (
- SELECT (VALUES(1))
-UNION ALL
- SELECT n+1 FROM t WHERE n < 5
-)
-SELECT * FROM t;
- n
----
- 1
- 2
- 3
- 4
- 5
-(5 rows)
-
--- UNION DISTINCT requires hashable type
-WITH RECURSIVE t(n) AS (
- VALUES ('01'::varbit)
-UNION
- SELECT n || '10'::varbit FROM t WHERE n < '100'::varbit
-)
-SELECT n FROM t;
-ERROR: could not implement recursive UNION
-DETAIL: All column datatypes must be hashable.
--- recursive view
-CREATE RECURSIVE VIEW nums (n) AS
- VALUES (1)
-UNION ALL
- SELECT n+1 FROM nums WHERE n < 5;
-SELECT * FROM nums;
- n
----
- 1
- 2
- 3
- 4
- 5
-(5 rows)
-
-CREATE OR REPLACE RECURSIVE VIEW nums (n) AS
- VALUES (1)
-UNION ALL
- SELECT n+1 FROM nums WHERE n < 6;
-SELECT * FROM nums;
- n
----
- 1
- 2
- 3
- 4
- 5
- 6
-(6 rows)
-
--- This is an infinite loop with UNION ALL, but not with UNION
-WITH RECURSIVE t(n) AS (
- SELECT 1
-UNION
- SELECT 10-n FROM t)
-SELECT * FROM t;
- n
----
- 1
- 9
-(2 rows)
-
--- This'd be an infinite loop, but outside query reads only as much as needed
-WITH RECURSIVE t(n) AS (
- VALUES (1)
-UNION ALL
- SELECT n+1 FROM t)
-SELECT * FROM t LIMIT 10;
- n
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
-(10 rows)
-
--- UNION case should have same property
-WITH RECURSIVE t(n) AS (
- SELECT 1
-UNION
- SELECT n+1 FROM t)
-SELECT * FROM t LIMIT 10;
- n
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
-(10 rows)
-
--- Test behavior with an unknown-type literal in the WITH
-WITH q AS (SELECT 'foo' AS x)
-SELECT x, pg_typeof(x) FROM q;
- x | pg_typeof
------+-----------
- foo | text
-(1 row)
-
-WITH RECURSIVE t(n) AS (
- SELECT 'foo'
-UNION ALL
- SELECT n || ' bar' FROM t WHERE length(n) < 20
-)
-SELECT n, pg_typeof(n) FROM t;
- n | pg_typeof
--------------------------+-----------
- foo | text
- foo bar | text
- foo bar bar | text
- foo bar bar bar | text
- foo bar bar bar bar | text
- foo bar bar bar bar bar | text
-(6 rows)
-
--- In a perfect world, this would work and resolve the literal as int ...
--- but for now, we have to be content with resolving to text too soon.
-WITH RECURSIVE t(n) AS (
- SELECT '7'
-UNION ALL
- SELECT n+1 FROM t WHERE n < 10
-)
-SELECT n, pg_typeof(n) FROM t;
-ERROR: operator does not exist: text + integer
-LINE 4: SELECT n+1 FROM t WHERE n < 10
- ^
-HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
--- Deeply nested WITH caused a list-munging problem in v13
--- Detection of cross-references and self-references
-WITH RECURSIVE w1(c1) AS
- (WITH w2(c2) AS
- (WITH w3(c3) AS
- (WITH w4(c4) AS
- (WITH w5(c5) AS
- (WITH RECURSIVE w6(c6) AS
- (WITH w6(c6) AS
- (WITH w8(c8) AS
- (SELECT 1)
- SELECT * FROM w8)
- SELECT * FROM w6)
- SELECT * FROM w6)
- SELECT * FROM w5)
- SELECT * FROM w4)
- SELECT * FROM w3)
- SELECT * FROM w2)
-SELECT * FROM w1;
- c1
-----
- 1
-(1 row)
-
--- Detection of invalid self-references
-WITH RECURSIVE outermost(x) AS (
- SELECT 1
- UNION (WITH innermost1 AS (
- SELECT 2
- UNION (WITH innermost2 AS (
- SELECT 3
- UNION (WITH innermost3 AS (
- SELECT 4
- UNION (WITH innermost4 AS (
- SELECT 5
- UNION (WITH innermost5 AS (
- SELECT 6
- UNION (WITH innermost6 AS
- (SELECT 7)
- SELECT * FROM innermost6))
- SELECT * FROM innermost5))
- SELECT * FROM innermost4))
- SELECT * FROM innermost3))
- SELECT * FROM innermost2))
- SELECT * FROM outermost
- UNION SELECT * FROM innermost1)
- )
- SELECT * FROM outermost ORDER BY 1;
- x
----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
-(7 rows)
-
---
--- Some examples with a tree
---
--- department structure represented here is as follows:
---
--- ROOT-+->A-+->B-+->C
--- | |
--- | +->D-+->F
--- +->E-+->G
-CREATE TEMP TABLE department (
- id INTEGER PRIMARY KEY, -- department ID
- parent_department INTEGER REFERENCES department, -- upper department ID
- name TEXT -- department name
-);
-INSERT INTO department VALUES (0, NULL, 'ROOT');
-INSERT INTO department VALUES (1, 0, 'A');
-INSERT INTO department VALUES (2, 1, 'B');
-INSERT INTO department VALUES (3, 2, 'C');
-INSERT INTO department VALUES (4, 2, 'D');
-INSERT INTO department VALUES (5, 0, 'E');
-INSERT INTO department VALUES (6, 4, 'F');
-INSERT INTO department VALUES (7, 5, 'G');
--- extract all departments under 'A'. Result should be A, B, C, D and F
-WITH RECURSIVE subdepartment AS
-(
- -- non recursive term
- SELECT name as root_name, * FROM department WHERE name = 'A'
- UNION ALL
- -- recursive term
- SELECT sd.root_name, d.* FROM department AS d, subdepartment AS sd
- WHERE d.parent_department = sd.id
-)
-SELECT * FROM subdepartment ORDER BY name;
- root_name | id | parent_department | name
------------+----+-------------------+------
- A | 1 | 0 | A
- A | 2 | 1 | B
- A | 3 | 2 | C
- A | 4 | 2 | D
- A | 6 | 4 | F
-(5 rows)
-
--- extract all departments under 'A' with "level" number
-WITH RECURSIVE subdepartment(level, id, parent_department, name) AS
-(
- -- non recursive term
- SELECT 1, * FROM department WHERE name = 'A'
- UNION ALL
- -- recursive term
- SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd
- WHERE d.parent_department = sd.id
-)
-SELECT * FROM subdepartment ORDER BY name;
- level | id | parent_department | name
--------+----+-------------------+------
- 1 | 1 | 0 | A
- 2 | 2 | 1 | B
- 3 | 3 | 2 | C
- 3 | 4 | 2 | D
- 4 | 6 | 4 | F
-(5 rows)
-
--- extract all departments under 'A' with "level" number.
--- Only shows level 2 or more
-WITH RECURSIVE subdepartment(level, id, parent_department, name) AS
-(
- -- non recursive term
- SELECT 1, * FROM department WHERE name = 'A'
- UNION ALL
- -- recursive term
- SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd
- WHERE d.parent_department = sd.id
-)
-SELECT * FROM subdepartment WHERE level >= 2 ORDER BY name;
- level | id | parent_department | name
--------+----+-------------------+------
- 2 | 2 | 1 | B
- 3 | 3 | 2 | C
- 3 | 4 | 2 | D
- 4 | 6 | 4 | F
-(4 rows)
-
--- "RECURSIVE" is ignored if the query has no self-reference
-WITH RECURSIVE subdepartment AS
-(
- -- note lack of recursive UNION structure
- SELECT * FROM department WHERE name = 'A'
-)
-SELECT * FROM subdepartment ORDER BY name;
- id | parent_department | name
-----+-------------------+------
- 1 | 0 | A
-(1 row)
-
--- inside subqueries
-SELECT count(*) FROM (
- WITH RECURSIVE t(n) AS (
- SELECT 1 UNION ALL SELECT n + 1 FROM t WHERE n < 500
- )
- SELECT * FROM t) AS t WHERE n < (
- SELECT count(*) FROM (
- WITH RECURSIVE t(n) AS (
- SELECT 1 UNION ALL SELECT n + 1 FROM t WHERE n < 100
- )
- SELECT * FROM t WHERE n < 50000
- ) AS t WHERE n < 100);
- count
--------
- 98
-(1 row)
-
--- use same CTE twice at different subquery levels
-WITH q1(x,y) AS (
- SELECT hundred, sum(ten) FROM tenk1 GROUP BY hundred
- )
-SELECT count(*) FROM q1 WHERE y > (SELECT sum(y)/100 FROM q1 qsub);
- count
--------
- 50
-(1 row)
-
--- via a VIEW
-CREATE TEMPORARY VIEW vsubdepartment AS
- WITH RECURSIVE subdepartment AS
- (
- -- non recursive term
- SELECT * FROM department WHERE name = 'A'
- UNION ALL
- -- recursive term
- SELECT d.* FROM department AS d, subdepartment AS sd
- WHERE d.parent_department = sd.id
- )
- SELECT * FROM subdepartment;
-SELECT * FROM vsubdepartment ORDER BY name;
- id | parent_department | name
-----+-------------------+------
- 1 | 0 | A
- 2 | 1 | B
- 3 | 2 | C
- 4 | 2 | D
- 6 | 4 | F
-(5 rows)
-
--- Check reverse listing
-SELECT pg_get_viewdef('vsubdepartment'::regclass);
- pg_get_viewdef
------------------------------------------------
- WITH RECURSIVE subdepartment AS ( +
- SELECT department.id, +
- department.parent_department, +
- department.name +
- FROM department +
- WHERE (department.name = 'A'::text)+
- UNION ALL +
- SELECT d.id, +
- d.parent_department, +
- d.name +
- FROM department d, +
- subdepartment sd +
- WHERE (d.parent_department = sd.id)+
- ) +
- SELECT id, +
- parent_department, +
- name +
- FROM subdepartment;
-(1 row)
-
-SELECT pg_get_viewdef('vsubdepartment'::regclass, true);
- pg_get_viewdef
----------------------------------------------
- WITH RECURSIVE subdepartment AS ( +
- SELECT department.id, +
- department.parent_department, +
- department.name +
- FROM department +
- WHERE department.name = 'A'::text+
- UNION ALL +
- SELECT d.id, +
- d.parent_department, +
- d.name +
- FROM department d, +
- subdepartment sd +
- WHERE d.parent_department = sd.id+
- ) +
- SELECT id, +
- parent_department, +
- name +
- FROM subdepartment;
-(1 row)
-
--- Another reverse-listing example
-CREATE VIEW sums_1_100 AS
-WITH RECURSIVE t(n) AS (
- VALUES (1)
-UNION ALL
- SELECT n+1 FROM t WHERE n < 100
-)
-SELECT sum(n) FROM t;
-\d+ sums_1_100
- View "public.sums_1_100"
- Column | Type | Collation | Nullable | Default | Storage | Description
---------+--------+-----------+----------+---------+---------+-------------
- sum | bigint | | | | plain |
-View definition:
- WITH RECURSIVE t(n) AS (
- VALUES (1)
- UNION ALL
- SELECT t_1.n + 1
- FROM t t_1
- WHERE t_1.n < 100
- )
- SELECT sum(n) AS sum
- FROM t;
-
--- corner case in which sub-WITH gets initialized first
-with recursive q as (
- select * from department
- union all
- (with x as (select * from q)
- select * from x)
- )
-select * from q limit 24;
- id | parent_department | name
-----+-------------------+------
- 0 | | ROOT
- 1 | 0 | A
- 2 | 1 | B
- 3 | 2 | C
- 4 | 2 | D
- 5 | 0 | E
- 6 | 4 | F
- 7 | 5 | G
- 0 | | ROOT
- 1 | 0 | A
- 2 | 1 | B
- 3 | 2 | C
- 4 | 2 | D
- 5 | 0 | E
- 6 | 4 | F
- 7 | 5 | G
- 0 | | ROOT
- 1 | 0 | A
- 2 | 1 | B
- 3 | 2 | C
- 4 | 2 | D
- 5 | 0 | E
- 6 | 4 | F
- 7 | 5 | G
-(24 rows)
-
-with recursive q as (
- select * from department
- union all
- (with recursive x as (
- select * from department
- union all
- (select * from q union all select * from x)
- )
- select * from x)
- )
-select * from q limit 32;
- id | parent_department | name
-----+-------------------+------
- 0 | | ROOT
- 1 | 0 | A
- 2 | 1 | B
- 3 | 2 | C
- 4 | 2 | D
- 5 | 0 | E
- 6 | 4 | F
- 7 | 5 | G
- 0 | | ROOT
- 1 | 0 | A
- 2 | 1 | B
- 3 | 2 | C
- 4 | 2 | D
- 5 | 0 | E
- 6 | 4 | F
- 7 | 5 | G
- 0 | | ROOT
- 1 | 0 | A
- 2 | 1 | B
- 3 | 2 | C
- 4 | 2 | D
- 5 | 0 | E
- 6 | 4 | F
- 7 | 5 | G
- 0 | | ROOT
- 1 | 0 | A
- 2 | 1 | B
- 3 | 2 | C
- 4 | 2 | D
- 5 | 0 | E
- 6 | 4 | F
- 7 | 5 | G
-(32 rows)
-
--- recursive term has sub-UNION
-WITH RECURSIVE t(i,j) AS (
- VALUES (1,2)
- UNION ALL
- SELECT t2.i, t.j+1 FROM
- (SELECT 2 AS i UNION ALL SELECT 3 AS i) AS t2
- JOIN t ON (t2.i = t.i+1))
- SELECT * FROM t;
- i | j
----+---
- 1 | 2
- 2 | 3
- 3 | 4
-(3 rows)
-
---
--- different tree example
---
-CREATE TEMPORARY TABLE tree(
- id INTEGER PRIMARY KEY,
- parent_id INTEGER REFERENCES tree(id)
-);
-INSERT INTO tree
-VALUES (1, NULL), (2, 1), (3,1), (4,2), (5,2), (6,2), (7,3), (8,3),
- (9,4), (10,4), (11,7), (12,7), (13,7), (14, 9), (15,11), (16,11);
---
--- get all paths from "second level" nodes to leaf nodes
---
-WITH RECURSIVE t(id, path) AS (
- VALUES(1,ARRAY[]::integer[])
-UNION ALL
- SELECT tree.id, t.path || tree.id
- FROM tree JOIN t ON (tree.parent_id = t.id)
-)
-SELECT t1.*, t2.* FROM t AS t1 JOIN t AS t2 ON
- (t1.path[1] = t2.path[1] AND
- array_upper(t1.path,1) = 1 AND
- array_upper(t2.path,1) > 1)
- ORDER BY t1.id, t2.id;
- id | path | id | path
-----+------+----+-------------
- 2 | {2} | 4 | {2,4}
- 2 | {2} | 5 | {2,5}
- 2 | {2} | 6 | {2,6}
- 2 | {2} | 9 | {2,4,9}
- 2 | {2} | 10 | {2,4,10}
- 2 | {2} | 14 | {2,4,9,14}
- 3 | {3} | 7 | {3,7}
- 3 | {3} | 8 | {3,8}
- 3 | {3} | 11 | {3,7,11}
- 3 | {3} | 12 | {3,7,12}
- 3 | {3} | 13 | {3,7,13}
- 3 | {3} | 15 | {3,7,11,15}
- 3 | {3} | 16 | {3,7,11,16}
-(13 rows)
-
--- just count 'em
-WITH RECURSIVE t(id, path) AS (
- VALUES(1,ARRAY[]::integer[])
-UNION ALL
- SELECT tree.id, t.path || tree.id
- FROM tree JOIN t ON (tree.parent_id = t.id)
-)
-SELECT t1.id, count(t2.*) FROM t AS t1 JOIN t AS t2 ON
- (t1.path[1] = t2.path[1] AND
- array_upper(t1.path,1) = 1 AND
- array_upper(t2.path,1) > 1)
- GROUP BY t1.id
- ORDER BY t1.id;
- id | count
-----+-------
- 2 | 6
- 3 | 7
-(2 rows)
-
--- this variant tickled a whole-row-variable bug in 8.4devel
-WITH RECURSIVE t(id, path) AS (
- VALUES(1,ARRAY[]::integer[])
-UNION ALL
- SELECT tree.id, t.path || tree.id
- FROM tree JOIN t ON (tree.parent_id = t.id)
-)
-SELECT t1.id, t2.path, t2 FROM t AS t1 JOIN t AS t2 ON
-(t1.id=t2.id);
- id | path | t2
-----+-------------+--------------------
- 1 | {} | (1,{})
- 2 | {2} | (2,{2})
- 3 | {3} | (3,{3})
- 4 | {2,4} | (4,"{2,4}")
- 5 | {2,5} | (5,"{2,5}")
- 6 | {2,6} | (6,"{2,6}")
- 7 | {3,7} | (7,"{3,7}")
- 8 | {3,8} | (8,"{3,8}")
- 9 | {2,4,9} | (9,"{2,4,9}")
- 10 | {2,4,10} | (10,"{2,4,10}")
- 11 | {3,7,11} | (11,"{3,7,11}")
- 12 | {3,7,12} | (12,"{3,7,12}")
- 13 | {3,7,13} | (13,"{3,7,13}")
- 14 | {2,4,9,14} | (14,"{2,4,9,14}")
- 15 | {3,7,11,15} | (15,"{3,7,11,15}")
- 16 | {3,7,11,16} | (16,"{3,7,11,16}")
-(16 rows)
-
--- test that column statistics from a materialized CTE are available
--- to upper planner (otherwise, we'd get a stupider plan)
-explain (costs off)
-with x as materialized (select unique1 from tenk1 b)
-select count(*) from tenk1 a
- where unique1 in (select * from x);
- QUERY PLAN
-------------------------------------------------------------
- Aggregate
- CTE x
- -> Index Only Scan using tenk1_unique1 on tenk1 b
- -> Hash Semi Join
- Hash Cond: (a.unique1 = x.unique1)
- -> Index Only Scan using tenk1_unique1 on tenk1 a
- -> Hash
- -> CTE Scan on x
-(8 rows)
-
-explain (costs off)
-with x as materialized (insert into tenk1 default values returning unique1)
-select count(*) from tenk1 a
- where unique1 in (select * from x);
- QUERY PLAN
-------------------------------------------------------------
- Aggregate
- CTE x
- -> Insert on tenk1
- -> Result
- -> Nested Loop
- -> HashAggregate
- Group Key: x.unique1
- -> CTE Scan on x
- -> Index Only Scan using tenk1_unique1 on tenk1 a
- Index Cond: (unique1 = x.unique1)
-(10 rows)
-
--- test that pathkeys from a materialized CTE are propagated up to the
--- outer query
-explain (costs off)
-with x as materialized (select unique1 from tenk1 b order by unique1)
-select count(*) from tenk1 a
- where unique1 in (select * from x);
- QUERY PLAN
-------------------------------------------------------------
- Aggregate
- CTE x
- -> Index Only Scan using tenk1_unique1 on tenk1 b
- -> Merge Semi Join
- Merge Cond: (a.unique1 = x.unique1)
- -> Index Only Scan using tenk1_unique1 on tenk1 a
- -> CTE Scan on x
-(7 rows)
-
--- SEARCH clause
-create temp table graph0( f int, t int, label text );
-insert into graph0 values
- (1, 2, 'arc 1 -> 2'),
- (1, 3, 'arc 1 -> 3'),
- (2, 3, 'arc 2 -> 3'),
- (1, 4, 'arc 1 -> 4'),
- (4, 5, 'arc 4 -> 5');
-explain (verbose, costs off)
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t set seq
-select * from search_graph order by seq;
- QUERY PLAN
-----------------------------------------------------------------------------------------------
- Sort
- Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq
- Sort Key: search_graph.seq
- CTE search_graph
- -> Recursive Union
- -> Seq Scan on pg_temp.graph0 g
- Output: g.f, g.t, g.label, ARRAY[ROW(g.f, g.t)]
- -> Merge Join
- Output: g_1.f, g_1.t, g_1.label, array_cat(sg.seq, ARRAY[ROW(g_1.f, g_1.t)])
- Merge Cond: (g_1.f = sg.t)
- -> Sort
- Output: g_1.f, g_1.t, g_1.label
- Sort Key: g_1.f
- -> Seq Scan on pg_temp.graph0 g_1
- Output: g_1.f, g_1.t, g_1.label
- -> Sort
- Output: sg.seq, sg.t
- Sort Key: sg.t
- -> WorkTable Scan on search_graph sg
- Output: sg.seq, sg.t
- -> CTE Scan on search_graph
- Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq
-(22 rows)
-
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t set seq
-select * from search_graph order by seq;
- f | t | label | seq
----+---+------------+-------------------
- 1 | 2 | arc 1 -> 2 | {"(1,2)"}
- 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"}
- 1 | 3 | arc 1 -> 3 | {"(1,3)"}
- 1 | 4 | arc 1 -> 4 | {"(1,4)"}
- 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"}
- 2 | 3 | arc 2 -> 3 | {"(2,3)"}
- 4 | 5 | arc 4 -> 5 | {"(4,5)"}
-(7 rows)
-
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union distinct
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t set seq
-select * from search_graph order by seq;
- f | t | label | seq
----+---+------------+-------------------
- 1 | 2 | arc 1 -> 2 | {"(1,2)"}
- 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"}
- 1 | 3 | arc 1 -> 3 | {"(1,3)"}
- 1 | 4 | arc 1 -> 4 | {"(1,4)"}
- 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"}
- 2 | 3 | arc 2 -> 3 | {"(2,3)"}
- 4 | 5 | arc 4 -> 5 | {"(4,5)"}
-(7 rows)
-
-explain (verbose, costs off)
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search breadth first by f, t set seq
-select * from search_graph order by seq;
- QUERY PLAN
--------------------------------------------------------------------------------------------------
- Sort
- Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq
- Sort Key: search_graph.seq
- CTE search_graph
- -> Recursive Union
- -> Seq Scan on pg_temp.graph0 g
- Output: g.f, g.t, g.label, ROW('0'::bigint, g.f, g.t)
- -> Merge Join
- Output: g_1.f, g_1.t, g_1.label, ROW(int8inc((sg.seq)."*DEPTH*"), g_1.f, g_1.t)
- Merge Cond: (g_1.f = sg.t)
- -> Sort
- Output: g_1.f, g_1.t, g_1.label
- Sort Key: g_1.f
- -> Seq Scan on pg_temp.graph0 g_1
- Output: g_1.f, g_1.t, g_1.label
- -> Sort
- Output: sg.seq, sg.t
- Sort Key: sg.t
- -> WorkTable Scan on search_graph sg
- Output: sg.seq, sg.t
- -> CTE Scan on search_graph
- Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq
-(22 rows)
-
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search breadth first by f, t set seq
-select * from search_graph order by seq;
- f | t | label | seq
----+---+------------+---------
- 1 | 2 | arc 1 -> 2 | (0,1,2)
- 1 | 3 | arc 1 -> 3 | (0,1,3)
- 1 | 4 | arc 1 -> 4 | (0,1,4)
- 2 | 3 | arc 2 -> 3 | (0,2,3)
- 4 | 5 | arc 4 -> 5 | (0,4,5)
- 2 | 3 | arc 2 -> 3 | (1,2,3)
- 4 | 5 | arc 4 -> 5 | (1,4,5)
-(7 rows)
-
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union distinct
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search breadth first by f, t set seq
-select * from search_graph order by seq;
- f | t | label | seq
----+---+------------+---------
- 1 | 2 | arc 1 -> 2 | (0,1,2)
- 1 | 3 | arc 1 -> 3 | (0,1,3)
- 1 | 4 | arc 1 -> 4 | (0,1,4)
- 2 | 3 | arc 2 -> 3 | (0,2,3)
- 4 | 5 | arc 4 -> 5 | (0,4,5)
- 2 | 3 | arc 2 -> 3 | (1,2,3)
- 4 | 5 | arc 4 -> 5 | (1,4,5)
-(7 rows)
-
--- a constant initial value causes issues for EXPLAIN
-explain (verbose, costs off)
-with recursive test as (
- select 1 as x
- union all
- select x + 1
- from test
-) search depth first by x set y
-select * from test limit 5;
- QUERY PLAN
------------------------------------------------------------------------------------------
- Limit
- Output: test.x, test.y
- CTE test
- -> Recursive Union
- -> Result
- Output: 1, '{(1)}'::record[]
- -> WorkTable Scan on test test_1
- Output: (test_1.x + 1), array_cat(test_1.y, ARRAY[ROW((test_1.x + 1))])
- -> CTE Scan on test
- Output: test.x, test.y
-(10 rows)
-
-with recursive test as (
- select 1 as x
- union all
- select x + 1
- from test
-) search depth first by x set y
-select * from test limit 5;
- x | y
----+-----------------------
- 1 | {(1)}
- 2 | {(1),(2)}
- 3 | {(1),(2),(3)}
- 4 | {(1),(2),(3),(4)}
- 5 | {(1),(2),(3),(4),(5)}
-(5 rows)
-
-explain (verbose, costs off)
-with recursive test as (
- select 1 as x
- union all
- select x + 1
- from test
-) search breadth first by x set y
-select * from test limit 5;
- QUERY PLAN
---------------------------------------------------------------------------------------------
- Limit
- Output: test.x, test.y
- CTE test
- -> Recursive Union
- -> Result
- Output: 1, '(0,1)'::record
- -> WorkTable Scan on test test_1
- Output: (test_1.x + 1), ROW(int8inc((test_1.y)."*DEPTH*"), (test_1.x + 1))
- -> CTE Scan on test
- Output: test.x, test.y
-(10 rows)
-
-with recursive test as (
- select 1 as x
- union all
- select x + 1
- from test
-) search breadth first by x set y
-select * from test limit 5;
- x | y
----+-------
- 1 | (0,1)
- 2 | (1,2)
- 3 | (2,3)
- 4 | (3,4)
- 5 | (4,5)
-(5 rows)
-
--- various syntax errors
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search depth first by foo, tar set seq
-select * from search_graph;
-ERROR: search column "foo" not in WITH query column list
-LINE 7: ) search depth first by foo, tar set seq
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t set label
-select * from search_graph;
-ERROR: search sequence column name "label" already used in WITH query column list
-LINE 7: ) search depth first by f, t set label
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t, f set seq
-select * from search_graph;
-ERROR: search column "f" specified more than once
-LINE 7: ) search depth first by f, t, f set seq
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t set seq
-select * from search_graph order by seq;
-ERROR: with a SEARCH or CYCLE clause, the left side of the UNION must be a SELECT
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- (select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t)
-) search depth first by f, t set seq
-select * from search_graph order by seq;
-ERROR: with a SEARCH or CYCLE clause, the right side of the UNION must be a SELECT
--- check that we distinguish same CTE name used at different levels
--- (this case could be supported, perhaps, but it isn't today)
-with recursive x(col) as (
- select 1
- union
- (with x as (select * from x)
- select * from x)
-) search depth first by col set seq
-select * from x;
-ERROR: with a SEARCH or CYCLE clause, the recursive reference to WITH query "x" must be at the top level of its right-hand SELECT
--- test ruleutils and view expansion
-create temp view v_search as
-with recursive search_graph(f, t, label) as (
- select * from graph0 g
- union all
- select g.*
- from graph0 g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t set seq
-select f, t, label from search_graph;
-select pg_get_viewdef('v_search');
- pg_get_viewdef
-------------------------------------------------
- WITH RECURSIVE search_graph(f, t, label) AS (+
- SELECT g.f, +
- g.t, +
- g.label +
- FROM graph0 g +
- UNION ALL +
- SELECT g.f, +
- g.t, +
- g.label +
- FROM graph0 g, +
- search_graph sg +
- WHERE (g.f = sg.t) +
- ) SEARCH DEPTH FIRST BY f, t SET seq +
- SELECT f, +
- t, +
- label +
- FROM search_graph;
-(1 row)
-
-select * from v_search;
- f | t | label
----+---+------------
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 2 | 3 | arc 2 -> 3
- 1 | 4 | arc 1 -> 4
- 4 | 5 | arc 4 -> 5
- 2 | 3 | arc 2 -> 3
- 4 | 5 | arc 4 -> 5
-(7 rows)
-
---
--- test cycle detection
---
-create temp table graph( f int, t int, label text );
-insert into graph values
- (1, 2, 'arc 1 -> 2'),
- (1, 3, 'arc 1 -> 3'),
- (2, 3, 'arc 2 -> 3'),
- (1, 4, 'arc 1 -> 4'),
- (4, 5, 'arc 4 -> 5'),
- (5, 1, 'arc 5 -> 1');
-with recursive search_graph(f, t, label, is_cycle, path) as (
- select *, false, array[row(g.f, g.t)] from graph g
- union all
- select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t)
- from graph g, search_graph sg
- where g.f = sg.t and not is_cycle
-)
-select * from search_graph;
- f | t | label | is_cycle | path
----+---+------------+----------+-------------------------------------------
- 1 | 2 | arc 1 -> 2 | f | {"(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(1,3)"}
- 2 | 3 | arc 2 -> 3 | f | {"(2,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(1,4)"}
- 4 | 5 | arc 4 -> 5 | f | {"(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"}
- 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"}
-(25 rows)
-
--- UNION DISTINCT exercises row type hashing support
-with recursive search_graph(f, t, label, is_cycle, path) as (
- select *, false, array[row(g.f, g.t)] from graph g
- union distinct
- select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t)
- from graph g, search_graph sg
- where g.f = sg.t and not is_cycle
-)
-select * from search_graph;
- f | t | label | is_cycle | path
----+---+------------+----------+-------------------------------------------
- 1 | 2 | arc 1 -> 2 | f | {"(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(1,3)"}
- 2 | 3 | arc 2 -> 3 | f | {"(2,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(1,4)"}
- 4 | 5 | arc 4 -> 5 | f | {"(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"}
- 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"}
-(25 rows)
-
--- ordering by the path column has same effect as SEARCH DEPTH FIRST
-with recursive search_graph(f, t, label, is_cycle, path) as (
- select *, false, array[row(g.f, g.t)] from graph g
- union all
- select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t)
- from graph g, search_graph sg
- where g.f = sg.t and not is_cycle
-)
-select * from search_graph order by path;
- f | t | label | is_cycle | path
----+---+------------+----------+-------------------------------------------
- 1 | 2 | arc 1 -> 2 | f | {"(1,2)"}
- 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"}
- 1 | 3 | arc 1 -> 3 | f | {"(1,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(1,4)"}
- 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"}
- 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"}
- 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(2,3)"}
- 4 | 5 | arc 4 -> 5 | f | {"(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"}
- 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"}
- 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"}
- 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"}
- 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"}
- 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"}
- 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"}
-(25 rows)
-
--- CYCLE clause
-explain (verbose, costs off)
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set is_cycle using path
-select * from search_graph;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- CTE Scan on search_graph
- Output: search_graph.f, search_graph.t, search_graph.label, search_graph.is_cycle, search_graph.path
- CTE search_graph
- -> Recursive Union
- -> Seq Scan on pg_temp.graph g
- Output: g.f, g.t, g.label, false, ARRAY[ROW(g.f, g.t)]
- -> Merge Join
- Output: g_1.f, g_1.t, g_1.label, CASE WHEN (ROW(g_1.f, g_1.t) = ANY (sg.path)) THEN true ELSE false END, array_cat(sg.path, ARRAY[ROW(g_1.f, g_1.t)])
- Merge Cond: (g_1.f = sg.t)
- -> Sort
- Output: g_1.f, g_1.t, g_1.label
- Sort Key: g_1.f
- -> Seq Scan on pg_temp.graph g_1
- Output: g_1.f, g_1.t, g_1.label
- -> Sort
- Output: sg.path, sg.t
- Sort Key: sg.t
- -> WorkTable Scan on search_graph sg
- Output: sg.path, sg.t
- Filter: (NOT sg.is_cycle)
-(20 rows)
-
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set is_cycle using path
-select * from search_graph;
- f | t | label | is_cycle | path
----+---+------------+----------+-------------------------------------------
- 1 | 2 | arc 1 -> 2 | f | {"(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(1,3)"}
- 2 | 3 | arc 2 -> 3 | f | {"(2,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(1,4)"}
- 4 | 5 | arc 4 -> 5 | f | {"(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"}
- 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"}
-(25 rows)
-
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union distinct
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set is_cycle to 'Y' default 'N' using path
-select * from search_graph;
- f | t | label | is_cycle | path
----+---+------------+----------+-------------------------------------------
- 1 | 2 | arc 1 -> 2 | N | {"(1,2)"}
- 1 | 3 | arc 1 -> 3 | N | {"(1,3)"}
- 2 | 3 | arc 2 -> 3 | N | {"(2,3)"}
- 1 | 4 | arc 1 -> 4 | N | {"(1,4)"}
- 4 | 5 | arc 4 -> 5 | N | {"(4,5)"}
- 5 | 1 | arc 5 -> 1 | N | {"(5,1)"}
- 1 | 2 | arc 1 -> 2 | N | {"(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | N | {"(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | N | {"(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | N | {"(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | N | {"(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | N | {"(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | N | {"(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | N | {"(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | N | {"(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | N | {"(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | N | {"(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | N | {"(1,4)","(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | N | {"(1,4)","(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | N | {"(1,4)","(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | Y | {"(1,4)","(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | N | {"(4,5)","(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | Y | {"(4,5)","(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | Y | {"(5,1)","(1,4)","(4,5)","(5,1)"}
- 2 | 3 | arc 2 -> 3 | N | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"}
-(25 rows)
-
-explain (verbose, costs off)
-with recursive test as (
- select 0 as x
- union all
- select (x + 1) % 10
- from test
-) cycle x set is_cycle using path
-select * from test;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- CTE Scan on test
- Output: test.x, test.is_cycle, test.path
- CTE test
- -> Recursive Union
- -> Result
- Output: 0, false, '{(0)}'::record[]
- -> WorkTable Scan on test test_1
- Output: ((test_1.x + 1) % 10), CASE WHEN (ROW(((test_1.x + 1) % 10)) = ANY (test_1.path)) THEN true ELSE false END, array_cat(test_1.path, ARRAY[ROW(((test_1.x + 1) % 10))])
- Filter: (NOT test_1.is_cycle)
-(9 rows)
-
-with recursive test as (
- select 0 as x
- union all
- select (x + 1) % 10
- from test
-) cycle x set is_cycle using path
-select * from test;
- x | is_cycle | path
----+----------+-----------------------------------------------
- 0 | f | {(0)}
- 1 | f | {(0),(1)}
- 2 | f | {(0),(1),(2)}
- 3 | f | {(0),(1),(2),(3)}
- 4 | f | {(0),(1),(2),(3),(4)}
- 5 | f | {(0),(1),(2),(3),(4),(5)}
- 6 | f | {(0),(1),(2),(3),(4),(5),(6)}
- 7 | f | {(0),(1),(2),(3),(4),(5),(6),(7)}
- 8 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8)}
- 9 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9)}
- 0 | t | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(0)}
-(11 rows)
-
-with recursive test as (
- select 0 as x
- union all
- select (x + 1) % 10
- from test
- where not is_cycle -- redundant, but legal
-) cycle x set is_cycle using path
-select * from test;
- x | is_cycle | path
----+----------+-----------------------------------------------
- 0 | f | {(0)}
- 1 | f | {(0),(1)}
- 2 | f | {(0),(1),(2)}
- 3 | f | {(0),(1),(2),(3)}
- 4 | f | {(0),(1),(2),(3),(4)}
- 5 | f | {(0),(1),(2),(3),(4),(5)}
- 6 | f | {(0),(1),(2),(3),(4),(5),(6)}
- 7 | f | {(0),(1),(2),(3),(4),(5),(6),(7)}
- 8 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8)}
- 9 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9)}
- 0 | t | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(0)}
-(11 rows)
-
--- multiple CTEs
-with recursive
-graph(f, t, label) as (
- values (1, 2, 'arc 1 -> 2'),
- (1, 3, 'arc 1 -> 3'),
- (2, 3, 'arc 2 -> 3'),
- (1, 4, 'arc 1 -> 4'),
- (4, 5, 'arc 4 -> 5'),
- (5, 1, 'arc 5 -> 1')
-),
-search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set is_cycle to true default false using path
-select f, t, label from search_graph;
- f | t | label
----+---+------------
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 2 | 3 | arc 2 -> 3
- 1 | 4 | arc 1 -> 4
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 2 | 3 | arc 2 -> 3
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 1 | 4 | arc 1 -> 4
- 1 | 3 | arc 1 -> 3
- 1 | 2 | arc 1 -> 2
- 5 | 1 | arc 5 -> 1
- 1 | 4 | arc 1 -> 4
- 1 | 3 | arc 1 -> 3
- 1 | 2 | arc 1 -> 2
- 4 | 5 | arc 4 -> 5
- 2 | 3 | arc 2 -> 3
- 1 | 4 | arc 1 -> 4
- 1 | 3 | arc 1 -> 3
- 1 | 2 | arc 1 -> 2
- 4 | 5 | arc 4 -> 5
- 2 | 3 | arc 2 -> 3
- 5 | 1 | arc 5 -> 1
- 2 | 3 | arc 2 -> 3
-(25 rows)
-
--- star expansion
-with recursive a as (
- select 1 as b
- union all
- select * from a
-) cycle b set c using p
-select * from a;
- b | c | p
----+---+-----------
- 1 | f | {(1)}
- 1 | t | {(1),(1)}
-(2 rows)
-
--- search+cycle
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t set seq
- cycle f, t set is_cycle using path
-select * from search_graph;
- f | t | label | seq | is_cycle | path
----+---+------------+-------------------------------------------+----------+-------------------------------------------
- 1 | 2 | arc 1 -> 2 | {"(1,2)"} | f | {"(1,2)"}
- 1 | 3 | arc 1 -> 3 | {"(1,3)"} | f | {"(1,3)"}
- 2 | 3 | arc 2 -> 3 | {"(2,3)"} | f | {"(2,3)"}
- 1 | 4 | arc 1 -> 4 | {"(1,4)"} | f | {"(1,4)"}
- 4 | 5 | arc 4 -> 5 | {"(4,5)"} | f | {"(4,5)"}
- 5 | 1 | arc 5 -> 1 | {"(5,1)"} | f | {"(5,1)"}
- 1 | 2 | arc 1 -> 2 | {"(5,1)","(1,2)"} | f | {"(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | {"(5,1)","(1,3)"} | f | {"(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | {"(5,1)","(1,4)"} | f | {"(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"} | f | {"(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"} | f | {"(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | {"(4,5)","(5,1)"} | f | {"(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | {"(4,5)","(5,1)","(1,2)"} | f | {"(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | {"(4,5)","(5,1)","(1,3)"} | f | {"(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | {"(4,5)","(5,1)","(1,4)"} | f | {"(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | {"(5,1)","(1,2)","(2,3)"} | f | {"(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | {"(5,1)","(1,4)","(4,5)"} | f | {"(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | {"(1,4)","(4,5)","(5,1)"} | f | {"(1,4)","(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | {"(1,4)","(4,5)","(5,1)","(1,2)"} | f | {"(1,4)","(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,3)"} | f | {"(1,4)","(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | {"(1,4)","(4,5)","(5,1)","(1,4)"} | t | {"(1,4)","(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | {"(4,5)","(5,1)","(1,2)","(2,3)"} | f | {"(4,5)","(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | {"(4,5)","(5,1)","(1,4)","(4,5)"} | t | {"(4,5)","(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | {"(5,1)","(1,4)","(4,5)","(5,1)"} | t | {"(5,1)","(1,4)","(4,5)","(5,1)"}
- 2 | 3 | arc 2 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"}
-(25 rows)
-
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) search breadth first by f, t set seq
- cycle f, t set is_cycle using path
-select * from search_graph;
- f | t | label | seq | is_cycle | path
----+---+------------+---------+----------+-------------------------------------------
- 1 | 2 | arc 1 -> 2 | (0,1,2) | f | {"(1,2)"}
- 1 | 3 | arc 1 -> 3 | (0,1,3) | f | {"(1,3)"}
- 2 | 3 | arc 2 -> 3 | (0,2,3) | f | {"(2,3)"}
- 1 | 4 | arc 1 -> 4 | (0,1,4) | f | {"(1,4)"}
- 4 | 5 | arc 4 -> 5 | (0,4,5) | f | {"(4,5)"}
- 5 | 1 | arc 5 -> 1 | (0,5,1) | f | {"(5,1)"}
- 1 | 2 | arc 1 -> 2 | (1,1,2) | f | {"(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | (1,1,3) | f | {"(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | (1,1,4) | f | {"(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | (1,2,3) | f | {"(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | (1,4,5) | f | {"(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | (1,5,1) | f | {"(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | (2,1,2) | f | {"(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | (2,1,3) | f | {"(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | (2,1,4) | f | {"(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | (2,2,3) | f | {"(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | (2,4,5) | f | {"(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | (2,5,1) | f | {"(1,4)","(4,5)","(5,1)"}
- 1 | 2 | arc 1 -> 2 | (3,1,2) | f | {"(1,4)","(4,5)","(5,1)","(1,2)"}
- 1 | 3 | arc 1 -> 3 | (3,1,3) | f | {"(1,4)","(4,5)","(5,1)","(1,3)"}
- 1 | 4 | arc 1 -> 4 | (3,1,4) | t | {"(1,4)","(4,5)","(5,1)","(1,4)"}
- 2 | 3 | arc 2 -> 3 | (3,2,3) | f | {"(4,5)","(5,1)","(1,2)","(2,3)"}
- 4 | 5 | arc 4 -> 5 | (3,4,5) | t | {"(4,5)","(5,1)","(1,4)","(4,5)"}
- 5 | 1 | arc 5 -> 1 | (3,5,1) | t | {"(5,1)","(1,4)","(4,5)","(5,1)"}
- 2 | 3 | arc 2 -> 3 | (4,2,3) | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"}
-(25 rows)
-
--- various syntax errors
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle foo, tar set is_cycle using path
-select * from search_graph;
-ERROR: cycle column "foo" not in WITH query column list
-LINE 7: ) cycle foo, tar set is_cycle using path
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set is_cycle to true default 55 using path
-select * from search_graph;
-ERROR: CYCLE types boolean and integer cannot be matched
-LINE 7: ) cycle f, t set is_cycle to true default 55 using path
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set is_cycle to point '(1,1)' default point '(0,0)' using path
-select * from search_graph;
-ERROR: could not identify an equality operator for type point
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set label to true default false using path
-select * from search_graph;
-ERROR: cycle mark column name "label" already used in WITH query column list
-LINE 7: ) cycle f, t set label to true default false using path
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set is_cycle to true default false using label
-select * from search_graph;
-ERROR: cycle path column name "label" already used in WITH query column list
-LINE 7: ) cycle f, t set is_cycle to true default false using label
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set foo to true default false using foo
-select * from search_graph;
-ERROR: cycle mark column name and cycle path column name are the same
-LINE 7: ) cycle f, t set foo to true default false using foo
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t, f set is_cycle to true default false using path
-select * from search_graph;
-ERROR: cycle column "f" specified more than once
-LINE 7: ) cycle f, t, f set is_cycle to true default false using pat...
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t set foo
- cycle f, t set foo to true default false using path
-select * from search_graph;
-ERROR: search sequence column name and cycle mark column name are the same
-LINE 7: ) search depth first by f, t set foo
- ^
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) search depth first by f, t set foo
- cycle f, t set is_cycle to true default false using foo
-select * from search_graph;
-ERROR: search sequence column name and cycle path column name are the same
-LINE 7: ) search depth first by f, t set foo
- ^
--- test ruleutils and view expansion
-create temp view v_cycle1 as
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set is_cycle using path
-select f, t, label from search_graph;
-create temp view v_cycle2 as
-with recursive search_graph(f, t, label) as (
- select * from graph g
- union all
- select g.*
- from graph g, search_graph sg
- where g.f = sg.t
-) cycle f, t set is_cycle to 'Y' default 'N' using path
-select f, t, label from search_graph;
-select pg_get_viewdef('v_cycle1');
- pg_get_viewdef
-------------------------------------------------
- WITH RECURSIVE search_graph(f, t, label) AS (+
- SELECT g.f, +
- g.t, +
- g.label +
- FROM graph g +
- UNION ALL +
- SELECT g.f, +
- g.t, +
- g.label +
- FROM graph g, +
- search_graph sg +
- WHERE (g.f = sg.t) +
- ) CYCLE f, t SET is_cycle USING path +
- SELECT f, +
- t, +
- label +
- FROM search_graph;
-(1 row)
-
-select pg_get_viewdef('v_cycle2');
- pg_get_viewdef
------------------------------------------------------------------------------
- WITH RECURSIVE search_graph(f, t, label) AS ( +
- SELECT g.f, +
- g.t, +
- g.label +
- FROM graph g +
- UNION ALL +
- SELECT g.f, +
- g.t, +
- g.label +
- FROM graph g, +
- search_graph sg +
- WHERE (g.f = sg.t) +
- ) CYCLE f, t SET is_cycle TO 'Y'::text DEFAULT 'N'::text USING path+
- SELECT f, +
- t, +
- label +
- FROM search_graph;
-(1 row)
-
-select * from v_cycle1;
- f | t | label
----+---+------------
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 2 | 3 | arc 2 -> 3
- 1 | 4 | arc 1 -> 4
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 1 | 4 | arc 1 -> 4
- 2 | 3 | arc 2 -> 3
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 1 | 4 | arc 1 -> 4
- 2 | 3 | arc 2 -> 3
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 1 | 4 | arc 1 -> 4
- 2 | 3 | arc 2 -> 3
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 2 | 3 | arc 2 -> 3
-(25 rows)
-
-select * from v_cycle2;
- f | t | label
----+---+------------
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 2 | 3 | arc 2 -> 3
- 1 | 4 | arc 1 -> 4
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 1 | 4 | arc 1 -> 4
- 2 | 3 | arc 2 -> 3
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 1 | 4 | arc 1 -> 4
- 2 | 3 | arc 2 -> 3
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 1 | 2 | arc 1 -> 2
- 1 | 3 | arc 1 -> 3
- 1 | 4 | arc 1 -> 4
- 2 | 3 | arc 2 -> 3
- 4 | 5 | arc 4 -> 5
- 5 | 1 | arc 5 -> 1
- 2 | 3 | arc 2 -> 3
-(25 rows)
-
---
--- test multiple WITH queries
---
-WITH RECURSIVE
- y (id) AS (VALUES (1)),
- x (id) AS (SELECT * FROM y UNION ALL SELECT id+1 FROM x WHERE id < 5)
-SELECT * FROM x;
- id
-----
- 1
- 2
- 3
- 4
- 5
-(5 rows)
-
--- forward reference OK
-WITH RECURSIVE
- x(id) AS (SELECT * FROM y UNION ALL SELECT id+1 FROM x WHERE id < 5),
- y(id) AS (values (1))
- SELECT * FROM x;
- id
-----
- 1
- 2
- 3
- 4
- 5
-(5 rows)
-
-WITH RECURSIVE
- x(id) AS
- (VALUES (1) UNION ALL SELECT id+1 FROM x WHERE id < 5),
- y(id) AS
- (VALUES (1) UNION ALL SELECT id+1 FROM y WHERE id < 10)
- SELECT y.*, x.* FROM y LEFT JOIN x USING (id);
- id | id
-----+----
- 1 | 1
- 2 | 2
- 3 | 3
- 4 | 4
- 5 | 5
- 6 |
- 7 |
- 8 |
- 9 |
- 10 |
-(10 rows)
-
-WITH RECURSIVE
- x(id) AS
- (VALUES (1) UNION ALL SELECT id+1 FROM x WHERE id < 5),
- y(id) AS
- (VALUES (1) UNION ALL SELECT id+1 FROM x WHERE id < 10)
- SELECT y.*, x.* FROM y LEFT JOIN x USING (id);
- id | id
-----+----
- 1 | 1
- 2 | 2
- 3 | 3
- 4 | 4
- 5 | 5
- 6 |
-(6 rows)
-
-WITH RECURSIVE
- x(id) AS
- (SELECT 1 UNION ALL SELECT id+1 FROM x WHERE id < 3 ),
- y(id) AS
- (SELECT * FROM x UNION ALL SELECT * FROM x),
- z(id) AS
- (SELECT * FROM x UNION ALL SELECT id+1 FROM z WHERE id < 10)
- SELECT * FROM z;
- id
-----
- 1
- 2
- 3
- 2
- 3
- 4
- 3
- 4
- 5
- 4
- 5
- 6
- 5
- 6
- 7
- 6
- 7
- 8
- 7
- 8
- 9
- 8
- 9
- 10
- 9
- 10
- 10
-(27 rows)
-
-WITH RECURSIVE
- x(id) AS
- (SELECT 1 UNION ALL SELECT id+1 FROM x WHERE id < 3 ),
- y(id) AS
- (SELECT * FROM x UNION ALL SELECT * FROM x),
- z(id) AS
- (SELECT * FROM y UNION ALL SELECT id+1 FROM z WHERE id < 10)
- SELECT * FROM z;
- id
-----
- 1
- 2
- 3
- 1
- 2
- 3
- 2
- 3
- 4
- 2
- 3
- 4
- 3
- 4
- 5
- 3
- 4
- 5
- 4
- 5
- 6
- 4
- 5
- 6
- 5
- 6
- 7
- 5
- 6
- 7
- 6
- 7
- 8
- 6
- 7
- 8
- 7
- 8
- 9
- 7
- 8
- 9
- 8
- 9
- 10
- 8
- 9
- 10
- 9
- 10
- 9
- 10
- 10
- 10
-(54 rows)
-
---
--- Test WITH attached to a data-modifying statement
---
-CREATE TEMPORARY TABLE y (a INTEGER);
-INSERT INTO y SELECT generate_series(1, 10);
-WITH t AS (
- SELECT a FROM y
-)
-INSERT INTO y
-SELECT a+20 FROM t RETURNING *;
- a
-----
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
-(10 rows)
-
-SELECT * FROM y;
- a
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
-(20 rows)
-
-WITH t AS (
- SELECT a FROM y
-)
-UPDATE y SET a = y.a-10 FROM t WHERE y.a > 20 AND t.a = y.a RETURNING y.a;
- a
-----
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
-(10 rows)
-
-SELECT * FROM y;
- a
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
-(20 rows)
-
-WITH RECURSIVE t(a) AS (
- SELECT 11
- UNION ALL
- SELECT a+1 FROM t WHERE a < 50
-)
-DELETE FROM y USING t WHERE t.a = y.a RETURNING y.a;
- a
-----
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
-(10 rows)
-
-SELECT * FROM y;
- a
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
-(10 rows)
-
-DROP TABLE y;
---
--- error cases
---
-WITH x(n, b) AS (SELECT 1)
-SELECT * FROM x;
-ERROR: WITH query "x" has 1 columns available but 2 columns specified
-LINE 1: WITH x(n, b) AS (SELECT 1)
- ^
--- INTERSECT
-WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT SELECT n+1 FROM x)
- SELECT * FROM x;
-ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term
-LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT SELECT n+1 FROM x...
- ^
-WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT ALL SELECT n+1 FROM x)
- SELECT * FROM x;
-ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term
-LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT ALL SELECT n+1 FR...
- ^
--- EXCEPT
-WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT SELECT n+1 FROM x)
- SELECT * FROM x;
-ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term
-LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT SELECT n+1 FROM x)
- ^
-WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT ALL SELECT n+1 FROM x)
- SELECT * FROM x;
-ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term
-LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT ALL SELECT n+1 FROM ...
- ^
--- no non-recursive term
-WITH RECURSIVE x(n) AS (SELECT n FROM x)
- SELECT * FROM x;
-ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term
-LINE 1: WITH RECURSIVE x(n) AS (SELECT n FROM x)
- ^
--- recursive term in the left hand side (strictly speaking, should allow this)
-WITH RECURSIVE x(n) AS (SELECT n FROM x UNION ALL SELECT 1)
- SELECT * FROM x;
-ERROR: recursive reference to query "x" must not appear within its non-recursive term
-LINE 1: WITH RECURSIVE x(n) AS (SELECT n FROM x UNION ALL SELECT 1)
- ^
--- allow this, because we historically have
-WITH RECURSIVE x(n) AS (
- WITH x1 AS (SELECT 1 AS n)
- SELECT 0
- UNION
- SELECT * FROM x1)
- SELECT * FROM x;
- n
----
- 0
- 1
-(2 rows)
-
--- but this should be rejected
-WITH RECURSIVE x(n) AS (
- WITH x1 AS (SELECT 1 FROM x)
- SELECT 0
- UNION
- SELECT * FROM x1)
- SELECT * FROM x;
-ERROR: recursive reference to query "x" must not appear within a subquery
-LINE 2: WITH x1 AS (SELECT 1 FROM x)
- ^
--- and this too
-WITH RECURSIVE x(n) AS (
- (WITH x1 AS (SELECT 1 FROM x) SELECT * FROM x1)
- UNION
- SELECT 0)
- SELECT * FROM x;
-ERROR: recursive reference to query "x" must not appear within its non-recursive term
-LINE 2: (WITH x1 AS (SELECT 1 FROM x) SELECT * FROM x1)
- ^
--- and this
-WITH RECURSIVE x(n) AS (
- SELECT 0 UNION SELECT 1
- ORDER BY (SELECT n FROM x))
- SELECT * FROM x;
-ERROR: ORDER BY in a recursive query is not implemented
-LINE 3: ORDER BY (SELECT n FROM x))
- ^
-CREATE TEMPORARY TABLE y (a INTEGER);
-INSERT INTO y SELECT generate_series(1, 10);
--- LEFT JOIN
-WITH RECURSIVE x(n) AS (SELECT a FROM y WHERE a = 1
- UNION ALL
- SELECT x.n+1 FROM y LEFT JOIN x ON x.n = y.a WHERE n < 10)
-SELECT * FROM x;
-ERROR: recursive reference to query "x" must not appear within an outer join
-LINE 3: SELECT x.n+1 FROM y LEFT JOIN x ON x.n = y.a WHERE n < 10)
- ^
--- RIGHT JOIN
-WITH RECURSIVE x(n) AS (SELECT a FROM y WHERE a = 1
- UNION ALL
- SELECT x.n+1 FROM x RIGHT JOIN y ON x.n = y.a WHERE n < 10)
-SELECT * FROM x;
-ERROR: recursive reference to query "x" must not appear within an outer join
-LINE 3: SELECT x.n+1 FROM x RIGHT JOIN y ON x.n = y.a WHERE n < 10)
- ^
--- FULL JOIN
-WITH RECURSIVE x(n) AS (SELECT a FROM y WHERE a = 1
- UNION ALL
- SELECT x.n+1 FROM x FULL JOIN y ON x.n = y.a WHERE n < 10)
-SELECT * FROM x;
-ERROR: recursive reference to query "x" must not appear within an outer join
-LINE 3: SELECT x.n+1 FROM x FULL JOIN y ON x.n = y.a WHERE n < 10)
- ^
--- subquery
-WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x
- WHERE n IN (SELECT * FROM x))
- SELECT * FROM x;
-ERROR: recursive reference to query "x" must not appear within a subquery
-LINE 2: WHERE n IN (SELECT * FROM x))
- ^
--- aggregate functions
-WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT count(*) FROM x)
- SELECT * FROM x;
-ERROR: aggregate functions are not allowed in a recursive query's recursive term
-LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT count(*) F...
- ^
-WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT sum(n) FROM x)
- SELECT * FROM x;
-ERROR: aggregate functions are not allowed in a recursive query's recursive term
-LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT sum(n) FRO...
- ^
--- ORDER BY
-WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x ORDER BY 1)
- SELECT * FROM x;
-ERROR: ORDER BY in a recursive query is not implemented
-LINE 1: ...VE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x ORDER BY 1)
- ^
--- LIMIT/OFFSET
-WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x LIMIT 10 OFFSET 1)
- SELECT * FROM x;
-ERROR: OFFSET in a recursive query is not implemented
-LINE 1: ... AS (SELECT 1 UNION ALL SELECT n+1 FROM x LIMIT 10 OFFSET 1)
- ^
--- FOR UPDATE
-WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x FOR UPDATE)
- SELECT * FROM x;
-ERROR: FOR UPDATE/SHARE in a recursive query is not implemented
--- target list has a recursive query name
-WITH RECURSIVE x(id) AS (values (1)
- UNION ALL
- SELECT (SELECT * FROM x) FROM x WHERE id < 5
-) SELECT * FROM x;
-ERROR: recursive reference to query "x" must not appear within a subquery
-LINE 3: SELECT (SELECT * FROM x) FROM x WHERE id < 5
- ^
--- mutual recursive query (not implemented)
-WITH RECURSIVE
- x (id) AS (SELECT 1 UNION ALL SELECT id+1 FROM y WHERE id < 5),
- y (id) AS (SELECT 1 UNION ALL SELECT id+1 FROM x WHERE id < 5)
-SELECT * FROM x;
-ERROR: mutual recursion between WITH items is not implemented
-LINE 2: x (id) AS (SELECT 1 UNION ALL SELECT id+1 FROM y WHERE id ...
- ^
--- non-linear recursion is not allowed
-WITH RECURSIVE foo(i) AS
- (values (1)
- UNION ALL
- (SELECT i+1 FROM foo WHERE i < 10
- UNION ALL
- SELECT i+1 FROM foo WHERE i < 5)
-) SELECT * FROM foo;
-ERROR: recursive reference to query "foo" must not appear more than once
-LINE 6: SELECT i+1 FROM foo WHERE i < 5)
- ^
-WITH RECURSIVE foo(i) AS
- (values (1)
- UNION ALL
- SELECT * FROM
- (SELECT i+1 FROM foo WHERE i < 10
- UNION ALL
- SELECT i+1 FROM foo WHERE i < 5) AS t
-) SELECT * FROM foo;
-ERROR: recursive reference to query "foo" must not appear more than once
-LINE 7: SELECT i+1 FROM foo WHERE i < 5) AS t
- ^
-WITH RECURSIVE foo(i) AS
- (values (1)
- UNION ALL
- (SELECT i+1 FROM foo WHERE i < 10
- EXCEPT
- SELECT i+1 FROM foo WHERE i < 5)
-) SELECT * FROM foo;
-ERROR: recursive reference to query "foo" must not appear within EXCEPT
-LINE 6: SELECT i+1 FROM foo WHERE i < 5)
- ^
-WITH RECURSIVE foo(i) AS
- (values (1)
- UNION ALL
- (SELECT i+1 FROM foo WHERE i < 10
- INTERSECT
- SELECT i+1 FROM foo WHERE i < 5)
-) SELECT * FROM foo;
-ERROR: recursive reference to query "foo" must not appear more than once
-LINE 6: SELECT i+1 FROM foo WHERE i < 5)
- ^
--- Wrong type induced from non-recursive term
-WITH RECURSIVE foo(i) AS
- (SELECT i FROM (VALUES(1),(2)) t(i)
- UNION ALL
- SELECT (i+1)::numeric(10,0) FROM foo WHERE i < 10)
-SELECT * FROM foo;
-ERROR: recursive query "foo" column 1 has type integer in non-recursive term but type numeric overall
-LINE 2: (SELECT i FROM (VALUES(1),(2)) t(i)
- ^
-HINT: Cast the output of the non-recursive term to the correct type.
--- rejects different typmod, too (should we allow this?)
-WITH RECURSIVE foo(i) AS
- (SELECT i::numeric(3,0) FROM (VALUES(1),(2)) t(i)
- UNION ALL
- SELECT (i+1)::numeric(10,0) FROM foo WHERE i < 10)
-SELECT * FROM foo;
-ERROR: recursive query "foo" column 1 has type numeric(3,0) in non-recursive term but type numeric overall
-LINE 2: (SELECT i::numeric(3,0) FROM (VALUES(1),(2)) t(i)
- ^
-HINT: Cast the output of the non-recursive term to the correct type.
--- disallow OLD/NEW reference in CTE
-CREATE TEMPORARY TABLE x (n integer);
-CREATE RULE r2 AS ON UPDATE TO x DO INSTEAD
- WITH t AS (SELECT OLD.*) UPDATE y SET a = t.n FROM t;
-ERROR: cannot refer to OLD within WITH query
---
--- test for bug #4902
---
-with cte(foo) as ( values(42) ) values((select foo from cte));
- column1
----------
- 42
-(1 row)
-
-with cte(foo) as ( select 42 ) select * from ((select foo from cte)) q;
- foo
------
- 42
-(1 row)
-
--- test CTE referencing an outer-level variable (to see that changed-parameter
--- signaling still works properly after fixing this bug)
-select ( with cte(foo) as ( values(f1) )
- select (select foo from cte) )
-from int4_tbl;
- foo
--------------
- 0
- 123456
- -123456
- 2147483647
- -2147483647
-(5 rows)
-
-select ( with cte(foo) as ( values(f1) )
- values((select foo from cte)) )
-from int4_tbl;
- column1
--------------
- 0
- 123456
- -123456
- 2147483647
- -2147483647
-(5 rows)
-
---
--- test for nested-recursive-WITH bug
---
-WITH RECURSIVE t(j) AS (
- WITH RECURSIVE s(i) AS (
- VALUES (1)
- UNION ALL
- SELECT i+1 FROM s WHERE i < 10
- )
- SELECT i FROM s
- UNION ALL
- SELECT j+1 FROM t WHERE j < 10
-)
-SELECT * FROM t;
- j
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 5
- 6
- 7
- 8
- 9
- 10
- 6
- 7
- 8
- 9
- 10
- 7
- 8
- 9
- 10
- 8
- 9
- 10
- 9
- 10
- 10
-(55 rows)
-
---
--- test WITH attached to intermediate-level set operation
---
-WITH outermost(x) AS (
- SELECT 1
- UNION (WITH innermost as (SELECT 2)
- SELECT * FROM innermost
- UNION SELECT 3)
-)
-SELECT * FROM outermost ORDER BY 1;
- x
----
- 1
- 2
- 3
-(3 rows)
-
-WITH outermost(x) AS (
- SELECT 1
- UNION (WITH innermost as (SELECT 2)
- SELECT * FROM outermost -- fail
- UNION SELECT * FROM innermost)
-)
-SELECT * FROM outermost ORDER BY 1;
-ERROR: relation "outermost" does not exist
-LINE 4: SELECT * FROM outermost -- fail
- ^
-DETAIL: There is a WITH item named "outermost", but it cannot be referenced from this part of the query.
-HINT: Use WITH RECURSIVE, or re-order the WITH items to remove forward references.
-WITH RECURSIVE outermost(x) AS (
- SELECT 1
- UNION (WITH innermost as (SELECT 2)
- SELECT * FROM outermost
- UNION SELECT * FROM innermost)
-)
-SELECT * FROM outermost ORDER BY 1;
- x
----
- 1
- 2
-(2 rows)
-
-WITH RECURSIVE outermost(x) AS (
- WITH innermost as (SELECT 2 FROM outermost) -- fail
- SELECT * FROM innermost
- UNION SELECT * from outermost
-)
-SELECT * FROM outermost ORDER BY 1;
-ERROR: recursive reference to query "outermost" must not appear within a subquery
-LINE 2: WITH innermost as (SELECT 2 FROM outermost) -- fail
- ^
---
--- This test will fail with the old implementation of PARAM_EXEC parameter
--- assignment, because the "q1" Var passed down to A's targetlist subselect
--- looks exactly like the "A.id" Var passed down to C's subselect, causing
--- the old code to give them the same runtime PARAM_EXEC slot. But the
--- lifespans of the two parameters overlap, thanks to B also reading A.
---
-with
-A as ( select q2 as id, (select q1) as x from int8_tbl ),
-B as ( select id, row_number() over (partition by id) as r from A ),
-C as ( select A.id, array(select B.id from B where B.id = A.id) from A )
-select * from C;
- id | array
--------------------+-------------------------------------
- 456 | {456}
- 4567890123456789 | {4567890123456789,4567890123456789}
- 123 | {123}
- 4567890123456789 | {4567890123456789,4567890123456789}
- -4567890123456789 | {-4567890123456789}
-(5 rows)
-
---
--- Test CTEs read in non-initialization orders
---
-WITH RECURSIVE
- tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)),
- iter (id_key, row_type, link) AS (
- SELECT 0, 'base', 17
- UNION ALL (
- WITH remaining(id_key, row_type, link, min) AS (
- SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER ()
- FROM tab INNER JOIN iter USING (link)
- WHERE tab.id_key > iter.id_key
- ),
- first_remaining AS (
- SELECT id_key, row_type, link
- FROM remaining
- WHERE id_key=min
- ),
- effect AS (
- SELECT tab.id_key, 'new'::text, tab.link
- FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key
- WHERE e.row_type = 'false'
- )
- SELECT * FROM first_remaining
- UNION ALL SELECT * FROM effect
- )
- )
-SELECT * FROM iter;
- id_key | row_type | link
---------+----------+------
- 0 | base | 17
- 1 | true | 17
- 2 | true | 17
- 3 | true | 17
- 4 | true | 17
- 5 | true | 17
- 6 | true | 17
-(7 rows)
-
-WITH RECURSIVE
- tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)),
- iter (id_key, row_type, link) AS (
- SELECT 0, 'base', 17
- UNION (
- WITH remaining(id_key, row_type, link, min) AS (
- SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER ()
- FROM tab INNER JOIN iter USING (link)
- WHERE tab.id_key > iter.id_key
- ),
- first_remaining AS (
- SELECT id_key, row_type, link
- FROM remaining
- WHERE id_key=min
- ),
- effect AS (
- SELECT tab.id_key, 'new'::text, tab.link
- FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key
- WHERE e.row_type = 'false'
- )
- SELECT * FROM first_remaining
- UNION ALL SELECT * FROM effect
- )
- )
-SELECT * FROM iter;
- id_key | row_type | link
---------+----------+------
- 0 | base | 17
- 1 | true | 17
- 2 | true | 17
- 3 | true | 17
- 4 | true | 17
- 5 | true | 17
- 6 | true | 17
-(7 rows)
-
---
--- Data-modifying statements in WITH
---
--- INSERT ... RETURNING
-WITH t AS (
- INSERT INTO y
- VALUES
- (11),
- (12),
- (13),
- (14),
- (15),
- (16),
- (17),
- (18),
- (19),
- (20)
- RETURNING *
-)
-SELECT * FROM t;
- a
-----
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
-(10 rows)
-
-SELECT * FROM y;
- a
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
-(20 rows)
-
--- UPDATE ... RETURNING
-WITH t AS (
- UPDATE y
- SET a=a+1
- RETURNING *
-)
-SELECT * FROM t;
- a
-----
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
-(20 rows)
-
-SELECT * FROM y;
- a
-----
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
-(20 rows)
-
--- DELETE ... RETURNING
-WITH t AS (
- DELETE FROM y
- WHERE a <= 10
- RETURNING *
-)
-SELECT * FROM t;
- a
-----
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
-(9 rows)
-
-SELECT * FROM y;
- a
-----
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
-(11 rows)
-
--- forward reference
-WITH RECURSIVE t AS (
- INSERT INTO y
- SELECT a+5 FROM t2 WHERE a > 5
- RETURNING *
-), t2 AS (
- UPDATE y SET a=a-11 RETURNING *
-)
-SELECT * FROM t
-UNION ALL
-SELECT * FROM t2;
- a
-----
- 11
- 12
- 13
- 14
- 15
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
-(16 rows)
-
-SELECT * FROM y;
- a
-----
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 11
- 7
- 12
- 8
- 13
- 9
- 14
- 10
- 15
-(16 rows)
-
--- unconditional DO INSTEAD rule
-CREATE RULE y_rule AS ON DELETE TO y DO INSTEAD
- INSERT INTO y VALUES(42) RETURNING *;
-WITH t AS (
- DELETE FROM y RETURNING *
-)
-SELECT * FROM t;
- a
-----
- 42
-(1 row)
-
-SELECT * FROM y;
- a
-----
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 11
- 7
- 12
- 8
- 13
- 9
- 14
- 10
- 15
- 42
-(17 rows)
-
-DROP RULE y_rule ON y;
--- check merging of outer CTE with CTE in a rule action
-CREATE TEMP TABLE bug6051 AS
- select i from generate_series(1,3) as t(i);
-SELECT * FROM bug6051;
- i
----
- 1
- 2
- 3
-(3 rows)
-
-WITH t1 AS ( DELETE FROM bug6051 RETURNING * )
-INSERT INTO bug6051 SELECT * FROM t1;
-SELECT * FROM bug6051;
- i
----
- 1
- 2
- 3
-(3 rows)
-
-CREATE TEMP TABLE bug6051_2 (i int);
-CREATE RULE bug6051_ins AS ON INSERT TO bug6051 DO INSTEAD
- INSERT INTO bug6051_2
- VALUES(NEW.i);
-WITH t1 AS ( DELETE FROM bug6051 RETURNING * )
-INSERT INTO bug6051 SELECT * FROM t1;
-SELECT * FROM bug6051;
- i
----
-(0 rows)
-
-SELECT * FROM bug6051_2;
- i
----
- 1
- 2
- 3
-(3 rows)
-
--- check INSERT ... SELECT rule actions are disallowed on commands
--- that have modifyingCTEs
-CREATE OR REPLACE RULE bug6051_ins AS ON INSERT TO bug6051 DO INSTEAD
- INSERT INTO bug6051_2
- SELECT NEW.i;
-WITH t1 AS ( DELETE FROM bug6051 RETURNING * )
-INSERT INTO bug6051 SELECT * FROM t1;
-ERROR: INSERT ... SELECT rule actions are not supported for queries having data-modifying statements in WITH
--- silly example to verify that hasModifyingCTE flag is propagated
-CREATE TEMP TABLE bug6051_3 AS
- SELECT a FROM generate_series(11,13) AS a;
-CREATE RULE bug6051_3_ins AS ON INSERT TO bug6051_3 DO INSTEAD
- SELECT i FROM bug6051_2;
-BEGIN; SET LOCAL debug_parallel_query = on;
-WITH t1 AS ( DELETE FROM bug6051_3 RETURNING * )
- INSERT INTO bug6051_3 SELECT * FROM t1;
- i
----
- 1
- 2
- 3
- 1
- 2
- 3
- 1
- 2
- 3
-(9 rows)
-
-COMMIT;
-SELECT * FROM bug6051_3;
- a
----
-(0 rows)
-
--- check case where CTE reference is removed due to optimization
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT q1 FROM
-(
- WITH t_cte AS (SELECT * FROM int8_tbl t)
- SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub
- FROM int8_tbl i8
-) ss;
- QUERY PLAN
---------------------------------------
- Subquery Scan on ss
- Output: ss.q1
- -> Seq Scan on public.int8_tbl i8
- Output: i8.q1, NULL::bigint
-(4 rows)
-
-SELECT q1 FROM
-(
- WITH t_cte AS (SELECT * FROM int8_tbl t)
- SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub
- FROM int8_tbl i8
-) ss;
- q1
-------------------
- 123
- 123
- 4567890123456789
- 4567890123456789
- 4567890123456789
-(5 rows)
-
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT q1 FROM
-(
- WITH t_cte AS MATERIALIZED (SELECT * FROM int8_tbl t)
- SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub
- FROM int8_tbl i8
-) ss;
- QUERY PLAN
----------------------------------------------
- Subquery Scan on ss
- Output: ss.q1
- -> Seq Scan on public.int8_tbl i8
- Output: i8.q1, NULL::bigint
- CTE t_cte
- -> Seq Scan on public.int8_tbl t
- Output: t.q1, t.q2
-(7 rows)
-
-SELECT q1 FROM
-(
- WITH t_cte AS MATERIALIZED (SELECT * FROM int8_tbl t)
- SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub
- FROM int8_tbl i8
-) ss;
- q1
-------------------
- 123
- 123
- 4567890123456789
- 4567890123456789
- 4567890123456789
-(5 rows)
-
--- a truly recursive CTE in the same list
-WITH RECURSIVE t(a) AS (
- SELECT 0
- UNION ALL
- SELECT a+1 FROM t WHERE a+1 < 5
-), t2 as (
- INSERT INTO y
- SELECT * FROM t RETURNING *
-)
-SELECT * FROM t2 JOIN y USING (a) ORDER BY a;
- a
----
- 0
- 1
- 2
- 3
- 4
-(5 rows)
-
-SELECT * FROM y;
- a
-----
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 11
- 7
- 12
- 8
- 13
- 9
- 14
- 10
- 15
- 42
- 0
- 1
- 2
- 3
- 4
-(22 rows)
-
--- data-modifying WITH in a modifying statement
-WITH t AS (
- DELETE FROM y
- WHERE a <= 10
- RETURNING *
-)
-INSERT INTO y SELECT -a FROM t RETURNING *;
- a
------
- 0
- -1
- -2
- -3
- -4
- -5
- -6
- -7
- -8
- -9
- -10
- 0
- -1
- -2
- -3
- -4
-(16 rows)
-
-SELECT * FROM y;
- a
------
- 11
- 12
- 13
- 14
- 15
- 42
- 0
- -1
- -2
- -3
- -4
- -5
- -6
- -7
- -8
- -9
- -10
- 0
- -1
- -2
- -3
- -4
-(22 rows)
-
--- check that WITH query is run to completion even if outer query isn't
-WITH t AS (
- UPDATE y SET a = a * 100 RETURNING *
-)
-SELECT * FROM t LIMIT 10;
- a
-------
- 1100
- 1200
- 1300
- 1400
- 1500
- 4200
- 0
- -100
- -200
- -300
-(10 rows)
-
-SELECT * FROM y;
- a
--------
- 1100
- 1200
- 1300
- 1400
- 1500
- 4200
- 0
- -100
- -200
- -300
- -400
- -500
- -600
- -700
- -800
- -900
- -1000
- 0
- -100
- -200
- -300
- -400
-(22 rows)
-
--- data-modifying WITH containing INSERT...ON CONFLICT DO UPDATE
-CREATE TABLE withz AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i;
-ALTER TABLE withz ADD UNIQUE (k);
-WITH t AS (
- INSERT INTO withz SELECT i, 'insert'
- FROM generate_series(0, 16) i
- ON CONFLICT (k) DO UPDATE SET v = withz.v || ', now update'
- RETURNING *
-)
-SELECT * FROM t JOIN y ON t.k = y.a ORDER BY a, k;
- k | v | a
----+--------+---
- 0 | insert | 0
- 0 | insert | 0
-(2 rows)
-
--- Test EXCLUDED.* reference within CTE
-WITH aa AS (
- INSERT INTO withz VALUES(1, 5) ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v
- WHERE withz.k != EXCLUDED.k
- RETURNING *
-)
-SELECT * FROM aa;
- k | v
----+---
-(0 rows)
-
--- New query/snapshot demonstrates side-effects of previous query.
-SELECT * FROM withz ORDER BY k;
- k | v
-----+------------------
- 0 | insert
- 1 | 1 v, now update
- 2 | insert
- 3 | insert
- 4 | 4 v, now update
- 5 | insert
- 6 | insert
- 7 | 7 v, now update
- 8 | insert
- 9 | insert
- 10 | 10 v, now update
- 11 | insert
- 12 | insert
- 13 | 13 v, now update
- 14 | insert
- 15 | insert
- 16 | 16 v, now update
-(17 rows)
-
---
--- Ensure subqueries within the update clause work, even if they
--- reference outside values
---
-WITH aa AS (SELECT 1 a, 2 b)
-INSERT INTO withz VALUES(1, 'insert')
-ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1);
-WITH aa AS (SELECT 1 a, 2 b)
-INSERT INTO withz VALUES(1, 'insert')
-ON CONFLICT (k) DO UPDATE SET v = ' update' WHERE withz.k = (SELECT a FROM aa);
-WITH aa AS (SELECT 1 a, 2 b)
-INSERT INTO withz VALUES(1, 'insert')
-ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1);
-WITH aa AS (SELECT 'a' a, 'b' b UNION ALL SELECT 'a' a, 'b' b)
-INSERT INTO withz VALUES(1, 'insert')
-ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 'a' LIMIT 1);
-WITH aa AS (SELECT 1 a, 2 b)
-INSERT INTO withz VALUES(1, (SELECT b || ' insert' FROM aa WHERE a = 1 ))
-ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1);
--- Update a row more than once, in different parts of a wCTE. That is
--- an allowed, presumably very rare, edge case, but since it was
--- broken in the past, having a test seems worthwhile.
-WITH simpletup AS (
- SELECT 2 k, 'Green' v),
-upsert_cte AS (
- INSERT INTO withz VALUES(2, 'Blue') ON CONFLICT (k) DO
- UPDATE SET (k, v) = (SELECT k, v FROM simpletup WHERE simpletup.k = withz.k)
- RETURNING k, v)
-INSERT INTO withz VALUES(2, 'Red') ON CONFLICT (k) DO
-UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = withz.k)
-RETURNING k, v;
- k | v
----+---
-(0 rows)
-
-DROP TABLE withz;
--- WITH referenced by MERGE statement
-CREATE TABLE m AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i;
-ALTER TABLE m ADD UNIQUE (k);
-WITH RECURSIVE cte_basic AS (SELECT 1 a, 'cte_basic val' b)
-MERGE INTO m USING (select 0 k, 'merge source SubPlan' v) o ON m.k=o.k
-WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1)
-WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
-ERROR: WITH RECURSIVE is not supported for MERGE statement
--- Basic:
-WITH cte_basic AS MATERIALIZED (SELECT 1 a, 'cte_basic val' b)
-MERGE INTO m USING (select 0 k, 'merge source SubPlan' v offset 0) o ON m.k=o.k
-WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1)
-WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
--- Examine
-SELECT * FROM m where k = 0;
- k | v
----+----------------------
- 0 | merge source SubPlan
-(1 row)
-
--- See EXPLAIN output for same query:
-EXPLAIN (VERBOSE, COSTS OFF)
-WITH cte_basic AS MATERIALIZED (SELECT 1 a, 'cte_basic val' b)
-MERGE INTO m USING (select 0 k, 'merge source SubPlan' v offset 0) o ON m.k=o.k
-WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1)
-WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
- QUERY PLAN
--------------------------------------------------------------------
- Merge on public.m
- CTE cte_basic
- -> Result
- Output: 1, 'cte_basic val'::text
- -> Hash Right Join
- Output: m.ctid, o.k, o.v, o.*
- Hash Cond: (m.k = o.k)
- -> Seq Scan on public.m
- Output: m.ctid, m.k
- -> Hash
- Output: o.k, o.v, o.*
- -> Subquery Scan on o
- Output: o.k, o.v, o.*
- -> Result
- Output: 0, 'merge source SubPlan'::text
- SubPlan 2
- -> Limit
- Output: ((cte_basic.b || ' merge update'::text))
- -> CTE Scan on cte_basic
- Output: (cte_basic.b || ' merge update'::text)
- Filter: (cte_basic.a = m.k)
-(21 rows)
-
--- InitPlan
-WITH cte_init AS MATERIALIZED (SELECT 1 a, 'cte_init val' b)
-MERGE INTO m USING (select 1 k, 'merge source InitPlan' v offset 0) o ON m.k=o.k
-WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_init WHERE a = 1 LIMIT 1)
-WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
--- Examine
-SELECT * FROM m where k = 1;
- k | v
----+---------------------------
- 1 | cte_init val merge update
-(1 row)
-
--- See EXPLAIN output for same query:
-EXPLAIN (VERBOSE, COSTS OFF)
-WITH cte_init AS MATERIALIZED (SELECT 1 a, 'cte_init val' b)
-MERGE INTO m USING (select 1 k, 'merge source InitPlan' v offset 0) o ON m.k=o.k
-WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_init WHERE a = 1 LIMIT 1)
-WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v);
- QUERY PLAN
---------------------------------------------------------------------
- Merge on public.m
- CTE cte_init
- -> Result
- Output: 1, 'cte_init val'::text
- InitPlan 2
- -> Limit
- Output: ((cte_init.b || ' merge update'::text))
- -> CTE Scan on cte_init
- Output: (cte_init.b || ' merge update'::text)
- Filter: (cte_init.a = 1)
- -> Hash Right Join
- Output: m.ctid, o.k, o.v, o.*
- Hash Cond: (m.k = o.k)
- -> Seq Scan on public.m
- Output: m.ctid, m.k
- -> Hash
- Output: o.k, o.v, o.*
- -> Subquery Scan on o
- Output: o.k, o.v, o.*
- -> Result
- Output: 1, 'merge source InitPlan'::text
-(21 rows)
-
--- MERGE source comes from CTE:
-WITH merge_source_cte AS MATERIALIZED (SELECT 15 a, 'merge_source_cte val' b)
-MERGE INTO m USING (select * from merge_source_cte) o ON m.k=o.a
-WHEN MATCHED THEN UPDATE SET v = (SELECT b || merge_source_cte.*::text || ' merge update' FROM merge_source_cte WHERE a = 15)
-WHEN NOT MATCHED THEN INSERT VALUES(o.a, o.b || (SELECT merge_source_cte.*::text || ' merge insert' FROM merge_source_cte));
--- Examine
-SELECT * FROM m where k = 15;
- k | v
-----+--------------------------------------------------------------
- 15 | merge_source_cte val(15,"merge_source_cte val") merge insert
-(1 row)
-
--- See EXPLAIN output for same query:
-EXPLAIN (VERBOSE, COSTS OFF)
-WITH merge_source_cte AS MATERIALIZED (SELECT 15 a, 'merge_source_cte val' b)
-MERGE INTO m USING (select * from merge_source_cte) o ON m.k=o.a
-WHEN MATCHED THEN UPDATE SET v = (SELECT b || merge_source_cte.*::text || ' merge update' FROM merge_source_cte WHERE a = 15)
-WHEN NOT MATCHED THEN INSERT VALUES(o.a, o.b || (SELECT merge_source_cte.*::text || ' merge insert' FROM merge_source_cte));
- QUERY PLAN
------------------------------------------------------------------------------------------------------
- Merge on public.m
- CTE merge_source_cte
- -> Result
- Output: 15, 'merge_source_cte val'::text
- InitPlan 2
- -> CTE Scan on merge_source_cte merge_source_cte_1
- Output: ((merge_source_cte_1.b || (merge_source_cte_1.*)::text) || ' merge update'::text)
- Filter: (merge_source_cte_1.a = 15)
- InitPlan 3
- -> CTE Scan on merge_source_cte merge_source_cte_2
- Output: ((merge_source_cte_2.*)::text || ' merge insert'::text)
- -> Hash Right Join
- Output: m.ctid, merge_source_cte.a, merge_source_cte.b, merge_source_cte.*
- Hash Cond: (m.k = merge_source_cte.a)
- -> Seq Scan on public.m
- Output: m.ctid, m.k
- -> Hash
- Output: merge_source_cte.a, merge_source_cte.b, merge_source_cte.*
- -> CTE Scan on merge_source_cte
- Output: merge_source_cte.a, merge_source_cte.b, merge_source_cte.*
-(20 rows)
-
-DROP TABLE m;
--- check that run to completion happens in proper ordering
-TRUNCATE TABLE y;
-INSERT INTO y SELECT generate_series(1, 3);
-CREATE TEMPORARY TABLE yy (a INTEGER);
-WITH RECURSIVE t1 AS (
- INSERT INTO y SELECT * FROM y RETURNING *
-), t2 AS (
- INSERT INTO yy SELECT * FROM t1 RETURNING *
-)
-SELECT 1;
- ?column?
-----------
- 1
-(1 row)
-
-SELECT * FROM y;
- a
----
- 1
- 2
- 3
- 1
- 2
- 3
-(6 rows)
-
-SELECT * FROM yy;
- a
----
- 1
- 2
- 3
-(3 rows)
-
-WITH RECURSIVE t1 AS (
- INSERT INTO yy SELECT * FROM t2 RETURNING *
-), t2 AS (
- INSERT INTO y SELECT * FROM y RETURNING *
-)
-SELECT 1;
- ?column?
-----------
- 1
-(1 row)
-
-SELECT * FROM y;
- a
----
- 1
- 2
- 3
- 1
- 2
- 3
- 1
- 2
- 3
- 1
- 2
- 3
-(12 rows)
-
-SELECT * FROM yy;
- a
----
- 1
- 2
- 3
- 1
- 2
- 3
- 1
- 2
- 3
-(9 rows)
-
--- triggers
-TRUNCATE TABLE y;
-INSERT INTO y SELECT generate_series(1, 10);
-CREATE FUNCTION y_trigger() RETURNS trigger AS $$
-begin
- raise notice 'y_trigger: a = %', new.a;
- return new;
-end;
-$$ LANGUAGE plpgsql;
-CREATE TRIGGER y_trig BEFORE INSERT ON y FOR EACH ROW
- EXECUTE PROCEDURE y_trigger();
-WITH t AS (
- INSERT INTO y
- VALUES
- (21),
- (22),
- (23)
- RETURNING *
-)
-SELECT * FROM t;
-NOTICE: y_trigger: a = 21
-NOTICE: y_trigger: a = 22
-NOTICE: y_trigger: a = 23
- a
-----
- 21
- 22
- 23
-(3 rows)
-
-SELECT * FROM y;
- a
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 21
- 22
- 23
-(13 rows)
-
-DROP TRIGGER y_trig ON y;
-CREATE TRIGGER y_trig AFTER INSERT ON y FOR EACH ROW
- EXECUTE PROCEDURE y_trigger();
-WITH t AS (
- INSERT INTO y
- VALUES
- (31),
- (32),
- (33)
- RETURNING *
-)
-SELECT * FROM t LIMIT 1;
-NOTICE: y_trigger: a = 31
-NOTICE: y_trigger: a = 32
-NOTICE: y_trigger: a = 33
- a
-----
- 31
-(1 row)
-
-SELECT * FROM y;
- a
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 21
- 22
- 23
- 31
- 32
- 33
-(16 rows)
-
-DROP TRIGGER y_trig ON y;
-CREATE OR REPLACE FUNCTION y_trigger() RETURNS trigger AS $$
-begin
- raise notice 'y_trigger';
- return null;
-end;
-$$ LANGUAGE plpgsql;
-CREATE TRIGGER y_trig AFTER INSERT ON y FOR EACH STATEMENT
- EXECUTE PROCEDURE y_trigger();
-WITH t AS (
- INSERT INTO y
- VALUES
- (41),
- (42),
- (43)
- RETURNING *
-)
-SELECT * FROM t;
-NOTICE: y_trigger
- a
-----
- 41
- 42
- 43
-(3 rows)
-
-SELECT * FROM y;
- a
-----
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 21
- 22
- 23
- 31
- 32
- 33
- 41
- 42
- 43
-(19 rows)
-
-DROP TRIGGER y_trig ON y;
-DROP FUNCTION y_trigger();
--- WITH attached to inherited UPDATE or DELETE
-CREATE TEMP TABLE parent ( id int, val text );
-CREATE TEMP TABLE child1 ( ) INHERITS ( parent );
-CREATE TEMP TABLE child2 ( ) INHERITS ( parent );
-INSERT INTO parent VALUES ( 1, 'p1' );
-INSERT INTO child1 VALUES ( 11, 'c11' ),( 12, 'c12' );
-INSERT INTO child2 VALUES ( 23, 'c21' ),( 24, 'c22' );
-WITH rcte AS ( SELECT sum(id) AS totalid FROM parent )
-UPDATE parent SET id = id + totalid FROM rcte;
-SELECT * FROM parent;
- id | val
-----+-----
- 72 | p1
- 82 | c11
- 83 | c12
- 94 | c21
- 95 | c22
-(5 rows)
-
-WITH wcte AS ( INSERT INTO child1 VALUES ( 42, 'new' ) RETURNING id AS newid )
-UPDATE parent SET id = id + newid FROM wcte;
-SELECT * FROM parent;
- id | val
------+-----
- 114 | p1
- 42 | new
- 124 | c11
- 125 | c12
- 136 | c21
- 137 | c22
-(6 rows)
-
-WITH rcte AS ( SELECT max(id) AS maxid FROM parent )
-DELETE FROM parent USING rcte WHERE id = maxid;
-SELECT * FROM parent;
- id | val
------+-----
- 114 | p1
- 42 | new
- 124 | c11
- 125 | c12
- 136 | c21
-(5 rows)
-
-WITH wcte AS ( INSERT INTO child2 VALUES ( 42, 'new2' ) RETURNING id AS newid )
-DELETE FROM parent USING wcte WHERE id = newid;
-SELECT * FROM parent;
- id | val
------+------
- 114 | p1
- 124 | c11
- 125 | c12
- 136 | c21
- 42 | new2
-(5 rows)
-
--- check EXPLAIN VERBOSE for a wCTE with RETURNING
-EXPLAIN (VERBOSE, COSTS OFF)
-WITH wcte AS ( INSERT INTO int8_tbl VALUES ( 42, 47 ) RETURNING q2 )
-DELETE FROM a_star USING wcte WHERE aa = q2;
- QUERY PLAN
----------------------------------------------------------------------------
- Delete on public.a_star
- Delete on public.a_star a_star_1
- Delete on public.b_star a_star_2
- Delete on public.c_star a_star_3
- Delete on public.d_star a_star_4
- Delete on public.e_star a_star_5
- Delete on public.f_star a_star_6
- CTE wcte
- -> Insert on public.int8_tbl
- Output: int8_tbl.q2
- -> Result
- Output: '42'::bigint, '47'::bigint
- -> Hash Join
- Output: wcte.*, a_star.tableoid, a_star.ctid
- Hash Cond: (a_star.aa = wcte.q2)
- -> Append
- -> Seq Scan on public.a_star a_star_1
- Output: a_star_1.aa, a_star_1.tableoid, a_star_1.ctid
- -> Seq Scan on public.b_star a_star_2
- Output: a_star_2.aa, a_star_2.tableoid, a_star_2.ctid
- -> Seq Scan on public.c_star a_star_3
- Output: a_star_3.aa, a_star_3.tableoid, a_star_3.ctid
- -> Seq Scan on public.d_star a_star_4
- Output: a_star_4.aa, a_star_4.tableoid, a_star_4.ctid
- -> Seq Scan on public.e_star a_star_5
- Output: a_star_5.aa, a_star_5.tableoid, a_star_5.ctid
- -> Seq Scan on public.f_star a_star_6
- Output: a_star_6.aa, a_star_6.tableoid, a_star_6.ctid
- -> Hash
- Output: wcte.*, wcte.q2
- -> CTE Scan on wcte
- Output: wcte.*, wcte.q2
-(32 rows)
-
--- error cases
--- data-modifying WITH tries to use its own output
-WITH RECURSIVE t AS (
- INSERT INTO y
- SELECT * FROM t
-)
-VALUES(FALSE);
-ERROR: recursive query "t" must not contain data-modifying statements
-LINE 1: WITH RECURSIVE t AS (
- ^
--- no RETURNING in a referenced data-modifying WITH
-WITH t AS (
- INSERT INTO y VALUES(0)
-)
-SELECT * FROM t;
-ERROR: WITH query "t" does not have a RETURNING clause
-LINE 4: SELECT * FROM t;
- ^
--- RETURNING tries to return its own output
-WITH RECURSIVE t(action, a) AS (
- MERGE INTO y USING (VALUES (11)) v(a) ON y.a = v.a
- WHEN NOT MATCHED THEN INSERT VALUES (v.a)
- RETURNING merge_action(), (SELECT a FROM t)
-)
-SELECT * FROM t;
-ERROR: recursive query "t" must not contain data-modifying statements
-LINE 1: WITH RECURSIVE t(action, a) AS (
- ^
--- data-modifying WITH allowed only at the top level
-SELECT * FROM (
- WITH t AS (UPDATE y SET a=a+1 RETURNING *)
- SELECT * FROM t
-) ss;
-ERROR: WITH clause containing a data-modifying statement must be at the top level
-LINE 2: WITH t AS (UPDATE y SET a=a+1 RETURNING *)
- ^
--- most variants of rules aren't allowed
-CREATE RULE y_rule AS ON INSERT TO y WHERE a=0 DO INSTEAD DELETE FROM y;
-WITH t AS (
- INSERT INTO y VALUES(0)
-)
-VALUES(FALSE);
-ERROR: conditional DO INSTEAD rules are not supported for data-modifying statements in WITH
-CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO INSTEAD NOTHING;
-WITH t AS (
- INSERT INTO y VALUES(0)
-)
-VALUES(FALSE);
-ERROR: DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH
-CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO INSTEAD NOTIFY foo;
-WITH t AS (
- INSERT INTO y VALUES(0)
-)
-VALUES(FALSE);
-ERROR: DO INSTEAD NOTIFY rules are not supported for data-modifying statements in WITH
-CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO ALSO NOTIFY foo;
-WITH t AS (
- INSERT INTO y VALUES(0)
-)
-VALUES(FALSE);
-ERROR: DO ALSO rules are not supported for data-modifying statements in WITH
-CREATE OR REPLACE RULE y_rule AS ON INSERT TO y
- DO INSTEAD (NOTIFY foo; NOTIFY bar);
-WITH t AS (
- INSERT INTO y VALUES(0)
-)
-VALUES(FALSE);
-ERROR: multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH
-DROP RULE y_rule ON y;
--- check that parser lookahead for WITH doesn't cause any odd behavior
-create table foo (with baz); -- fail, WITH is a reserved word
-ERROR: syntax error at or near "with"
-LINE 1: create table foo (with baz);
- ^
-create table foo (with ordinality); -- fail, WITH is a reserved word
-ERROR: syntax error at or near "with"
-LINE 1: create table foo (with ordinality);
- ^
-with ordinality as (select 1 as x) select * from ordinality;
- x
----
- 1
-(1 row)
-
--- check sane response to attempt to modify CTE relation
-WITH with_test AS (SELECT 42) INSERT INTO with_test VALUES (1);
-ERROR: relation "with_test" does not exist
-LINE 1: WITH with_test AS (SELECT 42) INSERT INTO with_test VALUES (...
- ^
--- check response to attempt to modify table with same name as a CTE (perhaps
--- surprisingly it works, because CTEs don't hide tables from data-modifying
--- statements)
-create temp table with_test (i int);
-with with_test as (select 42) insert into with_test select * from with_test;
-select * from with_test;
- i
-----
- 42
-(1 row)
-
-drop table with_test;
+psql: error: connection to server on socket "/tmp/GI28xIHhu1/.s.PGSQL.16360" failed: No such file or directory
+ Is the server running locally and accepting connections on that socket?
diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/xml_1.out /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/xml.out
--- /tmp/cirrus-ci-build/src/test/regress/expected/xml_1.out 2024-11-15 02:50:52.525989802 +0000
+++ /tmp/cirrus-ci-build/build-32/testrun/recovery/027_stream_regress/data/results/xml.out 2024-11-15 02:59:18.181116966 +0000
@@ -1,1482 +1,2 @@
-CREATE TABLE xmltest (
- id int,
- data xml
-);
-INSERT INTO xmltest VALUES (1, 'one');
-ERROR: unsupported XML feature
-LINE 1: INSERT INTO xmltest VALUES (1, 'one');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-INSERT INTO xmltest VALUES (2, 'two');
-ERROR: unsupported XML feature
-LINE 1: INSERT INTO xmltest VALUES (2, 'two');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-INSERT INTO xmltest VALUES (3, 'one', 'xml');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT pg_input_is_valid('one', 'xml');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT message FROM pg_input_error_info('one', 'xml');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT pg_input_is_valid('', 'xml');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT message FROM pg_input_error_info('', 'xml');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlcomment('test');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlcomment('-test');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlcomment('test-');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlcomment('--test');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlcomment('te st');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlconcat(xmlcomment('hello'),
- xmlelement(NAME qux, 'foo'),
- xmlcomment('world'));
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlconcat('hello', 'you');
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlconcat('hello', 'you');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlconcat(1, 2);
-ERROR: argument of XMLCONCAT must be type xml, not type integer
-LINE 1: SELECT xmlconcat(1, 2);
- ^
-SELECT xmlconcat('bad', '', NULL, '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlconcat('', NULL, '', NULL, '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlconcat('', NULL, 'r');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlelement(name foo, xml 'br');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlelement(name foo, array[1, 2, 3]);
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SET xmlbinary TO base64;
-SELECT xmlelement(name foo, bytea 'bar');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SET xmlbinary TO hex;
-SELECT xmlelement(name foo, bytea 'bar');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlelement(name foo, xmlattributes(true as bar));
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlelement(name foo, xmlattributes('2009-04-09 00:24:37'::timestamp as bar));
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlelement(name foo, xmlattributes('infinity'::timestamp as bar));
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlelement(name foo, xmlattributes('<>&"''' as funny, xml 'br' as funnier));
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content ' ');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content 'abc');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content 'x');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content '&');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content '&idontexist;');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content '&idontexist;');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(content '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(document ' ');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(document 'abc');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(document 'x');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(document '&');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(document '&idontexist;');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(document '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(document '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(document '&idontexist;');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlparse(document '');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name foo);
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name xml);
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name xmlstuff);
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name foo, 'bar');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name foo, 'in?>valid');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name foo, null);
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name xml, null);
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name xmlstuff, null);
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name "xml-stylesheet", 'href="mystyle.css" type="text/css"');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name foo, ' bar');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlroot(xml '', version no value, standalone no value);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlroot(xml '', version no value, standalone no...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlroot(xml '', version '2.0');
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlroot(xml '', version '2.0');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlroot(xml '', version no value, standalone yes);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlroot(xml '', version no value, standalone ye...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlroot(xml '', version no value, standalone yes);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlroot(xml '', version no...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlroot(xmlroot(xml '', version '1.0'), version '1.1', standalone no);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlroot(xmlroot(xml '', version '1.0'), version...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlroot('', version no value, standalone no);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlroot('...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlroot('', version no value, standalone no value);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlroot('...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlroot('', version no value);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlroot('...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlroot (
- xmlelement (
- name gazonk,
- xmlattributes (
- 'val' AS name,
- 1 + 1 AS num
- ),
- xmlelement (
- NAME qux,
- 'foo'
- )
- ),
- version '1.0',
- standalone yes
-);
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(content data as character varying(20)) FROM xmltest;
- xmlserialize
---------------
-(0 rows)
-
-SELECT xmlserialize(content 'good' as char(10));
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(content 'good' as char(10));
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(document 'bad' as text);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(document 'bad' as text);
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- indent
-SELECT xmlserialize(DOCUMENT '42' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT '42<...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT '42' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT '42<...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- no indent
-SELECT xmlserialize(DOCUMENT '42' AS text NO INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT '42<...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT '42' AS text NO INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT '42<...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- indent non singly-rooted xml
-SELECT xmlserialize(DOCUMENT '7342' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT '734...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT '7342' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT '734...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- indent non singly-rooted xml with mixed contents
-SELECT xmlserialize(DOCUMENT 'text node73text node42' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT 'text node73text nod...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT 'text node73text node42' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT 'text node73text nod...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- indent singly-rooted xml with mixed contents
-SELECT xmlserialize(DOCUMENT '42text node73' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT '42<...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT '42text node73' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT '42<...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- indent empty string
-SELECT xmlserialize(DOCUMENT '' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT '' AS text INDENT);
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT '' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT '' AS text INDENT);
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- whitespaces
-SELECT xmlserialize(DOCUMENT ' ' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT ' ' AS text INDENT);
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT ' ' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT ' ' AS text INDENT);
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- indent null
-SELECT xmlserialize(DOCUMENT NULL AS text INDENT);
- xmlserialize
---------------
-
-(1 row)
-
-SELECT xmlserialize(CONTENT NULL AS text INDENT);
- xmlserialize
---------------
-
-(1 row)
-
--- indent with XML declaration
-SELECT xmlserialize(DOCUMENT '73' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT '73' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT '' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT '' AS text INDE...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT '' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT '' AS text INDE...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- indent xml with empty element
-SELECT xmlserialize(DOCUMENT '' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT '' AS tex...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT '' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT '' AS tex...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- 'no indent' = not using 'no indent'
-SELECT xmlserialize(DOCUMENT '42' AS text) = xmlserialize(DOCUMENT '42' AS text NO INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT '42<...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT '42' AS text) = xmlserialize(CONTENT '42' AS text NO INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT '42<...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- indent xml strings containing blank nodes
-SELECT xmlserialize(DOCUMENT ' ' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(DOCUMENT ' '...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlserialize(CONTENT 'text node ' AS text INDENT);
-ERROR: unsupported XML feature
-LINE 1: SELECT xmlserialize(CONTENT 'text node ...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xml 'bar' IS DOCUMENT;
-ERROR: unsupported XML feature
-LINE 1: SELECT xml 'bar' IS DOCUMENT;
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xml 'barfoo' IS DOCUMENT;
-ERROR: unsupported XML feature
-LINE 1: SELECT xml 'barfoo' IS DOCUMENT;
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xml '' IS NOT DOCUMENT;
-ERROR: unsupported XML feature
-LINE 1: SELECT xml '' IS NOT DOCUMENT;
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xml 'abc' IS NOT DOCUMENT;
-ERROR: unsupported XML feature
-LINE 1: SELECT xml 'abc' IS NOT DOCUMENT;
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT '<>' IS NOT DOCUMENT;
-ERROR: unsupported XML feature
-LINE 1: SELECT '<>' IS NOT DOCUMENT;
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlagg(data) FROM xmltest;
- xmlagg
---------
-
-(1 row)
-
-SELECT xmlagg(data) FROM xmltest WHERE id > 10;
- xmlagg
---------
-
-(1 row)
-
-SELECT xmlelement(name employees, xmlagg(xmlelement(name name, name))) FROM emp;
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
--- Check mapping SQL identifier to XML name
-SELECT xmlpi(name ":::_xml_abc135.%-&_");
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xmlpi(name "123");
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-PREPARE foo (xml) AS SELECT xmlconcat('', $1);
-ERROR: unsupported XML feature
-LINE 1: PREPARE foo (xml) AS SELECT xmlconcat('', $1);
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SET XML OPTION DOCUMENT;
-EXECUTE foo ('');
-ERROR: prepared statement "foo" does not exist
-EXECUTE foo ('bad');
-ERROR: prepared statement "foo" does not exist
-SELECT xml '';
-ERROR: unsupported XML feature
-LINE 1: SELECT xml '';
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SET XML OPTION CONTENT;
-EXECUTE foo ('');
-ERROR: prepared statement "foo" does not exist
-EXECUTE foo ('good');
-ERROR: prepared statement "foo" does not exist
-SELECT xml ' ';
-ERROR: unsupported XML feature
-LINE 1: SELECT xml ' ';
-ERROR: unsupported XML feature
-LINE 1: SELECT xml ' ';
-ERROR: unsupported XML feature
-LINE 1: SELECT xml '';
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xml ' oops ';
-ERROR: unsupported XML feature
-LINE 1: SELECT xml ' oops ';
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xml ' ';
-ERROR: unsupported XML feature
-LINE 1: SELECT xml ' ';
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xml '';
-ERROR: unsupported XML feature
-LINE 1: SELECT xml '';
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- Test backwards parsing
-CREATE VIEW xmlview1 AS SELECT xmlcomment('test');
-CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you');
-ERROR: unsupported XML feature
-LINE 1: CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview3 AS SELECT xmlelement(name element, xmlattributes (1 as ":one:", 'deuce' as two), 'content&');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview4 AS SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview5 AS SELECT xmlparse(content 'x');
-CREATE VIEW xmlview6 AS SELECT xmlpi(name foo, 'bar');
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview7 AS SELECT xmlroot(xml '', version no value, standalone yes);
-ERROR: unsupported XML feature
-LINE 1: CREATE VIEW xmlview7 AS SELECT xmlroot(xml '', version...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10));
-ERROR: unsupported XML feature
-LINE 1: ...EATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as ...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text);
-ERROR: unsupported XML feature
-LINE 1: ...EATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as ...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT table_name, view_definition FROM information_schema.views
- WHERE table_name LIKE 'xmlview%' ORDER BY 1;
- table_name | view_definition
-------------+--------------------------------------------------------------------------------
- xmlview1 | SELECT xmlcomment('test'::text) AS xmlcomment;
- xmlview5 | SELECT XMLPARSE(CONTENT 'x'::text STRIP WHITESPACE) AS "xmlparse";
-(2 rows)
-
--- Text XPath expressions evaluation
-SELECT xpath('/value', data) FROM xmltest;
- xpath
--------
-(0 rows)
-
-SELECT xpath(NULL, NULL) IS NULL FROM xmltest;
- ?column?
-----------
-(0 rows)
-
-SELECT xpath('', '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('', '');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xpath('//text()', 'number one');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('//text()', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('//loc:piece/@id', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('//loc:piece', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('//loc:piece', '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('//@value', '');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xpath('''<>''', '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('''<>''', '');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xpath('count(//*)', '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('count(//*)', '');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xpath('count(//*)=0', '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('count(//*)=0', '');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xpath('count(//*)=3', '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('count(//*)=3', '');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xpath('name(/*)', '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('name(/*)', '');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xpath('/nosuchtag', '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('/nosuchtag', '');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xpath('root', '');
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath('root', '');
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
--- Round-trip non-ASCII data through xpath().
-DO $$
-DECLARE
- xml_declaration text := '';
- degree_symbol text;
- res xml[];
-BEGIN
- -- Per the documentation, except when the server encoding is UTF8, xpath()
- -- may not work on non-ASCII data. The untranslatable_character and
- -- undefined_function traps below, currently dead code, will become relevant
- -- if we remove this limitation.
- IF current_setting('server_encoding') <> 'UTF8' THEN
- RAISE LOG 'skip: encoding % unsupported for xpath',
- current_setting('server_encoding');
- RETURN;
- END IF;
-
- degree_symbol := convert_from('\xc2b0', 'UTF8');
- res := xpath('text()', (xml_declaration ||
- '' || degree_symbol || '')::xml);
- IF degree_symbol <> res[1]::text THEN
- RAISE 'expected % (%), got % (%)',
- degree_symbol, convert_to(degree_symbol, 'UTF8'),
- res[1], convert_to(res[1]::text, 'UTF8');
- END IF;
-EXCEPTION
- -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8"
- WHEN untranslatable_character
- -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist
- OR undefined_function
- -- unsupported XML feature
- OR feature_not_supported THEN
- RAISE LOG 'skip: %', SQLERRM;
-END
-$$;
--- Test xmlexists and xpath_exists
-SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol');
-ERROR: unsupported XML feature
-LINE 1: ...sts('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol');
-ERROR: unsupported XML feature
-LINE 1: ...sts('//town[text() = ''Cwmbran'']' PASSING BY REF '');
-ERROR: unsupported XML feature
-LINE 1: ...LECT xmlexists('count(/nosuchtag)' PASSING BY REF '')...
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-SELECT xpath_exists('//town[text() = ''Toronto'']','Bidford-on-AvonCwmbranBristol'::xml);
-ERROR: unsupported XML feature
-LINE 1: ...ELECT xpath_exists('//town[text() = ''Toronto'']','Bidford-on-AvonCwmbranBristol'::xml);
-ERROR: unsupported XML feature
-LINE 1: ...ELECT xpath_exists('//town[text() = ''Cwmbran'']',''::xml);
-ERROR: unsupported XML feature
-LINE 1: SELECT xpath_exists('count(/nosuchtag)', ''::xml);
- ^
-DETAIL: This functionality requires the server to be built with libxml support.
-INSERT INTO xmltest VALUES (4, ''::xml);
-ERROR: unsupported XML feature
-LINE 1: INSERT INTO xmltest VALUES (4, ''::xml);
-ERROR: unsupported XML feature
-LINE 1: INSERT INTO xmltest VALUES (5, '