Remove whitespace from end of lines
pgindent and perltidy should clean up the rest.
This commit is contained in: parent 131b4d6473, commit c8e086795a
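
(Editorial aside, not part of the commit: the kind of cleanup this commit performs can be reproduced with a one-liner along these lines; treating every tracked file as text is an assumption made only for the sketch.)

    # strip trailing spaces/tabs in place across the tracked files
    git ls-files -z | xargs -0 perl -pi -e 's/[ \t]+$//'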
@@ -13,8 +13,8 @@ Here are the steps needed to create a regression database dump file:
 This database can be created by running 'gmake installcheck' from
 src/test/regression.

-2) Use pg_dump to dump out the regression database. Use the new
-cluster's pg_dump on the old database to minimize whitespace
+2) Use pg_dump to dump out the regression database. Use the new
+cluster's pg_dump on the old database to minimize whitespace
 differences in the diff.

 3) Adjust the regression database dump file
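(For illustration only; the exact command forms are assumptions, not text from this commit. The two steps described in the hunk above amount to roughly:)

    gmake installcheck                    # run from the directory named above; leaves the "regression" database behind
    pg_dump regression > regression.dump  # run the new cluster's pg_dump against the old cluster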
@@ -188,7 +188,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
 obsolete but still accepted spelling of <literal>samerole</>.)
 Superusers are not considered to be members of a role for the
 purposes of <literal>samerole</> unless they are explicitly
-members of the role, directly or indirectly, and not just by
+members of the role, directly or indirectly, and not just by
 virtue of being a superuser.
 The value <literal>replication</> specifies that the record
 matches if a replication connection is requested (note that
@@ -9648,8 +9648,8 @@ table2-mapping
 <literal>array_to_json(anyarray [, pretty_bool])</literal>
 </entry>
 <entry>
-Returns the array as JSON. A Postgres multi-dimensional array
-becomes a JSON array of arrays. Line feeds will be added between
+Returns the array as JSON. A Postgres multi-dimensional array
+becomes a JSON array of arrays. Line feeds will be added between
 dimension 1 elements if pretty_bool is true.
 </entry>
 <entry><literal>array_to_json('{{1,5},{99,100}}'::int[])</literal></entry>
@@ -9663,7 +9663,7 @@ table2-mapping
 <literal>row_to_json(record [, pretty_bool])</literal>
 </entry>
 <entry>
-Returns the row as JSON. Line feeds will be added between level
+Returns the row as JSON. Line feeds will be added between level
 1 elements if pretty_bool is true.
 </entry>
 <entry><literal>row_to_json(row(1,'foo'))</literal></entry>
@@ -13812,7 +13812,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype);
 <row>
 <entry><literal><function>pg_get_viewdef(<parameter>view_name</parameter>, <parameter>pretty_bool</>)</function></literal></entry>
 <entry><type>text</type></entry>
-<entry>get underlying <command>SELECT</command> command for view,
+<entry>get underlying <command>SELECT</command> command for view,
 lines with fields are wrapped to 80 columns if pretty_bool is true (<emphasis>deprecated</emphasis>)</entry>
 </row>
 <row>
@@ -13823,13 +13823,13 @@ SELECT pg_type_is_visible('myschema.widget'::regtype);
 <row>
 <entry><literal><function>pg_get_viewdef(<parameter>view_oid</parameter>, <parameter>pretty_bool</>)</function></literal></entry>
 <entry><type>text</type></entry>
-<entry>get underlying <command>SELECT</command> command for view,
+<entry>get underlying <command>SELECT</command> command for view,
 lines with fields are wrapped to 80 columns if pretty_bool is true</entry>
 </row>
 <row>
 <entry><literal><function>pg_get_viewdef(<parameter>view_oid</parameter>, <parameter>wrap_int</>)</function></literal></entry>
 <entry><type>text</type></entry>
-<entry>get underlying <command>SELECT</command> command for view,
+<entry>get underlying <command>SELECT</command> command for view,
 wrapping lines with fields as specified, pretty printing is implied</entry>
 </row>
 <row>
@@ -1622,7 +1622,7 @@ PostgreSQL, contrib and HTML documentation successfully made. Ready to install.
 On some systems with shared libraries
 you need to tell the system how to find the newly installed
 shared libraries. The systems on which this is
-<emphasis>not</emphasis> necessary include
+<emphasis>not</emphasis> necessary include
 <systemitem class="osname">FreeBSD</>,
 <systemitem class="osname">HP-UX</>, <systemitem
 class="osname">IRIX</>, <systemitem class="osname">Linux</>,
@@ -270,7 +270,7 @@ gmake prefix=/usr/local/pgsql.new install

 <para>
 Install any custom shared object files (or DLLs) used by the old cluster
-into the new cluster, e.g. <filename>pgcrypto.so</filename>,
+into the new cluster, e.g. <filename>pgcrypto.so</filename>,
 whether they are from <filename>contrib</filename>
 or some other source. Do not install the schema definitions, e.g.
 <filename>pgcrypto.sql</>, because these will be upgraded from the old cluster.
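(Hedged illustration of the step above; the old-cluster path is invented for the example, and pg_config --pkglibdir reports each installation's actual library directory.)

    cp /usr/local/pgsql.old/lib/pgcrypto.so /usr/local/pgsql.new/lib/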
@@ -403,11 +403,11 @@ PostgreSQL documentation
 Dump only the object definitions (schema), not data.
 </para>
 <para>
-To exclude table data for only a subset of tables in the database,
+To exclude table data for only a subset of tables in the database,
 see <option>--exclude-table-data</>.
 </para>
 <para>
-This option is equivalent to specifying
+This option is equivalent to specifying
 <option>--section=pre-data --section=post-data</>.
 </para>
 </listitem>
@@ -417,13 +417,13 @@ PostgreSQL documentation
 <term><option>--section=<replaceable class="parameter">sectionname</replaceable></option></term>
 <listitem>
 <para>
-Only dump the named section. The name can be one of <option>pre-data</>, <option>data</>
-and <option>post-data</>.
+Only dump the named section. The name can be one of <option>pre-data</>, <option>data</>
+and <option>post-data</>.
 This option can be specified more than once. The default is to dump all sections.
 </para>
 <para>
-Post-data items consist of definitions of indexes, triggers, rules
-and constraints other than validated check constraints.
+Post-data items consist of definitions of indexes, triggers, rules
+and constraints other than validated check constraints.
 Pre-data items consist of all other data definition items.
 </para>
 </listitem>
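(Hedged example of the --section usage documented above; the database name is made up.)

    pg_dump --section=pre-data --section=post-data mydb > mydb-schema.sql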
@@ -640,7 +640,7 @@ PostgreSQL documentation
 Do not dump data for any tables matching the <replaceable
 class="parameter">table</replaceable> pattern. The pattern is
 interpreted according to the same rules as for <option>-t</>.
-<option>--exclude-table-data</> can be given more than once to
+<option>--exclude-table-data</> can be given more than once to
 exclude tables matching any of several patterns. This option is
 useful when you need the definition of a particular table even
 though you do not need the data in it.
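(Hedged example of --exclude-table-data as documented above; the table and database names are made up.)

    pg_dump --exclude-table-data='audit_log' mydb > mydb-no-audit-data.sql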
@@ -363,7 +363,7 @@
 uses the word <quote>schema</> in a different meaning.)
 </para>
 <para>
-This option is equivalent to specifying
+This option is equivalent to specifying
 <option>--section=pre-data --section=post-data</>.
 </para>
 </listitem>
@@ -515,13 +515,13 @@
 <term><option>--section=<replaceable class="parameter">sectionname</replaceable></option></term>
 <listitem>
 <para>
-Only restore the named section. The name can be one of <option>pre-data</>, <option>data</>
-and <option>post-data</>.
+Only restore the named section. The name can be one of <option>pre-data</>, <option>data</>
+and <option>post-data</>.
 This option can be specified more than once. The default is to restore all sections.
 </para>
 <para>
-Post-data items consist of definitions of indexes, triggers, rules
-and constraints other than validated check constraints.
+Post-data items consist of definitions of indexes, triggers, rules
+and constraints other than validated check constraints.
 Pre-data items consist of all other data definition items.
 </para>
 </listitem>
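(And the pg_restore counterpart, again with made-up file and database names.)

    pg_restore --section=data -d mydb mydb.dump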
@@ -29,7 +29,7 @@ non-ASCII characters find using grep -P '[\x80-\xFF]'
 does not support it
 http://www.pemberley.com/janeinfo/latin1.html#latexta

-do not use numeric _UTF_ numeric character escapes (&#nnn;),
+do not use numeric _UTF_ numeric character escapes (&#nnn;),
 we can only use Latin1

 Example: Alvaro Herrera is Álvaro Herrera
@@ -431,8 +431,8 @@ ParseConfigFile(const char *config_file, const char *calling_file, bool strict,
 return false;
 }

-ereport(LOG,
-(errmsg("skipping missing configuration file \"%s\"",
+ereport(LOG,
+(errmsg("skipping missing configuration file \"%s\"",
 config_file)));
 return OK;
 }
@@ -85,7 +85,7 @@ backend_src = $(top_srcdir)/src/backend
 # compile with appropriate options to build a shared lib, we can't
 # necessarily use the same object files as the backend uses. Instead,
 # symlink the source files in here and build our own object file.
-# For some libpgport modules, this only happens if configure decides
+# For some libpgport modules, this only happens if configure decides
 # the module is needed (see filter hack in OBJS, above).

 chklocale.c crypt.c getaddrinfo.c getpeereid.c inet_aton.c inet_net_ntop.c noblock.c open.c pgsleep.c pgstrcasecmp.c snprintf.c strerror.c strlcpy.c thread.c win32error.c win32setlocale.c: % : $(top_srcdir)/src/port/%
@@ -141,7 +141,7 @@ clean distclean: clean-lib
 rm -f pg_config_paths.h
 rm -f inet_net_ntop.c noblock.c pgstrcasecmp.c thread.c
 rm -f chklocale.c crypt.c getaddrinfo.c getpeereid.c inet_aton.c open.c snprintf.c strerror.c strlcpy.c win32error.c win32setlocale.c
-rm -f pgsleep.c
+rm -f pgsleep.c
 rm -f md5.c ip.c
 rm -f encnames.c wchar.c
@@ -131,7 +131,7 @@ CREATE OR REPLACE FUNCTION plperl_sum_row_elements(rowfoo) RETURNS TEXT AS $$
 }
 else {
 $result = $row_ref->{bar};
-die "not an array reference".ref ($row_ref->{baz})
+die "not an array reference".ref ($row_ref->{baz})
 unless (is_array_ref($row_ref->{baz}));
 # process a single-dimensional array
 foreach my $elem (@{$row_ref->{baz}}) {
@@ -159,7 +159,7 @@ CREATE OR REPLACE FUNCTION plperl_sum_array_of_rows(rowbar) RETURNS TEXT AS $$
 foreach my $row_ref (@{$row_array_ref}) {
 if (ref $row_ref eq 'HASH') {
 $result += $row_ref->{bar};
-die "not an array reference".ref ($row_ref->{baz})
+die "not an array reference".ref ($row_ref->{baz})
 unless (is_array_ref($row_ref->{baz}));
 foreach my $elem (@{$row_ref->{baz}}) {
 $result += $elem unless ref $elem;
@@ -177,7 +177,7 @@ CREATE OR REPLACE FUNCTION plperl_sum_array_of_rows(rowbar) RETURNS TEXT AS $$
 }
 return $result;
 $$ LANGUAGE plperl;
-select plperl_sum_array_of_rows(ROW(ARRAY[ROW(1, ARRAY[2,3,4,5,6,7,8,9,10])::rowfoo,
+select plperl_sum_array_of_rows(ROW(ARRAY[ROW(1, ARRAY[2,3,4,5,6,7,8,9,10])::rowfoo,
 ROW(11, ARRAY[12,13,14,15,16,17,18,19,20])::rowfoo])::rowbar);
 plperl_sum_array_of_rows
--------------------------
@@ -90,7 +90,7 @@ CREATE OR REPLACE FUNCTION plperl_sum_row_elements(rowfoo) RETURNS TEXT AS $$
 }
 else {
 $result = $row_ref->{bar};
-die "not an array reference".ref ($row_ref->{baz})
+die "not an array reference".ref ($row_ref->{baz})
 unless (is_array_ref($row_ref->{baz}));
 # process a single-dimensional array
 foreach my $elem (@{$row_ref->{baz}}) {
@@ -116,7 +116,7 @@ CREATE OR REPLACE FUNCTION plperl_sum_array_of_rows(rowbar) RETURNS TEXT AS $$
 foreach my $row_ref (@{$row_array_ref}) {
 if (ref $row_ref eq 'HASH') {
 $result += $row_ref->{bar};
-die "not an array reference".ref ($row_ref->{baz})
+die "not an array reference".ref ($row_ref->{baz})
 unless (is_array_ref($row_ref->{baz}));
 foreach my $elem (@{$row_ref->{baz}}) {
 $result += $elem unless ref $elem;
@@ -135,7 +135,7 @@ CREATE OR REPLACE FUNCTION plperl_sum_array_of_rows(rowbar) RETURNS TEXT AS $$
 return $result;
 $$ LANGUAGE plperl;

-select plperl_sum_array_of_rows(ROW(ARRAY[ROW(1, ARRAY[2,3,4,5,6,7,8,9,10])::rowfoo,
+select plperl_sum_array_of_rows(ROW(ARRAY[ROW(1, ARRAY[2,3,4,5,6,7,8,9,10])::rowfoo,
 ROW(11, ARRAY[12,13,14,15,16,17,18,19,20])::rowfoo])::rowbar);

 -- check arrays as out parameters
@@ -384,7 +384,7 @@ DROP TABLE tmp2;
 -- NOT VALID with plan invalidation -- ensure we don't use a constraint for
 -- exclusion until validated
 set constraint_exclusion TO 'partition';
-create table nv_parent (d date);
+create table nv_parent (d date);
 create table nv_child_2010 () inherits (nv_parent);
 create table nv_child_2011 () inherits (nv_parent);
 alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid;
@@ -1968,7 +1968,7 @@ create view alter1.v1 as select * from alter1.t1;
 create function alter1.plus1(int) returns int as 'select $1+1' language sql;
 create domain alter1.posint integer check (value > 0);
 create type alter1.ctype as (f1 int, f2 text);
-create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql
+create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql
 as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2';
 create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype);
 create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as
@@ -8,7 +8,7 @@ CREATE SCHEMA temp_func_test;
 GRANT ALL ON SCHEMA temp_func_test TO public;
 SET search_path TO temp_func_test, public;
 --
--- ARGUMENT and RETURN TYPES
+-- ARGUMENT and RETURN TYPES
 --
 CREATE FUNCTION functest_A_1(text, date) RETURNS bool LANGUAGE 'sql'
 AS 'SELECT $1 = ''abcd'' AND $2 > ''2001-01-01''';
@@ -1183,7 +1183,7 @@ NOTICE: drop cascades to user mapping for public
 DROP SERVER t2;
 DROP USER MAPPING FOR regress_test_role SERVER s6;
 -- This test causes some order dependent cascade detail output,
--- so switch to terse mode for it.
+-- so switch to terse mode for it.
 \set VERBOSITY terse
 DROP FOREIGN DATA WRAPPER foo CASCADE;
 NOTICE: drop cascades to 5 other objects
@@ -279,10 +279,10 @@ SELECT array_to_json(array_agg(q),true) from (select x as b, x * 2 as c from gen
 (1 row)

 SELECT array_to_json(array_agg(q),false)
-FROM ( SELECT $$a$$ || x AS b, y AS c,
+FROM ( SELECT $$a$$ || x AS b, y AS c,
 ARRAY[ROW(x.*,ARRAY[1,2,3]),
-ROW(y.*,ARRAY[4,5,6])] AS z
-FROM generate_series(1,2) x,
+ROW(y.*,ARRAY[4,5,6])] AS z
+FROM generate_series(1,2) x,
 generate_series(4,5) y) q;
 array_to_json
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -308,12 +308,12 @@ SELECT row_to_json(row(1,'foo'));
 {"f1":1,"f2":"foo"}
 (1 row)

-SELECT row_to_json(q)
-FROM (SELECT $$a$$ || x AS b,
-y AS c,
+SELECT row_to_json(q)
+FROM (SELECT $$a$$ || x AS b,
+y AS c,
 ARRAY[ROW(x.*,ARRAY[1,2,3]),
-ROW(y.*,ARRAY[4,5,6])] AS z
-FROM generate_series(1,2) x,
+ROW(y.*,ARRAY[4,5,6])] AS z
+FROM generate_series(1,2) x,
 generate_series(4,5) y) q;
 row_to_json
--------------------------------------------------------------------
@@ -323,12 +323,12 @@ FROM (SELECT $$a$$ || x AS b,
 {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
 (4 rows)

-SELECT row_to_json(q,true)
-FROM (SELECT $$a$$ || x AS b,
-y AS c,
+SELECT row_to_json(q,true)
+FROM (SELECT $$a$$ || x AS b,
+y AS c,
 ARRAY[ROW(x.*,ARRAY[1,2,3]),
-ROW(y.*,ARRAY[4,5,6])] AS z
-FROM generate_series(1,2) x,
+ROW(y.*,ARRAY[4,5,6])] AS z
+FROM generate_series(1,2) x,
 generate_series(4,5) y) q;
 row_to_json
-----------------------------------------------------
@@ -349,7 +349,7 @@ FROM (SELECT $$a$$ || x AS b,
 CREATE TEMP TABLE rows AS
 SELECT x, 'txt' || x as y
 FROM generate_series(1,3) AS x;
-SELECT row_to_json(q,true)
+SELECT row_to_json(q,true)
 FROM rows q;
 row_to_json
--------------
@@ -327,7 +327,7 @@ DROP TABLE tmp2;
 -- NOT VALID with plan invalidation -- ensure we don't use a constraint for
 -- exclusion until validated
 set constraint_exclusion TO 'partition';
-create table nv_parent (d date);
+create table nv_parent (d date);
 create table nv_child_2010 () inherits (nv_parent);
 create table nv_child_2011 () inherits (nv_parent);
 alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid;
@@ -1348,7 +1348,7 @@ create domain alter1.posint integer check (value > 0);

 create type alter1.ctype as (f1 int, f2 text);

-create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql
+create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql
 as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2';

 create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype);
@@ -11,7 +11,7 @@ GRANT ALL ON SCHEMA temp_func_test TO public;
 SET search_path TO temp_func_test, public;

 --
--- ARGUMENT and RETURN TYPES
+-- ARGUMENT and RETURN TYPES
 --
 CREATE FUNCTION functest_A_1(text, date) RETURNS bool LANGUAGE 'sql'
 AS 'SELECT $1 = ''abcd'' AND $2 > ''2001-01-01''';
@@ -484,7 +484,7 @@ DROP SERVER t1 CASCADE;
 DROP SERVER t2;
 DROP USER MAPPING FOR regress_test_role SERVER s6;
 -- This test causes some order dependent cascade detail output,
--- so switch to terse mode for it.
+-- so switch to terse mode for it.
 \set VERBOSITY terse
 DROP FOREIGN DATA WRAPPER foo CASCADE;
 \set VERBOSITY default
@@ -62,10 +62,10 @@ SELECT array_to_json(array(select 1 as a));
 SELECT array_to_json(array_agg(q),false) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
 SELECT array_to_json(array_agg(q),true) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
 SELECT array_to_json(array_agg(q),false)
-FROM ( SELECT $$a$$ || x AS b, y AS c,
+FROM ( SELECT $$a$$ || x AS b, y AS c,
 ARRAY[ROW(x.*,ARRAY[1,2,3]),
-ROW(y.*,ARRAY[4,5,6])] AS z
-FROM generate_series(1,2) x,
+ROW(y.*,ARRAY[4,5,6])] AS z
+FROM generate_series(1,2) x,
 generate_series(4,5) y) q;
 SELECT array_to_json(array_agg(x),false) from generate_series(5,10) x;
 SELECT array_to_json('{{1,5},{99,100}}'::int[]);
@@ -73,27 +73,27 @@ SELECT array_to_json('{{1,5},{99,100}}'::int[]);
 -- row_to_json
 SELECT row_to_json(row(1,'foo'));

-SELECT row_to_json(q)
-FROM (SELECT $$a$$ || x AS b,
-y AS c,
+SELECT row_to_json(q)
+FROM (SELECT $$a$$ || x AS b,
+y AS c,
 ARRAY[ROW(x.*,ARRAY[1,2,3]),
-ROW(y.*,ARRAY[4,5,6])] AS z
-FROM generate_series(1,2) x,
+ROW(y.*,ARRAY[4,5,6])] AS z
+FROM generate_series(1,2) x,
 generate_series(4,5) y) q;

-SELECT row_to_json(q,true)
-FROM (SELECT $$a$$ || x AS b,
-y AS c,
+SELECT row_to_json(q,true)
+FROM (SELECT $$a$$ || x AS b,
+y AS c,
 ARRAY[ROW(x.*,ARRAY[1,2,3]),
-ROW(y.*,ARRAY[4,5,6])] AS z
-FROM generate_series(1,2) x,
+ROW(y.*,ARRAY[4,5,6])] AS z
+FROM generate_series(1,2) x,
 generate_series(4,5) y) q;

 CREATE TEMP TABLE rows AS
 SELECT x, 'txt' || x as y
 FROM generate_series(1,3) AS x;

-SELECT row_to_json(q,true)
+SELECT row_to_json(q,true)
 FROM rows q;

 SELECT row_to_json(row((select array_agg(x) as d from generate_series(5,10) x)),false);
@@ -111,4 +111,3 @@ FROM (SELECT '-Infinity'::float8 AS "float8field") q;
 -- json input
 SELECT row_to_json(q)
 FROM (SELECT '{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}'::json AS "jsonfield") q;
-
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/perl
 #################################################################
 # copyright.pl -- update copyright notices throughout the source tree, idempotently.
 #
@@ -76,7 +76,7 @@ Solution.pm module containing the code to generate the Visual
 Studio solution files.
 VCBuildProject.pm module containing the code to generate VCBuild based
 project files (Visual Studio 2005/2008)
-VSObjectFactory.pm factory module providing the code to create the
+VSObjectFactory.pm factory module providing the code to create the
 appropriate project/solution files for the current
 environment
@@ -18,7 +18,7 @@ IF NOT EXIST buildenv.pl goto nobuildenv
 perl -e "require 'buildenv.pl'; while(($k,$v) = each %%ENV) { print qq[\@SET $k=$v\n]; }" > bldenv.bat
 CALL bldenv.bat
 del bldenv.bat
-:nobuildenv
+:nobuildenv

 perl install.pl "%1"
@@ -43,7 +43,7 @@ verbose_output() {

 process_includes_in_file() {
 # loop through all includes mentioned in the file
-cat "$FILE" |
+cat "$FILE" |
 grep "^#include\>" |
 grep -v '/\* *pgrminclude *ignore *\*/' |
 sed 's/^#include[ ]*[<"]\([^>"]*\).*$/\1/g' |
@@ -80,7 +80,7 @@ compile_file() {
 grep -v "^#else" |
 grep -v "^#elif" |
 grep -v "^#endif" |
-# with #if blocks gone, now undef #defines to avoid redefine
+# with #if blocks gone, now undef #defines to avoid redefine
 # warning and failure
 sed 's/#define[ ][ ]*\([A-Za-z0-9_]*\).*$/#undef \1\n&/' >/tmp/$$a
@@ -121,7 +121,7 @@ compile_file() {
 fi
 }

-# Process include files first because they can affect the compilation
+# Process include files first because they can affect the compilation
 # of *.c files.
 (find . \( -name .git -a -prune \) -o -type f -name '*.h' -print | sort;
 find . \( -name .git -a -prune \) -o -type f -name '*.c' -print | sort) |