mirror of https://github.com/postgres/postgres.git
Remove whitespace from end of lines
@@ -632,7 +632,7 @@ HINT: Stop the postmaster and use a standalone backend to VACUUM in "mydb".
<varname>autovacuum_max_workers</> databases to be processed,
the next database will be processed as soon as the first worker finishes.
Each worker process will check each table within its database and
execute <command>VACUUM</> and/or <command>ANALYZE</> as needed.
<varname>log_autovacuum_min_duration</varname> can be used to monitor
autovacuum activity.
</para>
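The autovacuum paragraph touched above describes worker scheduling and the log_autovacuum_min_duration setting. A minimal sketch of how one might inspect those settings and recent autovacuum activity from SQL (the ordering and LIMIT are illustrative, not part of the patch):

    SHOW autovacuum_max_workers;
    SHOW log_autovacuum_min_duration;

    -- Most recently autovacuumed tables in the current database
    SELECT relname, last_autovacuum, last_autoanalyze
    FROM pg_stat_user_tables
    ORDER BY last_autovacuum DESC NULLS LAST
    LIMIT 5;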
@@ -336,7 +336,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
<para>
Not-null constraints are always copied to the new table.
<literal>CHECK</literal> constraints will be copied only if
<literal>INCLUDING CONSTRAINTS</literal> is specified.
Indexes, <literal>PRIMARY KEY</>, and <literal>UNIQUE</> constraints
on the original table will be created on the new table only if the
<literal>INCLUDING INDEXES</literal> clause is specified.
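The CREATE TABLE paragraph above describes the LIKE options. A minimal sketch of the behavior it documents (table and column names are made up):

    CREATE TABLE orders (
        id  integer PRIMARY KEY,
        qty integer NOT NULL CHECK (qty > 0)
    );

    -- NOT NULL is copied unconditionally; the CHECK constraint and the
    -- primary-key index are copied only because of the INCLUDING options.
    CREATE TABLE orders_copy
        (LIKE orders INCLUDING CONSTRAINTS INCLUDING INDEXES);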
@@ -33,7 +33,7 @@ NOTIFY <replaceable class="PARAMETER">channel</replaceable> [ , <replaceable cla
with an optional <quote>payload</> string to each client application that
has previously executed
<command>LISTEN <replaceable class="parameter">channel</></command>
for the specified channel name in the current database.
Notifications are visible to all users.
</para>

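The NOTIFY paragraph above describes delivery to sessions that have executed LISTEN on the same channel in the same database. A minimal sketch (channel name and payload are made up):

    -- In the receiving session:
    LISTEN my_channel;

    -- In the sending session:
    NOTIFY my_channel, 'rows updated';
    -- or, equivalently, via the function form:
    SELECT pg_notify('my_channel', 'rows updated');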
@@ -324,7 +324,7 @@ PostgreSQL documentation
For a consistent backup, the database server needs to support synchronized snapshots,
a feature that was introduced in <productname>PostgreSQL</productname> 9.2. With this
feature, database clients can ensure they see the same data set even though they use
different connections. <command>pg_dump -j</command> uses multiple database
connections; it connects to the database once with the master process and
once again for each worker job. Without the synchronized snapshot feature, the
different worker jobs wouldn't be guaranteed to see the same data in each connection,
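The pg_dump paragraph above relies on the synchronized-snapshot feature added in 9.2. A minimal sketch of that mechanism at the SQL level, roughly what pg_dump -j does internally (the snapshot identifier shown is made up):

    -- Master session:
    BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
    SELECT pg_export_snapshot();   -- returns an id such as '000003A1-1'

    -- Each worker session, before reading any data:
    BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
    SET TRANSACTION SNAPSHOT '000003A1-1';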
@@ -434,7 +434,7 @@ PL_TESTDB = pl_regression
CONTRIB_TESTDB = contrib_regression
ifneq ($(MODULE_big),)
CONTRIB_TESTDB_MODULE = contrib_regression_$(MODULE_big)
else
ifneq ($(MODULES),)
CONTRIB_TESTDB_MODULE = contrib_regression_$(MODULES)
else
@@ -788,10 +788,10 @@ CREATE OR REPLACE FUNCTION
pg_start_backup(label text, fast boolean DEFAULT false)
RETURNS text STRICT VOLATILE LANGUAGE internal AS 'pg_start_backup';

CREATE OR REPLACE FUNCTION
json_populate_record(base anyelement, from_json json, use_json_as_text boolean DEFAULT false)
RETURNS anyelement LANGUAGE internal STABLE AS 'json_populate_record';

CREATE OR REPLACE FUNCTION
json_populate_recordset(base anyelement, from_json json, use_json_as_text boolean DEFAULT false)
RETURNS SETOF anyelement LANGUAGE internal STABLE ROWS 100 AS 'json_populate_recordset';
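The declarations above take a row-type value and a json value. A minimal usage sketch (the composite type and JSON literals are made up):

    CREATE TYPE two_fields AS (field1 text, field2 text);

    SELECT * FROM json_populate_record(null::two_fields,
                                       '{"field1":"val1","field2":"val2"}');

    SELECT * FROM json_populate_recordset(null::two_fields,
                                          '[{"field1":"a"},{"field1":"b","field2":"c"}]');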
@@ -466,15 +466,15 @@ INSERT INTO test_json VALUES
('scalar','"a scalar"'),
('array','["zero", "one","two",null,"four","five"]'),
('object','{"field1":"val1","field2":"val2","field3":null}');
SELECT test_json -> 'x'
FROM test_json
WHERE json_type = 'scalar';
ERROR: cannot extract element from a scalar
SELECT test_json -> 'x'
FROM test_json
WHERE json_type = 'array';
ERROR: cannot extract field from a non-object
SELECT test_json -> 'x'
FROM test_json
WHERE json_type = 'object';
 ?column?
@@ -490,7 +490,7 @@ WHERE json_type = 'object';
 "val2"
(1 row)

SELECT test_json->>'field2'
FROM test_json
WHERE json_type = 'object';
 ?column?
@@ -498,11 +498,11 @@ WHERE json_type = 'object';
 val2
(1 row)

SELECT test_json -> 2
FROM test_json
WHERE json_type = 'scalar';
ERROR: cannot extract element from a scalar
SELECT test_json -> 2
FROM test_json
WHERE json_type = 'array';
 ?column?
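The regression output above exercises the json -> and ->> operators: -> yields a json value (quoted in the output), ->> yields text, and an integer right-hand operand indexes into an array. A minimal standalone sketch of the same behavior (the literals are made up):

    SELECT '{"field1":"val1","field2":"val2"}'::json -> 'field2';   -- json: "val2"
    SELECT '{"field1":"val1","field2":"val2"}'::json ->> 'field2';  -- text: val2
    SELECT '["zero","one","two"]'::json -> 2;                       -- json: "two"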
@@ -139,15 +139,15 @@ INSERT INTO test_json VALUES
('array','["zero", "one","two",null,"four","five"]'),
('object','{"field1":"val1","field2":"val2","field3":null}');

SELECT test_json -> 'x'
FROM test_json
WHERE json_type = 'scalar';

SELECT test_json -> 'x'
FROM test_json
WHERE json_type = 'array';

SELECT test_json -> 'x'
FROM test_json
WHERE json_type = 'object';

@@ -155,15 +155,15 @@ SELECT test_json->'field2'
FROM test_json
WHERE json_type = 'object';

SELECT test_json->>'field2'
FROM test_json
WHERE json_type = 'object';

SELECT test_json -> 2
FROM test_json
WHERE json_type = 'scalar';

SELECT test_json -> 2
FROM test_json
WHERE json_type = 'array';

@@ -523,8 +523,8 @@ sub mkvcbuild
my $mf = Project::read_file(
	'src\backend\utils\mb\conversion_procs\\' . $sub . '\Makefile');
my $p = $solution->AddProject($sub, 'dll', 'conversion procs');
$p->AddFile('src\backend\utils\mb\conversion_procs\\'
	. $sub . '\\'
	. $sub
	. '.c');
if ($mf =~ m{^SRCS\s*\+=\s*(.*)$}m)
@@ -1,12 +1,12 @@
--add-whitespace
--backup-and-modify-in-place
--delete-old-whitespace
--entab-leading-whitespace=4
--keep-old-blank-lines=2
--maximum-line-length=78
--nospace-for-semicolon
--opening-brace-on-new-line
--output-line-ending=unix
--paren-tightness=2
--vertical-tightness=2
--vertical-tightness-closing=2
@@ -310,7 +310,7 @@ sub post_indent
$source =~ s!
(\n$ident[^(\n]*)\n # e.g. static void
(
$ident\(\n? # func_name(
(.*,([ \t]*$comment)?\n)* # args b4 final ln
.*\);([ \t]*$comment)?$ # final line
)
@@ -6,9 +6,9 @@ or the environment.
In its simplest form, if all the required objects are installed, simply run
it without any parameters at the top of the source tree you want to process.

pgindent

If you don't have all the requirements installed, pgindent will fetch and build
them for you, if you're in a PostgreSQL source tree:


@@ -23,7 +23,7 @@ command line option --indent:
Similarly, the entab program can be specified using the PGENTAB environment
variable, or using the --entab command line option.

pgindent also needs a file containing a list of typedefs. This can be
specified using the PGTYPEDEFS environment variable, or via the command line
--typedefs option. If neither is used, it will look for it within the
current source tree, or in /usr/local/etc/typedefs.list.
@@ -40,6 +40,6 @@ src/tools/pgindent/exclude_file_patterns.
Any non-option arguments are taken as the names of files to be indented. In this
case only these files will be changed, and nothing else will be touched. If the
first non-option argument is not a .c or .h file, it is treated as the name
of a typedefs file for legacy reasons, but this use is deprecated - use the
--typedefs option instead.
