Post-pgindent cleanup

Make slightly better decisions about indentation than what pgindent
is capable of.  Mostly breaking out long function calls into one
line per argument, with a few other minor adjustments.

No functional changes - all whitespace.
pgindent ran cleanly (didn't change anything) after.
Passes all regressions.
Stephen Frost, 2013-06-01 09:38:15 -04:00
commit 551938ae22 (parent dedf7e9919)
13 changed files with 88 additions and 44 deletions
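The change applied throughout the hunks below is purely a layout one: calls whose argument lists had been packed onto as few lines as possible are rewritten with one argument per line, each aligned under the first argument. A minimal standalone sketch of the two forms (the report_relation() helper and its values are invented for illustration; nothing here is taken from the PostgreSQL sources):

#include <stdio.h>

/* Hypothetical helper, used only to illustrate the call-site layout. */
static void
report_relation(const char *nspname, const char *relname,
				unsigned int reloid, const char *tablespace)
{
	printf("relname: %s.%s: reloid: %u reltblspace: %s\n",
		   nspname, relname, reloid, tablespace);
}

int
main(void)
{
	/* Packed form: arguments filled onto as few lines as fit. */
	report_relation("public", "accounts", 16384,
					"pg_default");

	/* Broken-out form: one argument per line, aligned under the first. */
	report_relation("public",
					"accounts",
					16384,
					"pg_default");
	return 0;
}

As the commit message notes, pgindent ran cleanly over the broken-out form, so the whole commit remains whitespace-only.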

@@ -1300,7 +1300,8 @@ hstore_to_json_loose(PG_FUNCTION_ARGS)
 		 * digit as numeric - could be a zip code or similar
 		 */
 		if (src->len > 0 &&
-			!(src->data[0] == '0' && isdigit((unsigned char) src->data[1])) &&
+			!(src->data[0] == '0' &&
+			  isdigit((unsigned char) src->data[1])) &&
 			strspn(src->data, "+-0123456789Ee.") == src->len)
 		{
 			/*

@@ -441,6 +441,8 @@ print_rel_infos(RelInfoArr *rel_arr)
 
 	for (relnum = 0; relnum < rel_arr->nrels; relnum++)
 		pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n",
-			   rel_arr->rels[relnum].nspname, rel_arr->rels[relnum].relname,
-			   rel_arr->rels[relnum].reloid, rel_arr->rels[relnum].tablespace);
+			   rel_arr->rels[relnum].nspname,
+			   rel_arr->rels[relnum].relname,
+			   rel_arr->rels[relnum].reloid,
+			   rel_arr->rels[relnum].tablespace);
 }

@@ -341,10 +341,13 @@ create_new_objects(void)
 		 * pg_dump only produces its output at the end, so there is little
 		 * parallelism if using the pipe.
 		 */
-		parallel_exec_prog(log_file_name, NULL,
+		parallel_exec_prog(log_file_name,
+						   NULL,
 				  "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
-				  new_cluster.bindir, cluster_conn_opts(&new_cluster),
-				  old_db->db_name, sql_file_name);
+						   new_cluster.bindir,
+						   cluster_conn_opts(&new_cluster),
+						   old_db->db_name,
+						   sql_file_name);
 	}
 
 	/* reap all children */

@@ -53,8 +53,11 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 									  new_pgdata, old_pgdata);
 
 		for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
-			parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
-						  new_pgdata, os_info.old_tablespaces[tblnum]);
+			parallel_transfer_all_new_dbs(old_db_arr,
+										  new_db_arr,
+										  old_pgdata,
+										  new_pgdata,
+										  os_info.old_tablespaces[tblnum]);
 		/* reap all children */
 		while (reap_child(true) == true)
 			;
@@ -230,12 +233,20 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 	else
 		snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno);
 
-	snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", map->old_tablespace,
-			 map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode,
-			 type_suffix, extent_suffix);
-	snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", map->new_tablespace,
-			 map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode,
-			 type_suffix, extent_suffix);
+	snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s",
+			 map->old_tablespace,
+			 map->old_tablespace_suffix,
+			 map->old_db_oid,
+			 map->old_relfilenode,
+			 type_suffix,
+			 extent_suffix);
+	snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s",
+			 map->new_tablespace,
+			 map->new_tablespace_suffix,
+			 map->new_db_oid,
+			 map->new_relfilenode,
+			 type_suffix,
+			 extent_suffix);
 
 	/* Is it an extent, fsm, or vm file? */
 	if (type_suffix[0] != '\0' || segno != 0)

@@ -999,8 +999,12 @@ top:
 			 * this in a special way (see below).
 			 */
 			fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f\n",
-					agg->start_time, agg->cnt, agg->sum, agg->sum2,
-					agg->min_duration, agg->max_duration);
+					agg->start_time,
+					agg->cnt,
+					agg->sum,
+					agg->sum2,
+					agg->min_duration,
+					agg->max_duration);
 
 			/* move to the next inteval */
 			agg->start_time = agg->start_time + agg_interval;
@@ -1625,7 +1629,6 @@ init(bool is_no_vacuum)
 		/* have we reached the next interval (or end)? */
 		if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS))
 		{
-
 			fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n",
 					j, (int64) naccounts * scale,
 					(int) (((int64) j * 100) / (naccounts * scale)), elapsed_sec, remaining_sec);