Mirror of https://github.com/postgres/postgres.git, synced 2025-12-24 06:01:07 +03:00
Run pgindent on 9.2 source tree in preparation for first 9.3 commit-fest.

auto_explain: whitespace/alignment-only reindentation of the auto_explain_log_* option declarations (log_analyze, log_verbose, log_buffers, log_timing, log_format, log_nested_statements).
dblink, storeHandler(): alignment-only changes around the per-column string copying loop (the comment about allocating all strings with one palloc, the pbuflen initialization, and the local declaration of len).
file_fdw: reindented the argument continuation lines of the FDW callback forward declarations (fileGetForeignRelSize, fileGetForeignPaths, fileGetForeignPlan, estimate_costs, file_acquire_sample_rows) and of the make_foreignscan() call and the path-construction arguments in fileGetForeignPaths(); realigned local declarations and errhint() continuations in file_fdw_validator(), get_file_fdw_attribute_options(), fileAnalyzeForeignTable() and file_acquire_sample_rows(); rewrapped comments in fileGetForeignRelSize(), fileGetForeignPaths(), fileAnalyzeForeignTable(), estimate_size() and file_acquire_sample_rows().
pg_archivecleanup: realigned the option/default declarations (debug, dryrun, additional_ext, archiveLocation, restartWALFileName) and the strspn() continuation line in CleanupPriorWALFiles(); rewrapped the comments about reusing the original file name, about the dry-run printout, and about the -x (extension to remove from xlog file names) option in main().
pg_stat_statements: realigned the members of the Counters, pgssSharedState, pgssLocationLen and pgssJumbleState structs; changed "pgssJumbleState * jstate" to "pgssJumbleState *jstate" in the declarations and definitions of pgss_store(), AppendJumble(), JumbleQuery(), JumbleRangeTable(), JumbleExpr(), RecordConstLocation(), generate_normalized_query() and fill_in_constant_lengths(); rewrapped comments in pgss_shmem_startup(), pgss_post_parse_analyze(), pgss_ProcessUtility(), JumbleExpr() and fill_in_constant_lengths(); realigned string and argument continuation lines in the dump-file error path, pgss_ExecutorEnd() and the entry_alloc() declaration.
pg_test_fsync: realigned the backslash continuations of the START_TIMER macro (both the alarm()-based version and the WIN32 thread-based version) and adjusted whitespace around the forward declarations (test_sync, test_open_syncs, test_open_sync, test_file_descriptor_sync, process_alarm).
pg_test_timing.c: rewrapped the file header comment, realigned the option declarations in handle_args(), split the multi-variable declarations in test_timing() one identifier per line (prev, cur; diff, i, bits, found; start_time, end_time, temp), and realigned a printf() continuation line and an error-message continuation in handle_args().
pg_trgm: rewrapped the comment in gtrgm_consistent() describing the cached strategy/query/TRGM structure and the comment in gtrgm_distance() about caching the generated trigrams across calls.
pg_upgrade (version checks and analyze-script generation): realigned string-literal and argument continuation lines in issue_warnings(), output_completion_banner(), create_script_for_cluster_analyze() (the long series of echo/vacuumdb fprintf calls), check_for_isn_and_int8_passing_mismatch() and check_for_reg_data_type_usage(); rewrapped the comments in check_cluster_versions() and check_for_reg_data_type_usage(); split the multi-variable declarations in get_bin_version() (cmd, cmd_output; pre_dot, post_dot) one identifier per line.
|
||||
@@ -129,6 +129,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
|
||||
pg_log(PG_VERBOSE, "%s", bufin);
|
||||
|
||||
#ifdef WIN32
|
||||
|
||||
/*
|
||||
* Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does
|
||||
* work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a
|
||||
@@ -506,7 +507,7 @@ check_control_data(ControlData *oldctrl,
|
||||
* This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
|
||||
*/
|
||||
pg_log(PG_FATAL,
|
||||
"You will need to rebuild the new server with configure option\n"
|
||||
"You will need to rebuild the new server with configure option\n"
|
||||
"--disable-integer-datetimes or get server binaries built with those\n"
|
||||
"options.\n");
|
||||
}
|
||||
@@ -531,6 +532,6 @@ disable_old_cluster(void)
|
||||
pg_log(PG_REPORT, "\n"
|
||||
"If you want to start the old cluster, you will need to remove\n"
|
||||
"the \".old\" suffix from %s/global/pg_control.old.\n"
|
||||
"Because \"link\" mode was used, the old cluster cannot be safely\n"
|
||||
"started once the new cluster has been started.\n\n", old_cluster.pgdata);
|
||||
"Because \"link\" mode was used, the old cluster cannot be safely\n"
|
||||
"started once the new cluster has been started.\n\n", old_cluster.pgdata);
|
||||
}
|
||||
|
||||
@@ -18,8 +18,9 @@
|
||||
static void check_data_dir(const char *pg_data);
|
||||
static void check_bin_dir(ClusterInfo *cluster);
|
||||
static void validate_exec(const char *dir, const char *cmdName);
|
||||
|
||||
#ifdef WIN32
|
||||
static int win32_check_directory_write_permissions(void);
|
||||
static int win32_check_directory_write_permissions(void);
|
||||
#endif
|
||||
|
||||
|
||||
@@ -64,7 +65,7 @@ exec_prog(bool throw_error, bool is_priv,
|
||||
pg_log(throw_error ? PG_FATAL : PG_REPORT,
|
||||
"Consult the last few lines of \"%s\" for\n"
|
||||
"the probable cause of the failure.\n",
|
||||
log_file);
|
||||
log_file);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -142,12 +143,12 @@ verify_directories(void)
|
||||
static int
|
||||
win32_check_directory_write_permissions(void)
|
||||
{
|
||||
int fd;
|
||||
int fd;
|
||||
|
||||
/*
|
||||
* We open a file we would normally create anyway. We do this even in
|
||||
* 'check' mode, which isn't ideal, but this is the best we can do.
|
||||
*/
|
||||
* We open a file we would normally create anyway. We do this even in
|
||||
* 'check' mode, which isn't ideal, but this is the best we can do.
|
||||
*/
|
||||
if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0)
|
||||
return -1;
|
||||
close(fd);
|
||||
@@ -184,7 +185,7 @@ check_data_dir(const char *pg_data)
|
||||
struct stat statBuf;
|
||||
|
||||
snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data,
|
||||
/* Win32 can't stat() a directory with a trailing slash. */
|
||||
/* Win32 can't stat() a directory with a trailing slash. */
|
||||
*requiredSubdirs[subdirnum] ? "/" : "",
|
||||
requiredSubdirs[subdirnum]);
|
||||
|
||||
|
||||
@@ -233,7 +233,7 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
|
||||
* large number of times.
|
||||
*/
|
||||
int
|
||||
load_directory(const char *dirname, struct dirent ***namelist)
|
||||
load_directory(const char *dirname, struct dirent *** namelist)
|
||||
{
|
||||
DIR *dirdesc;
|
||||
struct dirent *direntry;
|
||||
@@ -251,7 +251,7 @@ load_directory(const char *dirname, struct dirent ***namelist)
|
||||
count++;
|
||||
|
||||
*namelist = (struct dirent **) realloc((void *) (*namelist),
|
||||
(size_t) ((name_num + 1) * sizeof(struct dirent *)));
|
||||
(size_t) ((name_num + 1) * sizeof(struct dirent *)));
|
||||
|
||||
if (*namelist == NULL)
|
||||
{
|
||||
@@ -314,7 +314,6 @@ win32_pghardlink(const char *src, const char *dst)
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
@@ -322,13 +321,11 @@ win32_pghardlink(const char *src, const char *dst)
|
||||
FILE *
|
||||
fopen_priv(const char *path, const char *mode)
|
||||
{
|
||||
mode_t old_umask = umask(S_IRWXG | S_IRWXO);
|
||||
FILE *fp;
|
||||
mode_t old_umask = umask(S_IRWXG | S_IRWXO);
|
||||
FILE *fp;
|
||||
|
||||
fp = fopen(path, mode);
|
||||
umask(old_umask);
|
||||
|
||||
return fp;
|
||||
}
|
||||
|
||||
|
||||
|
||||
pg_upgrade (loadable-library checks): rewrapped the comments in get_loadable_libraries() about not excluding the pg_catalog schema and about the pre-8.1 public-schema plpython_call_handler problem, and the comment in check_loadable_libraries() about the plpython/plpython2 library rename; realigned the long plpython warning text, the pg_upgrade_support fatal message, the query continuation lines, and assorted declarations.
pg_upgrade (info, options and main program): rewrapped the TOAST-table-naming comment in gen_db_file_maps() and realigned query continuations in get_db_infos() and get_rel_infos(); realigned declarations and rewrapped comments in parseCommandLine(), check_required_directory() and adjust_data_dir() (splitting multi-variable declarations one identifier per line); rewrapped comments and realigned exec_prog()/pg_log() string continuations in main(), prepare_new_cluster(), prepare_new_databases() and cleanup(), and the output_files[] declaration.
@@ -75,7 +75,7 @@ extern char *output_files[];
|
||||
#define RM_CMD "rm -f"
|
||||
#define RMDIR_CMD "rm -rf"
|
||||
#define SCRIPT_EXT "sh"
|
||||
#define ECHO_QUOTE "'"
|
||||
#define ECHO_QUOTE "'"
|
||||
#else
|
||||
#define pg_copy_file CopyFile
|
||||
#define pg_mv_file pgrename
|
||||
@@ -85,7 +85,7 @@ extern char *output_files[];
|
||||
#define RMDIR_CMD "RMDIR /s/q"
|
||||
#define SCRIPT_EXT "bat"
|
||||
#define EXE_EXT ".exe"
|
||||
#define ECHO_QUOTE ""
|
||||
#define ECHO_QUOTE ""
|
||||
#endif
|
||||
|
||||
#define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \
|
||||
@@ -98,7 +98,7 @@ extern char *output_files[];
|
||||
/* postmaster/postgres -b (binary_upgrade) flag added during PG 9.1 development */
|
||||
#define BINARY_UPGRADE_SERVER_FLAG_CAT_VER 201104251
|
||||
/*
|
||||
* Visibility map changed with this 9.2 commit,
|
||||
* Visibility map changed with this 9.2 commit,
|
||||
* 8f9fe6edce358f7904e0db119416b4d1080a83aa; pick later catalog version.
|
||||
*/
|
||||
#define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031
|
||||
@@ -114,7 +114,7 @@ typedef struct
|
||||
Oid reloid; /* relation oid */
|
||||
Oid relfilenode; /* relation relfile node */
|
||||
/* relation tablespace path, or "" for the cluster default */
|
||||
char tablespace[MAXPGPATH];
|
||||
char tablespace[MAXPGPATH];
|
||||
} RelInfo;
|
||||
|
||||
typedef struct
|
||||
@@ -222,9 +222,11 @@ typedef struct
|
||||
ControlData controldata; /* pg_control information */
|
||||
DbInfoArr dbarr; /* dbinfos array */
|
||||
char *pgdata; /* pathname for cluster's $PGDATA directory */
|
||||
char *pgconfig; /* pathname for cluster's config file directory */
|
||||
char *pgconfig; /* pathname for cluster's config file
|
||||
* directory */
|
||||
char *bindir; /* pathname for cluster's executable directory */
|
||||
char *pgopts; /* options to pass to the server, like pg_ctl -o */
|
||||
char *pgopts; /* options to pass to the server, like pg_ctl
|
||||
* -o */
|
||||
unsigned short port; /* port number where postmaster is waiting */
|
||||
uint32 major_version; /* PG_VERSION of cluster */
|
||||
char major_version_str[64]; /* string PG_VERSION of cluster */
|
||||
@@ -291,8 +293,8 @@ void check_old_cluster(bool live_check,
|
||||
void check_new_cluster(void);
|
||||
void report_clusters_compatible(void);
|
||||
void issue_warnings(char *sequence_script_file_name);
|
||||
void output_completion_banner(char *analyze_script_file_name,
|
||||
char *deletion_script_file_name);
|
||||
void output_completion_banner(char *analyze_script_file_name,
|
||||
char *deletion_script_file_name);
|
||||
void check_cluster_versions(void);
|
||||
void check_cluster_compatibility(bool live_check);
|
||||
void create_script_for_old_cluster_deletion(char **deletion_script_file_name);
|
||||
@@ -314,9 +316,10 @@ void split_old_dump(void);
|
||||
|
||||
/* exec.c */
|
||||
|
||||
int exec_prog(bool throw_error, bool is_priv,
|
||||
const char *log_file, const char *cmd, ...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
|
||||
int
|
||||
exec_prog(bool throw_error, bool is_priv,
|
||||
const char *log_file, const char *cmd,...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
|
||||
void verify_directories(void);
|
||||
bool is_server_running(const char *datadir);
|
||||
|
||||
@@ -353,14 +356,14 @@ const char *setupPageConverter(pageCnvCtx **result);
|
||||
typedef void *pageCnvCtx;
|
||||
#endif
|
||||
|
||||
int load_directory(const char *dirname, struct dirent ***namelist);
|
||||
int load_directory(const char *dirname, struct dirent *** namelist);
|
||||
const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
|
||||
const char *dst, bool force);
|
||||
const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
|
||||
const char *dst);
|
||||
|
||||
void check_hard_link(void);
|
||||
FILE *fopen_priv(const char *path, const char *mode);
|
||||
FILE *fopen_priv(const char *path, const char *mode);
|
||||
|
||||
/* function.c */
|
||||
|
||||
@@ -399,8 +402,9 @@ void init_tablespaces(void);
|
||||
/* server.c */
|
||||
|
||||
PGconn *connectToServer(ClusterInfo *cluster, const char *db_name);
|
||||
PGresult *executeQueryOrDie(PGconn *conn, const char *fmt, ...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
|
||||
PGresult *
|
||||
executeQueryOrDie(PGconn *conn, const char *fmt,...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
|
||||
|
||||
void start_postmaster(ClusterInfo *cluster);
|
||||
void stop_postmaster(bool fast);
|
||||
@@ -413,12 +417,15 @@ void check_pghost_envvar(void);
|
||||
char *quote_identifier(const char *s);
|
||||
int get_user_info(char **user_name);
|
||||
void check_ok(void);
|
||||
void report_status(eLogType type, const char *fmt, ...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
|
||||
void pg_log(eLogType type, char *fmt, ...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
|
||||
void prep_status(const char *fmt, ...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
|
||||
void
|
||||
report_status(eLogType type, const char *fmt,...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
|
||||
void
|
||||
pg_log(eLogType type, char *fmt,...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
|
||||
void
|
||||
prep_status(const char *fmt,...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
|
||||
void check_ok(void);
|
||||
char *pg_strdup(const char *s);
|
||||
void *pg_malloc(int size);
|
||||
|
||||
@@ -34,26 +34,28 @@ const char *
|
||||
transfer_all_new_dbs(DbInfoArr *old_db_arr,
|
||||
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
|
||||
{
|
||||
int old_dbnum, new_dbnum;
|
||||
int old_dbnum,
|
||||
new_dbnum;
|
||||
const char *msg = NULL;
|
||||
|
||||
prep_status("%s user relation files\n",
|
||||
user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
|
||||
user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
|
||||
|
||||
/* Scan the old cluster databases and transfer their files */
|
||||
for (old_dbnum = new_dbnum = 0;
|
||||
old_dbnum < old_db_arr->ndbs;
|
||||
old_dbnum++, new_dbnum++)
|
||||
{
|
||||
DbInfo *old_db = &old_db_arr->dbs[old_dbnum], *new_db = NULL;
|
||||
DbInfo *old_db = &old_db_arr->dbs[old_dbnum],
|
||||
*new_db = NULL;
|
||||
FileNameMap *mappings;
|
||||
int n_maps;
|
||||
pageCnvCtx *pageConverter = NULL;
|
||||
|
||||
/*
|
||||
* Advance past any databases that exist in the new cluster
|
||||
* but not in the old, e.g. "postgres". (The user might
|
||||
* have removed the 'postgres' database from the old cluster.)
|
||||
* Advance past any databases that exist in the new cluster but not in
|
||||
* the old, e.g. "postgres". (The user might have removed the
|
||||
* 'postgres' database from the old cluster.)
|
||||
*/
|
||||
for (; new_dbnum < new_db_arr->ndbs; new_dbnum++)
|
||||
{
|
||||
@@ -83,8 +85,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
|
||||
}
|
||||
}
|
||||
|
||||
prep_status(" "); /* in case nothing printed; pass a space so gcc
|
||||
* doesn't complain about empty format
|
||||
prep_status(" "); /* in case nothing printed; pass a space so
|
||||
* gcc doesn't complain about empty format
|
||||
* string */
|
||||
check_ok();
|
||||
|
||||
@@ -137,14 +139,14 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
|
||||
int mapnum;
|
||||
int fileno;
|
||||
bool vm_crashsafe_change = false;
|
||||
|
||||
|
||||
old_dir[0] = '\0';
|
||||
|
||||
/* Do not copy non-crashsafe vm files for binaries that assume crashsafety */
|
||||
if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
|
||||
new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
|
||||
vm_crashsafe_change = true;
|
||||
|
||||
|
||||
for (mapnum = 0; mapnum < size; mapnum++)
|
||||
{
|
||||
char old_file[MAXPGPATH];
|
||||
@@ -190,8 +192,8 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
|
||||
|
||||
for (fileno = 0; fileno < numFiles; fileno++)
|
||||
{
|
||||
char *vm_offset = strstr(namelist[fileno]->d_name, "_vm");
|
||||
bool is_vm_file = false;
|
||||
char *vm_offset = strstr(namelist[fileno]->d_name, "_vm");
|
||||
bool is_vm_file = false;
|
||||
|
||||
/* Is a visibility map file? (name ends with _vm) */
|
||||
if (vm_offset && strlen(vm_offset) == strlen("_vm"))
|
||||
|
||||
@@ -161,7 +161,7 @@ start_postmaster(ClusterInfo *cluster)
|
||||
snprintf(cmd, sizeof(cmd),
|
||||
SYSTEMQUOTE "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" "
|
||||
"-o \"-p %d %s %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE,
|
||||
cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
|
||||
cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
|
||||
(cluster->controldata.cat_ver >=
|
||||
BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
|
||||
"-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
|
||||
@@ -172,11 +172,11 @@ start_postmaster(ClusterInfo *cluster)
|
||||
* it might supply a reason for the failure.
|
||||
*/
|
||||
pg_ctl_return = exec_prog(false, true,
|
||||
/* pass both file names if the differ */
|
||||
(strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ?
|
||||
SERVER_LOG_FILE :
|
||||
SERVER_LOG_FILE " or " SERVER_START_LOG_FILE,
|
||||
"%s", cmd);
|
||||
/* pass both file names if the differ */
|
||||
(strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ?
|
||||
SERVER_LOG_FILE :
|
||||
SERVER_LOG_FILE " or " SERVER_START_LOG_FILE,
|
||||
"%s", cmd);
|
||||
|
||||
/* Check to see if we can connect to the server; if not, report it. */
|
||||
if ((conn = get_db_conn(cluster, "template1")) == NULL ||
|
||||
@@ -211,14 +211,14 @@ stop_postmaster(bool fast)
|
||||
else if (os_info.running_cluster == &new_cluster)
|
||||
cluster = &new_cluster;
|
||||
else
|
||||
return; /* no cluster running */
|
||||
return; /* no cluster running */
|
||||
|
||||
snprintf(cmd, sizeof(cmd),
|
||||
SYSTEMQUOTE "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" "
|
||||
"%s stop >> \"%s\" 2>&1" SYSTEMQUOTE,
|
||||
cluster->bindir, cluster->pgconfig,
|
||||
cluster->pgopts ? cluster->pgopts : "",
|
||||
fast ? "-m fast" : "", SERVER_STOP_LOG_FILE);
|
||||
fast ? "-m fast" : "", SERVER_STOP_LOG_FILE);
|
||||
|
||||
exec_prog(fast ? false : true, true, SERVER_STOP_LOG_FILE, "%s", cmd);
|
||||
|
||||
|
||||
@@ -52,8 +52,8 @@ get_tablespace_paths(void)
|
||||
"WHERE spcname != 'pg_default' AND "
|
||||
" spcname != 'pg_global'",
|
||||
/* 9.2 removed the spclocation column */
|
||||
(GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
|
||||
"spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");
|
||||
(GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
|
||||
"spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");
|
||||
|
||||
res = executeQueryOrDie(conn, "%s", query);
|
||||
|
||||
|
||||
@@ -60,10 +60,10 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
|
||||
" NOT a.attisdropped AND "
|
||||
" a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND "
|
||||
" c.relnamespace = n.oid AND "
|
||||
/* exclude possible orphaned temp tables */
|
||||
/* exclude possible orphaned temp tables */
|
||||
" n.nspname !~ '^pg_temp_' AND "
|
||||
" n.nspname !~ '^pg_toast_temp_' AND "
|
||||
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
|
||||
" n.nspname !~ '^pg_toast_temp_' AND "
|
||||
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
|
||||
|
||||
ntups = PQntuples(res);
|
||||
i_nspname = PQfnumber(res, "nspname");
|
||||
@@ -98,9 +98,9 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
|
||||
pg_log(PG_REPORT, "fatal\n");
|
||||
pg_log(PG_FATAL,
|
||||
"Your installation contains the \"name\" data type in user tables. This\n"
|
||||
"data type changed its internal alignment between your old and new\n"
|
||||
"data type changed its internal alignment between your old and new\n"
|
||||
"clusters so this cluster cannot currently be upgraded. You can remove\n"
|
||||
"the problem tables and restart the upgrade. A list of the problem\n"
|
||||
"the problem tables and restart the upgrade. A list of the problem\n"
|
||||
"columns is in the file:\n"
|
||||
" %s\n\n", output_path);
|
||||
}
|
||||
@@ -150,10 +150,10 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
|
||||
" NOT a.attisdropped AND "
|
||||
" a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND "
|
||||
" c.relnamespace = n.oid AND "
|
||||
/* exclude possible orphaned temp tables */
|
||||
/* exclude possible orphaned temp tables */
|
||||
" n.nspname !~ '^pg_temp_' AND "
|
||||
" n.nspname !~ '^pg_toast_temp_' AND "
|
||||
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
|
||||
" n.nspname !~ '^pg_toast_temp_' AND "
|
||||
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
|
||||
|
||||
ntups = PQntuples(res);
|
||||
i_nspname = PQfnumber(res, "nspname");
|
||||
@@ -189,7 +189,7 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
|
||||
pg_log(PG_FATAL,
|
||||
"Your installation contains the \"tsquery\" data type. This data type\n"
|
||||
"added a new internal field between your old and new clusters so this\n"
|
||||
"cluster cannot currently be upgraded. You can remove the problem\n"
|
||||
"cluster cannot currently be upgraded. You can remove the problem\n"
|
||||
"columns and restart the upgrade. A list of the problem columns is in the\n"
|
||||
"file:\n"
|
||||
" %s\n\n", output_path);
|
||||
@@ -328,10 +328,10 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
|
||||
" NOT a.attisdropped AND "
|
||||
" a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND "
|
||||
" c.relnamespace = n.oid AND "
|
||||
/* exclude possible orphaned temp tables */
|
||||
/* exclude possible orphaned temp tables */
|
||||
" n.nspname !~ '^pg_temp_' AND "
|
||||
" n.nspname !~ '^pg_toast_temp_' AND "
|
||||
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
|
||||
" n.nspname !~ '^pg_toast_temp_' AND "
|
||||
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
|
||||
|
||||
/*
|
||||
* This macro is used below to avoid reindexing indexes already rebuilt
|
||||
@@ -527,7 +527,7 @@ old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode)
|
||||
"must be reindexed with the REINDEX command. The file:\n"
|
||||
" %s\n"
|
||||
"when executed by psql by the database superuser will recreate all invalid\n"
|
||||
"indexes; until then, none of these indexes will be used.\n\n",
|
||||
"indexes; until then, none of these indexes will be used.\n\n",
|
||||
output_path);
|
||||
}
|
||||
else
|
||||
@@ -648,10 +648,10 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
|
||||
pg_log(PG_WARNING, "\n"
|
||||
"Your installation contains indexes using \"bpchar_pattern_ops\". These\n"
|
||||
"indexes have different internal formats between your old and new clusters\n"
|
||||
"so they must be reindexed with the REINDEX command. The file:\n"
|
||||
"so they must be reindexed with the REINDEX command. The file:\n"
|
||||
" %s\n"
|
||||
"when executed by psql by the database superuser will recreate all invalid\n"
|
||||
"indexes; until then, none of these indexes will be used.\n\n",
|
||||
"indexes; until then, none of these indexes will be used.\n\n",
|
||||
output_path);
|
||||
}
|
||||
else
|
||||
@@ -699,10 +699,10 @@ old_8_3_create_sequence_script(ClusterInfo *cluster)
|
||||
" pg_catalog.pg_namespace n "
|
||||
"WHERE c.relkind = 'S' AND "
|
||||
" c.relnamespace = n.oid AND "
|
||||
/* exclude possible orphaned temp tables */
|
||||
/* exclude possible orphaned temp tables */
|
||||
" n.nspname !~ '^pg_temp_' AND "
|
||||
" n.nspname !~ '^pg_toast_temp_' AND "
|
||||
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
|
||||
" n.nspname !~ '^pg_toast_temp_' AND "
|
||||
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
|
||||
|
||||
ntups = PQntuples(res);
|
||||
i_nspname = PQfnumber(res, "nspname");
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
typedef struct win32_pthread *pthread_t;
|
||||
typedef int pthread_attr_t;
|
||||
|
||||
static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
|
||||
static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
|
||||
static int pthread_join(pthread_t th, void **thread_return);
|
||||
#elif defined(ENABLE_THREAD_SAFETY)
|
||||
/* Use platform-dependent pthread capability */
|
||||
@@ -84,7 +84,7 @@ static int pthread_join(pthread_t th, void **thread_return);
|
||||
typedef struct fork_pthread *pthread_t;
|
||||
typedef int pthread_attr_t;
|
||||
|
||||
static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
|
||||
static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
|
||||
static int pthread_join(pthread_t th, void **thread_return);
|
||||
#endif
|
||||
|
||||
@@ -198,7 +198,7 @@ typedef struct
|
||||
instr_time start_time; /* thread start time */
|
||||
instr_time *exec_elapsed; /* time spent executing cmds (per Command) */
|
||||
int *exec_count; /* number of cmd executions (per Command) */
|
||||
unsigned short random_state[3]; /* separate randomness for each thread */
|
||||
unsigned short random_state[3]; /* separate randomness for each thread */
|
||||
} TState;
|
||||
|
||||
#define INVALID_THREAD ((pthread_t) 0)
|
||||
@@ -1075,7 +1075,7 @@ top:
|
||||
|
||||
/*
|
||||
* getrand() neeeds to be able to subtract max from min and add
|
||||
* one the result without overflowing. Since we know max > min,
|
||||
* one the result without overflowing. Since we know max > min,
|
||||
* we can detect overflow just by checking for a negative result.
|
||||
* But we must check both that the subtraction doesn't overflow,
|
||||
* and that adding one to the result doesn't overflow either.
|
||||
@@ -1267,10 +1267,11 @@ init(void)
|
||||
* versions. Since pgbench has never pretended to be fully TPC-B
|
||||
* compliant anyway, we stick with the historical behavior.
|
||||
*/
|
||||
struct ddlinfo {
|
||||
char *table;
|
||||
char *cols;
|
||||
int declare_fillfactor;
|
||||
struct ddlinfo
|
||||
{
|
||||
char *table;
|
||||
char *cols;
|
||||
int declare_fillfactor;
|
||||
};
|
||||
struct ddlinfo DDLs[] = {
|
||||
{
|
||||
@@ -1321,15 +1322,16 @@ init(void)
|
||||
/* Construct new create table statement. */
|
||||
opts[0] = '\0';
|
||||
if (ddl->declare_fillfactor)
|
||||
snprintf(opts+strlen(opts), 256-strlen(opts),
|
||||
" with (fillfactor=%d)", fillfactor);
|
||||
snprintf(opts + strlen(opts), 256 - strlen(opts),
|
||||
" with (fillfactor=%d)", fillfactor);
|
||||
if (tablespace != NULL)
|
||||
{
|
||||
char *escape_tablespace;
|
||||
char *escape_tablespace;
|
||||
|
||||
escape_tablespace = PQescapeIdentifier(con, tablespace,
|
||||
strlen(tablespace));
|
||||
snprintf(opts+strlen(opts), 256-strlen(opts),
|
||||
" tablespace %s", escape_tablespace);
|
||||
snprintf(opts + strlen(opts), 256 - strlen(opts),
|
||||
" tablespace %s", escape_tablespace);
|
||||
PQfreemem(escape_tablespace);
|
||||
}
|
||||
snprintf(buffer, 256, "create%s table %s(%s)%s",
|
||||
@@ -1404,17 +1406,18 @@ init(void)
|
||||
fprintf(stderr, "set primary key...\n");
|
||||
for (i = 0; i < lengthof(DDLAFTERs); i++)
|
||||
{
|
||||
char buffer[256];
|
||||
char buffer[256];
|
||||
|
||||
strncpy(buffer, DDLAFTERs[i], 256);
|
||||
|
||||
if (index_tablespace != NULL)
|
||||
{
|
||||
char *escape_tablespace;
|
||||
char *escape_tablespace;
|
||||
|
||||
escape_tablespace = PQescapeIdentifier(con, index_tablespace,
|
||||
strlen(index_tablespace));
|
||||
snprintf(buffer+strlen(buffer), 256-strlen(buffer),
|
||||
" using index tablespace %s", escape_tablespace);
|
||||
snprintf(buffer + strlen(buffer), 256 - strlen(buffer),
|
||||
" using index tablespace %s", escape_tablespace);
|
||||
PQfreemem(escape_tablespace);
|
||||
}
|
||||
|
||||
@@ -1861,10 +1864,10 @@ main(int argc, char **argv)
|
||||
int i;
|
||||
|
||||
static struct option long_options[] = {
|
||||
{"index-tablespace", required_argument, NULL, 3},
|
||||
{"tablespace", required_argument, NULL, 2},
|
||||
{"unlogged-tables", no_argument, &unlogged_tables, 1},
|
||||
{NULL, 0, NULL, 0}
|
||||
{"index-tablespace", required_argument, NULL, 3},
|
||||
{"tablespace", required_argument, NULL, 2},
|
||||
{"unlogged-tables", no_argument, &unlogged_tables, 1},
|
||||
{NULL, 0, NULL, 0}
|
||||
};
|
||||
|
||||
#ifdef HAVE_GETRLIMIT
|
||||
@@ -2065,10 +2068,10 @@ main(int argc, char **argv)
|
||||
case 0:
|
||||
/* This covers long options which take no argument. */
|
||||
break;
|
||||
case 2: /* tablespace */
|
||||
case 2: /* tablespace */
|
||||
tablespace = optarg;
|
||||
break;
|
||||
case 3: /* index-tablespace */
|
||||
case 3: /* index-tablespace */
|
||||
index_tablespace = optarg;
|
||||
break;
|
||||
default:
|
||||
@@ -2571,7 +2574,7 @@ typedef struct fork_pthread
|
||||
|
||||
static int
|
||||
pthread_create(pthread_t *thread,
|
||||
pthread_attr_t * attr,
|
||||
pthread_attr_t *attr,
|
||||
void *(*start_routine) (void *),
|
||||
void *arg)
|
||||
{
|
||||
@@ -2687,7 +2690,7 @@ win32_pthread_run(void *arg)
|
||||
|
||||
static int
|
||||
pthread_create(pthread_t *thread,
|
||||
pthread_attr_t * attr,
|
||||
pthread_attr_t *attr,
|
||||
void *(*start_routine) (void *),
|
||||
void *arg)
|
||||
{
|
||||
|
||||
@@ -34,8 +34,8 @@ char *
|
||||
px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen)
|
||||
{
|
||||
static char *magic = "$1$"; /* This string is magic for this algorithm.
|
||||
* Having it this way, we can get better
|
||||
* later on */
|
||||
* Having it this way, we can get better later
|
||||
* on */
|
||||
static char *p;
|
||||
static const char *sp,
|
||||
*ep;
|
||||
|
||||
@@ -204,8 +204,9 @@ const char *px_resolve_alias(const PX_Alias *aliases, const char *name);
|
||||
void px_set_debug_handler(void (*handler) (const char *));
|
||||
|
||||
#ifdef PX_DEBUG
|
||||
void px_debug(const char *fmt, ...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
|
||||
void
|
||||
px_debug(const char *fmt,...)
|
||||
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
|
||||
#else
|
||||
#define px_debug(...)
|
||||
#endif
|
||||
|
||||
@@ -95,7 +95,7 @@ pgstatindex(PG_FUNCTION_ARGS)
|
||||
BlockNumber nblocks;
|
||||
BlockNumber blkno;
|
||||
BTIndexStat indexStat;
|
||||
BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
|
||||
BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
|
||||
|
||||
if (!superuser())
|
||||
ereport(ERROR,
|
||||
@@ -160,7 +160,7 @@ pgstatindex(PG_FUNCTION_ARGS)
|
||||
CHECK_FOR_INTERRUPTS();
|
||||
|
||||
/* Read and lock buffer */
|
||||
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
|
||||
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
|
||||
LockBuffer(buffer, BUFFER_LOCK_SHARE);
|
||||
|
||||
page = BufferGetPage(buffer);
|
||||
|
||||
@@ -62,7 +62,7 @@ typedef struct pgstattuple_type
|
||||
} pgstattuple_type;
|
||||
|
||||
typedef void (*pgstat_page) (pgstattuple_type *, Relation, BlockNumber,
|
||||
BufferAccessStrategy);
|
||||
BufferAccessStrategy);
|
||||
|
||||
static Datum build_pgstattuple_type(pgstattuple_type *stat,
|
||||
FunctionCallInfo fcinfo);
|
||||
|
||||
@@ -32,19 +32,19 @@ void
|
||||
sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
|
||||
{
|
||||
Relation rel;
|
||||
ScanKeyData skey;
|
||||
SysScanDesc sscan;
|
||||
ScanKeyData skey;
|
||||
SysScanDesc sscan;
|
||||
HeapTuple tuple;
|
||||
char *tcontext;
|
||||
char *ncontext;
|
||||
char audit_name[NAMEDATALEN + 20];
|
||||
ObjectAddress object;
|
||||
Form_pg_database datForm;
|
||||
ObjectAddress object;
|
||||
Form_pg_database datForm;
|
||||
|
||||
/*
|
||||
* Oid of the source database is not saved in pg_database catalog,
|
||||
* so we collect its identifier using contextual information.
|
||||
* If NULL, its default is "template1" according to createdb().
|
||||
* Oid of the source database is not saved in pg_database catalog, so we
|
||||
* collect its identifier using contextual information. If NULL, its
|
||||
* default is "template1" according to createdb().
|
||||
*/
|
||||
if (!dtemplate)
|
||||
dtemplate = "template1";
|
||||
@@ -56,6 +56,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
|
||||
tcontext = sepgsql_get_label(object.classId,
|
||||
object.objectId,
|
||||
object.objectSubId);
|
||||
|
||||
/*
|
||||
* check db_database:{getattr} permission
|
||||
*/
|
||||
@@ -67,11 +68,11 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
|
||||
true);
|
||||
|
||||
/*
|
||||
* Compute a default security label of the newly created database
|
||||
* based on a pair of security label of client and source database.
|
||||
* Compute a default security label of the newly created database based on
|
||||
* a pair of security label of client and source database.
|
||||
*
|
||||
* XXX - uncoming version of libselinux supports to take object
|
||||
* name to handle special treatment on default security label.
|
||||
* XXX - uncoming version of libselinux supports to take object name to
|
||||
* handle special treatment on default security label.
|
||||
*/
|
||||
rel = heap_open(DatabaseRelationId, AccessShareLock);
|
||||
|
||||
@@ -91,6 +92,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
|
||||
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
|
||||
tcontext,
|
||||
SEPG_CLASS_DB_DATABASE);
|
||||
|
||||
/*
|
||||
* check db_database:{create} permission
|
||||
*/
|
||||
@@ -126,8 +128,8 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
|
||||
void
|
||||
sepgsql_database_drop(Oid databaseId)
|
||||
{
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
|
||||
/*
|
||||
* check db_database:{drop} permission
|
||||
@@ -153,8 +155,8 @@ sepgsql_database_drop(Oid databaseId)
|
||||
void
|
||||
sepgsql_database_relabel(Oid databaseId, const char *seclabel)
|
||||
{
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
|
||||
object.classId = DatabaseRelationId;
|
||||
object.objectId = databaseId;
|
||||
@@ -170,6 +172,7 @@ sepgsql_database_relabel(Oid databaseId, const char *seclabel)
|
||||
SEPG_DB_DATABASE__RELABELFROM,
|
||||
audit_name,
|
||||
true);
|
||||
|
||||
/*
|
||||
* check db_database:{relabelto} permission
|
||||
*/
|
||||
|
||||
@@ -150,7 +150,7 @@ check_relation_privileges(Oid relOid,
|
||||
uint32 required,
|
||||
bool abort)
|
||||
{
|
||||
ObjectAddress object;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
Bitmapset *columns;
|
||||
int index;
|
||||
|
||||
@@ -52,9 +52,9 @@ typedef struct
|
||||
* command. Elsewhere (including the case of default) NULL.
|
||||
*/
|
||||
const char *createdb_dtemplate;
|
||||
} sepgsql_context_info_t;
|
||||
} sepgsql_context_info_t;
|
||||
|
||||
static sepgsql_context_info_t sepgsql_context_info;
|
||||
static sepgsql_context_info_t sepgsql_context_info;
|
||||
|
||||
/*
|
||||
* GUC: sepgsql.permissive = (on|off)
|
||||
@@ -101,7 +101,7 @@ sepgsql_object_access(ObjectAccessType access,
|
||||
{
|
||||
case DatabaseRelationId:
|
||||
sepgsql_database_post_create(objectId,
|
||||
sepgsql_context_info.createdb_dtemplate);
|
||||
sepgsql_context_info.createdb_dtemplate);
|
||||
break;
|
||||
|
||||
case NamespaceRelationId:
|
||||
@@ -115,9 +115,8 @@ sepgsql_object_access(ObjectAccessType access,
|
||||
* All cases we want to apply permission checks on
|
||||
* creation of a new relation are invocation of the
|
||||
* heap_create_with_catalog via DefineRelation or
|
||||
* OpenIntoRel.
|
||||
* Elsewhere, we need neither assignment of security
|
||||
* label nor permission checks.
|
||||
* OpenIntoRel. Elsewhere, we need neither assignment
|
||||
* of security label nor permission checks.
|
||||
*/
|
||||
switch (sepgsql_context_info.cmdtype)
|
||||
{
|
||||
@@ -150,12 +149,12 @@ sepgsql_object_access(ObjectAccessType access,
|
||||
|
||||
case OAT_DROP:
|
||||
{
|
||||
ObjectAccessDrop *drop_arg = (ObjectAccessDrop *)arg;
|
||||
ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg;
|
||||
|
||||
/*
|
||||
* No need to apply permission checks on object deletion
|
||||
* due to internal cleanups; such as removal of temporary
|
||||
* database object on session closed.
|
||||
* No need to apply permission checks on object deletion due
|
||||
* to internal cleanups; such as removal of temporary database
|
||||
* object on session closed.
|
||||
*/
|
||||
if ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL) != 0)
|
||||
break;
|
||||
@@ -219,13 +218,13 @@ sepgsql_exec_check_perms(List *rangeTabls, bool abort)
|
||||
/*
|
||||
* sepgsql_executor_start
|
||||
*
|
||||
* It saves contextual information during ExecutorStart to distinguish
|
||||
* It saves contextual information during ExecutorStart to distinguish
|
||||
* a case with/without permission checks later.
|
||||
*/
|
||||
static void
|
||||
sepgsql_executor_start(QueryDesc *queryDesc, int eflags)
|
||||
{
|
||||
sepgsql_context_info_t saved_context_info = sepgsql_context_info;
|
||||
sepgsql_context_info_t saved_context_info = sepgsql_context_info;
|
||||
|
||||
PG_TRY();
|
||||
{
|
||||
@@ -270,28 +269,29 @@ sepgsql_utility_command(Node *parsetree,
|
||||
DestReceiver *dest,
|
||||
char *completionTag)
|
||||
{
|
||||
sepgsql_context_info_t saved_context_info = sepgsql_context_info;
|
||||
ListCell *cell;
|
||||
sepgsql_context_info_t saved_context_info = sepgsql_context_info;
|
||||
ListCell *cell;
|
||||
|
||||
PG_TRY();
|
||||
{
|
||||
/*
|
||||
* Check command tag to avoid nefarious operations, and save the
|
||||
* current contextual information to determine whether we should
|
||||
* apply permission checks here, or not.
|
||||
* current contextual information to determine whether we should apply
|
||||
* permission checks here, or not.
|
||||
*/
|
||||
sepgsql_context_info.cmdtype = nodeTag(parsetree);
|
||||
|
||||
switch (nodeTag(parsetree))
|
||||
{
|
||||
case T_CreatedbStmt:
|
||||
|
||||
/*
|
||||
* We hope to reference name of the source database, but it
|
||||
* does not appear in system catalog. So, we save it here.
|
||||
*/
|
||||
foreach (cell, ((CreatedbStmt *) parsetree)->options)
|
||||
foreach(cell, ((CreatedbStmt *) parsetree)->options)
|
||||
{
|
||||
DefElem *defel = (DefElem *) lfirst(cell);
|
||||
DefElem *defel = (DefElem *) lfirst(cell);
|
||||
|
||||
if (strcmp(defel->defname, "template") == 0)
|
||||
{
|
||||
@@ -303,6 +303,7 @@ sepgsql_utility_command(Node *parsetree,
|
||||
break;
|
||||
|
||||
case T_LoadStmt:
|
||||
|
||||
/*
|
||||
* We reject LOAD command across the board on enforcing mode,
|
||||
* because a binary module can arbitrarily override hooks.
|
||||
@@ -315,6 +316,7 @@ sepgsql_utility_command(Node *parsetree,
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
||||
/*
|
||||
* Right now we don't check any other utility commands,
|
||||
* because it needs more detailed information to make access
|
||||
|
||||
@@ -58,17 +58,18 @@ static fmgr_hook_type next_fmgr_hook = NULL;
|
||||
* we use the list client_label_pending of pending_label to keep track of which
|
||||
* labels were set during the (sub-)transactions.
|
||||
*/
|
||||
static char *client_label_peer = NULL; /* set by getpeercon(3) */
|
||||
static List *client_label_pending = NIL; /* pending list being set by
|
||||
* sepgsql_setcon() */
|
||||
static char *client_label_committed = NULL; /* set by sepgsql_setcon(),
|
||||
* and already committed */
|
||||
static char *client_label_func = NULL; /* set by trusted procedure */
|
||||
static char *client_label_peer = NULL; /* set by getpeercon(3) */
|
||||
static List *client_label_pending = NIL; /* pending list being set by
|
||||
* sepgsql_setcon() */
|
||||
static char *client_label_committed = NULL; /* set by sepgsql_setcon(),
|
||||
* and already committed */
|
||||
static char *client_label_func = NULL; /* set by trusted procedure */
|
||||
|
||||
typedef struct {
|
||||
SubTransactionId subid;
|
||||
char *label;
|
||||
} pending_label;
|
||||
typedef struct
|
||||
{
|
||||
SubTransactionId subid;
|
||||
char *label;
|
||||
} pending_label;
|
||||
|
||||
/*
|
||||
* sepgsql_get_client_label
|
||||
@@ -87,7 +88,7 @@ sepgsql_get_client_label(void)
|
||||
/* uncommitted sepgsql_setcon() value */
|
||||
if (client_label_pending)
|
||||
{
|
||||
pending_label *plabel = llast(client_label_pending);
|
||||
pending_label *plabel = llast(client_label_pending);
|
||||
|
||||
if (plabel->label)
|
||||
return plabel->label;
|
||||
@@ -104,16 +105,16 @@ sepgsql_get_client_label(void)
|
||||
* sepgsql_set_client_label
|
||||
*
|
||||
* This routine tries to switch the current security label of the client, and
|
||||
* checks related permissions. The supplied new label shall be added to the
|
||||
* checks related permissions. The supplied new label shall be added to the
|
||||
* client_label_pending list, then saved at transaction-commit time to ensure
|
||||
* transaction-awareness.
|
||||
*/
|
||||
static void
|
||||
sepgsql_set_client_label(const char *new_label)
|
||||
{
|
||||
const char *tcontext;
|
||||
MemoryContext oldcxt;
|
||||
pending_label *plabel;
|
||||
const char *tcontext;
|
||||
MemoryContext oldcxt;
|
||||
pending_label *plabel;
|
||||
|
||||
/* Reset to the initial client label, if NULL */
|
||||
if (!new_label)
|
||||
@@ -140,9 +141,10 @@ sepgsql_set_client_label(const char *new_label)
|
||||
SEPG_PROCESS__DYNTRANSITION,
|
||||
NULL,
|
||||
true);
|
||||
|
||||
/*
|
||||
* Append the supplied new_label on the pending list until
|
||||
* the current transaction is committed.
|
||||
* Append the supplied new_label on the pending list until the current
|
||||
* transaction is committed.
|
||||
*/
|
||||
oldcxt = MemoryContextSwitchTo(CurTransactionContext);
|
||||
|
||||
@@ -158,7 +160,7 @@ sepgsql_set_client_label(const char *new_label)
|
||||
/*
|
||||
* sepgsql_xact_callback
|
||||
*
|
||||
* A callback routine of transaction commit/abort/prepare. Commmit or abort
|
||||
* A callback routine of transaction commit/abort/prepare. Commmit or abort
|
||||
* changes in the client_label_pending list.
|
||||
*/
|
||||
static void
|
||||
@@ -168,8 +170,8 @@ sepgsql_xact_callback(XactEvent event, void *arg)
|
||||
{
|
||||
if (client_label_pending != NIL)
|
||||
{
|
||||
pending_label *plabel = llast(client_label_pending);
|
||||
char *new_label;
|
||||
pending_label *plabel = llast(client_label_pending);
|
||||
char *new_label;
|
||||
|
||||
if (plabel->label)
|
||||
new_label = MemoryContextStrdup(TopMemoryContext,
|
||||
@@ -181,10 +183,11 @@ sepgsql_xact_callback(XactEvent event, void *arg)
|
||||
pfree(client_label_committed);
|
||||
|
||||
client_label_committed = new_label;
|
||||
|
||||
/*
|
||||
* XXX - Note that items of client_label_pending are allocated
|
||||
* on CurTransactionContext, thus, all acquired memory region
|
||||
* shall be released implicitly.
|
||||
* XXX - Note that items of client_label_pending are allocated on
|
||||
* CurTransactionContext, thus, all acquired memory region shall
|
||||
* be released implicitly.
|
||||
*/
|
||||
client_label_pending = NIL;
|
||||
}
|
||||
@@ -212,7 +215,8 @@ sepgsql_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
|
||||
prev = NULL;
|
||||
for (cell = list_head(client_label_pending); cell; cell = next)
|
||||
{
|
||||
pending_label *plabel = lfirst(cell);
|
||||
pending_label *plabel = lfirst(cell);
|
||||
|
||||
next = lnext(cell);
|
||||
|
||||
if (plabel->subid == mySubid)
|
||||
@@ -272,7 +276,7 @@ sepgsql_client_auth(Port *port, int status)
|
||||
static bool
|
||||
sepgsql_needs_fmgr_hook(Oid functionId)
|
||||
{
|
||||
ObjectAddress object;
|
||||
ObjectAddress object;
|
||||
|
||||
if (next_needs_fmgr_hook &&
|
||||
(*next_needs_fmgr_hook) (functionId))
|
||||
@@ -340,8 +344,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
|
||||
|
||||
/*
|
||||
* process:transition permission between old and new label,
|
||||
* when user tries to switch security label of the client
|
||||
* on execution of trusted procedure.
|
||||
* when user tries to switch security label of the client on
|
||||
* execution of trusted procedure.
|
||||
*/
|
||||
if (stack->new_label)
|
||||
sepgsql_avc_check_perms_label(stack->new_label,
|
||||
|
||||
@@ -42,9 +42,9 @@ sepgsql_proc_post_create(Oid functionId)
|
||||
char *tcontext;
|
||||
char *ncontext;
|
||||
int i;
|
||||
StringInfoData audit_name;
|
||||
ObjectAddress object;
|
||||
Form_pg_proc proForm;
|
||||
StringInfoData audit_name;
|
||||
ObjectAddress object;
|
||||
Form_pg_proc proForm;
|
||||
|
||||
/*
|
||||
* Fetch namespace of the new procedure. Because pg_proc entry is not
|
||||
@@ -77,6 +77,7 @@ sepgsql_proc_post_create(Oid functionId)
|
||||
SEPG_DB_SCHEMA__ADD_NAME,
|
||||
getObjectDescription(&object),
|
||||
true);
|
||||
|
||||
/*
|
||||
* XXX - db_language:{implement} also should be checked here
|
||||
*/
|
||||
@@ -97,9 +98,10 @@ sepgsql_proc_post_create(Oid functionId)
|
||||
*/
|
||||
initStringInfo(&audit_name);
|
||||
appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname));
|
||||
for (i=0; i < proForm->pronargs; i++)
|
||||
for (i = 0; i < proForm->pronargs; i++)
|
||||
{
|
||||
Oid typeoid = proForm->proargtypes.values[i];
|
||||
Oid typeoid = proForm->proargtypes.values[i];
|
||||
|
||||
if (i > 0)
|
||||
appendStringInfoChar(&audit_name, ',');
|
||||
appendStringInfoString(&audit_name, format_type_be(typeoid));
|
||||
@@ -111,6 +113,7 @@ sepgsql_proc_post_create(Oid functionId)
|
||||
SEPG_DB_PROCEDURE__CREATE,
|
||||
audit_name.data,
|
||||
true);
|
||||
|
||||
/*
|
||||
* Assign the default security label on a new procedure
|
||||
*/
|
||||
@@ -138,8 +141,8 @@ sepgsql_proc_post_create(Oid functionId)
|
||||
void
|
||||
sepgsql_proc_drop(Oid functionId)
|
||||
{
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
|
||||
/*
|
||||
* check db_schema:{remove_name} permission
|
||||
@@ -156,19 +159,19 @@ sepgsql_proc_drop(Oid functionId)
|
||||
true);
|
||||
pfree(audit_name);
|
||||
|
||||
/*
|
||||
* check db_procedure:{drop} permission
|
||||
*/
|
||||
/*
|
||||
* check db_procedure:{drop} permission
|
||||
*/
|
||||
object.classId = ProcedureRelationId;
|
||||
object.objectId = functionId;
|
||||
object.objectSubId = 0;
|
||||
audit_name = getObjectDescription(&object);
|
||||
|
||||
sepgsql_avc_check_perms(&object,
|
||||
SEPG_CLASS_DB_PROCEDURE,
|
||||
SEPG_DB_PROCEDURE__DROP,
|
||||
audit_name,
|
||||
true);
|
||||
sepgsql_avc_check_perms(&object,
|
||||
SEPG_CLASS_DB_PROCEDURE,
|
||||
SEPG_DB_PROCEDURE__DROP,
|
||||
audit_name,
|
||||
true);
|
||||
pfree(audit_name);
|
||||
}
|
||||
|
||||
@@ -181,8 +184,8 @@ sepgsql_proc_drop(Oid functionId)
|
||||
void
|
||||
sepgsql_proc_relabel(Oid functionId, const char *seclabel)
|
||||
{
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
|
||||
object.classId = ProcedureRelationId;
|
||||
object.objectId = functionId;
|
||||
@@ -198,6 +201,7 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
|
||||
SEPG_DB_PROCEDURE__RELABELFROM,
|
||||
audit_name,
|
||||
true);
|
||||
|
||||
/*
|
||||
* check db_procedure:{relabelto} permission
|
||||
*/
|
||||
|
||||
@@ -44,9 +44,9 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
|
||||
char *scontext;
|
||||
char *tcontext;
|
||||
char *ncontext;
|
||||
char audit_name[2*NAMEDATALEN + 20];
|
||||
char audit_name[2 * NAMEDATALEN + 20];
|
||||
ObjectAddress object;
|
||||
Form_pg_attribute attForm;
|
||||
Form_pg_attribute attForm;
|
||||
|
||||
/*
|
||||
* Only attributes within regular relation have individual security
|
||||
@@ -84,6 +84,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
|
||||
tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
|
||||
ncontext = sepgsql_compute_create(scontext, tcontext,
|
||||
SEPG_CLASS_DB_COLUMN);
|
||||
|
||||
/*
|
||||
* check db_column:{create} permission
|
||||
*/
|
||||
@@ -118,8 +119,8 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
|
||||
void
|
||||
sepgsql_attribute_drop(Oid relOid, AttrNumber attnum)
|
||||
{
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
|
||||
if (get_rel_relkind(relOid) != RELKIND_RELATION)
|
||||
return;
|
||||
@@ -151,7 +152,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
|
||||
const char *seclabel)
|
||||
{
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
char *audit_name;
|
||||
|
||||
if (get_rel_relkind(relOid) != RELKIND_RELATION)
|
||||
ereport(ERROR,
|
||||
@@ -172,6 +173,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
|
||||
SEPG_DB_COLUMN__RELABELFROM,
|
||||
audit_name,
|
||||
true);
|
||||
|
||||
/*
|
||||
* check db_column:{relabelto} permission
|
||||
*/
|
||||
@@ -203,7 +205,7 @@ sepgsql_relation_post_create(Oid relOid)
|
||||
char *tcontext; /* schema */
|
||||
char *rcontext; /* relation */
|
||||
char *ccontext; /* column */
|
||||
char audit_name[2*NAMEDATALEN + 20];
|
||||
char audit_name[2 * NAMEDATALEN + 20];
|
||||
|
||||
/*
|
||||
* Fetch catalog record of the new relation. Because pg_class entry is not
|
||||
@@ -254,6 +256,7 @@ sepgsql_relation_post_create(Oid relOid)
|
||||
SEPG_DB_SCHEMA__ADD_NAME,
|
||||
getObjectDescription(&object),
|
||||
true);
|
||||
|
||||
/*
|
||||
* Compute a default security label when we create a new relation object
|
||||
* under the specified namespace.
|
||||
@@ -273,6 +276,7 @@ sepgsql_relation_post_create(Oid relOid)
|
||||
SEPG_DB_DATABASE__CREATE,
|
||||
audit_name,
|
||||
true);
|
||||
|
||||
/*
|
||||
* Assign the default security label on the new relation
|
||||
*/
|
||||
@@ -288,10 +292,10 @@ sepgsql_relation_post_create(Oid relOid)
|
||||
if (classForm->relkind == RELKIND_RELATION)
|
||||
{
|
||||
Relation arel;
|
||||
ScanKeyData akey;
|
||||
SysScanDesc ascan;
|
||||
ScanKeyData akey;
|
||||
SysScanDesc ascan;
|
||||
HeapTuple atup;
|
||||
Form_pg_attribute attForm;
|
||||
Form_pg_attribute attForm;
|
||||
|
||||
arel = heap_open(AttributeRelationId, AccessShareLock);
|
||||
|
||||
@@ -315,6 +319,7 @@ sepgsql_relation_post_create(Oid relOid)
|
||||
ccontext = sepgsql_compute_create(scontext,
|
||||
rcontext,
|
||||
SEPG_CLASS_DB_COLUMN);
|
||||
|
||||
/*
|
||||
* check db_column:{create} permission
|
||||
*/
|
||||
@@ -348,10 +353,10 @@ out:
|
||||
void
|
||||
sepgsql_relation_drop(Oid relOid)
|
||||
{
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
uint16_t tclass = 0;
|
||||
char relkind;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
uint16_t tclass = 0;
|
||||
char relkind;
|
||||
|
||||
relkind = get_rel_relkind(relOid);
|
||||
if (relkind == RELKIND_RELATION)
|
||||
@@ -398,13 +403,13 @@ sepgsql_relation_drop(Oid relOid)
|
||||
*/
|
||||
if (relkind == RELKIND_RELATION)
|
||||
{
|
||||
Form_pg_attribute attForm;
|
||||
Form_pg_attribute attForm;
|
||||
CatCList *attrList;
|
||||
HeapTuple atttup;
|
||||
int i;
|
||||
|
||||
attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid));
|
||||
for (i=0; i < attrList->n_members; i++)
|
||||
for (i = 0; i < attrList->n_members; i++)
|
||||
{
|
||||
atttup = &attrList->members[i]->tuple;
|
||||
attForm = (Form_pg_attribute) GETSTRUCT(atttup);
|
||||
@@ -436,7 +441,7 @@ sepgsql_relation_drop(Oid relOid)
|
||||
void
|
||||
sepgsql_relation_relabel(Oid relOid, const char *seclabel)
|
||||
{
|
||||
ObjectAddress object;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
char relkind;
|
||||
uint16_t tclass = 0;
|
||||
@@ -468,6 +473,7 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel)
|
||||
SEPG_DB_TABLE__RELABELFROM,
|
||||
audit_name,
|
||||
true);
|
||||
|
||||
/*
|
||||
* check db_xxx:{relabelto} permission
|
||||
*/
|
||||
|
||||
@@ -35,22 +35,22 @@ void
|
||||
sepgsql_schema_post_create(Oid namespaceId)
|
||||
{
|
||||
Relation rel;
|
||||
ScanKeyData skey;
|
||||
SysScanDesc sscan;
|
||||
ScanKeyData skey;
|
||||
SysScanDesc sscan;
|
||||
HeapTuple tuple;
|
||||
char *tcontext;
|
||||
char *ncontext;
|
||||
char audit_name[NAMEDATALEN + 20];
|
||||
ObjectAddress object;
|
||||
Form_pg_namespace nspForm;
|
||||
ObjectAddress object;
|
||||
Form_pg_namespace nspForm;
|
||||
|
||||
/*
|
||||
* Compute a default security label when we create a new schema object
|
||||
* under the working database.
|
||||
*
|
||||
* XXX - uncoming version of libselinux supports to take object
|
||||
* name to handle special treatment on default security label;
|
||||
* such as special label on "pg_temp" schema.
|
||||
* XXX - uncoming version of libselinux supports to take object name to
|
||||
* handle special treatment on default security label; such as special
|
||||
* label on "pg_temp" schema.
|
||||
*/
|
||||
rel = heap_open(NamespaceRelationId, AccessShareLock);
|
||||
|
||||
@@ -71,6 +71,7 @@ sepgsql_schema_post_create(Oid namespaceId)
|
||||
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
|
||||
tcontext,
|
||||
SEPG_CLASS_DB_SCHEMA);
|
||||
|
||||
/*
|
||||
* check db_schema:{create}
|
||||
*/
|
||||
@@ -104,8 +105,8 @@ sepgsql_schema_post_create(Oid namespaceId)
|
||||
void
|
||||
sepgsql_schema_drop(Oid namespaceId)
|
||||
{
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
|
||||
/*
|
||||
* check db_schema:{drop} permission
|
||||
@@ -116,7 +117,7 @@ sepgsql_schema_drop(Oid namespaceId)
|
||||
audit_name = getObjectDescription(&object);
|
||||
|
||||
sepgsql_avc_check_perms(&object,
|
||||
SEPG_CLASS_DB_SCHEMA,
|
||||
SEPG_CLASS_DB_SCHEMA,
|
||||
SEPG_DB_SCHEMA__DROP,
|
||||
audit_name,
|
||||
true);
|
||||
@@ -132,8 +133,8 @@ sepgsql_schema_drop(Oid namespaceId)
|
||||
void
|
||||
sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
|
||||
{
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
ObjectAddress object;
|
||||
char *audit_name;
|
||||
|
||||
object.classId = NamespaceRelationId;
|
||||
object.objectId = namespaceId;
|
||||
@@ -149,6 +150,7 @@ sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
|
||||
SEPG_DB_SCHEMA__RELABELFROM,
|
||||
audit_name,
|
||||
true);
|
||||
|
||||
/*
|
||||
* check db_schema:{relabelto} permission
|
||||
*/
|
||||
|
||||
@@ -248,20 +248,21 @@ extern bool sepgsql_check_perms(const char *scontext,
|
||||
uint32 required,
|
||||
const char *audit_name,
|
||||
bool abort);
|
||||
|
||||
/*
|
||||
* uavc.c
|
||||
*/
|
||||
#define SEPGSQL_AVC_NOAUDIT ((void *)(-1))
|
||||
extern bool sepgsql_avc_check_perms_label(const char *tcontext,
|
||||
uint16 tclass,
|
||||
uint32 required,
|
||||
const char *audit_name,
|
||||
bool abort);
|
||||
uint16 tclass,
|
||||
uint32 required,
|
||||
const char *audit_name,
|
||||
bool abort);
|
||||
extern bool sepgsql_avc_check_perms(const ObjectAddress *tobject,
|
||||
uint16 tclass,
|
||||
uint32 required,
|
||||
const char *audit_name,
|
||||
bool abort);
|
||||
uint16 tclass,
|
||||
uint32 required,
|
||||
const char *audit_name,
|
||||
bool abort);
|
||||
extern char *sepgsql_avc_trusted_proc(Oid functionId);
|
||||
extern void sepgsql_avc_init(void);
|
||||
|
||||
@@ -269,7 +270,7 @@ extern void sepgsql_avc_init(void);
|
||||
* label.c
|
||||
*/
|
||||
extern char *sepgsql_get_client_label(void);
|
||||
extern void sepgsql_init_client_label(void);
|
||||
extern void sepgsql_init_client_label(void);
|
||||
extern char *sepgsql_get_label(Oid relOid, Oid objOid, int32 subId);
|
||||
|
||||
extern void sepgsql_object_relabel(const ObjectAddress *object,
|
||||
@@ -290,7 +291,7 @@ extern bool sepgsql_dml_privileges(List *rangeTabls, bool abort);
|
||||
* database.c
|
||||
*/
|
||||
extern void sepgsql_database_post_create(Oid databaseId,
|
||||
const char *dtemplate);
|
||||
const char *dtemplate);
|
||||
extern void sepgsql_database_drop(Oid databaseId);
|
||||
extern void sepgsql_database_relabel(Oid databaseId, const char *seclabel);
|
||||
|
||||
|
||||
@@ -30,22 +30,22 @@
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
uint32 hash; /* hash value of this cache entry */
|
||||
char *scontext; /* security context of the subject */
|
||||
char *tcontext; /* security context of the target */
|
||||
uint16 tclass; /* object class of the target */
|
||||
uint32 hash; /* hash value of this cache entry */
|
||||
char *scontext; /* security context of the subject */
|
||||
char *tcontext; /* security context of the target */
|
||||
uint16 tclass; /* object class of the target */
|
||||
|
||||
uint32 allowed; /* permissions to be allowed */
|
||||
uint32 auditallow; /* permissions to be audited on allowed */
|
||||
uint32 auditdeny; /* permissions to be audited on denied */
|
||||
uint32 allowed; /* permissions to be allowed */
|
||||
uint32 auditallow; /* permissions to be audited on allowed */
|
||||
uint32 auditdeny; /* permissions to be audited on denied */
|
||||
|
||||
bool permissive; /* true, if permissive rule */
|
||||
bool hot_cache; /* true, if recently referenced */
|
||||
bool permissive; /* true, if permissive rule */
|
||||
bool hot_cache; /* true, if recently referenced */
|
||||
bool tcontext_is_valid;
|
||||
/* true, if tcontext is valid */
|
||||
char *ncontext; /* temporary scontext on execution of trusted
|
||||
* procedure, or NULL elsewhere */
|
||||
} avc_cache;
|
||||
/* true, if tcontext is valid */
|
||||
char *ncontext; /* temporary scontext on execution of trusted
|
||||
* procedure, or NULL elsewhere */
|
||||
} avc_cache;
|
||||
|
||||
/*
|
||||
* Declaration of static variables
|
||||
@@ -54,12 +54,12 @@ typedef struct
|
||||
#define AVC_NUM_RECLAIM 16
|
||||
#define AVC_DEF_THRESHOLD 384
|
||||
|
||||
static MemoryContext avc_mem_cxt;
|
||||
static List *avc_slots[AVC_NUM_SLOTS]; /* avc's hash buckets */
|
||||
static int avc_num_caches; /* number of caches currently used */
|
||||
static int avc_lru_hint; /* index of the buckets to be reclaimed next */
|
||||
static int avc_threshold; /* threshold to launch cache-reclaiming */
|
||||
static char *avc_unlabeled; /* system 'unlabeled' label */
|
||||
static MemoryContext avc_mem_cxt;
|
||||
static List *avc_slots[AVC_NUM_SLOTS]; /* avc's hash buckets */
|
||||
static int avc_num_caches; /* number of caches currently used */
|
||||
static int avc_lru_hint; /* index of the buckets to be reclaimed next */
|
||||
static int avc_threshold; /* threshold to launch cache-reclaiming */
|
||||
static char *avc_unlabeled; /* system 'unlabeled' label */
|
||||
|
||||
/*
|
||||
* Hash function
|
||||
@@ -67,8 +67,8 @@ static char *avc_unlabeled; /* system 'unlabeled' label */
|
||||
static uint32
|
||||
sepgsql_avc_hash(const char *scontext, const char *tcontext, uint16 tclass)
|
||||
{
|
||||
return hash_any((const unsigned char *)scontext, strlen(scontext))
|
||||
^ hash_any((const unsigned char *)tcontext, strlen(tcontext))
|
||||
return hash_any((const unsigned char *) scontext, strlen(scontext))
|
||||
^ hash_any((const unsigned char *) tcontext, strlen(tcontext))
|
||||
^ tclass;
|
||||
}
|
||||
|
||||
@@ -88,7 +88,7 @@ sepgsql_avc_reset(void)
|
||||
|
||||
/*
|
||||
* Reclaim caches recently unreferenced
|
||||
*/
|
||||
*/
|
||||
static void
|
||||
sepgsql_avc_reclaim(void)
|
||||
{
|
||||
@@ -142,15 +142,15 @@ sepgsql_avc_reclaim(void)
|
||||
* Access control decisions must be atomic, but multiple system calls may
|
||||
* be required to make a decision; thus, when referencing the access vector
|
||||
* cache, we must loop until we complete without an intervening cache flush
|
||||
* event. In practice, looping even once should be very rare. Callers should
|
||||
* event. In practice, looping even once should be very rare. Callers should
|
||||
* do something like this:
|
||||
*
|
||||
* sepgsql_avc_check_valid();
|
||||
* do {
|
||||
* :
|
||||
* <reference to uavc>
|
||||
* :
|
||||
* } while (!sepgsql_avc_check_valid())
|
||||
* sepgsql_avc_check_valid();
|
||||
* do {
|
||||
* :
|
||||
* <reference to uavc>
|
||||
* :
|
||||
* } while (!sepgsql_avc_check_valid())
|
||||
*
|
||||
* -------------------------------------------------------------------------
|
||||
*/
|
||||
@@ -169,7 +169,7 @@ sepgsql_avc_check_valid(void)
|
||||
/*
|
||||
* sepgsql_avc_unlabeled
|
||||
*
|
||||
* Returns an alternative label to be applied when no label or an invalid
|
||||
* Returns an alternative label to be applied when no label or an invalid
|
||||
* label would otherwise be assigned.
|
||||
*/
|
||||
static char *
|
||||
@@ -177,12 +177,12 @@ sepgsql_avc_unlabeled(void)
|
||||
{
|
||||
if (!avc_unlabeled)
|
||||
{
|
||||
security_context_t unlabeled;
|
||||
security_context_t unlabeled;
|
||||
|
||||
if (security_get_initial_context_raw("unlabeled", &unlabeled) < 0)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_INTERNAL_ERROR),
|
||||
errmsg("SELinux: failed to get initial security label: %m")));
|
||||
(errcode(ERRCODE_INTERNAL_ERROR),
|
||||
errmsg("SELinux: failed to get initial security label: %m")));
|
||||
PG_TRY();
|
||||
{
|
||||
avc_unlabeled = MemoryContextStrdup(avc_mem_cxt, unlabeled);
|
||||
@@ -200,7 +200,7 @@ sepgsql_avc_unlabeled(void)
|
||||
}
|
||||
|
||||
/*
|
||||
* sepgsql_avc_compute
|
||||
* sepgsql_avc_compute
|
||||
*
|
||||
* A fallback path, when cache mishit. It asks SELinux its access control
|
||||
* decision for the supplied pair of security context and object class.
|
||||
@@ -208,24 +208,24 @@ sepgsql_avc_unlabeled(void)
|
||||
static avc_cache *
|
||||
sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
|
||||
{
|
||||
char *ucontext = NULL;
|
||||
char *ncontext = NULL;
|
||||
MemoryContext oldctx;
|
||||
avc_cache *cache;
|
||||
uint32 hash;
|
||||
int index;
|
||||
struct av_decision avd;
|
||||
char *ucontext = NULL;
|
||||
char *ncontext = NULL;
|
||||
MemoryContext oldctx;
|
||||
avc_cache *cache;
|
||||
uint32 hash;
|
||||
int index;
|
||||
struct av_decision avd;
|
||||
|
||||
hash = sepgsql_avc_hash(scontext, tcontext, tclass);
index = hash % AVC_NUM_SLOTS;

/*
 * Validation check of the supplied security context.
 * Because it always invoke system-call, frequent check should be avoided.
 * Unless security policy is reloaded, validation status shall be kept, so
 * we also cache whether the supplied security context was valid, or not.
 * Validation check of the supplied security context. Because it always
 * invoke system-call, frequent check should be avoided. Unless security
 * policy is reloaded, validation status shall be kept, so we also cache
 * whether the supplied security context was valid, or not.
 */
if (security_check_context_raw((security_context_t)tcontext) != 0)
if (security_check_context_raw((security_context_t) tcontext) != 0)
ucontext = sepgsql_avc_unlabeled();

/*
@@ -237,15 +237,14 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
sepgsql_compute_avd(scontext, ucontext, tclass, &avd);

/*
 * It also caches a security label to be switched when a client
 * labeled as 'scontext' executes a procedure labeled as 'tcontext',
 * not only access control decision on the procedure.
 * The security label to be switched shall be computed uniquely on
 * a pair of 'scontext' and 'tcontext', thus, it is reasonable to
 * cache the new label on avc, and enables to reduce unnecessary
 * system calls.
 * It shall be referenced at sepgsql_needs_fmgr_hook to check whether
 * the supplied function is a trusted procedure, or not.
 * It also caches a security label to be switched when a client labeled as
 * 'scontext' executes a procedure labeled as 'tcontext', not only access
 * control decision on the procedure. The security label to be switched
 * shall be computed uniquely on a pair of 'scontext' and 'tcontext',
 * thus, it is reasonable to cache the new label on avc, and enables to
 * reduce unnecessary system calls. It shall be referenced at
 * sepgsql_needs_fmgr_hook to check whether the supplied function is a
 * trusted procedure, or not.
 */
if (tclass == SEPG_CLASS_DB_PROCEDURE)
{
@@ -269,7 +268,7 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)

cache = palloc0(sizeof(avc_cache));

cache->hash = hash;
cache->hash = hash;
cache->scontext = pstrdup(scontext);
cache->tcontext = pstrdup(tcontext);
cache->tclass = tclass;
@@ -314,7 +313,7 @@ sepgsql_avc_lookup(const char *scontext, const char *tcontext, uint16 tclass)
hash = sepgsql_avc_hash(scontext, tcontext, tclass);
index = hash % AVC_NUM_SLOTS;

foreach (cell, avc_slots[index])
foreach(cell, avc_slots[index])
{
cache = lfirst(cell);

@@ -348,14 +347,15 @@ sepgsql_avc_check_perms_label(const char *tcontext,
uint16 tclass, uint32 required,
const char *audit_name, bool abort)
{
char *scontext = sepgsql_get_client_label();
char *scontext = sepgsql_get_client_label();
avc_cache *cache;
uint32 denied;
uint32 audited;
bool result;

sepgsql_avc_check_valid();
do {
do
{
result = true;

/*
@@ -377,16 +377,16 @@ sepgsql_avc_check_perms_label(const char *tcontext,
audited = (denied ? (denied & ~0) : (required & ~0));
else
audited = denied ? (denied & cache->auditdeny)
: (required & cache->auditallow);
: (required & cache->auditallow);

if (denied)
{
/*
 * In permissive mode or permissive domain, violated permissions
 * shall be audited to the log files at once, and then implicitly
 * allowed to avoid a flood of access denied logs, because
 * the purpose of permissive mode/domain is to collect a violation
 * log that will make it possible to fix up the security policy.
 * allowed to avoid a flood of access denied logs, because the
 * purpose of permissive mode/domain is to collect a violation log
 * that will make it possible to fix up the security policy.
 */
if (!sepgsql_getenforce() || cache->permissive)
cache->allowed |= required;
@@ -397,10 +397,10 @@ sepgsql_avc_check_perms_label(const char *tcontext,

/*
 * In the case when we have something auditable actions here,
 * sepgsql_audit_log shall be called with text representation of
 * security labels for both of subject and object.
 * It records this access violation, so DBA will be able to find
 * out unexpected security problems later.
 * sepgsql_audit_log shall be called with text representation of security
 * labels for both of subject and object. It records this access
 * violation, so DBA will be able to find out unexpected security problems
 * later.
 */
if (audited != 0 &&
audit_name != SEPGSQL_AVC_NOAUDIT &&
@@ -428,8 +428,8 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject,
uint16 tclass, uint32 required,
const char *audit_name, bool abort)
{
char *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG);
bool rc;
char *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG);
bool rc;

rc = sepgsql_avc_check_perms_label(tcontext,
tclass, required,
@@ -450,10 +450,10 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject,
char *
sepgsql_avc_trusted_proc(Oid functionId)
{
char *scontext = sepgsql_get_client_label();
char *tcontext;
ObjectAddress tobject;
avc_cache *cache;
char *scontext = sepgsql_get_client_label();
char *tcontext;
ObjectAddress tobject;
avc_cache *cache;

tobject.classId = ProcedureRelationId;
tobject.objectId = functionId;
@@ -461,7 +461,8 @@ sepgsql_avc_trusted_proc(Oid functionId)
tcontext = GetSecurityLabel(&tobject, SEPGSQL_LABEL_TAG);

sepgsql_avc_check_valid();
do {
do
{
if (tcontext)
cache = sepgsql_avc_lookup(scontext, tcontext,
SEPG_CLASS_DB_PROCEDURE);
@@ -492,7 +493,7 @@ sepgsql_avc_exit(int code, Datum arg)
void
sepgsql_avc_init(void)
{
int rc;
int rc;

/*
 * All the avc stuff shall be allocated on avc_mem_cxt
@@ -508,12 +509,11 @@ sepgsql_avc_init(void)
avc_threshold = AVC_DEF_THRESHOLD;

/*
 * SELinux allows to mmap(2) its kernel status page in read-only mode
 * to inform userspace applications its status updating (such as
 * policy reloading) without system-call invocations.
 * This feature is only supported in Linux-2.6.38 or later, however,
 * libselinux provides a fallback mode to know its status using
 * netlink sockets.
 * SELinux allows to mmap(2) its kernel status page in read-only mode to
 * inform userspace applications its status updating (such as policy
 * reloading) without system-call invocations. This feature is only
 * supported in Linux-2.6.38 or later, however, libselinux provides a
 * fallback mode to know its status using netlink sockets.
 */
rc = selinux_status_open(1);
if (rc < 0)

@@ -536,8 +536,7 @@ check_foreign_key(PG_FUNCTION_ARGS)

/*
 * Remember that SPI_prepare places plan in current memory context
 * - so, we have to save plan in Top memory context for later
 * use.
 * - so, we have to save plan in Top memory context for later use.
 */
if (SPI_keepplan(pplan))
/* internal error */

@@ -69,7 +69,7 @@ vacuumlo(const char *database, const struct _param * param)
int i;
static char *password = NULL;
bool new_pass;
bool success = true;
bool success = true;

/* Note: password can be carried over from a previous call */
if (param->pg_prompt == TRI_YES && password == NULL)
@@ -261,8 +261,8 @@ vacuumlo(const char *database, const struct _param * param)
 * We don't want to run each delete as an individual transaction, because
 * the commit overhead would be high. However, since 9.0 the backend will
 * acquire a lock per deleted LO, so deleting too many LOs per transaction
 * risks running out of room in the shared-memory lock table.
 * Accordingly, we delete up to transaction_limit LOs per transaction.
 * risks running out of room in the shared-memory lock table. Accordingly,
 * we delete up to transaction_limit LOs per transaction.
 */
res = PQexec(conn, "begin");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
@@ -459,8 +459,8 @@ main(int argc, char **argv)
if (param.transaction_limit < 0)
{
fprintf(stderr,
"%s: transaction limit must not be negative (0 disables)\n",
progname);
"%s: transaction limit must not be negative (0 disables)\n",
progname);
exit(1);
}
break;

@@ -702,126 +702,126 @@ xpath_table(PG_FUNCTION_ARGS)

PG_TRY();
{
/* For each row i.e. document returned from SPI */
for (i = 0; i < proc; i++)
{
char *pkey;
char *xmldoc;
xmlXPathContextPtr ctxt;
xmlXPathObjectPtr res;
xmlChar *resstr;
xmlXPathCompExprPtr comppath;
/* For each row i.e. document returned from SPI */
for (i = 0; i < proc; i++)
{
char *pkey;
char *xmldoc;
xmlXPathContextPtr ctxt;
xmlXPathObjectPtr res;
xmlChar *resstr;
xmlXPathCompExprPtr comppath;

/* Extract the row data as C Strings */
spi_tuple = tuptable->vals[i];
pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);
/* Extract the row data as C Strings */
spi_tuple = tuptable->vals[i];
pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);

/*
 * Clear the values array, so that not-well-formed documents return
 * NULL in all columns. Note that this also means that spare columns
 * will be NULL.
 */
for (j = 0; j < ret_tupdesc->natts; j++)
values[j] = NULL;
/*
 * Clear the values array, so that not-well-formed documents
 * return NULL in all columns. Note that this also means that
 * spare columns will be NULL.
 */
for (j = 0; j < ret_tupdesc->natts; j++)
values[j] = NULL;

/* Insert primary key */
values[0] = pkey;
/* Insert primary key */
values[0] = pkey;

/* Parse the document */
if (xmldoc)
doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
else /* treat NULL as not well-formed */
/* Parse the document */
if (xmldoc)
doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
else /* treat NULL as not well-formed */
doctree = NULL;

if (doctree == NULL)
{
/* not well-formed, so output all-NULL tuple */
ret_tuple = BuildTupleFromCStrings(attinmeta, values);
tuplestore_puttuple(tupstore, ret_tuple);
heap_freetuple(ret_tuple);
}
else
{
/* New loop here - we have to deal with nodeset results */
rownr = 0;

do
{
/* Now evaluate the set of xpaths. */
had_values = false;
for (j = 0; j < numpaths; j++)
{
ctxt = xmlXPathNewContext(doctree);
ctxt->node = xmlDocGetRootElement(doctree);

/* compile the path */
comppath = xmlXPathCompile(xpaths[j]);
if (comppath == NULL)
xml_ereport(xmlerrcxt, ERROR,
ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"XPath Syntax Error");

/* Now evaluate the path expression. */
res = xmlXPathCompiledEval(comppath, ctxt);
xmlXPathFreeCompExpr(comppath);

if (res != NULL)
{
switch (res->type)
{
case XPATH_NODESET:
/* We see if this nodeset has enough nodes */
if (res->nodesetval != NULL &&
rownr < res->nodesetval->nodeNr)
{
resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
had_values = true;
}
else
resstr = NULL;

break;

case XPATH_STRING:
resstr = xmlStrdup(res->stringval);
break;

default:
elog(NOTICE, "unsupported XQuery result: %d", res->type);
resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
}

/*
 * Insert this into the appropriate column in the
 * result tuple.
 */
values[j + 1] = (char *) resstr;
}
xmlXPathFreeContext(ctxt);
}

/* Now add the tuple to the output, if there is one. */
if (had_values)
{
ret_tuple = BuildTupleFromCStrings(attinmeta, values);
tuplestore_puttuple(tupstore, ret_tuple);
heap_freetuple(ret_tuple);
}

rownr++;
} while (had_values);
}

if (doctree != NULL)
xmlFreeDoc(doctree);
doctree = NULL;

if (doctree == NULL)
{
/* not well-formed, so output all-NULL tuple */
ret_tuple = BuildTupleFromCStrings(attinmeta, values);
tuplestore_puttuple(tupstore, ret_tuple);
heap_freetuple(ret_tuple);
if (pkey)
pfree(pkey);
if (xmldoc)
pfree(xmldoc);
}
else
{
/* New loop here - we have to deal with nodeset results */
rownr = 0;

do
{
/* Now evaluate the set of xpaths. */
had_values = false;
for (j = 0; j < numpaths; j++)
{
ctxt = xmlXPathNewContext(doctree);
ctxt->node = xmlDocGetRootElement(doctree);

/* compile the path */
comppath = xmlXPathCompile(xpaths[j]);
if (comppath == NULL)
xml_ereport(xmlerrcxt, ERROR,
ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"XPath Syntax Error");

/* Now evaluate the path expression. */
res = xmlXPathCompiledEval(comppath, ctxt);
xmlXPathFreeCompExpr(comppath);

if (res != NULL)
{
switch (res->type)
{
case XPATH_NODESET:
/* We see if this nodeset has enough nodes */
if (res->nodesetval != NULL &&
rownr < res->nodesetval->nodeNr)
{
resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
had_values = true;
}
else
resstr = NULL;

break;

case XPATH_STRING:
resstr = xmlStrdup(res->stringval);
break;

default:
elog(NOTICE, "unsupported XQuery result: %d", res->type);
resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
}

/*
 * Insert this into the appropriate column in the
 * result tuple.
 */
values[j + 1] = (char *) resstr;
}
xmlXPathFreeContext(ctxt);
}

/* Now add the tuple to the output, if there is one. */
if (had_values)
{
ret_tuple = BuildTupleFromCStrings(attinmeta, values);
tuplestore_puttuple(tupstore, ret_tuple);
heap_freetuple(ret_tuple);
}

rownr++;
} while (had_values);
}

if (doctree != NULL)
xmlFreeDoc(doctree);
doctree = NULL;

if (pkey)
pfree(pkey);
if (xmldoc)
pfree(xmldoc);
}
}
PG_CATCH();
{

@@ -85,40 +85,40 @@ xslt_process(PG_FUNCTION_ARGS)
{
/* Check to see if document is a file or a literal */

if (VARDATA(doct)[0] == '<')
doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ);
else
doctree = xmlParseFile(text_to_cstring(doct));
if (VARDATA(doct)[0] == '<')
doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ);
else
doctree = xmlParseFile(text_to_cstring(doct));

if (doctree == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"error parsing XML document");

/* Same for stylesheet */
if (VARDATA(ssheet)[0] == '<')
{
ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
VARSIZE(ssheet) - VARHDRSZ);
if (ssdoc == NULL)
if (doctree == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"error parsing stylesheet as XML document");
"error parsing XML document");

stylesheet = xsltParseStylesheetDoc(ssdoc);
}
else
stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet));
/* Same for stylesheet */
if (VARDATA(ssheet)[0] == '<')
{
ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
VARSIZE(ssheet) - VARHDRSZ);
if (ssdoc == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"error parsing stylesheet as XML document");

if (stylesheet == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"failed to parse stylesheet");
stylesheet = xsltParseStylesheetDoc(ssdoc);
}
else
stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet));

restree = xsltApplyStylesheet(stylesheet, doctree, params);
if (stylesheet == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"failed to parse stylesheet");

if (restree == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"failed to apply stylesheet");
restree = xsltApplyStylesheet(stylesheet, doctree, params);

resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet);
if (restree == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"failed to apply stylesheet");

resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet);
}
PG_CATCH();
{