
pgindent run.

This commit is contained in:
Bruce Momjian
2002-09-04 20:31:48 +00:00
parent c91ceec21d
commit e50f52a074
446 changed files with 14942 additions and 13363 deletions
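
The changes below are purely mechanical layout fixes produced by the pgindent tool. As a rough, hypothetical sketch (this snippet is not part of the commit), pgindent-style formatting takes code written like this:

static int add_pair(int a,int b) {
    int x=a, y=b;  // sum of the two arguments
    if(x>y) { return x+y; }
    return y+x;
}

and rewrites it into the layout seen throughout the hunks below: the return type on its own line, braces on their own lines, one declaration per line with aligned names, spaces around operators, // comments converted to /* */ block comments, and long comments rewrapped:

static int
add_pair(int a, int b)
{
	int		x = a,
			y = b;

	/* sum of the two arguments */
	if (x > y)
	{
		return x + y;
	}
	return y + x;
}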


@@ -299,6 +299,7 @@ gts_compress(PG_FUNCTION_ARGS)
if (entry->leafkey)
{
TSKEY *r = (TSKEY *) palloc(sizeof(TSKEY));
retval = palloc(sizeof(GISTENTRY));
r->lower = r->upper = *(Timestamp *) (entry->key);
gistentryinit(*retval, PointerGetDatum(r),


@@ -792,6 +792,7 @@ cube_lt(NDBOX * a, NDBOX * b)
if (max(a->x[i], a->x[a->dim + i]) < 0)
return (TRUE);
}
/*
* if all common dimensions are equal, the cube with more
* dimensions wins
@@ -814,6 +815,7 @@ cube_lt(NDBOX * a, NDBOX * b)
if (max(b->x[i], b->x[b->dim + i]) < 0)
return (FALSE);
}
/*
* if all common dimensions are equal, the cube with more
* dimensions wins
@@ -874,6 +876,7 @@ cube_gt(NDBOX * a, NDBOX * b)
if (max(a->x[i], a->x[a->dim + i]) > 0)
return (TRUE);
}
/*
* if all common dimensions are equal, the cube with more
* dimensions wins
@@ -896,6 +899,7 @@ cube_gt(NDBOX * a, NDBOX * b)
if (max(b->x[i], b->x[b->dim + i]) > 0)
return (FALSE);
}
/*
* if all common dimensions are equal, the cube with more
* dimensions wins
@@ -937,9 +941,9 @@ cube_same(NDBOX * a, NDBOX * b)
/*
* all dimensions of (b) are compared to those of (a); instead of
* those in (a) absent in (b), compare (a) to zero
* Since both LL and UR coordinates are compared to zero, we can
* just check them all without worrying about which is which.
* those in (a) absent in (b), compare (a) to zero Since both LL and
* UR coordinates are compared to zero, we can just check them all
* without worrying about which is which.
*/
for (i = b->dim; i < a->dim; i++)
{
@@ -974,9 +978,9 @@ cube_contains(NDBOX * a, NDBOX * b)
{
/*
* the further comparisons will make sense if the excess
* dimensions of (b) were zeroes
* Since both UL and UR coordinates must be zero, we can
* check them all without worrying about which is which.
* dimensions of (b) were zeroes Since both UL and UR coordinates
* must be zero, we can check them all without worrying about
* which is which.
*/
for (i = a->dim; i < b->dim; i++)
{
@@ -1124,9 +1128,11 @@ cube_is_point(NDBOX * a)
{
int i,
j;
for (i = 0, j = a->dim; i < a->dim; i++, j++)
{
if (a->x[i] != a->x[j]) return FALSE;
if (a->x[i] != a->x[j])
return FALSE;
}
return TRUE;
@@ -1145,6 +1151,7 @@ double *
cube_ll_coord(NDBOX * a, int n)
{
double *result;
result = (double *) palloc(sizeof(double));
*result = 0;
if (a->dim >= n && n > 0)
@@ -1157,6 +1164,7 @@ double *
cube_ur_coord(NDBOX * a, int n)
{
double *result;
result = (double *) palloc(sizeof(double));
*result = 0;
if (a->dim >= n && n > 0)
@@ -1173,8 +1181,11 @@ cube_enlarge(NDBOX * a, double * r, int n)
int size;
int i,
j;
if (*r > 0 && n > 0) dim = n;
if (a->dim > dim) dim = a->dim;
if (*r > 0 && n > 0)
dim = n;
if (a->dim > dim)
dim = a->dim;
size = offsetof(NDBOX, x[0]) + sizeof(double) * dim * 2;
result = (NDBOX *) palloc(size);
memset(result, 0, size);


@@ -277,7 +277,10 @@ dblink_fetch(PG_FUNCTION_ARGS)
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/* switch to memory context appropriate for multiple function calls */
/*
* switch to memory context appropriate for multiple function
* calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
if (persistent_conn != NULL)
@@ -378,7 +381,8 @@ dblink_fetch(PG_FUNCTION_ARGS)
SRF_RETURN_NEXT(funcctx, result);
}
else /* do when there is no more left */
else
/* do when there is no more left */
{
PQclear(res);
SRF_RETURN_DONE(funcctx);
@@ -417,7 +421,10 @@ dblink_record(PG_FUNCTION_ARGS)
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/* switch to memory context appropriate for multiple function calls */
/*
* switch to memory context appropriate for multiple function
* calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
if (fcinfo->nargs == 2)
@@ -563,7 +570,8 @@ dblink_record(PG_FUNCTION_ARGS)
SRF_RETURN_NEXT(funcctx, result);
}
else /* do when there is no more left */
else
/* do when there is no more left */
{
PQclear(res);
SRF_RETURN_DONE(funcctx);
@@ -633,8 +641,8 @@ dblink_exec(PG_FUNCTION_ARGS)
TEXTOID, -1, 0, false);
/*
* and save a copy of the command status string to return
* as our result tuple
* and save a copy of the command status string to return as
* our result tuple
*/
sql_cmd_status = PQcmdStatus(res);
}
@@ -721,8 +729,8 @@ dblink(PG_FUNCTION_ARGS)
results->res = res;
/*
* Append node to res_id to hold pointer to results.
* Needed by dblink_tok to access the data
* Append node to res_id to hold pointer to results. Needed by
* dblink_tok to access the data
*/
append_res_ptr(results);
@@ -874,7 +882,10 @@ dblink_get_pkey(PG_FUNCTION_ARGS)
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/* switch to memory context appropriate for multiple function calls */
/*
* switch to memory context appropriate for multiple function
* calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/* convert relname to rel Oid */
@@ -882,7 +893,10 @@ dblink_get_pkey(PG_FUNCTION_ARGS)
if (!OidIsValid(relid))
elog(ERROR, "dblink_get_pkey: relation does not exist");
/* need a tuple descriptor representing one INT and one TEXT column */
/*
* need a tuple descriptor representing one INT and one TEXT
* column
*/
tupdesc = CreateTemplateTupleDesc(2, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "position",
INT4OID, -1, 0, false);
@@ -896,8 +910,8 @@ dblink_get_pkey(PG_FUNCTION_ARGS)
funcctx->slot = slot;
/*
* Generate attribute metadata needed later to produce tuples from raw
* C strings
* Generate attribute metadata needed later to produce tuples from
* raw C strings
*/
attinmeta = TupleDescGetAttInMetadata(tupdesc);
funcctx->attinmeta = attinmeta;
@@ -912,7 +926,8 @@ dblink_get_pkey(PG_FUNCTION_ARGS)
/* got results, keep track of them */
funcctx->user_fctx = results;
}
else /* fast track when no results */
else
/* fast track when no results */
SRF_RETURN_DONE(funcctx);
MemoryContextSwitchTo(oldcontext);
@@ -953,7 +968,8 @@ dblink_get_pkey(PG_FUNCTION_ARGS)
SRF_RETURN_NEXT(funcctx, result);
}
else /* do when there is no more left */
else
/* do when there is no more left */
SRF_RETURN_DONE(funcctx);
}
@@ -1043,6 +1059,7 @@ dblink_build_sql_insert(PG_FUNCTION_ARGS)
pkattnums = (int16 *) PG_GETARG_POINTER(1);
pknumatts = PG_GETARG_INT16(2);
/*
* There should be at least one key attribute
*/
@@ -1053,8 +1070,8 @@ dblink_build_sql_insert(PG_FUNCTION_ARGS)
tgt_pkattvals_arry = PG_GETARG_ARRAYTYPE_P(4);
/*
* Source array is made up of key values that will be used to
* locate the tuple of interest from the local system.
* Source array is made up of key values that will be used to locate
* the tuple of interest from the local system.
*/
src_ndim = ARR_NDIM(src_pkattvals_arry);
src_dim = ARR_DIMS(src_pkattvals_arry);
@@ -1083,8 +1100,8 @@ dblink_build_sql_insert(PG_FUNCTION_ARGS)
}
/*
* Target array is made up of key values that will be used to
* build the SQL string for use on the remote system.
* Target array is made up of key values that will be used to build
* the SQL string for use on the remote system.
*/
tgt_ndim = ARR_NDIM(tgt_pkattvals_arry);
tgt_dim = ARR_DIMS(tgt_pkattvals_arry);
@@ -1176,6 +1193,7 @@ dblink_build_sql_delete(PG_FUNCTION_ARGS)
pkattnums = (int16 *) PG_GETARG_POINTER(1);
pknumatts = PG_GETARG_INT16(2);
/*
* There should be at least one key attribute
*/
@@ -1185,8 +1203,8 @@ dblink_build_sql_delete(PG_FUNCTION_ARGS)
tgt_pkattvals_arry = PG_GETARG_ARRAYTYPE_P(3);
/*
* Target array is made up of key values that will be used to
* build the SQL string for use on the remote system.
* Target array is made up of key values that will be used to build
* the SQL string for use on the remote system.
*/
tgt_ndim = ARR_NDIM(tgt_pkattvals_arry);
tgt_dim = ARR_DIMS(tgt_pkattvals_arry);
@@ -1287,6 +1305,7 @@ dblink_build_sql_update(PG_FUNCTION_ARGS)
pkattnums = (int16 *) PG_GETARG_POINTER(1);
pknumatts = PG_GETARG_INT16(2);
/*
* There should be one source array key values for each key attnum
*/
@@ -1297,8 +1316,8 @@ dblink_build_sql_update(PG_FUNCTION_ARGS)
tgt_pkattvals_arry = PG_GETARG_ARRAYTYPE_P(4);
/*
* Source array is made up of key values that will be used to
* locate the tuple of interest from the local system.
* Source array is made up of key values that will be used to locate
* the tuple of interest from the local system.
*/
src_ndim = ARR_NDIM(src_pkattvals_arry);
src_dim = ARR_DIMS(src_pkattvals_arry);
@@ -1327,8 +1346,8 @@ dblink_build_sql_update(PG_FUNCTION_ARGS)
}
/*
* Target array is made up of key values that will be used to
* build the SQL string for use on the remote system.
* Target array is made up of key values that will be used to build
* the SQL string for use on the remote system.
*/
tgt_ndim = ARR_NDIM(tgt_pkattvals_arry);
tgt_dim = ARR_DIMS(tgt_pkattvals_arry);
@@ -1756,8 +1775,7 @@ get_attnum_pk_pos(int16 *pkattnums, int16 pknumatts, int16 key)
int i;
/*
* Not likely a long list anyway, so just scan for
* the value
* Not likely a long list anyway, so just scan for the value
*/
for (i = 0; i < pknumatts; i++)
if (key == pkattnums[i])
@@ -1794,8 +1812,8 @@ get_tuple_of_interest(Oid relid, int16 *pkattnums, int16 pknumatts, char **src_p
elog(ERROR, "get_tuple_of_interest: SPI_connect returned %d", ret);
/*
* Build sql statement to look up tuple of interest
* Use src_pkattvals as the criteria.
* Build sql statement to look up tuple of interest Use src_pkattvals
* as the criteria.
*/
appendStringInfo(str, "SELECT * FROM %s WHERE ", quote_ident_cstr(relname));
@@ -1822,6 +1840,7 @@ get_tuple_of_interest(Oid relid, int16 *pkattnums, int16 pknumatts, char **src_p
sql = pstrdup(str->data);
pfree(str->data);
pfree(str);
/*
* Retrieve the desired tuple
*/
@@ -1832,12 +1851,11 @@ get_tuple_of_interest(Oid relid, int16 *pkattnums, int16 pknumatts, char **src_p
* Only allow one qualifying tuple
*/
if ((ret == SPI_OK_SELECT) && (SPI_processed > 1))
{
elog(ERROR, "get_tuple_of_interest: Source criteria may not match more than one record.");
}
else if (ret == SPI_OK_SELECT && SPI_processed == 1)
{
SPITupleTable *tuptable = SPI_tuptable;
tuple = SPI_copytuple(tuptable->vals[0]);
return tuple;
@@ -1888,6 +1906,7 @@ get_res_ptr(int32 res_id_index)
foreach(ptr, res_id)
{
dblink_results *this_res_id = (dblink_results *) lfirst(ptr);
if (this_res_id->res_id_index == res_id_index)
return this_res_id;
}
@@ -1943,8 +1962,8 @@ pgresultGetTupleDesc(PGresult *res)
for (i = 0; i < natts; i++)
{
/*
* for each field, get the name and type information from the query
* result and have TupleDescInitEntry fill in the attribute
* for each field, get the name and type information from the
* query result and have TupleDescInitEntry fill in the attribute
* information we need.
*/
attnum++;


@@ -1,6 +1,6 @@
/****************************************************************************
* pending.c
* $Id: pending.c,v 1.1 2002/06/23 21:58:08 momjian Exp $
* $Id: pending.c,v 1.2 2002/09/04 20:31:06 momjian Exp $
*
* This file contains a trigger for Postgresql-7.x to record changes to tables
* to a pending table for mirroring.
@@ -22,7 +22,10 @@
#include <commands/trigger.h>
#include <postgres.h>
enum FieldUsage {PRIMARY=0,NONPRIMARY,ALL,NUM_FIELDUSAGE};
enum FieldUsage
{
PRIMARY = 0, NONPRIMARY, ALL, NUM_FIELDUSAGE
};
int storePending(char *cpTableName, HeapTuple tBeforeTuple,
HeapTuple tAfterTuple,
@@ -45,6 +48,7 @@ char * packageData(HeapTuple tTupleData, TupleDesc tTupleDecs,
extern Datum recordchange(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(recordchange);
@@ -54,7 +58,9 @@ PG_FUNCTION_INFO_V1(recordchange);
* table the trigger was applied to. If this name is incorrect so will the
* mirroring.
****************************************************************************/
Datum recordchange(PG_FUNCTION_ARGS) {
Datum
recordchange(PG_FUNCTION_ARGS)
{
TriggerData *trigdata;
TupleDesc tupdesc;
HeapTuple beforeTuple = NULL;
@@ -62,9 +68,12 @@ Datum recordchange(PG_FUNCTION_ARGS) {
HeapTuple retTuple = NULL;
char *tblname;
char op;
if(fcinfo->context!=NULL) {
if(SPI_connect() < 0) {
if (fcinfo->context != NULL)
{
if (SPI_connect() < 0)
{
elog(NOTICE, "storePending could not connect to SPI");
return -1;
}
@@ -72,25 +81,29 @@ Datum recordchange(PG_FUNCTION_ARGS) {
/* Extract the table name */
tblname = SPI_getrelname(trigdata->tg_relation);
tupdesc = trigdata->tg_relation->rd_att;
if(TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) {
if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
{
retTuple = trigdata->tg_newtuple;
beforeTuple = trigdata->tg_trigtuple;
afterTuple = trigdata->tg_newtuple;
op = 'u';
}
else if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event)) {
else if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
{
retTuple = trigdata->tg_trigtuple;
afterTuple = trigdata->tg_trigtuple;
op = 'i';
}
else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) {
else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
{
retTuple = trigdata->tg_trigtuple;
beforeTuple = trigdata->tg_trigtuple;
op = 'd';
}
if(storePending(tblname,beforeTuple,afterTuple,tupdesc,trigdata,op)) {
if (storePending(tblname, beforeTuple, afterTuple, tupdesc, trigdata, op))
{
/* An error occoured. Skip the operation. */
elog(ERROR, "Operation could not be mirrored");
return PointerGetDatum(NULL);
@@ -102,7 +115,8 @@ Datum recordchange(PG_FUNCTION_ARGS) {
SPI_finish();
return PointerGetDatum(retTuple);
}
else {
else
{
/*
* Not being called as a trigger.
*/
@@ -115,14 +129,18 @@ Datum recordchange(PG_FUNCTION_ARGS) {
* Constructs and executes an SQL query to write a record of this tuple change
* to the pending table.
*****************************************************************************/
int storePending(char * cpTableName, HeapTuple tBeforeTuple,
int
storePending(char *cpTableName, HeapTuple tBeforeTuple,
HeapTuple tAfterTuple,
TupleDesc tTupDesc,
TriggerData * tpTrigData,char cOp) {
TriggerData *tpTrigData, char cOp)
{
char *cpQueryBase = "INSERT INTO \"Pending\" (\"TableName\",\"Op\",\"XID\") VALUES ($1,$2,$3)";
int iResult = 0;
HeapTuple tCurTuple; // Points the current tuple(before or after)
HeapTuple tCurTuple;
//Points the current tuple(before or after)
Datum saPlanData[4];
Oid taPlanArgTypes[3] = {NAMEOID, CHAROID, INT4OID};
void *vpPlan;
@@ -133,10 +151,9 @@ int storePending(char * cpTableName, HeapTuple tBeforeTuple,
vpPlan = SPI_prepare(cpQueryBase, 3, taPlanArgTypes);
if(vpPlan==NULL) {
if (vpPlan == NULL)
elog(NOTICE, "Error creating plan");
}
// SPI_saveplan(vpPlan);
/* SPI_saveplan(vpPlan); */
saPlanData[0] = PointerGetDatum(cpTableName);
saPlanData[1] = CharGetDatum(cOp);
@@ -144,23 +161,24 @@ int storePending(char * cpTableName, HeapTuple tBeforeTuple,
iResult = SPI_execp(vpPlan, saPlanData, NULL, 1);
if(iResult < 0) {
if (iResult < 0)
elog(NOTICE, "storedPending fired (%s) returned %d", cpQueryBase, iResult);
}
#if defined DEBUG_OUTPUT
elog(NOTICE, "row successfully stored in pending table");
#endif
if(cOp=='d') {
if (cOp == 'd')
{
/**
* This is a record of a delete operation.
* Just store the key data.
*/
iResult = storeKeyInfo(cpTableName, tBeforeTuple, tTupDesc, tpTrigData);
}
else if (cOp=='i') {
else if (cOp == 'i')
{
/**
* An Insert operation.
* Store all data
@@ -168,7 +186,8 @@ int storePending(char * cpTableName, HeapTuple tBeforeTuple,
iResult = storeData(cpTableName, tAfterTuple, tTupDesc, tpTrigData, TRUE);
}
else {
else
{
/* op must be an update. */
iResult = storeKeyInfo(cpTableName, tBeforeTuple, tTupDesc, tpTrigData);
iResult = iResult ? iResult : storeData(cpTableName, tAfterTuple, tTupDesc,
@@ -183,9 +202,11 @@ int storePending(char * cpTableName, HeapTuple tBeforeTuple,
}
int storeKeyInfo(char * cpTableName, HeapTuple tTupleData,
int
storeKeyInfo(char *cpTableName, HeapTuple tTupleData,
TupleDesc tTupleDesc,
TriggerData * tpTrigData) {
TriggerData *tpTrigData)
{
Oid saPlanArgTypes[1] = {NAMEOID};
char *insQuery = "INSERT INTO \"PendingData\" (\"SeqId\",\"IsKey\",\"Data\") VALUES(currval('\"Pending_SeqId_seq\"'),'t',$1)";
@@ -195,12 +216,13 @@ int storeKeyInfo(char * cpTableName, HeapTuple tTupleData,
int iRetCode;
pplan = SPI_prepare(insQuery, 1, saPlanArgTypes);
if(pplan==NULL) {
if (pplan == NULL)
{
elog(NOTICE, "Could not prepare INSERT plan");
return -1;
}
// pplan = SPI_saveplan(pplan);
/* pplan = SPI_saveplan(pplan); */
cpKeyData = packageData(tTupleData, tTupleDesc, tpTrigData, PRIMARY);
#if defined DEBUG_OUTPUT
elog(NOTICE, cpKeyData);
@@ -209,11 +231,11 @@ int storeKeyInfo(char * cpTableName, HeapTuple tTupleData,
iRetCode = SPI_execp(pplan, saPlanData, NULL, 1);
if(cpKeyData!=NULL) {
if (cpKeyData != NULL)
SPI_pfree(cpKeyData);
}
if(iRetCode != SPI_OK_INSERT ) {
if (iRetCode != SPI_OK_INSERT)
{
elog(NOTICE, "Error inserting row in pendingDelete");
return -1;
}
@@ -228,7 +250,9 @@ int storeKeyInfo(char * cpTableName, HeapTuple tTupleData,
int2vector * getPrimaryKey(Oid tblOid) {
int2vector *
getPrimaryKey(Oid tblOid)
{
char *queryBase;
char *query;
bool isNull;
@@ -237,11 +261,13 @@ int2vector * getPrimaryKey(Oid tblOid) {
HeapTuple resTuple;
Datum resDatum;
int ret;
queryBase = "SELECT indkey FROM pg_index WHERE indisprimary='t' AND indrelid=";
query = SPI_palloc(strlen(queryBase) + MAX_OID_LEN + 1);
sprintf(query, "%s%d", queryBase, tblOid);
ret = SPI_exec(query, 1);
if(ret != SPI_OK_SELECT || SPI_processed != 1 ) {
if (ret != SPI_OK_SELECT || SPI_processed != 1)
{
elog(NOTICE, "Could not select primary index key");
return NULL;
}
@@ -260,8 +286,10 @@ int2vector * getPrimaryKey(Oid tblOid) {
/******************************************************************************
* Stores a copy of the non-key data for the row.
*****************************************************************************/
int storeData(char * cpTableName,HeapTuple tTupleData,TupleDesc tTupleDesc,
TriggerData * tpTrigData,int iIncludeKeyData) {
int
storeData(char *cpTableName, HeapTuple tTupleData, TupleDesc tTupleDesc,
TriggerData *tpTrigData, int iIncludeKeyData)
{
Oid planArgTypes[1] = {NAMEOID};
char *insQuery = "INSERT INTO \"PendingData\" (\"SeqId\",\"IsKey\",\"Data\") VALUES(currval('\"Pending_SeqId_seq\"'),'f',$1)";
@@ -271,27 +299,26 @@ int storeData(char * cpTableName,HeapTuple tTupleData,TupleDesc tTupleDesc,
int iRetValue;
pplan = SPI_prepare(insQuery, 1, planArgTypes);
if(pplan==NULL) {
if (pplan == NULL)
{
elog(NOTICE, "Could not prepare INSERT plan");
return -1;
}
// pplan = SPI_saveplan(pplan);
if(iIncludeKeyData==0) {
/* pplan = SPI_saveplan(pplan); */
if (iIncludeKeyData == 0)
cpKeyData = packageData(tTupleData, tTupleDesc, tpTrigData, NONPRIMARY);
}
else {
else
cpKeyData = packageData(tTupleData, tTupleDesc, tpTrigData, ALL);
}
planData[0] = PointerGetDatum(cpKeyData);
iRetValue = SPI_execp(pplan, planData, NULL, 1);
if(cpKeyData!=0) {
if (cpKeyData != 0)
SPI_pfree(cpKeyData);
}
if(iRetValue != SPI_OK_INSERT ) {
if (iRetValue != SPI_OK_INSERT)
{
elog(NOTICE, "Error inserting row in pendingDelete");
return -1;
}
@@ -317,9 +344,11 @@ int storeData(char * cpTableName,HeapTuple tTupleData,TupleDesc tTupleDesc,
* NONPRIMARY implies include only non-primary key fields.
* ALL implies include all fields.
*/
char * packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
char *
packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
TriggerData *tpTrigData,
enum FieldUsage eKeyUsage ) {
enum FieldUsage eKeyUsage)
{
int iNumCols;
int2vector *tpPKeys = NULL;
int iColumnCounter;
@@ -329,22 +358,22 @@ char * packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
iNumCols = tTupleDesc->natts;
if(eKeyUsage!=ALL) {
if (eKeyUsage != ALL)
{
tpPKeys = getPrimaryKey(tpTrigData->tg_relation->rd_id);
if(tpPKeys==NULL) {
if (tpPKeys == NULL)
return NULL;
}
}
#if defined DEBUG_OUTPUT
if(tpPKeys!=NULL) {
if (tpPKeys != NULL)
elog(NOTICE, "Have primary keys");
}
#endif
cpDataBlock = SPI_palloc(BUFFER_SIZE);
iDataBlockSize = BUFFER_SIZE;
iUsedDataBlock = 0; /* To account for the null */
for(iColumnCounter=1; iColumnCounter <=iNumCols; iColumnCounter++) {
for (iColumnCounter = 1; iColumnCounter <= iNumCols; iColumnCounter++)
{
int iIsPrimaryKey;
int iPrimaryKeyIndex;
char *cpUnFormatedPtr;
@@ -352,17 +381,22 @@ char * packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
char *cpFieldName;
char *cpFieldData;
if(eKeyUsage!=ALL) {
//Determine if this is a primary key or not.
if (eKeyUsage != ALL)
{
/* Determine if this is a primary key or not. */
iIsPrimaryKey = 0;
for (iPrimaryKeyIndex = 0; (*tpPKeys)[iPrimaryKeyIndex] != 0;
iPrimaryKeyIndex++) {
if((*tpPKeys)[iPrimaryKeyIndex]==iColumnCounter) {
iPrimaryKeyIndex++)
{
if ((*tpPKeys)[iPrimaryKeyIndex] == iColumnCounter)
{
iIsPrimaryKey = 1;
break;
}
}
if( iIsPrimaryKey ? (eKeyUsage!=PRIMARY) : (eKeyUsage!=NONPRIMARY)) {
if (iIsPrimaryKey ? (eKeyUsage != PRIMARY) : (eKeyUsage != NONPRIMARY))
{
/**
* Don't use.
*/
@@ -377,7 +411,8 @@ char * packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
#if defined DEBUG_OUTPUT
elog(NOTICE, cpFieldName);
#endif
while(iDataBlockSize - iUsedDataBlock < strlen(cpFieldName) +4) {
while (iDataBlockSize - iUsedDataBlock < strlen(cpFieldName) + 4)
{
cpDataBlock = SPI_repalloc(cpDataBlock, iDataBlockSize + BUFFER_SIZE);
iDataBlockSize = iDataBlockSize + BUFFER_SIZE;
}
@@ -387,12 +422,14 @@ char * packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
cpUnFormatedPtr = cpFieldData;
cpFormatedPtr = cpDataBlock + iUsedDataBlock;
if(cpFieldData!=NULL) {
if (cpFieldData != NULL)
{
*cpFormatedPtr = '\'';
iUsedDataBlock++;
cpFormatedPtr++;
}
else {
else
{
*cpFormatedPtr = ' ';
iUsedDataBlock++;
cpFormatedPtr++;
@@ -403,13 +440,16 @@ char * packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
elog(NOTICE, cpFieldData);
elog(NOTICE, "Starting format loop");
#endif
while(*cpUnFormatedPtr!=0) {
while(iDataBlockSize - iUsedDataBlock < 2) {
while (*cpUnFormatedPtr != 0)
{
while (iDataBlockSize - iUsedDataBlock < 2)
{
cpDataBlock = SPI_repalloc(cpDataBlock, iDataBlockSize + BUFFER_SIZE);
iDataBlockSize = iDataBlockSize + BUFFER_SIZE;
cpFormatedPtr = cpDataBlock + iUsedDataBlock;
}
if(*cpUnFormatedPtr=='\\' || *cpUnFormatedPtr=='\'') {
if (*cpUnFormatedPtr == '\\' || *cpUnFormatedPtr == '\'')
{
*cpFormatedPtr = '\\';
cpFormatedPtr++;
iUsedDataBlock++;
@@ -422,7 +462,8 @@ char * packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
SPI_pfree(cpFieldData);
while(iDataBlockSize - iUsedDataBlock < 3) {
while (iDataBlockSize - iUsedDataBlock < 3)
{
cpDataBlock = SPI_repalloc(cpDataBlock, iDataBlockSize + BUFFER_SIZE);
iDataBlockSize = iDataBlockSize + BUFFER_SIZE;
cpFormatedPtr = cpDataBlock + iUsedDataBlock;
@@ -434,9 +475,8 @@ char * packageData(HeapTuple tTupleData, TupleDesc tTupleDesc,
#endif
} /* for iColumnCounter */
if(tpPKeys!=NULL) {
if (tpPKeys != NULL)
SPI_pfree(tpPKeys);
}
#if defined DEBUG_OUTPUT
elog(NOTICE, "Returning");
#endif


@@ -80,7 +80,8 @@ PG_FUNCTION_INFO_V1(int_enum);
* Manage the aggregation state of the array
* You need to specify the correct memory context, or it will vanish!
*/
static PGARRAY * GetPGArray(int4 state, int fAdd)
static PGARRAY *
GetPGArray(int4 state, int fAdd)
{
PGARRAY *p = (PGARRAY *) state;
@@ -137,9 +138,11 @@ static PGARRAY * GetPGArray(int4 state, int fAdd)
/* Shrinks the array to its actual size and moves it into the standard
* memory allocation context, frees working memory */
static PGARRAY *ShrinkPGArray(PGARRAY *p)
static PGARRAY *
ShrinkPGArray(PGARRAY * p)
{
PGARRAY *pnew = NULL;
if (p)
{
/* get target size */
@@ -150,7 +153,10 @@ static PGARRAY *ShrinkPGArray(PGARRAY *p)
if (pnew)
{
/* Fix up the fields in the new structure, so Postgres understands */
/*
* Fix up the fields in the new structure, so Postgres
* understands
*/
memcpy(pnew, p, cb);
pnew->a.size = cb;
pnew->a.ndim = 1;
@@ -161,53 +167,46 @@ static PGARRAY *ShrinkPGArray(PGARRAY *p)
pnew->lower = 0;
}
else
{
elog(ERROR, "Integer aggregator, can't allocate memory");
}
pfree(p);
}
return pnew;
}
/* Called for each iteration during an aggregate function */
Datum int_agg_state(PG_FUNCTION_ARGS)
Datum
int_agg_state(PG_FUNCTION_ARGS)
{
int4 state = PG_GETARG_INT32(0);
int4 value = PG_GETARG_INT32(1);
PGARRAY *p = GetPGArray(state, 1);
if (!p)
{
elog(ERROR, "No aggregate storage");
}
else if (p->items >= p->lower)
{
elog(ERROR, "aggregate storage too small");
}
else
{
p->array[p->items++] = value;
}
PG_RETURN_INT32(p);
}
/* This is the final function used for the integer aggregator. It returns all the integers
* collected as a one dimentional integer array */
Datum int_agg_final_array(PG_FUNCTION_ARGS)
Datum
int_agg_final_array(PG_FUNCTION_ARGS)
{
PGARRAY *pnew = ShrinkPGArray(GetPGArray(PG_GETARG_INT32(0), 0));
if (pnew)
{
PG_RETURN_POINTER(pnew);
}
else
{
PG_RETURN_NULL();
}
}
/* This function accepts an array, and returns one item for each entry in the array */
Datum int_enum(PG_FUNCTION_ARGS)
Datum
int_enum(PG_FUNCTION_ARGS)
{
PGARRAY *p = (PGARRAY *) PG_GETARG_POINTER(0);
CTX *pc;
@@ -248,10 +247,9 @@ Datum int_enum(PG_FUNCTION_ARGS)
fcinfo->context = (Node *) pc;
pc->num = 0;
}
else /* use an existing one */
{
else
/* use an existing one */
pc = (CTX *) fcinfo->context;
}
/* Are we done yet? */
if (pc->num >= pc->p->items)
{
@@ -262,9 +260,11 @@ Datum int_enum(PG_FUNCTION_ARGS)
fcinfo->context = NULL;
rsi->isDone = ExprEndResult;
}
else /* nope, return the next value */
else
/* nope, return the next value */
{
int val = pc->p->array[pc->num++];
rsi->isDone = ExprMultipleResult;
PG_RETURN_INT32(val);
}

File diff suppressed because it is too large.


@@ -13,14 +13,19 @@
PG_FUNCTION_INFO_V1(_ltree_compress);
Datum _ltree_compress(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(_ltree_same);
Datum _ltree_same(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(_ltree_union);
Datum _ltree_union(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(_ltree_penalty);
Datum _ltree_penalty(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(_ltree_picksplit);
Datum _ltree_picksplit(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(_ltree_consistent);
Datum _ltree_consistent(PG_FUNCTION_ARGS);
@@ -39,12 +44,14 @@ Datum _ltree_consistent(PG_FUNCTION_ARGS);
#define WISH_F(a,b,c) (double)( -(double)(((a)-(b))*((a)-(b))*((a)-(b)))*(c) )
static void
hashing(BITVECP sign, ltree *t) {
hashing(BITVECP sign, ltree * t)
{
int tlen = t->numlevel;
ltree_level *cur = LTREE_FIRST(t);
int hash;
while(tlen > 0) {
while (tlen > 0)
{
hash = ltree_crc32_sz(cur->name, cur->len);
AHASH(sign, hash);
cur = LEVEL_NEXT(cur);
@@ -53,11 +60,13 @@ hashing(BITVECP sign, ltree *t) {
}
Datum
_ltree_compress(PG_FUNCTION_ARGS) {
_ltree_compress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTENTRY *retval = entry;
if ( entry->leafkey ) { /* ltree */
if (entry->leafkey)
{ /* ltree */
ltree_gist *key;
ArrayType *val = DatumGetArrayTypeP(entry->key);
int4 len = LTG_HDRSIZE + ASIGLEN;
@@ -72,7 +81,8 @@ _ltree_compress(PG_FUNCTION_ARGS) {
key->flag = 0;
MemSet(LTG_SIGN(key), 0, sizeof(ASIGLEN));
while( num>0 ) {
while (num > 0)
{
hashing(LTG_SIGN(key), item);
num--;
item = NEXTVAL(item);
@@ -85,8 +95,11 @@ _ltree_compress(PG_FUNCTION_ARGS) {
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, key->len, FALSE);
} else {
int4 i,len;
}
else
{
int4 i,
len;
ltree_gist *key;
BITVECP sign = LTG_SIGN(DatumGetPointer(entry->key));
@@ -110,23 +123,28 @@ _ltree_compress(PG_FUNCTION_ARGS) {
}
Datum
_ltree_same(PG_FUNCTION_ARGS) {
_ltree_same(PG_FUNCTION_ARGS)
{
ltree_gist *a = (ltree_gist *) PG_GETARG_POINTER(0);
ltree_gist *b = (ltree_gist *) PG_GETARG_POINTER(1);
bool *result = (bool *) PG_GETARG_POINTER(2);
if ( LTG_ISALLTRUE(a) && LTG_ISALLTRUE(b) ) {
if (LTG_ISALLTRUE(a) && LTG_ISALLTRUE(b))
*result = true;
} else if ( LTG_ISALLTRUE(a) ) {
else if (LTG_ISALLTRUE(a))
*result = false;
} else if ( LTG_ISALLTRUE(b) ) {
else if (LTG_ISALLTRUE(b))
*result = false;
} else {
else
{
int4 i;
BITVECP sa=LTG_SIGN(a), sb=LTG_SIGN(b);
BITVECP sa = LTG_SIGN(a),
sb = LTG_SIGN(b);
*result = true;
ALOOPBYTE(
if ( sa[i] != sb[i] ) {
if (sa[i] != sb[i])
{
*result = false;
break;
}
@@ -136,7 +154,8 @@ _ltree_same(PG_FUNCTION_ARGS) {
}
static int4
unionkey( BITVECP sbase, ltree_gist *add ) {
unionkey(BITVECP sbase, ltree_gist * add)
{
int4 i;
BITVECP sadd = LTG_SIGN(add);
@@ -150,7 +169,8 @@ unionkey( BITVECP sbase, ltree_gist *add ) {
}
Datum
_ltree_union(PG_FUNCTION_ARGS) {
_ltree_union(PG_FUNCTION_ARGS)
{
bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
int *size = (int *) PG_GETARG_POINTER(1);
ABITVEC base;
@@ -160,8 +180,10 @@ _ltree_union(PG_FUNCTION_ARGS) {
ltree_gist *result;
MemSet((void *) base, 0, sizeof(ABITVEC));
for(i=0;i<len;i++) {
if ( unionkey( base, GETENTRY(entryvec, i) ) ) {
for (i = 0; i < len; i++)
{
if (unionkey(base, GETENTRY(entryvec, i)))
{
flag = LTG_ALLTRUE;
break;
}
@@ -178,8 +200,11 @@ _ltree_union(PG_FUNCTION_ARGS) {
}
static int4
sizebitvec( BITVECP sign ) {
int4 size=0, i;
sizebitvec(BITVECP sign)
{
int4 size = 0,
i;
ALOOPBYTE(
size += SUMBIT(*(char *) sign);
sign = (BITVECP) (((char *) sign) + 1);
@@ -188,23 +213,27 @@ sizebitvec( BITVECP sign ) {
}
Datum
_ltree_penalty(PG_FUNCTION_ARGS) {
_ltree_penalty(PG_FUNCTION_ARGS)
{
ltree_gist *origval = (ltree_gist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
ltree_gist *newval = (ltree_gist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
float *penalty = (float *) PG_GETARG_POINTER(2);
BITVECP orig = LTG_SIGN(origval);
if ( LTG_ISALLTRUE(origval) ) {
if (LTG_ISALLTRUE(origval))
{
*penalty = 0.0;
PG_RETURN_POINTER(penalty);
}
if ( LTG_ISALLTRUE(newval) ) {
if (LTG_ISALLTRUE(newval))
*penalty = (float) (ASIGLENBIT - sizebitvec(orig));
} else {
else
{
unsigned char valtmp;
BITVECP nval = LTG_SIGN(newval);
int4 i, unionsize=0;
int4 i,
unionsize = 0;
ALOOPBYTE(
valtmp = nval[i] | orig[i];
@@ -215,54 +244,75 @@ _ltree_penalty(PG_FUNCTION_ARGS) {
PG_RETURN_POINTER(penalty);
}
typedef struct {
typedef struct
{
OffsetNumber pos;
int4 cost;
} SPLITCOST;
static int
comparecost( const void *a, const void *b ) {
comparecost(const void *a, const void *b)
{
return ((SPLITCOST *) a)->cost - ((SPLITCOST *) b)->cost;
}
Datum
_ltree_picksplit(PG_FUNCTION_ARGS) {
_ltree_picksplit(PG_FUNCTION_ARGS)
{
bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1);
OffsetNumber k,j;
ltree_gist *datum_l, *datum_r;
ABITVEC union_l, union_r;
OffsetNumber k,
j;
ltree_gist *datum_l,
*datum_r;
ABITVEC union_l,
union_r;
bool firsttime = true;
int4 size_alpha,size_beta,sizeu,sizei;
int4 size_waste, waste = 0.0;
int4 size_l, size_r;
int4 size_alpha,
size_beta,
sizeu,
sizei;
int4 size_waste,
waste = 0.0;
int4 size_l,
size_r;
int4 nbytes;
OffsetNumber seed_1=0, seed_2=0;
OffsetNumber *left, *right;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
*right;
OffsetNumber maxoff;
BITVECP ptra, ptrb, ptrc;
BITVECP ptra,
ptrb,
ptrc;
int i;
unsigned char valtmp;
SPLITCOST *costvector;
ltree_gist *_k, *_j;
ltree_gist *_k,
*_j;
maxoff = ((VARSIZE(entryvec) - VARHDRSZ) / sizeof(GISTENTRY)) - 2;
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
v->spl_left = (OffsetNumber *) palloc(nbytes);
v->spl_right = (OffsetNumber *) palloc(nbytes);
for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) {
for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k))
{
_k = GETENTRY(entryvec, k);
for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) {
for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j))
{
_j = GETENTRY(entryvec, j);
if ( LTG_ISALLTRUE(_k) || LTG_ISALLTRUE(_j) ) {
if (LTG_ISALLTRUE(_k) || LTG_ISALLTRUE(_j))
{
sizeu = ASIGLENBIT;
if (LTG_ISALLTRUE(_k) && LTG_ISALLTRUE(_j))
sizei = ASIGLENBIT;
else
sizei = (LTG_ISALLTRUE(_k)) ?
sizebitvec(LTG_SIGN(_j)) : sizebitvec(LTG_SIGN(_k));
} else {
}
else
{
sizeu = sizei = 0;
ptra = LTG_SIGN(_j);
ptrb = LTG_SIGN(_k);
@@ -291,7 +341,8 @@ _ltree_picksplit(PG_FUNCTION_ARGS) {
);
}
size_waste = sizeu - sizei;
if (size_waste > waste || firsttime) {
if (size_waste > waste || firsttime)
{
waste = size_waste;
seed_1 = k;
seed_2 = j;
@@ -305,29 +356,40 @@ _ltree_picksplit(PG_FUNCTION_ARGS) {
right = v->spl_right;
v->spl_nright = 0;
if ( seed_1 == 0 || seed_2 == 0 ) {
if (seed_1 == 0 || seed_2 == 0)
{
seed_1 = 1;
seed_2 = 2;
}
/* form initial .. */
if ( LTG_ISALLTRUE(GETENTRY(entryvec,seed_1)) ) {
if (LTG_ISALLTRUE(GETENTRY(entryvec, seed_1)))
{
datum_l = (ltree_gist *) palloc(LTG_HDRSIZE);
datum_l->len = LTG_HDRSIZE; datum_l->flag = LTG_ALLTRUE;
datum_l->len = LTG_HDRSIZE;
datum_l->flag = LTG_ALLTRUE;
size_l = ASIGLENBIT;
} else {
}
else
{
datum_l = (ltree_gist *) palloc(LTG_HDRSIZE + ASIGLEN);
datum_l->len = LTG_HDRSIZE + ASIGLEN; datum_l->flag = 0;
datum_l->len = LTG_HDRSIZE + ASIGLEN;
datum_l->flag = 0;
memcpy((void *) LTG_SIGN(datum_l), (void *) LTG_SIGN(GETENTRY(entryvec, seed_1)), sizeof(ABITVEC));
size_l = sizebitvec(LTG_SIGN(datum_l));
}
if ( LTG_ISALLTRUE(GETENTRY(entryvec,seed_2)) ) {
if (LTG_ISALLTRUE(GETENTRY(entryvec, seed_2)))
{
datum_r = (ltree_gist *) palloc(LTG_HDRSIZE);
datum_r->len = LTG_HDRSIZE; datum_r->flag = LTG_ALLTRUE;
datum_r->len = LTG_HDRSIZE;
datum_r->flag = LTG_ALLTRUE;
size_r = ASIGLENBIT;
} else {
}
else
{
datum_r = (ltree_gist *) palloc(LTG_HDRSIZE + ASIGLEN);
datum_r->len = LTG_HDRSIZE + ASIGLEN; datum_r->flag = 0;
datum_r->len = LTG_HDRSIZE + ASIGLEN;
datum_r->flag = 0;
memcpy((void *) LTG_SIGN(datum_r), (void *) LTG_SIGN(GETENTRY(entryvec, seed_2)), sizeof(ABITVEC));
size_r = sizebitvec(LTG_SIGN(datum_r));
}
@@ -335,32 +397,43 @@ _ltree_picksplit(PG_FUNCTION_ARGS) {
maxoff = OffsetNumberNext(maxoff);
/* sort before ... */
costvector = (SPLITCOST *) palloc(sizeof(SPLITCOST) * maxoff);
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) {
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j))
{
costvector[j - 1].pos = j;
_j = GETENTRY(entryvec, j);
if ( LTG_ISALLTRUE(_j) ) {
if (LTG_ISALLTRUE(_j))
{
size_alpha = ASIGLENBIT - size_l;
size_beta = ASIGLENBIT - size_r;
} else {
}
else
{
ptra = LTG_SIGN(datum_l);
ptrb = LTG_SIGN(datum_r);
ptrc = LTG_SIGN(_j);
size_beta = size_alpha = 0;
if ( LTG_ISALLTRUE(datum_l) ) {
if ( !LTG_ISALLTRUE(datum_r) ) {
if (LTG_ISALLTRUE(datum_l))
{
if (!LTG_ISALLTRUE(datum_r))
{
ALOOPBIT(
if (GETBIT(ptrc, i) && !GETBIT(ptrb, i))
size_beta++;
);
}
} else if ( LTG_ISALLTRUE(datum_r) ) {
if ( !LTG_ISALLTRUE(datum_l) ) {
}
else if (LTG_ISALLTRUE(datum_r))
{
if (!LTG_ISALLTRUE(datum_l))
{
ALOOPBIT(
if (GETBIT(ptrc, i) && !GETBIT(ptra, i))
size_alpha++;
);
}
} else {
}
else
{
ALOOPBIT(
if (GETBIT(ptrc, i) && !GETBIT(ptra, i))
size_alpha++;
@@ -373,22 +446,27 @@ _ltree_picksplit(PG_FUNCTION_ARGS) {
}
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
for (k = 0; k < maxoff; k++) {
for (k = 0; k < maxoff; k++)
{
j = costvector[k].pos;
_j = GETENTRY(entryvec, j);
if ( j == seed_1 ) {
if (j == seed_1)
{
*left++ = j;
v->spl_nleft++;
continue;
} else if ( j == seed_2 ) {
}
else if (j == seed_2)
{
*right++ = j;
v->spl_nright++;
continue;
}
if ( LTG_ISALLTRUE(datum_l) || LTG_ISALLTRUE(_j) ) {
if (LTG_ISALLTRUE(datum_l) || LTG_ISALLTRUE(_j))
size_alpha = ASIGLENBIT;
} else {
else
{
ptra = LTG_SIGN(_j);
ptrb = LTG_SIGN(datum_l);
size_alpha = 0;
@@ -398,9 +476,10 @@ _ltree_picksplit(PG_FUNCTION_ARGS) {
);
}
if ( LTG_ISALLTRUE(datum_r) || LTG_ISALLTRUE(_j) ) {
if (LTG_ISALLTRUE(datum_r) || LTG_ISALLTRUE(_j))
size_beta = ASIGLENBIT;
} else {
else
{
ptra = LTG_SIGN(_j);
ptrb = LTG_SIGN(datum_r);
size_beta = 0;
@@ -410,23 +489,32 @@ _ltree_picksplit(PG_FUNCTION_ARGS) {
);
}
if (size_alpha - size_l < size_beta - size_r + WISH_F(v->spl_nleft, v->spl_nright, 0.1)) {
if ( ! LTG_ISALLTRUE( datum_l ) ) {
if ( size_alpha == ASIGLENBIT ) {
if (size_alpha - size_l < size_beta - size_r + WISH_F(v->spl_nleft, v->spl_nright, 0.1))
{
if (!LTG_ISALLTRUE(datum_l))
{
if (size_alpha == ASIGLENBIT)
{
if (size_alpha != size_l)
MemSet((void *) LTG_SIGN(datum_l), 0xff, sizeof(ABITVEC));
} else
}
else
memcpy((void *) LTG_SIGN(datum_l), (void *) union_l, sizeof(ABITVEC));
}
size_l = size_alpha;
*left++ = j;
v->spl_nleft++;
} else {
if ( ! LTG_ISALLTRUE( datum_r ) ) {
if ( size_beta == ASIGLENBIT ) {
}
else
{
if (!LTG_ISALLTRUE(datum_r))
{
if (size_beta == ASIGLENBIT)
{
if (size_beta != size_r)
MemSet((void *) LTG_SIGN(datum_r), 0xff, sizeof(ABITVEC));
} else
}
else
memcpy((void *) LTG_SIGN(datum_r), (void *) union_r, sizeof(ABITVEC));
}
size_r = size_beta;
@@ -445,7 +533,8 @@ _ltree_picksplit(PG_FUNCTION_ARGS) {
}
static bool
gist_te(ltree_gist *key, ltree* query) {
gist_te(ltree_gist * key, ltree * query)
{
ltree_level *curq = LTREE_FIRST(query);
BITVECP sign = LTG_SIGN(key);
int qlen = query->numlevel;
@@ -454,7 +543,8 @@ gist_te(ltree_gist *key, ltree* query) {
if (LTG_ISALLTRUE(key))
return true;
while( qlen>0 ) {
while (qlen > 0)
{
hv = ltree_crc32_sz(curq->name, curq->len);
if (!GETBIT(sign, AHASHVAL(hv)))
return false;
@@ -466,12 +556,14 @@ gist_te(ltree_gist *key, ltree* query) {
}
static bool
checkcondition_bit(void *checkval, ITEM* val ) {
checkcondition_bit(void *checkval, ITEM * val)
{
return (FLG_CANLOOKSIGN(val->flag)) ? GETBIT(checkval, AHASHVAL(val->val)) : true;
}
static bool
gist_qtxt(ltree_gist *key, ltxtquery* query) {
gist_qtxt(ltree_gist * key, ltxtquery * query)
{
if (LTG_ISALLTRUE(key))
return true;
@@ -483,7 +575,8 @@ gist_qtxt(ltree_gist *key, ltxtquery* query) {
}
static bool
gist_qe(ltree_gist *key, lquery* query) {
gist_qe(ltree_gist * key, lquery * query)
{
lquery_level *curq = LQUERY_FIRST(query);
BITVECP sign = LTG_SIGN(key);
int qlen = query->numlevel;
@@ -491,13 +584,18 @@ gist_qe(ltree_gist *key, lquery* query) {
if (LTG_ISALLTRUE(key))
return true;
while( qlen>0 ) {
if ( curq->numvar && LQL_CANLOOKSIGN(curq) ) {
while (qlen > 0)
{
if (curq->numvar && LQL_CANLOOKSIGN(curq))
{
bool isexist = false;
int vlen = curq->numvar;
lquery_variant *curv = LQL_FIRST(curq);
while( vlen>0 ) {
if ( GETBIT( sign, AHASHVAL( curv->val ) ) ) {
while (vlen > 0)
{
if (GETBIT(sign, AHASHVAL(curv->val)))
{
isexist = true;
break;
}
@@ -517,7 +615,8 @@ gist_qe(ltree_gist *key, lquery* query) {
Datum
_ltree_consistent(PG_FUNCTION_ARGS) {
_ltree_consistent(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
char *query = (char *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
ltree_gist *key = (ltree_gist *) DatumGetPointer(entry->key);
@@ -528,7 +627,8 @@ _ltree_consistent(PG_FUNCTION_ARGS) {
#define assert_enabled 0
#endif
switch( strategy ) {
switch (strategy)
{
case 10:
case 11:
res = gist_te(key, (ltree *) query);
@@ -546,4 +646,3 @@ _ltree_consistent(PG_FUNCTION_ARGS) {
}
PG_RETURN_BOOL(res);
}


@@ -32,10 +32,12 @@ PG_FUNCTION_INFO_V1(_lca);
Datum _lca(PG_FUNCTION_ARGS);
typedef Datum (*PGCALL2) (PG_FUNCTION_ARGS);
#define NEXTVAL(x) ( (ltree*)( (char*)(x) + INTALIGN( VARSIZE(x) ) ) )
static bool
array_iterator( ArrayType *la, PGCALL2 callback, void* param, ltree ** found) {
array_iterator(ArrayType *la, PGCALL2 callback, void *param, ltree ** found)
{
int num = ArrayGetNItems(ARR_NDIM(la), ARR_DIMS(la));
ltree *item = (ltree *) ARR_DATA_PTR(la);
@@ -44,9 +46,11 @@ array_iterator( ArrayType *la, PGCALL2 callback, void* param, ltree ** found) {
if (found)
*found = NULL;
while( num>0 ) {
while (num > 0)
{
if (DatumGetBool(DirectFunctionCall2(callback,
PointerGetDatum(item), PointerGetDatum(param) ) ) ) {
PointerGetDatum(item), PointerGetDatum(param))))
{
if (found)
*found = item;
@@ -60,17 +64,20 @@ array_iterator( ArrayType *la, PGCALL2 callback, void* param, ltree ** found) {
}
Datum
_ltree_isparent(PG_FUNCTION_ARGS) {
_ltree_isparent(PG_FUNCTION_ARGS)
{
ArrayType *la = PG_GETARG_ARRAYTYPE_P(0);
ltree *query = PG_GETARG_LTREE(1);
bool res = array_iterator(la, ltree_isparent, (void *) query, NULL);
PG_FREE_IF_COPY(la, 0);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_BOOL(res);
}
Datum
_ltree_r_isparent(PG_FUNCTION_ARGS) {
_ltree_r_isparent(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall2(_ltree_isparent,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
@@ -78,17 +85,20 @@ _ltree_r_isparent(PG_FUNCTION_ARGS) {
}
Datum
_ltree_risparent(PG_FUNCTION_ARGS) {
_ltree_risparent(PG_FUNCTION_ARGS)
{
ArrayType *la = PG_GETARG_ARRAYTYPE_P(0);
ltree *query = PG_GETARG_LTREE(1);
bool res = array_iterator(la, ltree_risparent, (void *) query, NULL);
PG_FREE_IF_COPY(la, 0);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_BOOL(res);
}
Datum
_ltree_r_risparent(PG_FUNCTION_ARGS) {
_ltree_r_risparent(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall2(_ltree_risparent,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
@@ -96,17 +106,20 @@ _ltree_r_risparent(PG_FUNCTION_ARGS) {
}
Datum
_ltq_regex(PG_FUNCTION_ARGS) {
_ltq_regex(PG_FUNCTION_ARGS)
{
ArrayType *la = PG_GETARG_ARRAYTYPE_P(0);
lquery *query = PG_GETARG_LQUERY(1);
bool res = array_iterator(la, ltq_regex, (void *) query, NULL);
PG_FREE_IF_COPY(la, 0);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_BOOL(res);
}
Datum
_ltq_rregex(PG_FUNCTION_ARGS) {
_ltq_rregex(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall2(_ltq_regex,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
@@ -114,17 +127,20 @@ _ltq_rregex(PG_FUNCTION_ARGS) {
}
Datum
_ltxtq_exec(PG_FUNCTION_ARGS) {
_ltxtq_exec(PG_FUNCTION_ARGS)
{
ArrayType *la = PG_GETARG_ARRAYTYPE_P(0);
ltxtquery *query = PG_GETARG_LTXTQUERY(1);
bool res = array_iterator(la, ltxtq_exec, (void *) query, NULL);
PG_FREE_IF_COPY(la, 0);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_BOOL(res);
}
Datum
_ltxtq_rexec(PG_FUNCTION_ARGS) {
_ltxtq_rexec(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall2(_ltxtq_exec,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
@@ -133,12 +149,15 @@ _ltxtq_rexec(PG_FUNCTION_ARGS) {
Datum
_ltree_extract_isparent(PG_FUNCTION_ARGS) {
_ltree_extract_isparent(PG_FUNCTION_ARGS)
{
ArrayType *la = PG_GETARG_ARRAYTYPE_P(0);
ltree *query = PG_GETARG_LTREE(1);
ltree *found,*item;
ltree *found,
*item;
if ( !array_iterator( la, ltree_isparent, (void*)query, &found ) ) {
if (!array_iterator(la, ltree_isparent, (void *) query, &found))
{
PG_FREE_IF_COPY(la, 0);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_NULL();
@@ -153,12 +172,15 @@ _ltree_extract_isparent(PG_FUNCTION_ARGS) {
}
Datum
_ltree_extract_risparent(PG_FUNCTION_ARGS) {
_ltree_extract_risparent(PG_FUNCTION_ARGS)
{
ArrayType *la = PG_GETARG_ARRAYTYPE_P(0);
ltree *query = PG_GETARG_LTREE(1);
ltree *found,*item;
ltree *found,
*item;
if ( !array_iterator( la, ltree_risparent, (void*)query, &found ) ) {
if (!array_iterator(la, ltree_risparent, (void *) query, &found))
{
PG_FREE_IF_COPY(la, 0);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_NULL();
@@ -173,12 +195,15 @@ _ltree_extract_risparent(PG_FUNCTION_ARGS) {
}
Datum
_ltq_extract_regex(PG_FUNCTION_ARGS) {
_ltq_extract_regex(PG_FUNCTION_ARGS)
{
ArrayType *la = PG_GETARG_ARRAYTYPE_P(0);
lquery *query = PG_GETARG_LQUERY(1);
ltree *found,*item;
ltree *found,
*item;
if ( !array_iterator( la, ltq_regex, (void*)query, &found ) ) {
if (!array_iterator(la, ltq_regex, (void *) query, &found))
{
PG_FREE_IF_COPY(la, 0);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_NULL();
@@ -193,12 +218,15 @@ _ltq_extract_regex(PG_FUNCTION_ARGS) {
}
Datum
_ltxtq_extract_exec(PG_FUNCTION_ARGS) {
_ltxtq_extract_exec(PG_FUNCTION_ARGS)
{
ArrayType *la = PG_GETARG_ARRAYTYPE_P(0);
ltxtquery *query = PG_GETARG_LTXTQUERY(1);
ltree *found,*item;
ltree *found,
*item;
if ( !array_iterator( la, ltxtq_exec, (void*)query, &found ) ) {
if (!array_iterator(la, ltxtq_exec, (void *) query, &found))
{
PG_FREE_IF_COPY(la, 0);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_NULL();
@@ -213,14 +241,17 @@ _ltxtq_extract_exec(PG_FUNCTION_ARGS) {
}
Datum
_lca(PG_FUNCTION_ARGS) {
_lca(PG_FUNCTION_ARGS)
{
ArrayType *la = PG_GETARG_ARRAYTYPE_P(0);
int num = ArrayGetNItems(ARR_NDIM(la), ARR_DIMS(la));
ltree *item = (ltree *) ARR_DATA_PTR(la);
ltree **a,*res;
ltree **a,
*res;
a = (ltree **) palloc(sizeof(ltree *) * num);
while( num>0 ) {
while (num > 0)
{
num--;
a[num] = item;
item = NEXTVAL(item);
@@ -235,4 +266,3 @@ _lca(PG_FUNCTION_ARGS) {
else
PG_RETURN_NULL();
}


@@ -9,7 +9,8 @@
PG_FUNCTION_INFO_V1(ltq_regex);
PG_FUNCTION_INFO_V1(ltq_rregex);
typedef struct {
typedef struct
{
lquery_level *q;
int nq;
ltree_level *t;
@@ -19,7 +20,8 @@ typedef struct {
} FieldNot;
static char *
getlexem(char *start, char *end, int *len) {
getlexem(char *start, char *end, int *len)
{
char *ptr;
while (start < end && *start == '_')
@@ -37,23 +39,28 @@ getlexem(char *start, char *end, int *len) {
}
bool
compare_subnode( ltree_level *t, char *qn, int len, int (*cmpptr)(const char *,const char *,size_t), bool anyend ) {
compare_subnode(ltree_level * t, char *qn, int len, int (*cmpptr) (const char *, const char *, size_t), bool anyend)
{
char *endt = t->name + t->len;
char *endq = qn + len;
char *tn;
int lent,lenq;
int lent,
lenq;
bool isok;
while( (qn=getlexem(qn,endq,&lenq)) != NULL ) {
while ((qn = getlexem(qn, endq, &lenq)) != NULL)
{
tn = t->name;
isok = false;
while( (tn=getlexem(tn,endt,&lent)) != NULL ) {
while ((tn = getlexem(tn, endt, &lent)) != NULL)
{
if (
(
lent == lenq ||
(lent > lenq && anyend)
) &&
(*cmpptr)(qn,tn,lenq) == 0 ) {
(*cmpptr) (qn, tn, lenq) == 0)
{
isok = true;
break;
@@ -70,23 +77,28 @@ compare_subnode( ltree_level *t, char *qn, int len, int (*cmpptr)(const char *,c
}
static bool
checkLevel( lquery_level *curq, ltree_level *curt ) {
checkLevel(lquery_level * curq, ltree_level * curt)
{
int (*cmpptr) (const char *, const char *, size_t);
lquery_variant *curvar = LQL_FIRST(curq);
int i;
for(i=0;i<curq->numvar;i++) {
for (i = 0; i < curq->numvar; i++)
{
cmpptr = (curvar->flag & LVAR_INCASE) ? strncasecmp : strncmp;
if ( curvar->flag & LVAR_SUBLEXEM ) {
if (curvar->flag & LVAR_SUBLEXEM)
{
if (compare_subnode(curt, curvar->name, curvar->len, cmpptr, (curvar->flag & LVAR_ANYEND)))
return true;
} else if (
}
else if (
(
curvar->len == curt->len ||
(curt->len > curvar->len && (curvar->flag & LVAR_ANYEND))
) &&
(*cmpptr)( curvar->name, curt->name, curvar->len) == 0 ) {
(*cmpptr) (curvar->name, curt->name, curvar->len) == 0)
{
return true;
}
@@ -106,17 +118,24 @@ printFieldNot(FieldNot *fn ) {
*/
static bool
checkCond( lquery_level *curq, int query_numlevel, ltree_level *curt, int tree_numlevel, FieldNot *ptr ) {
uint32 low_pos=0,high_pos=0,cur_tpos=0;
int tlen = tree_numlevel, qlen = query_numlevel;
checkCond(lquery_level * curq, int query_numlevel, ltree_level * curt, int tree_numlevel, FieldNot * ptr)
{
uint32 low_pos = 0,
high_pos = 0,
cur_tpos = 0;
int tlen = tree_numlevel,
qlen = query_numlevel;
int isok;
lquery_level *prevq = NULL;
ltree_level *prevt = NULL;
while( tlen >0 && qlen>0 ) {
if ( curq->numvar ) {
while (tlen > 0 && qlen > 0)
{
if (curq->numvar)
{
prevt = curt;
while ( cur_tpos < low_pos ) {
while (cur_tpos < low_pos)
{
curt = LEVEL_NEXT(curt);
tlen--;
cur_tpos++;
@@ -126,17 +145,21 @@ checkCond( lquery_level *curq, int query_numlevel, ltree_level *curt, int tree_n
ptr->nt++;
}
if ( ptr && curq->flag & LQL_NOT ) {
if (ptr && curq->flag & LQL_NOT)
{
if (!(prevq && prevq->numvar == 0))
prevq = curq;
if ( ptr->q == NULL ) {
if (ptr->q == NULL)
{
ptr->t = prevt;
ptr->q = prevq;
ptr->nt = 1;
ptr->nq = 1 + ((prevq == curq) ? 0 : 1);
ptr->posq = query_numlevel - qlen - ((prevq == curq) ? 0 : 1);
ptr->post = cur_tpos;
} else {
}
else
{
ptr->nt++;
ptr->nq++;
}
@@ -148,9 +171,12 @@ checkCond( lquery_level *curq, int query_numlevel, ltree_level *curt, int tree_n
cur_tpos++;
if (high_pos < cur_tpos)
high_pos++;
} else {
}
else
{
isok = false;
while( cur_tpos <= high_pos && tlen > 0 && !isok) {
while (cur_tpos <= high_pos && tlen > 0 && !isok)
{
isok = checkLevel(curq, curt);
curt = LEVEL_NEXT(curt);
tlen--;
@@ -161,17 +187,22 @@ checkCond( lquery_level *curq, int query_numlevel, ltree_level *curt, int tree_n
if (!isok)
return false;
if (ptr && ptr->q) {
if (ptr && ptr->q)
{
if (checkCond(ptr->q, ptr->nq, ptr->t, ptr->nt, NULL))
return false;
ptr->q = NULL;
}
low_pos=cur_tpos; high_pos=cur_tpos;
low_pos = cur_tpos;
high_pos = cur_tpos;
}
} else {
}
else
{
low_pos = cur_tpos + curq->low;
high_pos = cur_tpos + curq->high;
if ( ptr && ptr->q ) {
if (ptr && ptr->q)
{
ptr->nq++;
if (qlen == 1)
ptr->nt = tree_numlevel - ptr->post;
@@ -186,11 +217,15 @@ checkCond( lquery_level *curq, int query_numlevel, ltree_level *curt, int tree_n
if (low_pos > tree_numlevel || tree_numlevel > high_pos)
return false;
while( qlen>0 ) {
if ( curq->numvar ) {
while (qlen > 0)
{
if (curq->numvar)
{
if (!(curq->flag & LQL_NOT))
return false;
} else {
}
else
{
low_pos = cur_tpos + curq->low;
high_pos = cur_tpos + curq->high;
}
@@ -209,19 +244,23 @@ checkCond( lquery_level *curq, int query_numlevel, ltree_level *curt, int tree_n
}
Datum
ltq_regex(PG_FUNCTION_ARGS) {
ltq_regex(PG_FUNCTION_ARGS)
{
ltree *tree = PG_GETARG_LTREE(0);
lquery *query = PG_GETARG_LQUERY(1);
bool res = false;
if ( query->flag & LQUERY_HASNOT ) {
if (query->flag & LQUERY_HASNOT)
{
FieldNot fn;
fn.q = NULL;
res = checkCond(LQUERY_FIRST(query), query->numlevel,
LTREE_FIRST(tree), tree->numlevel, &fn);
} else {
}
else
{
res = checkCond(LQUERY_FIRST(query), query->numlevel,
LTREE_FIRST(tree), tree->numlevel, NULL);
}
@@ -232,7 +271,8 @@ ltq_regex(PG_FUNCTION_ARGS) {
}
Datum
ltq_rregex(PG_FUNCTION_ARGS) {
ltq_rregex(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall2(ltq_regex,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)


@@ -6,7 +6,8 @@
#include "utils/palloc.h"
#include "utils/builtins.h"
typedef struct {
typedef struct
{
uint8 len;
char name[1];
} ltree_level;
@@ -14,7 +15,8 @@ typedef struct {
#define LEVEL_HDRSIZE (sizeof(uint8))
#define LEVEL_NEXT(x) ( (ltree_level*)( ((char*)(x)) + MAXALIGN(((ltree_level*)(x))->len + LEVEL_HDRSIZE) ) )
typedef struct {
typedef struct
{
int32 len;
uint16 numlevel;
char data[1];
@@ -26,7 +28,8 @@ typedef struct {
/* lquery */
typedef struct {
typedef struct
{
int4 val;
uint8 len;
uint8 flag;
@@ -40,7 +43,8 @@ typedef struct {
#define LVAR_INCASE 0x02
#define LVAR_SUBLEXEM 0x04
typedef struct {
typedef struct
{
uint16 totallen;
uint16 flag;
uint16 numvar;
@@ -61,7 +65,8 @@ typedef struct {
#endif
#define LQL_CANLOOKSIGN(x) FLG_CANLOOKSIGN( ((lquery_level*)(x))->flag )
typedef struct {
typedef struct
{
int32 len;
uint16 numlevel;
uint16 firstgood;
@@ -203,7 +208,8 @@ typedef unsigned char *BITVECP;
*
*/
typedef struct {
typedef struct
{
int4 len;
uint32 flag;
char data[1];
@@ -249,4 +255,3 @@ typedef unsigned char ABITVEC[ASIGLEN];
/* type of key is the same to ltree_gist */
#endif


@@ -12,33 +12,42 @@
PG_FUNCTION_INFO_V1(ltree_gist_in);
Datum ltree_gist_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_gist_out);
Datum ltree_gist_out(PG_FUNCTION_ARGS);
Datum
ltree_gist_in(PG_FUNCTION_ARGS) {
ltree_gist_in(PG_FUNCTION_ARGS)
{
elog(ERROR, "Unimplemented");
PG_RETURN_DATUM(0);
}
Datum
ltree_gist_out(PG_FUNCTION_ARGS) {
ltree_gist_out(PG_FUNCTION_ARGS)
{
elog(ERROR, "Unimplemented");
PG_RETURN_DATUM(0);
}
PG_FUNCTION_INFO_V1(ltree_compress);
Datum ltree_compress(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_decompress);
Datum ltree_decompress(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_same);
Datum ltree_same(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_union);
Datum ltree_union(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_penalty);
Datum ltree_penalty(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_picksplit);
Datum ltree_picksplit(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_consistent);
Datum ltree_consistent(PG_FUNCTION_ARGS);
@@ -46,11 +55,13 @@ Datum ltree_consistent(PG_FUNCTION_ARGS);
#define GETENTRY(vec,pos) ((ltree_gist *) DatumGetPointer(((GISTENTRY *) VARDATA(vec))[(pos)].key))
Datum
ltree_compress(PG_FUNCTION_ARGS) {
ltree_compress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTENTRY *retval = entry;
if ( entry->leafkey ) { /* ltree */
if (entry->leafkey)
{ /* ltree */
ltree_gist *key;
ltree *val = (ltree *) DatumGetPointer(PG_DETOAST_DATUM(entry->key));
int4 len = LTG_HDRSIZE + val->len;
@@ -72,12 +83,15 @@ ltree_compress(PG_FUNCTION_ARGS) {
}
Datum
ltree_decompress(PG_FUNCTION_ARGS) {
ltree_decompress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
ltree_gist *key = (ltree_gist *) DatumGetPointer(PG_DETOAST_DATUM(entry->key));
if ( PointerGetDatum(key) != entry->key ) {
if (PointerGetDatum(key) != entry->key)
{
GISTENTRY *retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, key->len, FALSE);
@@ -87,7 +101,8 @@ ltree_decompress(PG_FUNCTION_ARGS) {
}
Datum
ltree_same(PG_FUNCTION_ARGS) {
ltree_same(PG_FUNCTION_ARGS)
{
ltree_gist *a = (ltree_gist *) PG_GETARG_POINTER(0);
ltree_gist *b = (ltree_gist *) PG_GETARG_POINTER(1);
bool *result = (bool *) PG_GETARG_POINTER(2);
@@ -96,11 +111,13 @@ ltree_same(PG_FUNCTION_ARGS) {
if (LTG_ISONENODE(a) != LTG_ISONENODE(b))
PG_RETURN_POINTER(result);
if ( LTG_ISONENODE(a) ) {
if (LTG_ISONENODE(a))
*result = (ISEQ(LTG_NODE(a), LTG_NODE(b))) ? true : false;
} else {
else
{
int4 i;
BITVECP sa=LTG_SIGN(a), sb=LTG_SIGN(b);
BITVECP sa = LTG_SIGN(a),
sb = LTG_SIGN(b);
if (LTG_ISALLTRUE(a) != LTG_ISALLTRUE(b))
PG_RETURN_POINTER(result);
@@ -113,7 +130,8 @@ ltree_same(PG_FUNCTION_ARGS) {
*result = true;
if (!LTG_ISALLTRUE(a))
LOOPBYTE(
if ( sa[i] != sb[i] ) {
if (sa[i] != sb[i])
{
*result = false;
break;
}
@@ -124,12 +142,14 @@ ltree_same(PG_FUNCTION_ARGS) {
}
static void
hashing(BITVECP sign, ltree *t) {
hashing(BITVECP sign, ltree * t)
{
int tlen = t->numlevel;
ltree_level *cur = LTREE_FIRST(t);
int hash;
while(tlen > 0) {
while (tlen > 0)
{
hash = ltree_crc32_sz(cur->name, cur->len);
HASH(sign, hash);
cur = LEVEL_NEXT(cur);
@@ -138,32 +158,43 @@ hashing(BITVECP sign, ltree *t) {
}
Datum
ltree_union(PG_FUNCTION_ARGS) {
ltree_union(PG_FUNCTION_ARGS)
{
bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
int *size = (int *) PG_GETARG_POINTER(1);
BITVEC base;
int4 len = (VARSIZE(entryvec) - VARHDRSZ) / sizeof(GISTENTRY);
int4 i,j;
ltree_gist *result,*cur;
ltree *left=NULL, *right=NULL, *curtree;
int4 i,
j;
ltree_gist *result,
*cur;
ltree *left = NULL,
*right = NULL,
*curtree;
bool isalltrue = false;
bool isleqr;
MemSet((void *) base, 0, sizeof(BITVEC));
for(j=0;j<len;j++) {
for (j = 0; j < len; j++)
{
cur = GETENTRY(entryvec, j);
if ( LTG_ISONENODE(cur) ) {
if (LTG_ISONENODE(cur))
{
curtree = LTG_NODE(cur);
hashing(base, curtree);
if (!left || ltree_compare(left, curtree) > 0)
left = curtree;
if (!right || ltree_compare(right, curtree) < 0)
right = curtree;
} else {
}
else
{
if (isalltrue || LTG_ISALLTRUE(cur))
isalltrue = true;
else {
else
{
BITVECP sc = LTG_SIGN(cur);
LOOPBYTE(
((unsigned char *) base)[i] |= sc[i];
);
@@ -178,10 +209,12 @@ ltree_union(PG_FUNCTION_ARGS) {
}
}
if ( isalltrue == false ) {
if (isalltrue == false)
{
isalltrue = true;
LOOPBYTE(
if ( ((unsigned char*)base)[i] != 0xff ) {
if (((unsigned char *) base)[i] != 0xff)
{
isalltrue = false;
break;
}
@@ -210,11 +243,13 @@ ltree_union(PG_FUNCTION_ARGS) {
}
Datum
ltree_penalty(PG_FUNCTION_ARGS) {
ltree_penalty(PG_FUNCTION_ARGS)
{
ltree_gist *origval = (ltree_gist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
ltree_gist *newval = (ltree_gist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
float *penalty = (float *) PG_GETARG_POINTER(2);
int4 cmpr,cmpl;
int4 cmpr,
cmpl;
cmpl = ltree_compare(LTG_GETLNODE(origval), LTG_GETLNODE(newval));
cmpr = ltree_compare(LTG_GETRNODE(newval), LTG_GETRNODE(origval));
@@ -225,13 +260,15 @@ ltree_penalty(PG_FUNCTION_ARGS) {
}
/* used for sorting */
typedef struct rix {
typedef struct rix
{
int index;
ltree *r;
} RIX;
static int
treekey_cmp(const void *a, const void *b) {
treekey_cmp(const void *a, const void *b)
{
return ltree_compare(
((RIX *) a)->r,
((RIX *) b)->r
@@ -240,7 +277,8 @@ treekey_cmp(const void *a, const void *b) {
Datum
ltree_picksplit(PG_FUNCTION_ARGS) {
ltree_picksplit(PG_FUNCTION_ARGS)
{
bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1);
OffsetNumber j;
@@ -249,10 +287,17 @@ ltree_picksplit(PG_FUNCTION_ARGS) {
OffsetNumber maxoff;
int nbytes;
int size;
ltree *lu_l,*lu_r, *ru_l, *ru_r;
ltree_gist *lu, *ru;
BITVEC ls,rs;
bool lisat=false, risat=false, isleqr;
ltree *lu_l,
*lu_r,
*ru_l,
*ru_r;
ltree_gist *lu,
*ru;
BITVEC ls,
rs;
bool lisat = false,
risat = false,
isleqr;
memset((void *) ls, 0, sizeof(BITVEC));
memset((void *) rs, 0, sizeof(BITVEC));
@@ -265,7 +310,8 @@ ltree_picksplit(PG_FUNCTION_ARGS) {
array = (RIX *) palloc(sizeof(RIX) * (maxoff + 1));
/* copy the data into RIXes, and sort the RIXes */
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) {
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j))
{
array[j].index = j;
lu = GETENTRY(entryvec, j); /* use as tmp val */
array[j].r = LTG_GETLNODE(lu);
@@ -275,37 +321,47 @@ ltree_picksplit(PG_FUNCTION_ARGS) {
sizeof(RIX), treekey_cmp);
lu_l = lu_r = ru_l = ru_r = NULL;
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) {
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j))
{
lu = GETENTRY(entryvec, array[j].index); /* use as tmp val */
if (j <= (maxoff - FirstOffsetNumber + 1) / 2) {
if (j <= (maxoff - FirstOffsetNumber + 1) / 2)
{
v->spl_left[v->spl_nleft] = array[j].index;
v->spl_nleft++;
if (lu_r == NULL || ltree_compare(LTG_GETRNODE(lu), lu_r) > 0)
lu_r = LTG_GETRNODE(lu);
if (LTG_ISONENODE(lu))
hashing(ls, LTG_NODE(lu));
else {
else
{
if (lisat || LTG_ISALLTRUE(lu))
lisat = true;
else {
else
{
BITVECP sc = LTG_SIGN(lu);
LOOPBYTE(
((unsigned char *) ls)[i] |= sc[i];
);
}
}
} else {
}
else
{
v->spl_right[v->spl_nright] = array[j].index;
v->spl_nright++;
if (ru_r == NULL || ltree_compare(LTG_GETRNODE(lu), ru_r) > 0)
ru_r = LTG_GETRNODE(lu);
if (LTG_ISONENODE(lu))
hashing(rs, LTG_NODE(lu));
else {
else
{
if (risat || LTG_ISALLTRUE(lu))
risat = true;
else {
else
{
BITVECP sc = LTG_SIGN(lu);
LOOPBYTE(
((unsigned char *) rs)[i] |= sc[i];
);
@@ -314,20 +370,24 @@ ltree_picksplit(PG_FUNCTION_ARGS) {
}
}
if ( lisat == false ) {
if (lisat == false)
{
lisat = true;
LOOPBYTE(
if ( ((unsigned char*)ls)[i] != 0xff ) {
if (((unsigned char *) ls)[i] != 0xff)
{
lisat = false;
break;
}
);
}
if ( risat == false ) {
if (risat == false)
{
risat = true;
LOOPBYTE(
if ( ((unsigned char*)rs)[i] != 0xff ) {
if (((unsigned char *) rs)[i] != 0xff)
{
risat = false;
break;
}
@@ -375,13 +435,16 @@ ltree_picksplit(PG_FUNCTION_ARGS) {
}
static bool
gist_isparent(ltree_gist *key, ltree *query) {
gist_isparent(ltree_gist * key, ltree * query)
{
int4 numlevel = query->numlevel;
int i;
for(i=query->numlevel;i>=0;i--) {
for (i = query->numlevel; i >= 0; i--)
{
query->numlevel = i;
if ( ltree_compare(query,LTG_GETLNODE(key)) >=0 && ltree_compare(query,LTG_GETRNODE(key)) <= 0 ) {
if (ltree_compare(query, LTG_GETLNODE(key)) >= 0 && ltree_compare(query, LTG_GETRNODE(key)) <= 0)
{
query->numlevel = numlevel;
return true;
}
@@ -392,7 +455,8 @@ gist_isparent(ltree_gist *key, ltree *query) {
}
static bool
gist_ischild(ltree_gist *key, ltree *query) {
gist_ischild(ltree_gist * key, ltree * query)
{
ltree *left = LTG_GETLNODE(key);
ltree *right = LTG_GETRNODE(key);
int4 numlevelL = left->numlevel;
@@ -417,7 +481,8 @@ gist_ischild(ltree_gist *key, ltree *query) {
}
static bool
gist_qe(ltree_gist *key, lquery* query) {
gist_qe(ltree_gist * key, lquery * query)
{
lquery_level *curq = LQUERY_FIRST(query);
BITVECP sign = LTG_SIGN(key);
int qlen = query->numlevel;
@@ -425,13 +490,18 @@ gist_qe(ltree_gist *key, lquery* query) {
if (LTG_ISALLTRUE(key))
return true;
while( qlen>0 ) {
if ( curq->numvar && LQL_CANLOOKSIGN(curq) ) {
while (qlen > 0)
{
if (curq->numvar && LQL_CANLOOKSIGN(curq))
{
bool isexist = false;
int vlen = curq->numvar;
lquery_variant *curv = LQL_FIRST(curq);
while( vlen>0 ) {
if ( GETBIT( sign, HASHVAL( curv->val ) ) ) {
while (vlen > 0)
{
if (GETBIT(sign, HASHVAL(curv->val)))
{
isexist = true;
break;
}
@@ -450,7 +520,8 @@ gist_qe(ltree_gist *key, lquery* query) {
}
static int
gist_tqcmp(ltree* t, lquery* q) {
gist_tqcmp(ltree * t, lquery * q)
{
ltree_level *al = LTREE_FIRST(t);
lquery_level *ql = LQUERY_FIRST(q);
lquery_variant *bl;
@@ -458,14 +529,18 @@ gist_tqcmp(ltree* t, lquery* q) {
int bn = q->firstgood;
int res = 0;
while( an>0 && bn>0 ) {
while (an > 0 && bn > 0)
{
bl = LQL_FIRST(ql);
if ( (res = strncmp( al->name, bl->name, min(al->len, bl->len))) == 0 ) {
if ((res = strncmp(al->name, bl->name, min(al->len, bl->len))) == 0)
{
if (al->len != bl->len)
return al->len - bl->len;
} else
}
else
return res;
an--; bn--;
an--;
bn--;
al = LEVEL_NEXT(al);
ql = LQL_NEXT(ql);
}
@@ -474,7 +549,8 @@ gist_tqcmp(ltree* t, lquery* q) {
}
static bool
gist_between(ltree_gist *key, lquery* query) {
gist_between(ltree_gist * key, lquery * query)
{
ltree *left = LTG_GETLNODE(key);
ltree *right = LTG_GETRNODE(key);
int4 numlevelL = left->numlevel;
@@ -502,12 +578,14 @@ gist_between(ltree_gist *key, lquery* query) {
}
static bool
checkcondition_bit(void *checkval, ITEM* val ) {
checkcondition_bit(void *checkval, ITEM * val)
{
return (FLG_CANLOOKSIGN(val->flag)) ? GETBIT(checkval, HASHVAL(val->val)) : true;
}
static bool
gist_qtxt(ltree_gist *key, ltxtquery* query) {
gist_qtxt(ltree_gist * key, ltxtquery * query)
{
if (LTG_ISALLTRUE(key))
return true;
@@ -520,7 +598,8 @@ gist_qtxt(ltree_gist *key, ltxtquery* query) {
Datum
ltree_consistent(PG_FUNCTION_ARGS) {
ltree_consistent(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
char *query = (char *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
ltree_gist *key = (ltree_gist *) DatumGetPointer(entry->key);
@@ -531,7 +610,8 @@ ltree_consistent(PG_FUNCTION_ARGS) {
#define assert_enabled 0
#endif
switch( strategy ) {
switch (strategy)
{
case BTLessStrategyNumber:
res = (GIST_LEAF(entry)) ?
(ltree_compare((ltree *) query, LTG_NODE(key)) > 0)
@@ -597,4 +677,3 @@ ltree_consistent(PG_FUNCTION_ARGS) {
}
PG_RETURN_BOOL(res);
}
View File
@@ -9,18 +9,21 @@
PG_FUNCTION_INFO_V1(ltree_in);
Datum ltree_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltree_out);
Datum ltree_out(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(lquery_in);
Datum lquery_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(lquery_out);
Datum lquery_out(PG_FUNCTION_ARGS);
#define UNCHAR elog(ERROR,"Syntax error in position %d near '%c'", (int)(ptr-buf), *ptr)
typedef struct {
typedef struct
{
char *start;
int len;
int flag;
@@ -30,17 +33,21 @@ typedef struct {
#define LTPRS_WAITDELIM 1
Datum
ltree_in(PG_FUNCTION_ARGS) {
ltree_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
char *ptr;
nodeitem *list, *lptr;
int num=0, totallen = 0;
nodeitem *list,
*lptr;
int num = 0,
totallen = 0;
int state = LTPRS_WAITNAME;
ltree *result;
ltree_level *curlevel;
ptr = buf;
while( *ptr ) {
while (*ptr)
{
if (*ptr == '.')
num++;
ptr++;
@@ -48,15 +55,22 @@ ltree_in(PG_FUNCTION_ARGS) {
list = lptr = (nodeitem *) palloc(sizeof(nodeitem) * (num + 1));
ptr = buf;
while( *ptr ) {
if ( state == LTPRS_WAITNAME ) {
if ( ISALNUM(*ptr) ) {
while (*ptr)
{
if (state == LTPRS_WAITNAME)
{
if (ISALNUM(*ptr))
{
lptr->start = ptr;
state = LTPRS_WAITDELIM;
} else
}
else
UNCHAR;
} else if ( state == LTPRS_WAITDELIM ) {
if ( *ptr == '.' ) {
}
else if (state == LTPRS_WAITDELIM)
{
if (*ptr == '.')
{
lptr->len = ptr - lptr->start;
if (lptr->len > 255)
elog(ERROR, "Name of level is too long (%d, must be < 256) in position %d",
@@ -64,21 +78,25 @@ ltree_in(PG_FUNCTION_ARGS) {
totallen += MAXALIGN(lptr->len + LEVEL_HDRSIZE);
lptr++;
state = LTPRS_WAITNAME;
} else if ( !ISALNUM(*ptr) )
}
else if (!ISALNUM(*ptr))
UNCHAR;
} else
}
else
elog(ERROR, "Inner error in parser");
ptr++;
}
if ( state == LTPRS_WAITDELIM ) {
if (state == LTPRS_WAITDELIM)
{
lptr->len = ptr - lptr->start;
if (lptr->len > 255)
elog(ERROR, "Name of level is too long (%d, must be < 256) in position %d",
lptr->len, (int) (lptr->start - buf));
totallen += MAXALIGN(lptr->len + LEVEL_HDRSIZE);
lptr++;
} else if ( ! (state == LTPRS_WAITNAME && lptr == list) )
}
else if (!(state == LTPRS_WAITNAME && lptr == list))
elog(ERROR, "Unexpected end of line");
result = (ltree *) palloc(LTREE_HDRSIZE + totallen);
@@ -86,7 +104,8 @@ ltree_in(PG_FUNCTION_ARGS) {
result->numlevel = lptr - list;
curlevel = LTREE_FIRST(result);
lptr = list;
while( lptr-list < result->numlevel ) {
while (lptr - list < result->numlevel)
{
curlevel->len = (uint8) lptr->len;
memcpy(curlevel->name, lptr->start, lptr->len);
curlevel = LEVEL_NEXT(curlevel);
@@ -98,16 +117,20 @@ ltree_in(PG_FUNCTION_ARGS) {
}
Datum
ltree_out(PG_FUNCTION_ARGS) {
ltree_out(PG_FUNCTION_ARGS)
{
ltree *in = PG_GETARG_LTREE(0);
char *buf,*ptr;
char *buf,
*ptr;
int i;
ltree_level *curlevel;
ptr = buf = (char *) palloc(in->len);
curlevel = LTREE_FIRST(in);
for(i=0;i<in->numlevel;i++) {
if ( i!=0 ) {
for (i = 0; i < in->numlevel; i++)
{
if (i != 0)
{
*ptr = '.';
ptr++;
}
@@ -138,20 +161,26 @@ ltree_out(PG_FUNCTION_ARGS) {
#define NEXTLEV(x) ( (lquery_level*)( ((char*)(x)) + ITEMSIZE) )
Datum
lquery_in(PG_FUNCTION_ARGS) {
lquery_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
char *ptr;
int num=0, totallen = 0, numOR=0;
int num = 0,
totallen = 0,
numOR = 0;
int state = LQPRS_WAITLEVEL;
lquery *result;
nodeitem *lptr = NULL;
lquery_level *cur,*curqlevel, *tmpql;
lquery_level *cur,
*curqlevel,
*tmpql;
lquery_variant *lrptr = NULL;
bool hasnot = false;
bool wasbad = false;
ptr = buf;
while( *ptr ) {
while (*ptr)
{
if (*ptr == '.')
num++;
else if (*ptr == '|')
@@ -163,15 +192,20 @@ lquery_in(PG_FUNCTION_ARGS) {
curqlevel = tmpql = (lquery_level *) palloc(ITEMSIZE * num);
memset((void *) tmpql, 0, ITEMSIZE * num);
ptr = buf;
while( *ptr ) {
if ( state==LQPRS_WAITLEVEL ) {
if ( ISALNUM(*ptr) ) {
while (*ptr)
{
if (state == LQPRS_WAITLEVEL)
{
if (ISALNUM(*ptr))
{
GETVAR(curqlevel) = lptr = (nodeitem *) palloc(sizeof(nodeitem) * (numOR + 1));
memset((void *) GETVAR(curqlevel), 0, sizeof(nodeitem) * (numOR + 1));
lptr->start = ptr;
state = LQPRS_WAITDELIM;
curqlevel->numvar = 1;
} else if ( *ptr == '!' ) {
}
else if (*ptr == '!')
{
GETVAR(curqlevel) = lptr = (nodeitem *) palloc(sizeof(nodeitem) * (numOR + 1));
memset((void *) GETVAR(curqlevel), 0, sizeof(nodeitem) * (numOR + 1));
lptr->start = ptr + 1;
@@ -179,35 +213,49 @@ lquery_in(PG_FUNCTION_ARGS) {
curqlevel->numvar = 1;
curqlevel->flag |= LQL_NOT;
hasnot = true;
} else if ( *ptr == '*' ) {
}
else if (*ptr == '*')
state = LQPRS_WAITOPEN;
} else
else
UNCHAR;
} else if ( state==LQPRS_WAITVAR ) {
if ( ISALNUM(*ptr) ) {
}
else if (state == LQPRS_WAITVAR)
{
if (ISALNUM(*ptr))
{
lptr++;
lptr->start = ptr;
state = LQPRS_WAITDELIM;
curqlevel->numvar++;
} else
}
else
UNCHAR;
} else if ( state==LQPRS_WAITDELIM ) {
if ( *ptr == '@' ) {
}
else if (state == LQPRS_WAITDELIM)
{
if (*ptr == '@')
{
if (lptr->start == ptr)
UNCHAR;
lptr->flag |= LVAR_INCASE;
curqlevel->flag |= LVAR_INCASE;
} else if ( *ptr == '*' ) {
}
else if (*ptr == '*')
{
if (lptr->start == ptr)
UNCHAR;
lptr->flag |= LVAR_ANYEND;
curqlevel->flag |= LVAR_ANYEND;
} else if ( *ptr == '%' ) {
}
else if (*ptr == '%')
{
if (lptr->start == ptr)
UNCHAR;
lptr->flag |= LVAR_SUBLEXEM;
curqlevel->flag |= LVAR_SUBLEXEM;
} else if ( *ptr == '|' ) {
}
else if (*ptr == '|')
{
lptr->len = ptr - lptr->start -
((lptr->flag & LVAR_SUBLEXEM) ? 1 : 0) -
((lptr->flag & LVAR_INCASE) ? 1 : 0) -
@@ -216,7 +264,9 @@ lquery_in(PG_FUNCTION_ARGS) {
elog(ERROR, "Name of level is too long (%d, must be < 256) in position %d",
lptr->len, (int) (lptr->start - buf));
state = LQPRS_WAITVAR;
} else if ( *ptr == '.' ) {
}
else if (*ptr == '.')
{
lptr->len = ptr - lptr->start -
((lptr->flag & LVAR_SUBLEXEM) ? 1 : 0) -
((lptr->flag & LVAR_INCASE) ? 1 : 0) -
@@ -226,63 +276,92 @@ lquery_in(PG_FUNCTION_ARGS) {
lptr->len, (int) (lptr->start - buf));
state = LQPRS_WAITLEVEL;
curqlevel = NEXTLEV(curqlevel);
} else if ( ISALNUM(*ptr) ) {
}
else if (ISALNUM(*ptr))
{
if (lptr->flag)
UNCHAR;
} else
}
else
UNCHAR;
} else if ( state == LQPRS_WAITOPEN ) {
if ( *ptr == '{' ) {
}
else if (state == LQPRS_WAITOPEN)
{
if (*ptr == '{')
state = LQPRS_WAITFNUM;
} else if ( *ptr == '.' ) {
else if (*ptr == '.')
{
curqlevel->low = 0;
curqlevel->high = 0xffff;
curqlevel = NEXTLEV(curqlevel);
state = LQPRS_WAITLEVEL;
} else
}
else
UNCHAR;
} else if ( state == LQPRS_WAITFNUM ) {
if ( *ptr == ',' ) {
}
else if (state == LQPRS_WAITFNUM)
{
if (*ptr == ',')
state = LQPRS_WAITSNUM;
} else if ( isdigit((unsigned int)*ptr) ) {
else if (isdigit((unsigned int) *ptr))
{
curqlevel->low = atoi(ptr);
state = LQPRS_WAITND;
} else
}
else
UNCHAR;
} else if ( state == LQPRS_WAITSNUM ) {
if ( isdigit((unsigned int)*ptr) ) {
}
else if (state == LQPRS_WAITSNUM)
{
if (isdigit((unsigned int) *ptr))
{
curqlevel->high = atoi(ptr);
state = LQPRS_WAITCLOSE;
} else if ( *ptr == '}' ) {
}
else if (*ptr == '}')
{
curqlevel->high = 0xffff;
state = LQPRS_WAITEND;
} else
}
else
UNCHAR;
} else if ( state == LQPRS_WAITCLOSE ) {
}
else if (state == LQPRS_WAITCLOSE)
{
if (*ptr == '}')
state = LQPRS_WAITEND;
else if (!isdigit((unsigned int) *ptr))
UNCHAR;
} else if ( state == LQPRS_WAITND ) {
if ( *ptr == '}' ) {
}
else if (state == LQPRS_WAITND)
{
if (*ptr == '}')
{
curqlevel->high = curqlevel->low;
state = LQPRS_WAITEND;
} else if ( *ptr == ',' )
}
else if (*ptr == ',')
state = LQPRS_WAITSNUM;
else if (!isdigit((unsigned int) *ptr))
UNCHAR;
} else if ( state == LQPRS_WAITEND ) {
if ( *ptr == '.' ) {
}
else if (state == LQPRS_WAITEND)
{
if (*ptr == '.')
{
state = LQPRS_WAITLEVEL;
curqlevel = NEXTLEV(curqlevel);
} else
}
else
UNCHAR;
} else
}
else
elog(ERROR, "Inner error in parser");
ptr++;
}
if ( state==LQPRS_WAITDELIM ) {
if (state == LQPRS_WAITDELIM)
{
if (lptr->start == ptr)
elog(ERROR, "Unexpected end of line");
lptr->len = ptr - lptr->start -
@@ -294,22 +373,27 @@ lquery_in(PG_FUNCTION_ARGS) {
if (lptr->len > 255)
elog(ERROR, "Name of level is too long (%d, must be < 256) in position %d",
lptr->len, (int) (lptr->start - buf));
} else if ( state == LQPRS_WAITOPEN ) {
}
else if (state == LQPRS_WAITOPEN)
curqlevel->high = 0xffff;
} else if ( state != LQPRS_WAITEND )
else if (state != LQPRS_WAITEND)
elog(ERROR, "Unexpected end of line");
curqlevel = tmpql;
totallen = LQUERY_HDRSIZE;
while( (char*)curqlevel-(char*)tmpql < num*ITEMSIZE ) {
while ((char *) curqlevel - (char *) tmpql < num * ITEMSIZE)
{
totallen += LQL_HDRSIZE;
if ( curqlevel->numvar ) {
if (curqlevel->numvar)
{
lptr = GETVAR(curqlevel);
while( lptr-GETVAR(curqlevel) < curqlevel->numvar ) {
while (lptr - GETVAR(curqlevel) < curqlevel->numvar)
{
totallen += MAXALIGN(LVAR_HDRSIZE + lptr->len);
lptr++;
}
} else if ( curqlevel->low > curqlevel->high )
}
else if (curqlevel->low > curqlevel->high)
elog(ERROR, "Low limit(%d) is greater than upper(%d)", curqlevel->low, curqlevel->high);
curqlevel = NEXTLEV(curqlevel);
}
@@ -323,13 +407,16 @@ lquery_in(PG_FUNCTION_ARGS) {
result->flag |= LQUERY_HASNOT;
cur = LQUERY_FIRST(result);
curqlevel = tmpql;
while( (char*)curqlevel-(char*)tmpql < num*ITEMSIZE ) {
while ((char *) curqlevel - (char *) tmpql < num * ITEMSIZE)
{
memcpy(cur, curqlevel, LQL_HDRSIZE);
cur->totallen = LQL_HDRSIZE;
if ( curqlevel->numvar ) {
if (curqlevel->numvar)
{
lrptr = LQL_FIRST(cur);
lptr = GETVAR(curqlevel);
while( lptr-GETVAR(curqlevel) < curqlevel->numvar ) {
while (lptr - GETVAR(curqlevel) < curqlevel->numvar)
{
cur->totallen += MAXALIGN(LVAR_HDRSIZE + lptr->len);
lrptr->len = lptr->len;
lrptr->flag = lptr->flag;
@@ -343,7 +430,8 @@ lquery_in(PG_FUNCTION_ARGS) {
wasbad = true;
else if (wasbad == false)
(result->firstgood)++;
} else
}
else
wasbad = true;
curqlevel = NEXTLEV(curqlevel);
cur = LQL_NEXT(cur);
@@ -354,15 +442,20 @@ lquery_in(PG_FUNCTION_ARGS) {
}
Datum
lquery_out(PG_FUNCTION_ARGS) {
lquery_out(PG_FUNCTION_ARGS)
{
lquery *in = PG_GETARG_LQUERY(0);
char *buf,*ptr;
int i,j,totallen=0;
char *buf,
*ptr;
int i,
j,
totallen = 0;
lquery_level *curqlevel;
lquery_variant *curtlevel;
curqlevel = LQUERY_FIRST(in);
for(i=0;i<in->numlevel;i++) {
for (i = 0; i < in->numlevel; i++)
{
if (curqlevel->numvar)
totallen = (curqlevel->numvar * 4) + 1 + curqlevel->totallen;
else
@@ -374,50 +467,69 @@ lquery_out(PG_FUNCTION_ARGS) {
ptr = buf = (char *) palloc(totallen);
curqlevel = LQUERY_FIRST(in);
for(i=0;i<in->numlevel;i++) {
if ( i!=0 ) {
for (i = 0; i < in->numlevel; i++)
{
if (i != 0)
{
*ptr = '.';
ptr++;
}
if ( curqlevel->numvar ) {
if ( curqlevel->flag & LQL_NOT ) {
if (curqlevel->numvar)
{
if (curqlevel->flag & LQL_NOT)
{
*ptr = '!';
ptr++;
}
curtlevel = LQL_FIRST(curqlevel);
for(j=0;j<curqlevel->numvar;j++) {
if ( j!=0 ) {
for (j = 0; j < curqlevel->numvar; j++)
{
if (j != 0)
{
*ptr = '|';
ptr++;
}
memcpy(ptr, curtlevel->name, curtlevel->len);
ptr += curtlevel->len;
if ( (curtlevel->flag & LVAR_SUBLEXEM) ) {
if ((curtlevel->flag & LVAR_SUBLEXEM))
{
*ptr = '%';
ptr++;
}
if ( (curtlevel->flag & LVAR_INCASE) ) {
if ((curtlevel->flag & LVAR_INCASE))
{
*ptr = '@';
ptr++;
}
if ( (curtlevel->flag & LVAR_ANYEND) ) {
if ((curtlevel->flag & LVAR_ANYEND))
{
*ptr = '*';
ptr++;
}
curtlevel = LVAR_NEXT(curtlevel);
}
} else {
if ( curqlevel->low == curqlevel->high ) {
}
else
{
if (curqlevel->low == curqlevel->high)
{
sprintf(ptr, "*{%d}", curqlevel->low);
} else if ( curqlevel->low == 0 ) {
if ( curqlevel->high == 0xffff ) {
}
else if (curqlevel->low == 0)
{
if (curqlevel->high == 0xffff)
{
*ptr = '*';
*(ptr + 1) = '\0';
} else
}
else
sprintf(ptr, "*{,%d}", curqlevel->high);
} else if ( curqlevel->high == 0xffff ) {
}
else if (curqlevel->high == 0xffff)
{
sprintf(ptr, "*{%d,}", curqlevel->low);
} else
}
else
sprintf(ptr, "*{%d,%d}", curqlevel->low, curqlevel->high);
ptr = strchr(ptr, '\0');
}
@@ -430,5 +542,3 @@ lquery_out(PG_FUNCTION_ARGS) {
PG_RETURN_POINTER(buf);
}
View File
@@ -39,21 +39,26 @@ Datum ltree_textadd(PG_FUNCTION_ARGS);
Datum lca(PG_FUNCTION_ARGS);
int
ltree_compare(const ltree *a, const ltree *b) {
ltree_compare(const ltree * a, const ltree * b)
{
ltree_level *al = LTREE_FIRST(a);
ltree_level *bl = LTREE_FIRST(b);
int an = a->numlevel;
int bn = b->numlevel;
int res = 0;
while( an>0 && bn>0 ) {
if ( (res = strncmp( al->name, bl->name, min(al->len, bl->len))) == 0 ) {
while (an > 0 && bn > 0)
{
if ((res = strncmp(al->name, bl->name, min(al->len, bl->len))) == 0)
{
if (al->len != bl->len)
return (al->len - bl->len) * 10 * (an + 1);
} else
}
else
return res * 10 * (an + 1);
an--; bn--;
an--;
bn--;
al = LEVEL_NEXT(al);
bl = LEVEL_NEXT(bl);
}
@@ -69,57 +74,67 @@ PG_FREE_IF_COPY(a,0); \
PG_FREE_IF_COPY(b,1); \
Datum
ltree_cmp(PG_FUNCTION_ARGS) {
ltree_cmp(PG_FUNCTION_ARGS)
{
RUNCMP
PG_RETURN_INT32(res);
}
Datum
ltree_lt(PG_FUNCTION_ARGS) {
ltree_lt(PG_FUNCTION_ARGS)
{
RUNCMP
PG_RETURN_BOOL((res < 0) ? true : false);
}
Datum
ltree_le(PG_FUNCTION_ARGS) {
ltree_le(PG_FUNCTION_ARGS)
{
RUNCMP
PG_RETURN_BOOL((res <= 0) ? true : false);
}
Datum
ltree_eq(PG_FUNCTION_ARGS) {
ltree_eq(PG_FUNCTION_ARGS)
{
RUNCMP
PG_RETURN_BOOL((res == 0) ? true : false);
}
Datum
ltree_ge(PG_FUNCTION_ARGS) {
ltree_ge(PG_FUNCTION_ARGS)
{
RUNCMP
PG_RETURN_BOOL((res >= 0) ? true : false);
}
Datum
ltree_gt(PG_FUNCTION_ARGS) {
ltree_gt(PG_FUNCTION_ARGS)
{
RUNCMP
PG_RETURN_BOOL((res > 0) ? true : false);
}
Datum
ltree_ne(PG_FUNCTION_ARGS) {
ltree_ne(PG_FUNCTION_ARGS)
{
RUNCMP
PG_RETURN_BOOL((res != 0) ? true : false);
}
Datum
nlevel(PG_FUNCTION_ARGS) {
nlevel(PG_FUNCTION_ARGS)
{
ltree *a = PG_GETARG_LTREE(0);
int res = a->numlevel;
PG_FREE_IF_COPY(a, 0);
PG_RETURN_INT32(res);
}
bool
inner_isparent(const ltree *c, const ltree *p) {
inner_isparent(const ltree * c, const ltree * p)
{
ltree_level *cl = LTREE_FIRST(c);
ltree_level *pl = LTREE_FIRST(p);
int pn = p->numlevel;
@@ -127,7 +142,8 @@ inner_isparent(const ltree *c, const ltree *p) {
if (pn > c->numlevel)
return false;
while( pn>0 ) {
while (pn > 0)
{
if (cl->len != pl->len)
return false;
if (strncmp(cl->name, pl->name, cl->len))
@@ -141,20 +157,24 @@ inner_isparent(const ltree *c, const ltree *p) {
}
Datum
ltree_isparent(PG_FUNCTION_ARGS) {
ltree_isparent(PG_FUNCTION_ARGS)
{
ltree *c = PG_GETARG_LTREE(1);
ltree *p = PG_GETARG_LTREE(0);
bool res = inner_isparent(c, p);
PG_FREE_IF_COPY(c, 1);
PG_FREE_IF_COPY(p, 0);
PG_RETURN_BOOL(res);
}
Datum
ltree_risparent(PG_FUNCTION_ARGS) {
ltree_risparent(PG_FUNCTION_ARGS)
{
ltree *c = PG_GETARG_LTREE(0);
ltree *p = PG_GETARG_LTREE(1);
bool res = inner_isparent(c, p);
PG_FREE_IF_COPY(c, 0);
PG_FREE_IF_COPY(p, 1);
PG_RETURN_BOOL(res);
@@ -162,8 +182,10 @@ ltree_risparent(PG_FUNCTION_ARGS) {
static ltree *
inner_subltree(ltree *t, int4 startpos, int4 endpos) {
char *start=NULL,*end=NULL;
inner_subltree(ltree * t, int4 startpos, int4 endpos)
{
char *start = NULL,
*end = NULL;
ltree_level *ptr = LTREE_FIRST(t);
ltree *res;
int i;
@@ -174,10 +196,12 @@ inner_subltree(ltree *t, int4 startpos, int4 endpos) {
if (endpos > t->numlevel)
endpos = t->numlevel;
for(i=0;i<endpos ;i++) {
for (i = 0; i < endpos; i++)
{
if (i == startpos)
start = (char *) ptr;
if ( i==endpos-1 ) {
if (i == endpos - 1)
{
end = (char *) LEVEL_NEXT(ptr);
break;
}
@@ -194,7 +218,8 @@ inner_subltree(ltree *t, int4 startpos, int4 endpos) {
}
Datum
subltree(PG_FUNCTION_ARGS) {
subltree(PG_FUNCTION_ARGS)
{
ltree *t = PG_GETARG_LTREE(0);
ltree *res = inner_subltree(t, PG_GETARG_INT32(1), PG_GETARG_INT32(2));
@@ -203,7 +228,8 @@ subltree(PG_FUNCTION_ARGS) {
}
Datum
subpath(PG_FUNCTION_ARGS) {
subpath(PG_FUNCTION_ARGS)
{
ltree *t = PG_GETARG_LTREE(0);
int4 start = PG_GETARG_INT32(1);
int4 len = (fcinfo->nargs == 3) ? PG_GETARG_INT32(2) : 0;
@@ -212,11 +238,13 @@ subpath(PG_FUNCTION_ARGS) {
end = start + len;
if ( start < 0 ) {
if (start < 0)
{
start = t->numlevel + start;
end = start + len;
}
if ( start < 0 ) { /* start > t->numlevel */
if (start < 0)
{ /* start > t->numlevel */
start = t->numlevel + start;
end = start + len;
}
@@ -233,8 +261,10 @@ subpath(PG_FUNCTION_ARGS) {
}
static ltree *
ltree_concat( ltree *a, ltree *b) {
ltree_concat(ltree * a, ltree * b)
{
ltree *r;
r = (ltree *) palloc(a->len + b->len - LTREE_HDRSIZE);
r->len = a->len + b->len - LTREE_HDRSIZE;
r->numlevel = a->numlevel + b->numlevel;
@@ -246,7 +276,8 @@ ltree_concat( ltree *a, ltree *b) {
}
Datum
ltree_addltree(PG_FUNCTION_ARGS) {
ltree_addltree(PG_FUNCTION_ARGS)
{
ltree *a = PG_GETARG_LTREE(0);
ltree *b = PG_GETARG_LTREE(1);
ltree *r;
@@ -258,11 +289,13 @@ ltree_addltree(PG_FUNCTION_ARGS) {
}
Datum
ltree_addtext(PG_FUNCTION_ARGS) {
ltree_addtext(PG_FUNCTION_ARGS)
{
ltree *a = PG_GETARG_LTREE(0);
text *b = PG_GETARG_TEXT_P(1);
char *s;
ltree *r,*tmp;
ltree *r,
*tmp;
s = (char *) palloc(VARSIZE(b) - VARHDRSZ + 1);
memcpy(s, VARDATA(b), VARSIZE(b) - VARHDRSZ);
@@ -285,11 +318,13 @@ ltree_addtext(PG_FUNCTION_ARGS) {
}
Datum
ltree_textadd(PG_FUNCTION_ARGS) {
ltree_textadd(PG_FUNCTION_ARGS)
{
ltree *a = PG_GETARG_LTREE(1);
text *b = PG_GETARG_TEXT_P(0);
char *s;
ltree *r,*tmp;
ltree *r,
*tmp;
s = (char *) palloc(VARSIZE(b) - VARHDRSZ + 1);
memcpy(s, VARDATA(b), VARSIZE(b) - VARHDRSZ);
@@ -312,27 +347,35 @@ ltree_textadd(PG_FUNCTION_ARGS) {
}
ltree *
lca_inner(ltree** a, int len) {
int tmp,num=( (*a)->numlevel ) ? (*a)->numlevel-1 : 0;
lca_inner(ltree ** a, int len)
{
int tmp,
num = ((*a)->numlevel) ? (*a)->numlevel - 1 : 0;
ltree **ptr = a + 1;
int i,reslen=LTREE_HDRSIZE;
ltree_level *l1, *l2;
int i,
reslen = LTREE_HDRSIZE;
ltree_level *l1,
*l2;
ltree *res;
if ((*a)->numlevel == 0)
return NULL;
while( ptr-a < len ) {
while (ptr - a < len)
{
if ((*ptr)->numlevel == 0)
return NULL;
else if ((*ptr)->numlevel == 1)
num = 0;
else {
else
{
l1 = LTREE_FIRST(*a);
l2 = LTREE_FIRST(*ptr);
tmp=num; num=0;
for(i=0;i<min(tmp, (*ptr)->numlevel-1); i++) {
tmp = num;
num = 0;
for (i = 0; i < min(tmp, (*ptr)->numlevel - 1); i++)
{
if (l1->len == l2->len && strncmp(l1->name, l2->name, l1->len) == 0)
num = i + 1;
else
@@ -345,7 +388,8 @@ lca_inner(ltree** a, int len) {
}
l1 = LTREE_FIRST(*a);
for(i=0;i<num;i++) {
for (i = 0; i < num; i++)
{
reslen += MAXALIGN(l1->len + LEVEL_HDRSIZE);
l1 = LEVEL_NEXT(l1);
}
@@ -357,7 +401,8 @@ lca_inner(ltree** a, int len) {
l1 = LTREE_FIRST(*a);
l2 = LTREE_FIRST(res);
for(i=0;i<num;i++) {
for (i = 0; i < num; i++)
{
memcpy(l2, l1, MAXALIGN(l1->len + LEVEL_HDRSIZE));
l1 = LEVEL_NEXT(l1);
l2 = LEVEL_NEXT(l2);
@@ -367,9 +412,11 @@ lca_inner(ltree** a, int len) {
}
Datum
lca(PG_FUNCTION_ARGS) {
lca(PG_FUNCTION_ARGS)
{
int i;
ltree **a,*res;
ltree **a,
*res;
a = (ltree **) palloc(sizeof(ltree *) * fcinfo->nargs);
for (i = 0; i < fcinfo->nargs; i++)
@@ -384,5 +431,3 @@ lca(PG_FUNCTION_ARGS) {
else
PG_RETURN_NULL();
}
View File
@@ -9,6 +9,7 @@
PG_FUNCTION_INFO_V1(ltxtq_in);
Datum ltxtq_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(ltxtq_out);
Datum ltxtq_out(PG_FUNCTION_ARGS);
@@ -22,7 +23,8 @@ Datum ltxtq_out(PG_FUNCTION_ARGS);
* node of query tree, also used
* for storing polish notation in parser
*/
typedef struct NODE {
typedef struct NODE
{
int4 type;
int4 val;
int2 distance;
@@ -31,7 +33,8 @@ typedef struct NODE {
struct NODE *next;
} NODE;
typedef struct {
typedef struct
{
char *buf;
int4 state;
int4 count;
@@ -76,21 +79,25 @@ gettoken_query(QPRS_STATE * state, int4 *val, int4 *lenval, char **strval, uint1
*strval = state->buf;
*lenval = 1;
*flag = 0;
} else if ( !isspace((unsigned int)*(state->buf)) )
}
else if (!isspace((unsigned int) *(state->buf)))
elog(ERROR, "Operand syntax error");
break;
case INOPERAND:
if ( ISALNUM(*(state->buf)) ) {
if (ISALNUM(*(state->buf)))
{
if (*flag)
elog(ERROR, "Modificators syntax error");
(*lenval)++;
} else if ( *(state->buf) == '%' ) {
}
else if (*(state->buf) == '%')
*flag |= LVAR_SUBLEXEM;
} else if ( *(state->buf) == '@' ) {
else if (*(state->buf) == '@')
*flag |= LVAR_INCASE;
} else if ( *(state->buf) == '*' ) {
else if (*(state->buf) == '*')
*flag |= LVAR_ANYEND;
} else {
else
{
state->state = WAITOPERATOR;
return VAL;
}
@@ -188,7 +195,8 @@ makepol(QPRS_STATE * state)
int4 lenstack = 0;
uint16 flag;
while ((type = gettoken_query(state, &val, &lenval, &strval,&flag)) != END) {
while ((type = gettoken_query(state, &val, &lenval, &strval, &flag)) != END)
{
switch (type)
{
case VAL:
@@ -236,7 +244,8 @@ makepol(QPRS_STATE * state)
}
}
while (lenstack) {
while (lenstack)
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
};
@@ -379,20 +388,24 @@ infix(INFIX * in, bool first)
char *op = in->op + in->curpol->distance;
RESIZEBUF(in, in->curpol->length * 2 + 5);
while (*op) {
while (*op)
{
*(in->cur) = *op;
op++;
in->cur++;
}
if ( in->curpol->flag & LVAR_SUBLEXEM ) {
if (in->curpol->flag & LVAR_SUBLEXEM)
{
*(in->cur) = '%';
in->cur++;
}
if ( in->curpol->flag & LVAR_INCASE ) {
if (in->curpol->flag & LVAR_INCASE)
{
*(in->cur) = '@';
in->cur++;
}
if ( in->curpol->flag & LVAR_ANYEND ) {
if (in->curpol->flag & LVAR_ANYEND)
{
*(in->cur) = '*';
in->cur++;
}
@@ -481,4 +494,3 @@ ltxtq_out(PG_FUNCTION_ARGS)
PG_FREE_IF_COPY(query, 0);
PG_RETURN_POINTER(nrm.buf);
}
View File
@@ -13,19 +13,25 @@ PG_FUNCTION_INFO_V1(ltxtq_rexec);
* check for boolean condition
*/
bool
ltree_execute(ITEM * curitem, void *checkval, bool calcnot, bool (*chkcond) (void *checkval, ITEM * val)) {
ltree_execute(ITEM * curitem, void *checkval, bool calcnot, bool (*chkcond) (void *checkval, ITEM * val))
{
if (curitem->type == VAL)
return (*chkcond) (checkval, curitem);
else if (curitem->val == (int4) '!') {
else if (curitem->val == (int4) '!')
{
return (calcnot) ?
((ltree_execute(curitem + 1, checkval, calcnot, chkcond)) ? false : true)
: true;
} else if (curitem->val == (int4) '&') {
}
else if (curitem->val == (int4) '&')
{
if (ltree_execute(curitem + curitem->left, checkval, calcnot, chkcond))
return ltree_execute(curitem + 1, checkval, calcnot, chkcond);
else
return false;
} else { /* |-operator */
}
else
{ /* |-operator */
if (ltree_execute(curitem + curitem->left, checkval, calcnot, chkcond))
return true;
else
@@ -34,24 +40,29 @@ ltree_execute(ITEM * curitem, void *checkval, bool calcnot, bool (*chkcond) (voi
return false;
}
typedef struct {
typedef struct
{
ltree *node;
char *operand;
} CHKVAL;
static bool
checkcondition_str(void* checkval, ITEM * val) {
checkcondition_str(void *checkval, ITEM * val)
{
ltree_level *level = LTREE_FIRST(((CHKVAL *) checkval)->node);
int tlen = ((CHKVAL *) checkval)->node->numlevel;
char *op = ((CHKVAL *) checkval)->operand + val->distance;
int (*cmpptr) (const char *, const char *, size_t);
cmpptr = (val->flag & LVAR_INCASE) ? strncasecmp : strncmp;
while( tlen > 0 ) {
if ( val->flag & LVAR_SUBLEXEM ) {
while (tlen > 0)
{
if (val->flag & LVAR_SUBLEXEM)
{
if (compare_subnode(level, op, val->length, cmpptr, (val->flag & LVAR_ANYEND)))
return true;
} else if (
}
else if (
(
val->length == level->len ||
(level->len > val->length && (val->flag & LVAR_ANYEND))
@@ -67,7 +78,8 @@ checkcondition_str(void* checkval, ITEM * val) {
}
Datum
ltxtq_exec(PG_FUNCTION_ARGS) {
ltxtq_exec(PG_FUNCTION_ARGS)
{
ltree *val = PG_GETARG_LTREE(0);
ltxtquery *query = PG_GETARG_LTXTQUERY(1);
CHKVAL chkval;
@@ -89,11 +101,10 @@ ltxtq_exec(PG_FUNCTION_ARGS) {
}
Datum
ltxtq_rexec(PG_FUNCTION_ARGS) {
ltxtq_rexec(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall2(ltxtq_exec,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
));
}
View File
@@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.9 2002/08/15 02:58:29 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.10 2002/09/04 20:31:06 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
View File
@@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.7 2002/08/15 02:58:29 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.8 2002/09/04 20:31:06 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
View File
@@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.10 2001/11/12 17:44:14 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.11 2002/09/04 20:31:07 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
View File
@@ -1,5 +1,5 @@
/*
* $Header: /cvsroot/pgsql/contrib/pgbench/pgbench.c,v 1.18 2002/08/15 02:58:29 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pgbench/pgbench.c,v 1.19 2002/09/04 20:31:08 momjian Exp $
*
* pgbench: a simple TPC-B like benchmark program for PostgreSQL
* written by Tatsuo Ishii
@@ -88,8 +88,8 @@ typedef struct
int state; /* state No. */
int cnt; /* xacts count */
int ecnt; /* error count */
int listen; /* 0 indicates that an async query
* has been sent */
int listen; /* 0 indicates that an async query has
* been sent */
int aid; /* account id for this transaction */
int bid; /* branch id for this transaction */
int tid; /* teller id for this transaction */
@@ -241,7 +241,11 @@ doOne(CState * state, int n, int debug, int ttype)
discard_response(st);
break;
case 6: /* response to "end" */
/* transaction finished: record the time it took in the log */
/*
* transaction finished: record the time it took in the
* log
*/
if (use_log)
{
long long diff;
@@ -577,6 +581,7 @@ init(void)
}
#ifdef NOT_USED
/*
* do a checkpoint to purge the old WAL logs
*/
@@ -655,8 +660,9 @@ main(int argc, char **argv)
* testing? */
int is_full_vacuum = 0; /* do full vacuum before testing? */
int debug = 0; /* debug flag */
int ttype = 0; /* transaction type. 0: TPC-B, 1: SELECT only,
* 2: skip update of branches and tellers */
int ttype = 0; /* transaction type. 0: TPC-B, 1: SELECT
* only, 2: skip update of branches and
* tellers */
static CState *state; /* status of clients */
View File
@@ -1,5 +1,5 @@
/*
* $Header: /cvsroot/pgsql/contrib/pgstattuple/pgstattuple.c,v 1.8 2002/08/29 17:14:31 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/pgstattuple/pgstattuple.c,v 1.9 2002/09/04 20:31:08 momjian Exp $
*
* Copyright (c) 2001,2002 Tatsuo Ishii
*
@@ -158,15 +158,13 @@ pgstattuple(PG_FUNCTION_ARGS)
}
/*
* Prepare a values array for storage in our slot.
* This should be an array of C strings which will
* be processed later by the appropriate "in" functions.
* Prepare a values array for storage in our slot. This should be an
* array of C strings which will be processed later by the appropriate
* "in" functions.
*/
values = (char **) palloc(NCOLUMNS * sizeof(char *));
for (i = 0; i < NCOLUMNS; i++)
{
values[i] = (char *) palloc(NCHARS * sizeof(char));
}
i = 0;
snprintf(values[i++], NCHARS, "%lld", table_len);
snprintf(values[i++], NCHARS, "%lld", tuple_count);
@@ -186,9 +184,7 @@ pgstattuple(PG_FUNCTION_ARGS)
/* Clean up */
for (i = 0; i < NCOLUMNS; i++)
{
pfree(values[i]);
}
pfree(values);
PG_RETURN_DATUM(result);
View File
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/contrib/rtree_gist/Attic/rtree_gist.c,v 1.5 2002/05/28 15:24:53 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/rtree_gist/Attic/rtree_gist.c,v 1.6 2002/09/04 20:31:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -161,19 +161,22 @@ gbox_penalty(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(result);
}
typedef struct {
typedef struct
{
BOX *key;
int pos;
} KBsort;
static int
compare_KB(const void* a, const void* b) {
compare_KB(const void *a, const void *b)
{
BOX *abox = ((KBsort *) a)->key;
BOX *bbox = ((KBsort *) b)->key;
float sa = (abox->high.x - abox->low.x) * (abox->high.y - abox->low.y);
float sb = (bbox->high.x - bbox->low.x) * (bbox->high.y - bbox->low.y);
if ( sa==sb ) return 0;
if (sa == sb)
return 0;
return (sa > sb) ? 1 : -1;
}
@@ -303,34 +306,42 @@ gbox_picksplit(PG_FUNCTION_ARGS)
}
/* bad disposition, sort by ascending and resplit */
if ( (posR==0 || posL==0) && (posT==0 || posB==0) ) {
if ((posR == 0 || posL == 0) && (posT == 0 || posB == 0))
{
KBsort *arr = (KBsort *) palloc(sizeof(KBsort) * maxoff);
posL = posR = posB = posT = 0;
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
arr[i - 1].key = DatumGetBoxP(((GISTENTRY *) VARDATA(entryvec))[i].key);
arr[i - 1].pos = i;
}
qsort(arr, maxoff, sizeof(KBsort), compare_KB);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
cur = arr[i - 1].key;
if (cur->low.x - pageunion.low.x < pageunion.high.x - cur->high.x)
ADDLIST(listL, unionL, posL, arr[i - 1].pos);
else if ( cur->low.x - pageunion.low.x == pageunion.high.x - cur->high.x ) {
else if (cur->low.x - pageunion.low.x == pageunion.high.x - cur->high.x)
{
if (posL > posR)
ADDLIST(listR, unionR, posR, arr[i - 1].pos);
else
ADDLIST(listL, unionL, posL, arr[i - 1].pos);
} else
}
else
ADDLIST(listR, unionR, posR, arr[i - 1].pos);
if (cur->low.y - pageunion.low.y < pageunion.high.y - cur->high.y)
ADDLIST(listB, unionB, posB, arr[i - 1].pos);
else if ( cur->low.y - pageunion.low.y == pageunion.high.y - cur->high.y ) {
else if (cur->low.y - pageunion.low.y == pageunion.high.y - cur->high.y)
{
if (posB > posT)
ADDLIST(listT, unionT, posT, arr[i - 1].pos);
else
ADDLIST(listB, unionB, posB, arr[i - 1].pos);
} else
}
else
ADDLIST(listT, unionT, posT, arr[i - 1].pos);
}
pfree(arr);
View File
@@ -124,7 +124,10 @@ normal_rand(PG_FUNCTION_ARGS)
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/* switch to memory context appropriate for multiple function calls */
/*
* switch to memory context appropriate for multiple function
* calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/* total number of tuples to be returned */
@@ -134,11 +137,10 @@ normal_rand(PG_FUNCTION_ARGS)
fctx = (normal_rand_fctx *) palloc(sizeof(normal_rand_fctx));
/*
* Use fctx to keep track of upper and lower bounds
* from call to call. It will also be used to carry over
* the spare value we get from the Box-Muller algorithm
* so that we only actually calculate a new value every
* other call.
* Use fctx to keep track of upper and lower bounds from call to
* call. It will also be used to carry over the spare value we get
* from the Box-Muller algorithm so that we only actually
* calculate a new value every other call.
*/
fctx->mean = PG_GETARG_FLOAT8(1);
fctx->stddev = PG_GETARG_FLOAT8(2);
@@ -198,11 +200,10 @@ normal_rand(PG_FUNCTION_ARGS)
/* send the result */
SRF_RETURN_NEXT(funcctx, Float8GetDatum(result));
}
else /* do when there is no more left */
{
else
/* do when there is no more left */
SRF_RETURN_DONE(funcctx);
}
}
/*
* get_normal_pair()
@@ -218,7 +219,11 @@ normal_rand(PG_FUNCTION_ARGS)
static void
get_normal_pair(float8 *x1, float8 *x2)
{
float8 u1, u2, v1, v2, s;
float8 u1,
u2,
v1,
v2,
s;
for (;;)
{
@@ -315,7 +320,10 @@ crosstab(PG_FUNCTION_ARGS)
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/* switch to memory context appropriate for multiple function calls */
/*
* switch to memory context appropriate for multiple function
* calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/* Connect to SPI manager */
@@ -336,10 +344,9 @@ crosstab(PG_FUNCTION_ARGS)
* The provided SQL query must always return three columns.
*
* 1. rowname the label or identifier for each row in the final
* result
* 2. category the label or identifier for each column in the
* final result
* 3. values the value for each column in the final result
* result 2. category the label or identifier for each column
* in the final result 3. values the value for each column
* in the final result
*/
if (spi_tupdesc->natts != 3)
elog(ERROR, "crosstab: provided SQL must return 3 columns;"
@@ -398,8 +405,8 @@ crosstab(PG_FUNCTION_ARGS)
funcctx->slot = slot;
/*
* Generate attribute metadata needed later to produce tuples from raw
* C strings
* Generate attribute metadata needed later to produce tuples from
* raw C strings
*/
attinmeta = TupleDescGetAttInMetadata(tupdesc);
funcctx->attinmeta = attinmeta;
@@ -463,8 +470,8 @@ crosstab(PG_FUNCTION_ARGS)
memset(values, '\0', (1 + num_categories) * sizeof(char *));
/*
* now loop through the sql results and assign each value
* in sequence to the next category
* now loop through the sql results and assign each value in
* sequence to the next category
*/
for (i = 0; i < num_categories; i++)
{
@@ -482,10 +489,11 @@ crosstab(PG_FUNCTION_ARGS)
rowid = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
/*
* If this is the first pass through the values for this rowid
* set it, otherwise make sure it hasn't changed on us. Also
* check to see if the rowid is the same as that of the last
* tuple sent -- if so, skip this tuple entirely
* If this is the first pass through the values for this
* rowid set it, otherwise make sure it hasn't changed on
* us. Also check to see if the rowid is the same as that
* of the last tuple sent -- if so, skip this tuple
* entirely
*/
if (i == 0)
values[0] = pstrdup(rowid);
@@ -498,18 +506,19 @@ crosstab(PG_FUNCTION_ARGS)
allnulls = false;
/*
* Get the next category item value, which is always attribute
* number three.
* Get the next category item value, which is always
* attribute number three.
*
* Be careful to assign the value to the array index based
* on which category we are presently processing.
* Be careful to assign the value to the array index
* based on which category we are presently
* processing.
*/
values[1 + i] = SPI_getvalue(spi_tuple, spi_tupdesc, 3);
/*
* increment the counter since we consume a row
* for each category, but not for last pass
* because the API will do that for us
* increment the counter since we consume a row for
* each category, but not for last pass because the
* API will do that for us
*/
if (i < (num_categories - 1))
call_cntr = ++funcctx->call_cntr;
@@ -517,10 +526,9 @@ crosstab(PG_FUNCTION_ARGS)
else
{
/*
* We'll fill in NULLs for the missing values,
* but we need to decrement the counter since
* this sql result row doesn't belong to the current
* output tuple.
* We'll fill in NULLs for the missing values, but we
* need to decrement the counter since this sql result
* row doesn't belong to the current output tuple.
*/
call_cntr = --funcctx->call_cntr;
break;
@@ -534,7 +542,10 @@ crosstab(PG_FUNCTION_ARGS)
if (values[0] != NULL)
{
/* switch to memory context appropriate for multiple function calls */
/*
* switch to memory context appropriate for multiple
* function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
lastrowid = fctx->lastrowid = pstrdup(values[0]);
@@ -561,8 +572,7 @@ crosstab(PG_FUNCTION_ARGS)
{
/*
* Skipping this tuple entirely, but we need to advance
* the counter like the API would if we had returned
* one.
* the counter like the API would if we had returned one.
*/
call_cntr = ++funcctx->call_cntr;
@@ -579,7 +589,8 @@ crosstab(PG_FUNCTION_ARGS)
}
}
}
else /* do when there is no more left */
else
/* do when there is no more left */
{
/* release SPI related resources */
SPI_finish();
@@ -680,8 +691,8 @@ connectby_text(PG_FUNCTION_ARGS)
MemoryContextSwitchTo(oldcontext);
/*
* SFRM_Materialize mode expects us to return a NULL Datum.
* The actual tuples are in our tuplestore and passed back through
* SFRM_Materialize mode expects us to return a NULL Datum. The actual
* tuples are in our tuplestore and passed back through
* rsinfo->setResult. rsinfo->setDesc is set to the tuple description
* that we actually used to build our tuples with, so the caller can
* verify we did what it was expecting.
@@ -802,8 +813,8 @@ build_tuplestore_recursively(char *key_fld,
{
/*
* Check that return tupdesc is compatible with the one we got
* from the query, but only at level 0 -- no need to check more
* than once
* from the query, but only at level 0 -- no need to check
* more than once
*/
if (!compatConnectbyTupleDescs(tupdesc, spi_tupdesc))
@@ -997,10 +1008,9 @@ compatCrosstabTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc)
" return rowid datatype");
/*
* - attribute [1] of the sql tuple is the category;
* no need to check it
* - attribute [2] of the sql tuple should match
* attributes [1] to [natts] of the return tuple
* - attribute [1] of the sql tuple is the category; no need to check
* it - attribute [2] of the sql tuple should match attributes [1] to
* [natts] of the return tuple
*/
sql_attr = sql_tupdesc->attrs[2];
for (i = 1; i < ret_tupdesc->natts; i++)
@@ -1027,10 +1037,9 @@ make_crosstab_tupledesc(TupleDesc spi_tupdesc, int num_catagories)
int i;
/*
* We need to build a tuple description with one column
* for the rowname, and num_catagories columns for the values.
* Each must be of the same type as the corresponding
* spi result input column.
* We need to build a tuple description with one column for the
* rowname, and num_catagories columns for the values. Each must be of
* the same type as the corresponding spi result input column.
*/
natts = num_catagories + 1;
tupdesc = CreateTemplateTupleDesc(natts, false);
View File
@@ -31,4 +31,3 @@
extern const char *descr[];
#endif
View File
@@ -19,21 +19,28 @@
PG_FUNCTION_INFO_V1(gtxtidx_in);
Datum gtxtidx_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtxtidx_out);
Datum gtxtidx_out(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtxtidx_compress);
Datum gtxtidx_compress(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtxtidx_decompress);
Datum gtxtidx_decompress(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtxtidx_consistent);
Datum gtxtidx_consistent(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtxtidx_union);
Datum gtxtidx_union(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtxtidx_same);
Datum gtxtidx_same(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtxtidx_penalty);
Datum gtxtidx_penalty(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtxtidx_picksplit);
Datum gtxtidx_picksplit(PG_FUNCTION_ARGS);
@@ -51,26 +58,32 @@ Datum gtxtidx_picksplit(PG_FUNCTION_ARGS);
Datum
gtxtidx_in(PG_FUNCTION_ARGS) {
gtxtidx_in(PG_FUNCTION_ARGS)
{
elog(ERROR, "Not implemented");
PG_RETURN_DATUM(0);
}
Datum
gtxtidx_out(PG_FUNCTION_ARGS) {
gtxtidx_out(PG_FUNCTION_ARGS)
{
elog(ERROR, "Not implemented");
PG_RETURN_DATUM(0);
}
static int
compareint( const void * a, const void * b ) {
if ( *((int4*)a) == *((int4*)b) ) return 0;
compareint(const void *a, const void *b)
{
if (*((int4 *) a) == *((int4 *) b))
return 0;
return (*((int4 *) a) > *((int4 *) b)) ? 1 : -1;
}
static int
uniqueint( int4* a, int4 l ) {
int4 *ptr, *res;
uniqueint(int4 *a, int4 l)
{
int4 *ptr,
*res;
if (l == 1)
return l;
@@ -88,20 +101,25 @@ uniqueint( int4* a, int4 l ) {
}
static void
makesign( BITVECP sign, GISTTYPE *a) {
int4 k,len = ARRNELEM( a );
makesign(BITVECP sign, GISTTYPE * a)
{
int4 k,
len = ARRNELEM(a);
int4 *ptr = GETARR(a);
MemSet((void *) sign, 0, sizeof(BITVEC));
for (k = 0; k < len; k++)
HASH(sign, ptr[k]);
}
Datum
gtxtidx_compress(PG_FUNCTION_ARGS) {
gtxtidx_compress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTENTRY *retval = entry;
if ( entry->leafkey ) { /* txtidx */
if (entry->leafkey)
{ /* txtidx */
GISTTYPE *res;
txtidx *toastedval = (txtidx *) DatumGetPointer(entry->key);
txtidx *val = (txtidx *) DatumGetPointer(PG_DETOAST_DATUM(entry->key));
@@ -116,15 +134,20 @@ gtxtidx_compress(PG_FUNCTION_ARGS) {
res->flag = ARRKEY;
arr = GETARR(res);
len = val->size;
while( len-- ) {
while (len--)
{
*arr = crc32_sz((uint8 *) &words[ptr->pos], ptr->len);
arr++; ptr++;
arr++;
ptr++;
}
len = uniqueint(GETARR(res), val->size);
if ( len != val->size ) {
/* there is a collision of hash-function;
len is always less than val->size */
if (len != val->size)
{
/*
* there is a collision of hash-function; len is always less
* than val->size
*/
len = CALCGTSIZE(ARRKEY, len);
res = (GISTTYPE *) repalloc((void *) res, len);
res->len = len;
@@ -133,7 +156,8 @@ gtxtidx_compress(PG_FUNCTION_ARGS) {
pfree(val);
/* make signature, if array is too long */
if ( res->len > TOAST_INDEX_TARGET ) {
if (res->len > TOAST_INDEX_TARGET)
{
GISTTYPE *ressign;
len = CALCGTSIZE(SIGNKEY, 0);
@@ -149,9 +173,12 @@ gtxtidx_compress(PG_FUNCTION_ARGS) {
gistentryinit(*retval, PointerGetDatum(res),
entry->rel, entry->page,
entry->offset, res->len, FALSE);
} else if ( ISSIGNKEY(DatumGetPointer( entry->key )) &&
! ISALLTRUE(DatumGetPointer( entry->key )) ){
int4 i,len;
}
else if (ISSIGNKEY(DatumGetPointer(entry->key)) &&
!ISALLTRUE(DatumGetPointer(entry->key)))
{
int4 i,
len;
GISTTYPE *res;
BITVECP sign = GETSIGN(DatumGetPointer(entry->key));
@@ -174,12 +201,15 @@ gtxtidx_compress(PG_FUNCTION_ARGS) {
}
Datum
gtxtidx_decompress(PG_FUNCTION_ARGS) {
gtxtidx_decompress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTTYPE *key = (GISTTYPE *) DatumGetPointer(PG_DETOAST_DATUM(entry->key));
if ( key != (GISTTYPE*)DatumGetPointer(entry->key) ) {
if (key != (GISTTYPE *) DatumGetPointer(entry->key))
{
GISTENTRY *retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, key->len, FALSE);
@@ -190,7 +220,8 @@ gtxtidx_decompress(PG_FUNCTION_ARGS) {
PG_RETURN_POINTER(entry);
}
typedef struct {
typedef struct
{
int4 *arrb;
int4 *arre;
} CHKVAL;
@@ -199,14 +230,16 @@ typedef struct {
* is there value 'val' in array or not ?
*/
static bool
checkcondition_arr( void *checkval, ITEM* val ) {
checkcondition_arr(void *checkval, ITEM * val)
{
int4 *StopLow = ((CHKVAL *) checkval)->arrb;
int4 *StopHigh = ((CHKVAL *) checkval)->arre;
int4 *StopMiddle;
/* Loop invariant: StopLow <= val < StopHigh */
while (StopLow < StopHigh) {
while (StopLow < StopHigh)
{
StopMiddle = StopLow + (StopHigh - StopLow) / 2;
if (*StopMiddle == val->val)
return (true);
@@ -220,12 +253,14 @@ checkcondition_arr( void *checkval, ITEM* val ) {
}
static bool
checkcondition_bit( void *checkval, ITEM* val ) {
checkcondition_bit(void *checkval, ITEM * val)
{
return GETBIT(checkval, HASHVAL(val->val));
}
Datum
gtxtidx_consistent(PG_FUNCTION_ARGS) {
gtxtidx_consistent(PG_FUNCTION_ARGS)
{
QUERYTYPE *query = (QUERYTYPE *) PG_GETARG_POINTER(1);
GISTTYPE *key = (GISTTYPE *) DatumGetPointer(
((GISTENTRY *) PG_GETARG_POINTER(0))->key
@@ -234,7 +269,8 @@ gtxtidx_consistent(PG_FUNCTION_ARGS) {
if (!query->size)
PG_RETURN_BOOL(false);
if ( ISSIGNKEY(key) ) {
if (ISSIGNKEY(key))
{
if (ISALLTRUE(key))
PG_RETURN_BOOL(true);
@@ -243,7 +279,9 @@ gtxtidx_consistent(PG_FUNCTION_ARGS) {
(void *) GETSIGN(key), false,
checkcondition_bit
));
} else { /* only leaf pages */
}
else
{ /* only leaf pages */
CHKVAL chkval;
chkval.arrb = GETARR(key);
@@ -257,10 +295,12 @@ gtxtidx_consistent(PG_FUNCTION_ARGS) {
}
static int4
unionkey( BITVECP sbase, GISTTYPE *add ) {
unionkey(BITVECP sbase, GISTTYPE * add)
{
int4 i;
if ( ISSIGNKEY(add) ) {
if (ISSIGNKEY(add))
{
BITVECP sadd = GETSIGN(add);
if (ISALLTRUE(add))
@@ -269,8 +309,11 @@ unionkey( BITVECP sbase, GISTTYPE *add ) {
LOOPBYTE(
sbase[i] |= sadd[i];
);
} else {
}
else
{
int4 *ptr = GETARR(add);
for (i = 0; i < ARRNELEM(add); i++)
HASH(sbase, ptr[i]);
}
@@ -279,7 +322,8 @@ unionkey( BITVECP sbase, GISTTYPE *add ) {
Datum
gtxtidx_union(PG_FUNCTION_ARGS) {
gtxtidx_union(PG_FUNCTION_ARGS)
{
bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
int *size = (int *) PG_GETARG_POINTER(1);
BITVEC base;
@@ -289,8 +333,10 @@ gtxtidx_union(PG_FUNCTION_ARGS) {
GISTTYPE *result;
MemSet((void *) base, 0, sizeof(BITVEC));
for(i=0;i<len;i++) {
if ( unionkey( base, GETENTRY(entryvec, i) ) ) {
for (i = 0; i < len; i++)
{
if (unionkey(base, GETENTRY(entryvec, i)))
{
flag = ALLISTRUE;
break;
}
@@ -308,42 +354,53 @@ gtxtidx_union(PG_FUNCTION_ARGS) {
}
Datum
gtxtidx_same(PG_FUNCTION_ARGS) {
gtxtidx_same(PG_FUNCTION_ARGS)
{
GISTTYPE *a = (GISTTYPE *) PG_GETARG_POINTER(0);
GISTTYPE *b = (GISTTYPE *) PG_GETARG_POINTER(1);
bool *result = (bool *) PG_GETARG_POINTER(2);
if ( ISSIGNKEY(a) ) { /* then b also ISSIGNKEY */
if ( ISALLTRUE(a) && ISALLTRUE(b) ) {
if (ISSIGNKEY(a))
{ /* then b also ISSIGNKEY */
if (ISALLTRUE(a) && ISALLTRUE(b))
*result = true;
} else if ( ISALLTRUE(a) ) {
else if (ISALLTRUE(a))
*result = false;
} else if ( ISALLTRUE(b) ) {
else if (ISALLTRUE(b))
*result = false;
} else {
else
{
int4 i;
BITVECP sa=GETSIGN(a), sb=GETSIGN(b);
BITVECP sa = GETSIGN(a),
sb = GETSIGN(b);
*result = true;
LOOPBYTE(
if ( sa[i] != sb[i] ) {
if (sa[i] != sb[i])
{
*result = false;
break;
}
);
}
} else { /* a and b ISARRKEY */
int4 lena = ARRNELEM(a), lenb = ARRNELEM(b);
}
else
{ /* a and b ISARRKEY */
int4 lena = ARRNELEM(a),
lenb = ARRNELEM(b);
if ( lena != lenb ) {
if (lena != lenb)
*result = false;
} else {
int4 *ptra = GETARR(a), *ptrb = GETARR(b);
else
{
int4 *ptra = GETARR(a),
*ptrb = GETARR(b);
int4 i;
*result = true;
for (i = 0; i < lena; i++)
if ( ptra[i] != ptrb[i] ) {
if (ptra[i] != ptrb[i])
{
*result = false;
break;
}
@@ -354,8 +411,11 @@ gtxtidx_same(PG_FUNCTION_ARGS) {
}
static int4
sizebitvec( BITVECP sign ) {
int4 size=0, i;
sizebitvec(BITVECP sign)
{
int4 size = 0,
i;
LOOPBYTE(
size += SUMBIT(*(char *) sign);
sign = (BITVECP) (((char *) sign) + 1);
@@ -364,7 +424,8 @@ sizebitvec( BITVECP sign ) {
}
Datum
gtxtidx_penalty(PG_FUNCTION_ARGS) {
gtxtidx_penalty(PG_FUNCTION_ARGS)
{
GISTENTRY *origentry = (GISTENTRY *) PG_GETARG_POINTER(0); /* always ISSIGNKEY */
GISTENTRY *newentry = (GISTENTRY *) PG_GETARG_POINTER(1);
float *penalty = (float *) PG_GETARG_POINTER(2);
@@ -373,23 +434,31 @@ gtxtidx_penalty(PG_FUNCTION_ARGS) {
int4 unionsize = 0;
BITVECP orig = GETSIGN(origval);
if ( ISALLTRUE(origval) ) {
if (ISALLTRUE(origval))
{
*penalty = 0.0;
PG_RETURN_POINTER(penalty);
}
if ( ISARRKEY(newval) ) {
int4 *ptr=GETARR(newval), n=ARRNELEM(newval);
while( n-- ) {
if (ISARRKEY(newval))
{
int4 *ptr = GETARR(newval),
n = ARRNELEM(newval);
while (n--)
{
if (GETBIT(orig, HASHVAL(*ptr)) == 0)
unionsize++;
ptr++;
}
*penalty = (float) unionsize;
} else {
if ( ISALLTRUE(newval) ) {
}
else
{
if (ISALLTRUE(newval))
*penalty = (float) (SIGLENBIT - sizebitvec(orig));
} else {
else
{
char valtmp;
BITVECP nval = GETSIGN(newval);
int4 i;
@@ -405,31 +474,34 @@ gtxtidx_penalty(PG_FUNCTION_ARGS) {
PG_RETURN_POINTER(penalty);
}
typedef struct {
typedef struct
{
bool allistrue;
BITVEC sign;
} CACHESIGN;
static void
fillcache( CACHESIGN *item, GISTTYPE *key ) {
fillcache(CACHESIGN * item, GISTTYPE * key)
{
item->allistrue = false;
if ( ISARRKEY( key ) ) {
if (ISARRKEY(key))
makesign(item->sign, key);
} else if ( ISALLTRUE(key) ) {
else if (ISALLTRUE(key))
item->allistrue = true;
} else {
else
memcpy((void *) item->sign, (void *) GETSIGN(key), sizeof(BITVEC));
}
}
#define WISH_F(a,b,c) (double)( -(double)(((a)-(b))*((a)-(b))*((a)-(b)))*(c) )
typedef struct {
typedef struct
{
OffsetNumber pos;
int4 cost;
} SPLITCOST;
static int
comparecost( const void *a, const void *b ) {
comparecost(const void *a, const void *b)
{
if (((SPLITCOST *) a)->cost == ((SPLITCOST *) b)->cost)
return 0;
else
@@ -437,21 +509,34 @@ comparecost( const void *a, const void *b ) {
}
Datum
gtxtidx_picksplit(PG_FUNCTION_ARGS) {
gtxtidx_picksplit(PG_FUNCTION_ARGS)
{
bytea *entryvec = (bytea *) PG_GETARG_POINTER(0);
GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1);
OffsetNumber k,j;
GISTTYPE *datum_l, *datum_r;
BITVEC union_l, union_r;
OffsetNumber k,
j;
GISTTYPE *datum_l,
*datum_r;
BITVEC union_l,
union_r;
bool firsttime = true;
int4 size_alpha,size_beta,sizeu,sizei;
int4 size_waste, waste = 0.0;
int4 size_l, size_r;
int4 size_alpha,
size_beta,
sizeu,
sizei;
int4 size_waste,
waste = 0.0;
int4 size_l,
size_r;
int4 nbytes;
OffsetNumber seed_1=0, seed_2=0;
OffsetNumber *left, *right;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
*right;
OffsetNumber maxoff;
BITVECP ptra, ptrb, ptrc;
BITVECP ptra,
ptrb,
ptrc;
int i;
CACHESIGN *cache;
char valtmp;
@@ -465,19 +550,24 @@ gtxtidx_picksplit(PG_FUNCTION_ARGS) {
cache = (CACHESIGN *) palloc(sizeof(CACHESIGN) * (maxoff + 2));
fillcache(&cache[FirstOffsetNumber], GETENTRY(entryvec, FirstOffsetNumber));
for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) {
for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) {
for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k))
{
for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j))
{
if (k == FirstOffsetNumber)
fillcache(&cache[j], GETENTRY(entryvec, j));
if ( cache[k].allistrue || cache[j].allistrue ) {
if (cache[k].allistrue || cache[j].allistrue)
{
sizeu = SIGLENBIT;
if (cache[k].allistrue && cache[j].allistrue)
sizei = SIGLENBIT;
else
sizei = (cache[k].allistrue) ?
sizebitvec(cache[j].sign) : sizebitvec(cache[k].sign);
} else {
}
else
{
sizeu = sizei = 0;
ptra = cache[j].sign;
ptrb = cache[k].sign;
@@ -506,7 +596,8 @@ gtxtidx_picksplit(PG_FUNCTION_ARGS) {
}
size_waste = sizeu - sizei;
if (size_waste > waste || firsttime) {
if (size_waste > waste || firsttime)
{
waste = size_waste;
seed_1 = k;
seed_2 = j;
@@ -520,29 +611,40 @@ gtxtidx_picksplit(PG_FUNCTION_ARGS) {
right = v->spl_right;
v->spl_nright = 0;
if ( seed_1 == 0 || seed_2 == 0 ) {
if (seed_1 == 0 || seed_2 == 0)
{
seed_1 = 1;
seed_2 = 2;
}
/* form initial .. */
if ( cache[seed_1].allistrue ) {
if (cache[seed_1].allistrue)
{
datum_l = (GISTTYPE *) palloc(CALCGTSIZE(SIGNKEY | ALLISTRUE, 0));
datum_l->len = CALCGTSIZE( SIGNKEY|ALLISTRUE, 0 ); datum_l->flag = SIGNKEY|ALLISTRUE;
datum_l->len = CALCGTSIZE(SIGNKEY | ALLISTRUE, 0);
datum_l->flag = SIGNKEY | ALLISTRUE;
size_l = SIGLENBIT;
} else {
}
else
{
datum_l = (GISTTYPE *) palloc(CALCGTSIZE(SIGNKEY, 0));
datum_l->len = CALCGTSIZE( SIGNKEY, 0 ); datum_l->flag = SIGNKEY;
datum_l->len = CALCGTSIZE(SIGNKEY, 0);
datum_l->flag = SIGNKEY;
memcpy((void *) GETSIGN(datum_l), (void *) cache[seed_1].sign, sizeof(BITVEC));
size_l = sizebitvec(GETSIGN(datum_l));
}
if ( cache[seed_2].allistrue ) {
if (cache[seed_2].allistrue)
{
datum_r = (GISTTYPE *) palloc(CALCGTSIZE(SIGNKEY | ALLISTRUE, 0));
datum_r->len = CALCGTSIZE( SIGNKEY|ALLISTRUE, 0 ); datum_r->flag = SIGNKEY|ALLISTRUE;
datum_r->len = CALCGTSIZE(SIGNKEY | ALLISTRUE, 0);
datum_r->flag = SIGNKEY | ALLISTRUE;
size_r = SIGLENBIT;
} else {
}
else
{
datum_r = (GISTTYPE *) palloc(CALCGTSIZE(SIGNKEY, 0));
datum_r->len = CALCGTSIZE( SIGNKEY, 0 ); datum_r->flag = SIGNKEY;
datum_r->len = CALCGTSIZE(SIGNKEY, 0);
datum_r->flag = SIGNKEY;
memcpy((void *) GETSIGN(datum_r), (void *) cache[seed_2].sign, sizeof(BITVEC));
size_r = sizebitvec(GETSIGN(datum_r));
}
@@ -551,31 +653,42 @@ gtxtidx_picksplit(PG_FUNCTION_ARGS) {
fillcache(&cache[maxoff], GETENTRY(entryvec, maxoff));
/* sort before ... */
costvector = (SPLITCOST *) palloc(sizeof(SPLITCOST) * maxoff);
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) {
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j))
{
costvector[j - 1].pos = j;
if ( cache[j].allistrue ) {
if (cache[j].allistrue)
{
size_alpha = SIGLENBIT - size_l;
size_beta = SIGLENBIT - size_r;
} else {
}
else
{
ptra = cache[seed_1].sign;
ptrb = cache[seed_2].sign;
ptrc = cache[j].sign;
size_beta = size_alpha = 0;
if ( cache[seed_1].allistrue ) {
if ( ! cache[seed_2].allistrue ) {
if (cache[seed_1].allistrue)
{
if (!cache[seed_2].allistrue)
{
LOOPBIT(
if (GETBIT(ptrc, i) && !GETBIT(ptrb, i))
size_beta++;
);
}
} else if ( cache[seed_2].allistrue ) {
if ( ! cache[seed_1].allistrue ) {
}
else if (cache[seed_2].allistrue)
{
if (!cache[seed_1].allistrue)
{
LOOPBIT(
if (GETBIT(ptrc, i) && !GETBIT(ptra, i))
size_alpha++;
);
}
} else {
}
else
{
LOOPBIT(
if (GETBIT(ptrc, i) && !GETBIT(ptra, i))
size_alpha++;
@@ -589,21 +702,26 @@ gtxtidx_picksplit(PG_FUNCTION_ARGS) {
}
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
for (k = 0; k < maxoff; k++) {
for (k = 0; k < maxoff; k++)
{
j = costvector[k].pos;
if ( j == seed_1 ) {
if (j == seed_1)
{
*left++ = j;
v->spl_nleft++;
continue;
} else if ( j == seed_2 ) {
}
else if (j == seed_2)
{
*right++ = j;
v->spl_nright++;
continue;
}
if ( ISALLTRUE( datum_l ) || cache[j].allistrue ) {
if (ISALLTRUE(datum_l) || cache[j].allistrue)
size_alpha = SIGLENBIT;
} else {
else
{
ptra = cache[j].sign;
ptrb = GETSIGN(datum_l);
size_alpha = 0;
@@ -612,9 +730,10 @@ gtxtidx_picksplit(PG_FUNCTION_ARGS) {
size_alpha += SUMBIT(valtmp);
);
}
if ( ISALLTRUE( datum_r ) || cache[j].allistrue ) {
if (ISALLTRUE(datum_r) || cache[j].allistrue)
size_beta = SIGLENBIT;
} else {
else
{
ptra = cache[j].sign;
ptrb = GETSIGN(datum_r);
size_beta = 0;
@@ -624,23 +743,32 @@ gtxtidx_picksplit(PG_FUNCTION_ARGS) {
);
}
if (size_alpha - size_l < size_beta - size_r + WISH_F(v->spl_nleft, v->spl_nright, 0.1)) {
if ( ! ISALLTRUE( datum_l ) ) {
if ( size_alpha == SIGLENBIT ) {
if (size_alpha - size_l < size_beta - size_r + WISH_F(v->spl_nleft, v->spl_nright, 0.1))
{
if (!ISALLTRUE(datum_l))
{
if (size_alpha == SIGLENBIT)
{
if (size_alpha != size_l)
MemSet((void *) GETSIGN(datum_l), 0xff, sizeof(BITVEC));
} else
}
else
memcpy((void *) GETSIGN(datum_l), (void *) union_l, sizeof(BITVEC));
}
size_l = size_alpha;
*left++ = j;
v->spl_nleft++;
} else {
if ( ! ISALLTRUE( datum_r ) ) {
if ( size_beta == SIGLENBIT ) {
}
else
{
if (!ISALLTRUE(datum_r))
{
if (size_beta == SIGLENBIT)
{
if (size_beta != size_r)
MemSet((void *) GETSIGN(datum_r), 0xff, sizeof(BITVEC));
} else
}
else
memcpy((void *) GETSIGN(datum_r), (void *) union_r, sizeof(BITVEC));
}
size_r = size_beta;
@@ -657,5 +785,3 @@ gtxtidx_picksplit(PG_FUNCTION_ARGS) {
PG_RETURN_POINTER(v);
}
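
The seed search in the picksplit hunks above drives everything from two numbers per key pair: the cardinality of the union and of the intersection of their bit signatures (size_waste = sizeu - sizei). A minimal standalone sketch of that computation, using a plain byte array and an assumed SIGLEN in place of the module's BITVEC/SIGLENBIT macros — illustration only, not the contrib code:

#include <stddef.h>

#define SIGLEN 63                       /* assumed signature size in bytes */

static int
popcount_byte(unsigned char b)
{
    int         n = 0;

    while (b)
    {
        n += b & 1;
        b >>= 1;
    }
    return n;
}

static void
union_intersect_sizes(const unsigned char *a, const unsigned char *b,
                      int *sizeu, int *sizei)
{
    size_t      i;

    *sizeu = *sizei = 0;
    for (i = 0; i < SIGLEN; i++)
    {
        *sizeu += popcount_byte(a[i] | b[i]);   /* bits set in either sig */
        *sizei += popcount_byte(a[i] & b[i]);   /* bits set in both sigs */
    }
}

The pair with the largest waste (union minus intersection) becomes the two seeds, and each remaining key is then assigned to whichever side its signature enlarges least.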

View File

@@ -739,8 +739,10 @@ qtxt_out(PG_FUNCTION_ARGS)
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(0)));
INFIX nrm;
if (query->size == 0) {
if (query->size == 0)
{
char *b = palloc(1);
*b = '\0';
PG_RETURN_POINTER(b);
}
@@ -769,7 +771,8 @@ querytree(PG_FUNCTION_ARGS)
int4 len;
if (query->size == 0) {
if (query->size == 0)
{
res = (text *) palloc(VARHDRSZ);
VARATT_SIZEP(res) = VARHDRSZ;
PG_RETURN_POINTER(res);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.13 2002/08/15 02:58:29 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.14 2002/09/04 20:31:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,9 +35,12 @@
#define BUFSIZE 1024
extern char *optarg;
extern int optind, opterr, optopt;
extern int optind,
opterr,
optopt;
struct _param {
struct _param
{
char *pg_user;
int pg_prompt;
char *pg_port;
@@ -175,9 +178,11 @@ vacuumlo(char *database, struct _param *param)
int i;
char *password = NULL;
if(param->pg_prompt) {
if (param->pg_prompt)
{
password = simple_prompt("Password: ", 32, 0);
if(!password) {
if (!password)
{
fprintf(stderr, "failed to get password\n");
exit(1);
}
@@ -201,7 +206,8 @@ vacuumlo(char *database, struct _param *param)
return -1;
}
if (param->verbose) {
if (param->verbose)
{
fprintf(stdout, "Connected to %s\n", database);
if (param->dry_run)
fprintf(stdout, "Test run: no large objects will be removed!\n");
@@ -342,7 +348,8 @@ vacuumlo(char *database, struct _param *param)
fflush(stdout);
}
if(param->dry_run == 0) {
if (param->dry_run == 0)
{
if (lo_unlink(conn, lo) < 0)
{
fprintf(stderr, "\nFailed to remove lo %u: ", lo);
@@ -350,7 +357,8 @@ vacuumlo(char *database, struct _param *param)
}
else
deleted++;
} else
}
else
deleted++;
}
PQclear(res);
@@ -371,7 +379,8 @@ vacuumlo(char *database, struct _param *param)
}
void
usage(void) {
usage(void)
{
fprintf(stdout, "vacuumlo removes unreferenced large objects from databases\n\n");
fprintf(stdout, "Usage:\n vacuumlo [options] dbname [dbnames...]\n\n");
fprintf(stdout, "Options:\n");
@@ -401,14 +410,17 @@ main(int argc, char **argv)
param.verbose = 0;
param.dry_run = 0;
while( 1 ) {
while (1)
{
c = getopt(argc, argv, "?h:U:p:vnW");
if (c == -1)
break;
switch(c) {
switch (c)
{
case '?':
if(optopt == '?') {
if (optopt == '?')
{
usage();
exit(0);
}
@@ -430,7 +442,8 @@ main(int argc, char **argv)
break;
case 'p':
port = strtol(optarg, NULL, 10);
if( (port < 1) || (port > 65535)) {
if ((port < 1) || (port > 65535))
{
fprintf(stderr, "[%s]: invalid port number '%s'\n", argv[0], optarg);
exit(1);
}
@@ -443,13 +456,15 @@ main(int argc, char **argv)
}
/* No database given? Show usage */
if(optind >= argc-1) {
if (optind >= argc - 1)
{
fprintf(stderr, "vacuumlo: missing required argument: database name\n");
fprintf(stderr, "Try 'vacuumlo -?' for help.\n");
exit(1);
}
for(c = optind; c < argc; c++) {
for (c = optind; c < argc; c++)
{
/* Work on selected database */
rc += (vacuumlo(argv[c], &param) != 0);
}
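
The vacuumlo hunks above reformat a standard POSIX getopt(3) loop. A minimal self-contained sketch of the same pattern — option loop, switch on the returned character, optarg for the -p value, and the 1..65535 port check — with the default port and usage text being illustrative assumptions, not vacuumlo's actual values:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
    int         c;
    long        port = 5432;    /* assumed default, for illustration only */

    while ((c = getopt(argc, argv, "p:v")) != -1)
    {
        switch (c)
        {
            case 'p':
                port = strtol(optarg, NULL, 10);
                if (port < 1 || port > 65535)
                {
                    fprintf(stderr, "invalid port number '%s'\n", optarg);
                    exit(1);
                }
                break;
            case 'v':
                printf("verbose mode\n");
                break;
            default:            /* getopt returns '?' on unknown options */
                fprintf(stderr, "usage: %s [-v] [-p port] dbname...\n", argv[0]);
                exit(1);
        }
    }
    /* remaining arguments, argv[optind..], are the database names */
    for (c = optind; c < argc; c++)
        printf("would process database %s on port %ld\n", argv[c], port);
    return 0;
}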

View File

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.81 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.82 2002/09/04 20:31:08 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -326,9 +326,9 @@ nocachegetattr(HeapTuple tuple,
/*
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or var-widths before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
* with no nulls or var-widths before the target attribute. If
* possible, we also want to initialize the remainder of the attribute
* cached offset values.
*/
if (!slow)
{
@@ -702,8 +702,8 @@ heap_modifytuple(HeapTuple tuple,
nulls);
/*
* copy the identification info of the old tuple: t_ctid, t_self,
* and OID (if any)
* copy the identification info of the old tuple: t_ctid, t_self, and
* OID (if any)
*/
newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
newTuple->t_self = tuple->t_self;

View File

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.59 2002/08/25 17:20:00 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.60 2002/09/04 20:31:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -319,9 +319,9 @@ nocache_index_getattr(IndexTuple tup,
/*
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or var-widths before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
* with no nulls or var-widths before the target attribute. If
* possible, we also want to initialize the remainder of the attribute
* cached offset values.
*/
if (!slow)
{

View File

@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.64 2002/08/24 15:00:46 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.65 2002/09/04 20:31:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,8 +88,8 @@ printtup_setup(DestReceiver *self, int operation,
pq_puttextmessage('P', portalName);
/*
* if this is a retrieve, then we send back the tuple
* descriptor of the tuples.
* if this is a retrieve, then we send back the tuple descriptor of
* the tuples.
*/
if (operation == CMD_SELECT)
{

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.88 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.89 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -425,9 +425,8 @@ TupleDescInitEntry(TupleDesc desc,
*
* (Why not just make the atttypid point to the OID type, instead of the
* type the query returns? Because the executor uses the atttypid to
* tell the front end what type will be returned,
* and in the end the type returned will be the result of the query,
* not an OID.)
* tell the front end what type will be returned, and in the end the
* type returned will be the result of the query, not an OID.)
*
* (Why not wait until the return type of the set is known (i.e., the
* recursive call to the executor to execute the set has returned)

View File

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.95 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.96 2002/09/04 20:31:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -294,6 +294,7 @@ gistinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);
@@ -496,9 +497,9 @@ gistlayerinsert(Relation r, BlockNumber blkno,
gistdelete(r, &oldtid);
/*
* if child was splitted, new key for child will be inserted
* in the end list of child, so we must say to any scans
* that page is changed beginning from 'child' offset
* if child was splitted, new key for child will be inserted in
* the end list of child, so we must say to any scans that page is
* changed beginning from 'child' offset
*/
if (ret & SPLITED)
gistadjscans(r, GISTOP_SPLIT, blkno, child);

View File

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gistget.c,v 1.34 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gistget.c,v 1.35 2002/09/04 20:31:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.59 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.60 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -164,6 +164,7 @@ hashinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);
@@ -228,18 +229,21 @@ hashgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
* Yes, so mark it by setting the LP_DELETE bit in the item flags.
* Yes, so mark it by setting the LP_DELETE bit in the item
* flags.
*/
offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
page = BufferGetPage(so->hashso_curbuf);
PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;
/*
* Since this can be redone later if needed, it's treated the
* same as a commit-hint-bit status update for heap tuples:
* we mark the buffer dirty but don't make a WAL log entry.
* same as a commit-hint-bit status update for heap tuples: we
* mark the buffer dirty but don't make a WAL log entry.
*/
SetBufferCommitInfoNeedsSave(so->hashso_curbuf);
}
/*
* Now continue the scan.
*/

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.34 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.35 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -96,7 +96,8 @@ hashname(PG_FUNCTION_ARGS)
char *key = NameStr(*PG_GETARG_NAME(0));
int keylen = strlen(key);
Assert(keylen < NAMEDATALEN); /* else it's not truncated correctly */
Assert(keylen < NAMEDATALEN); /* else it's not truncated
* correctly */
return hash_any((unsigned char *) key, keylen);
}
@@ -166,7 +167,10 @@ hashvarlena(PG_FUNCTION_ARGS)
Datum
hash_any(register const unsigned char *k, register int keylen)
{
register uint32 a,b,c,len;
register uint32 a,
b,
c,
len;
/* Set up the internal state */
len = keylen;
@@ -180,25 +184,37 @@ hash_any(register const unsigned char *k, register int keylen)
b += (k[4] + ((uint32) k[5] << 8) + ((uint32) k[6] << 16) + ((uint32) k[7] << 24));
c += (k[8] + ((uint32) k[9] << 8) + ((uint32) k[10] << 16) + ((uint32) k[11] << 24));
mix(a, b, c);
k += 12; len -= 12;
k += 12;
len -= 12;
}
/* handle the last 11 bytes */
c += keylen;
switch (len) /* all the case statements fall through */
{
case 11: c+=((uint32)k[10]<<24);
case 10: c+=((uint32)k[9]<<16);
case 9 : c+=((uint32)k[8]<<8);
case 11:
c += ((uint32) k[10] << 24);
case 10:
c += ((uint32) k[9] << 16);
case 9:
c += ((uint32) k[8] << 8);
/* the first byte of c is reserved for the length */
case 8 : b+=((uint32)k[7]<<24);
case 7 : b+=((uint32)k[6]<<16);
case 6 : b+=((uint32)k[5]<<8);
case 5 : b+=k[4];
case 4 : a+=((uint32)k[3]<<24);
case 3 : a+=((uint32)k[2]<<16);
case 2 : a+=((uint32)k[1]<<8);
case 1 : a+=k[0];
case 8:
b += ((uint32) k[7] << 24);
case 7:
b += ((uint32) k[6] << 16);
case 6:
b += ((uint32) k[5] << 8);
case 5:
b += k[4];
case 4:
a += ((uint32) k[3] << 24);
case 3:
a += ((uint32) k[2] << 16);
case 2:
a += ((uint32) k[1] << 8);
case 1:
a += k[0];
/* case 0: nothing left to add */
}
mix(a, b, c);
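
The reindented switch above depends on deliberate case fall-through: each case adds one more trailing byte into the accumulators before control falls into the next case. A small sketch of that idiom in isolation, folding the last (len % 4) bytes of a buffer into a single accumulator; the combining step is a placeholder and not the backend's mix() macro:

#include <stdint.h>
#include <stddef.h>

static uint32_t
fold_tail(const unsigned char *k, size_t len, uint32_t acc)
{
    /* point at the bytes left over after the 4-byte-at-a-time main loop */
    const unsigned char *tail = k + (len & ~(size_t) 3);

    switch (len & 3)            /* all cases fall through on purpose */
    {
        case 3:
            acc += (uint32_t) tail[2] << 16;
        case 2:
            acc += (uint32_t) tail[1] << 8;
        case 1:
            acc += tail[0];
            /* case 0: nothing left to add */
    }
    return acc;
}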

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.28 2002/06/20 20:29:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.29 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* Because we can be doing an index scan on a relation while we

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.147 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.148 2002/09/04 20:31:09 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -985,8 +985,8 @@ heap_fetch(Relation relation,
*userbuf = buffer;
/*
* Count the successful fetch in *pgstat_info if given,
* otherwise in the relation's default statistics area.
* Count the successful fetch in *pgstat_info if given, otherwise
* in the relation's default statistics area.
*/
if (pgstat_info != NULL)
pgstat_count_heap_fetch(pgstat_info);
@@ -1120,6 +1120,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
/* this is redundant with an Assert in HeapTupleSetOid */
Assert(tup->t_data->t_infomask & HEAP_HASOID);
#endif
/*
* If the object id of this tuple has already been assigned, trust
* the caller. There are a couple of ways this can happen. At
@@ -1224,10 +1225,10 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
WriteBuffer(buffer);
/*
* If tuple is cachable, mark it for invalidation from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
* buffer, because the "tup" data structure is all in local memory,
* not in the shared buffer.
* If tuple is cachable, mark it for invalidation from the caches in
* case we abort. Note it is OK to do this after WriteBuffer releases
* the buffer, because the "tup" data structure is all in local
* memory, not in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, tup);
@@ -1379,6 +1380,7 @@ l1:
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
#ifdef TUPLE_TOASTER_ACTIVE
/*
* If the relation has toastable attributes, we need to delete no
* longer needed items there too. We have to do this before
@@ -1728,10 +1730,10 @@ l2:
WriteBuffer(buffer);
/*
* If new tuple is cachable, mark it for invalidation from the caches in
* case we abort. Note it is OK to do this after WriteBuffer releases
* the buffer, because the "newtup" data structure is all in local
* memory, not in the shared buffer.
* If new tuple is cachable, mark it for invalidation from the caches
* in case we abort. Note it is OK to do this after WriteBuffer
* releases the buffer, because the "newtup" data structure is all in
* local memory, not in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, newtup);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.35 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.36 2002/09/04 20:31:09 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -181,9 +181,7 @@ heap_tuple_untoast_attr_slice(varattrib *attr, int32 sliceoffset, int32 slicelen
varattrib *tmp;
if (VARATT_IS_EXTERNAL(attr))
{
tmp = toast_fetch_datum(attr);
}
else
{
tmp = attr; /* compressed in main tuple */
@@ -206,10 +204,8 @@ heap_tuple_untoast_attr_slice(varattrib *attr, int32 sliceoffset, int32 slicelen
return (toast_fetch_datum_slice(attr, sliceoffset, slicelength));
}
else
{
preslice = attr;
}
}
/* slicing of datum for compressed cases and plain value */
@@ -221,16 +217,15 @@ heap_tuple_untoast_attr_slice(varattrib *attr, int32 sliceoffset, int32 slicelen
}
if (((sliceoffset + slicelength) > attrsize) || slicelength < 0)
{
slicelength = attrsize - sliceoffset;
}
result = (varattrib *) palloc(slicelength + VARHDRSZ);
VARATT_SIZEP(result) = slicelength + VARHDRSZ;
memcpy(VARDATA(result), VARDATA(preslice) + sliceoffset, slicelength);
if (preslice != attr) pfree(preslice);
if (preslice != attr)
pfree(preslice);
return result;
}
@@ -1053,9 +1048,9 @@ toast_fetch_datum(varattrib *attr)
/*
* Read the chunks by index
*
* Note that because the index is actually on (valueid, chunkidx)
* we will see the chunks in chunkidx order, even though we didn't
* explicitly ask for it.
* Note that because the index is actually on (valueid, chunkidx) we will
* see the chunks in chunkidx order, even though we didn't explicitly
* ask for it.
*/
nextidx = 0;
@@ -1169,9 +1164,7 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
}
if (((sliceoffset + length) > attrsize) || length < 0)
{
length = attrsize - sliceoffset;
}
result = (varattrib *) palloc(length + VARHDRSZ);
VARATT_SIZEP(result) = length + VARHDRSZ;
@@ -1179,7 +1172,8 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
if (length == 0) return (result); /* Can save a lot of work at this point! */
if (length == 0)
return (result); /* Can save a lot of work at this point! */
startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE;
endchunk = (sliceoffset + length - 1) / TOAST_MAX_CHUNK_SIZE;
@@ -1205,6 +1199,7 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
(AttrNumber) 1,
(RegProcedure) F_OIDEQ,
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/*
* Now dependent on number of chunks:
*/
@@ -1279,8 +1274,10 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
*/
chcpystrt = 0;
chcpyend = chunksize - 1;
if (residx == startchunk) chcpystrt = startoffset;
if (residx == endchunk) chcpyend = endoffset;
if (residx == startchunk)
chcpystrt = startoffset;
if (residx == endchunk)
chcpyend = endoffset;
memcpy(((char *) VARATT_DATA(result)) +
(residx * TOAST_MAX_CHUNK_SIZE - sliceoffset) + chcpystrt,
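
The slice-fetch hunks above map a byte range [sliceoffset, sliceoffset + length) onto fixed-size chunks, clamping the copy bounds only in the first and last chunk. A standalone sketch of just that arithmetic with a worked example, using an assumed CHUNK_SIZE in place of TOAST_MAX_CHUNK_SIZE and assuming all chunks are full-sized:

#include <stdio.h>

#define CHUNK_SIZE 2000         /* assumed stand-in for TOAST_MAX_CHUNK_SIZE */

int
main(void)
{
    int         sliceoffset = 4500;     /* example slice: bytes 4500..6499 */
    int         length = 2000;
    int         startchunk = sliceoffset / CHUNK_SIZE;                  /* 2 */
    int         endchunk = (sliceoffset + length - 1) / CHUNK_SIZE;     /* 3 */
    int         startoffset = sliceoffset % CHUNK_SIZE;                 /* 500 */
    int         endoffset = (sliceoffset + length - 1) % CHUNK_SIZE;    /* 499 */
    int         residx;

    for (residx = startchunk; residx <= endchunk; residx++)
    {
        /* copy the whole chunk, except at the slice boundaries */
        int         chcpystrt = (residx == startchunk) ? startoffset : 0;
        int         chcpyend = (residx == endchunk) ? endoffset : CHUNK_SIZE - 1;

        printf("chunk %d: copy bytes %d..%d into result offset %d\n",
               residx, chcpystrt, chcpyend,
               residx * CHUNK_SIZE - sliceoffset + chcpystrt);
    }
    return 0;
}

With these numbers chunk 2 contributes bytes 500..1999 at result offset 0 and chunk 3 contributes bytes 0..499 at result offset 1500, which is exactly the destination expression in the memcpy above.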

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.35 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.36 2002/09/04 20:31:09 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -201,6 +201,7 @@ systable_beginscan(Relation heapRelation,
/* We assume it's a system index, so index_openr is OK */
sysscan->irel = irel = index_openr(indexRelname);
/*
* Change attribute numbers to be index column numbers.
*

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.61 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.62 2002/09/04 20:31:09 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -272,8 +272,8 @@ index_beginscan(Relation heapRelation,
PointerGetDatum(key)));
/*
* Save additional parameters into the scandesc. Everything else
* was set up by RelationGetIndexScan.
* Save additional parameters into the scandesc. Everything else was
* set up by RelationGetIndexScan.
*/
scan->heapRelation = heapRelation;
scan->xs_snapshot = snapshot;
@@ -409,8 +409,8 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
scan->kill_prior_tuple = false;
/*
* Can skip entering the index AM if we already got a tuple
* and it must be unique.
* Can skip entering the index AM if we already got a tuple and it
* must be unique.
*/
if (scan->keys_are_unique && scan->got_tuple)
return NULL;
@@ -454,9 +454,9 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
* index AM to not return it on future indexscans.
*
* We told heap_fetch to keep a pin on the buffer, so we can
* re-access the tuple here. But we must re-lock the buffer first.
* Also, it's just barely possible for an update of hint bits to
* occur here.
* re-access the tuple here. But we must re-lock the buffer
* first. Also, it's just barely possible for an update of hint
* bits to occur here.
*/
LockBuffer(scan->xs_cbuf, BUFFER_LOCK_SHARE);
sv_infomask = heapTuple->t_data->t_infomask;
@@ -642,10 +642,11 @@ index_getprocinfo(Relation irel,
procId = loc[procindex];
/*
* Complain if function was not found during IndexSupportInitialize.
* This should not happen unless the system tables contain bogus
* entries for the index opclass. (If an AM wants to allow a
* support function to be optional, it can use index_getprocid.)
* Complain if function was not found during
* IndexSupportInitialize. This should not happen unless the
* system tables contain bogus entries for the index opclass. (If
* an AM wants to allow a support function to be optional, it can
* use index_getprocid.)
*/
if (!RegProcedureIsValid(procId))
elog(ERROR, "Missing support function %d for attribute %d of index %s",

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.95 2002/08/06 02:36:33 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.96 2002/09/04 20:31:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,14 +119,14 @@ top:
*
* NOTE: obviously, _bt_check_unique can only detect keys that are
* already in the index; so it cannot defend against concurrent
* insertions of the same key. We protect against that by means
* of holding a write lock on the target page. Any other would-be
* insertions of the same key. We protect against that by means of
* holding a write lock on the target page. Any other would-be
* inserter of the same key must acquire a write lock on the same
* target page, so only one would-be inserter can be making the check
* at one time. Furthermore, once we are past the check we hold
* write locks continuously until we have performed our insertion,
* so no later inserter can fail to see our insertion. (This
* requires some care in _bt_insertonpg.)
* at one time. Furthermore, once we are past the check we hold write
* locks continuously until we have performed our insertion, so no
* later inserter can fail to see our insertion. (This requires some
* care in _bt_insertonpg.)
*
* If we must wait for another xact, we release the lock while waiting,
* and then must start over completely.
@@ -205,15 +205,16 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
if (offset <= maxoff)
{
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's how we
* handling NULLs - and so we must not use _bt_compare in real
* comparison, but only for ordering/finding items on pages. -
* vadim 03/24/97
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
* how we handling NULLs - and so we must not use _bt_compare
* in real comparison, but only for ordering/finding items on
* pages. - vadim 03/24/97
*/
if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */
curitemid = PageGetItemId(page, offset);
/*
* We can skip the heap fetch if the item is marked killed.
*/
@@ -230,6 +231,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
SnapshotDirty->xmin : SnapshotDirty->xmax;
ReleaseBuffer(hbuffer);
/*
* If this tuple is being updated by other transaction
* then we have to wait for its commit/abort.
@@ -252,8 +254,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
{
/*
* Hmm, if we can't see the tuple, maybe it can be
* marked killed. This logic should match index_getnext
* and btgettuple.
* marked killed. This logic should match
* index_getnext and btgettuple.
*/
uint16 sv_infomask;

View File

@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.91 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.92 2002/09/04 20:31:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -315,24 +315,28 @@ btgettuple(PG_FUNCTION_ARGS)
* buffer, too.
*/
_bt_restscan(scan);
/*
* Check to see if we should kill the previously-fetched tuple.
*/
if (scan->kill_prior_tuple)
{
/*
* Yes, so mark it by setting the LP_DELETE bit in the item flags.
* Yes, so mark it by setting the LP_DELETE bit in the item
* flags.
*/
offnum = ItemPointerGetOffsetNumber(&(scan->currentItemData));
page = BufferGetPage(so->btso_curbuf);
PageGetItemId(page, offnum)->lp_flags |= LP_DELETE;
/*
* Since this can be redone later if needed, it's treated the
* same as a commit-hint-bit status update for heap tuples:
* we mark the buffer dirty but don't make a WAL log entry.
* same as a commit-hint-bit status update for heap tuples: we
* mark the buffer dirty but don't make a WAL log entry.
*/
SetBufferCommitInfoNeedsSave(so->btso_curbuf);
}
/*
* Now continue the scan.
*/
@@ -645,15 +649,15 @@ btbulkdelete(PG_FUNCTION_ARGS)
/*
* If this is first deletion on this page, trade in read
* lock for a really-exclusive write lock. Then, step
* back one and re-examine the item, because other backends
* might have inserted item(s) while we weren't holding
* the lock!
* back one and re-examine the item, because other
* backends might have inserted item(s) while we weren't
* holding the lock!
*
* We assume that only concurrent insertions, not deletions,
* can occur while we're not holding the page lock (the caller
* should hold a suitable relation lock to ensure this).
* Therefore, the item we want to delete is either in the
* same slot as before, or some slot to its right.
* can occur while we're not holding the page lock (the
* caller should hold a suitable relation lock to ensure
* this). Therefore, the item we want to delete is either
* in the same slot as before, or some slot to its right.
* Rechecking the same slot is necessary and sufficient to
* get back in sync after any insertions.
*/
@@ -675,19 +679,19 @@ btbulkdelete(PG_FUNCTION_ARGS)
}
/*
* In either case, we now need to back up the scan one item,
* so that the next cycle will re-examine the same offnum on
* this page.
* In either case, we now need to back up the scan one
* item, so that the next cycle will re-examine the same
* offnum on this page.
*
* For now, just hack the current-item index. Will need to
* be smarter when deletion includes removal of empty
* index pages.
*
* We must decrement ip_posid in all cases but one: if the
* page was formerly rightmost but was split while we didn't
* hold the lock, and ip_posid is pointing to item 1, then
* ip_posid now points at the high key not a valid data item.
* In this case we do want to step forward.
* page was formerly rightmost but was split while we
* didn't hold the lock, and ip_posid is pointing to item
* 1, then ip_posid now points at the high key not a valid
* data item. In this case we do want to step forward.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (current->ip_posid >= P_FIRSTDATAKEY(opaque))

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.50 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.51 2002/09/04 20:31:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -439,8 +439,8 @@ _bt_orderkeys(IndexScanDesc scan)
so->numberOfKeys = new_numberOfKeys;
/*
* If unique index and we have equality keys for all columns,
* set keys_are_unique flag for higher levels.
* If unique index and we have equality keys for all columns, set
* keys_are_unique flag for higher levels.
*/
if (allEqualSoFar && relation->rd_index->indisunique &&
relation->rd_rel->relnatts == new_numberOfKeys)

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.27 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.28 2002/09/04 20:31:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.74 2002/06/25 17:26:11 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.75 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -223,6 +223,7 @@ rtinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
bool checkUnique = PG_GETARG_BOOL(5);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.53 2002/06/20 20:29:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.54 2002/09/04 20:31:13 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the

View File

@@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.50 2002/06/11 13:40:50 wieck Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.51 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,12 +68,12 @@ GetNewTransactionId(void)
* might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit (and in fact could cause a
* deadlock against GetSnapshotData). So for now, assume atomicity.
* Note that readers of PGPROC xid field should be careful to fetch the
* value only once, rather than assume they can read it multiple times
* and get the same answer each time.
* Note that readers of PGPROC xid field should be careful to fetch
* the value only once, rather than assume they can read it multiple
* times and get the same answer each time.
*
* A solution to the atomic-store problem would be to give each PGPROC its
* own spinlock used only for fetching/storing that PGPROC's xid.
* A solution to the atomic-store problem would be to give each PGPROC
* its own spinlock used only for fetching/storing that PGPROC's xid.
* (SInvalLock would then mean primarily that PROCs couldn't be added/
* removed while holding the lock.)
*/
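
The rewrapped comment warns readers of the PGPROC xid field to fetch the value only once. In C that is usually done by reading the shared field through a volatile-qualified pointer into a local and testing only the local copy. A hedged idiom sketch — the struct and field here are illustrative stand-ins, not the actual PGPROC declaration:

typedef unsigned int TxnId;

typedef struct SharedProc
{
    TxnId       xid;            /* field written concurrently by its owner */
} SharedProc;

static int
xid_is_running(volatile SharedProc *proc)
{
    TxnId       xid = proc->xid;    /* single fetch of the shared value */

    /* all later tests use the local snapshot, never proc->xid again */
    return xid != 0;
}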

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.131 2002/08/30 22:18:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.132 2002/09/04 20:31:13 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -265,7 +265,6 @@ SetTransactionFlushEnabled(bool state)
{
TransactionFlushState = (state == true);
}
#endif
@@ -517,8 +516,8 @@ void
RecordTransactionCommit(void)
{
/*
* If we made neither any XLOG entries nor any temp-rel updates,
* we can omit recording the transaction commit at all.
* If we made neither any XLOG entries nor any temp-rel updates, we
* can omit recording the transaction commit at all.
*/
if (MyXactMadeXLogEntry || MyXactMadeTempRelUpdate)
{
@@ -531,10 +530,10 @@ RecordTransactionCommit(void)
START_CRIT_SECTION();
/*
* We only need to log the commit in xlog if the transaction made any
* transaction-controlled XLOG entries. (Otherwise, its XID appears
* nowhere in permanent storage, so no one else will ever care if it
* committed.)
* We only need to log the commit in xlog if the transaction made
* any transaction-controlled XLOG entries. (Otherwise, its XID
* appears nowhere in permanent storage, so no one else will ever
* care if it committed.)
*/
if (MyLastRecPtr.xrecoff != 0)
{
@@ -560,20 +559,20 @@ RecordTransactionCommit(void)
}
/*
* We must flush our XLOG entries to disk if we made any XLOG entries,
* whether in or out of transaction control. For example, if we
* reported a nextval() result to the client, this ensures that any
* XLOG record generated by nextval will hit the disk before we report
* the transaction committed.
* We must flush our XLOG entries to disk if we made any XLOG
* entries, whether in or out of transaction control. For
* example, if we reported a nextval() result to the client, this
* ensures that any XLOG record generated by nextval will hit the
* disk before we report the transaction committed.
*/
if (MyXactMadeXLogEntry)
{
/*
* Sleep before flush! So we can flush more than one commit
* records per single fsync. (The idea is some other backend may
* do the XLogFlush while we're sleeping. This needs work still,
* because on most Unixen, the minimum select() delay is 10msec or
* more, which is way too long.)
* records per single fsync. (The idea is some other backend
* may do the XLogFlush while we're sleeping. This needs work
* still, because on most Unixen, the minimum select() delay
* is 10msec or more, which is way too long.)
*
* We do not sleep if enableFsync is not turned on, nor if there
* are fewer than CommitSiblings other backends with active
@@ -593,13 +592,14 @@ RecordTransactionCommit(void)
}
/*
* We must mark the transaction committed in clog if its XID appears
* either in permanent rels or in local temporary rels. We test
* this by seeing if we made transaction-controlled entries *OR*
* local-rel tuple updates. Note that if we made only the latter,
* we have not emitted an XLOG record for our commit, and so in the
* event of a crash the clog update might be lost. This is okay
* because no one else will ever care whether we committed.
* We must mark the transaction committed in clog if its XID
* appears either in permanent rels or in local temporary rels.
* We test this by seeing if we made transaction-controlled
* entries *OR* local-rel tuple updates. Note that if we made
* only the latter, we have not emitted an XLOG record for our
* commit, and so in the event of a crash the clog update might be
* lost. This is okay because no one else will ever care whether
* we committed.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
TransactionIdCommit(xid);
@@ -628,6 +628,7 @@ AtCommit_Cache(void)
* Clean up the relation cache.
*/
AtEOXact_RelationCache(true);
/*
* Make catalog changes visible to all backends.
*/
@@ -698,8 +699,8 @@ RecordTransactionAbort(void)
{
/*
* If we made neither any transaction-controlled XLOG entries nor any
* temp-rel updates, we can omit recording the transaction abort at all.
* No one will ever care that it aborted.
* temp-rel updates, we can omit recording the transaction abort at
* all. No one will ever care that it aborted.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
{
@@ -716,11 +717,12 @@ RecordTransactionAbort(void)
START_CRIT_SECTION();
/*
* We only need to log the abort in XLOG if the transaction made any
* transaction-controlled XLOG entries. (Otherwise, its XID appears
* nowhere in permanent storage, so no one else will ever care if it
* committed.) We do not flush XLOG to disk in any case, since the
* default assumption after a crash would be that we aborted, anyway.
* We only need to log the abort in XLOG if the transaction made
* any transaction-controlled XLOG entries. (Otherwise, its XID
* appears nowhere in permanent storage, so no one else will ever
* care if it committed.) We do not flush XLOG to disk in any
* case, since the default assumption after a crash would be that
* we aborted, anyway.
*/
if (MyLastRecPtr.xrecoff != 0)
{
@@ -1165,8 +1167,8 @@ StartTransactionCommand(bool preventChain)
TransactionState s = CurrentTransactionState;
/*
* Remember if caller wants to prevent autocommit-off chaining.
* This is only allowed if not already in a transaction block.
* Remember if caller wants to prevent autocommit-off chaining. This
* is only allowed if not already in a transaction block.
*/
suppressChain = preventChain;
if (preventChain && s->blockState != TBLOCK_DEFAULT)
@@ -1260,16 +1262,18 @@ CommitTransactionCommand(bool forceCommit)
{
/*
* If we aren't in a transaction block, and we are doing
* autocommit, just do our usual transaction commit. But
* if we aren't doing autocommit, start a transaction block
* autocommit, just do our usual transaction commit. But if
* we aren't doing autocommit, start a transaction block
* automatically by switching to INPROGRESS state. (We handle
* this choice here, and not earlier, so that an explicit BEGIN
* issued in autocommit-off mode won't issue strange warnings.)
* this choice here, and not earlier, so that an explicit
* BEGIN issued in autocommit-off mode won't issue strange
* warnings.)
*
* Autocommit mode is forced by either a true forceCommit parameter
* to me, or a true preventChain parameter to the preceding
* StartTransactionCommand call. This is needed so that commands
* like VACUUM can ensure that the right things happen.
* Autocommit mode is forced by either a true forceCommit
* parameter to me, or a true preventChain parameter to the
* preceding StartTransactionCommand call. This is needed so
* that commands like VACUUM can ensure that the right things
* happen.
*/
case TBLOCK_DEFAULT:
if (autocommit || forceCommit || suppressChain)
@@ -1442,9 +1446,9 @@ BeginTransactionBlock(void)
s->blockState = TBLOCK_BEGIN;
/*
* do begin processing. NOTE: if you put anything here, check that
* it behaves properly in both autocommit-on and autocommit-off modes.
* In the latter case we will already have done some work in the new
* do begin processing. NOTE: if you put anything here, check that it
* behaves properly in both autocommit-on and autocommit-off modes. In
* the latter case we will already have done some work in the new
* transaction.
*/

View File

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.105 2002/09/02 02:47:01 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.106 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1283,26 +1283,27 @@ XLogFlush(XLogRecPtr record)
/*
* If we still haven't flushed to the request point then we have a
* problem; most likely, the requested flush point is past end of XLOG.
* This has been seen to occur when a disk page has a corrupted LSN.
* problem; most likely, the requested flush point is past end of
* XLOG. This has been seen to occur when a disk page has a corrupted
* LSN.
*
* Formerly we treated this as a PANIC condition, but that hurts the
* system's robustness rather than helping it: we do not want to take
* down the whole system due to corruption on one data page. In
* particular, if the bad page is encountered again during recovery then
* we would be unable to restart the database at all! (This scenario
* has actually happened in the field several times with 7.1 releases.
* Note that we cannot get here while InRedo is true, but if the bad
* page is brought in and marked dirty during recovery then
* particular, if the bad page is encountered again during recovery
* then we would be unable to restart the database at all! (This
* scenario has actually happened in the field several times with 7.1
* releases. Note that we cannot get here while InRedo is true, but if
* the bad page is brought in and marked dirty during recovery then
* CreateCheckpoint will try to flush it at the end of recovery.)
*
* The current approach is to ERROR under normal conditions, but only
* WARNING during recovery, so that the system can be brought up even if
* there's a corrupt LSN. Note that for calls from xact.c, the ERROR
* will be promoted to PANIC since xact.c calls this routine inside a
* critical section. However, calls from bufmgr.c are not within
* critical sections and so we will not force a restart for a bad LSN
* on a data page.
* WARNING during recovery, so that the system can be brought up even
* if there's a corrupt LSN. Note that for calls from xact.c, the
* ERROR will be promoted to PANIC since xact.c calls this routine
* inside a critical section. However, calls from bufmgr.c are not
* within critical sections and so we will not force a restart for a
* bad LSN on a data page.
*/
if (XLByteLT(LogwrtResult.Flush, record))
elog(InRecovery ? WARNING : ERROR,
@@ -1618,8 +1619,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
/*
* Before deleting the file, see if it can be recycled as
* a future log segment. We allow recycling segments up
* to XLOGfileslop segments beyond the current
* XLOG location.
* to XLOGfileslop segments beyond the current XLOG
* location.
*/
if (InstallXLogFileSegment(endlogId, endlogSeg, path,
true, XLOGfileslop,
@@ -3019,19 +3020,19 @@ CreateCheckPoint(bool shutdown)
}
/*
* Get UNDO record ptr - this is oldest of PGPROC->logRec values. We do
* this while holding insert lock to ensure that we won't miss any
* Get UNDO record ptr - this is oldest of PGPROC->logRec values. We
* do this while holding insert lock to ensure that we won't miss any
* about-to-commit transactions (UNDO must include all xacts that have
* commits after REDO point).
*
* XXX temporarily ifdef'd out to avoid three-way deadlock condition:
* GetUndoRecPtr needs to grab SInvalLock to ensure that it is looking
* at a stable set of proc records, but grabbing SInvalLock while holding
* WALInsertLock is no good. GetNewTransactionId may cause a WAL record
* to be written while holding XidGenLock, and GetSnapshotData needs to
* get XidGenLock while holding SInvalLock, so there's a risk of deadlock.
* Need to find a better solution. See pgsql-hackers discussion of
* 17-Dec-01.
* at a stable set of proc records, but grabbing SInvalLock while
* holding WALInsertLock is no good. GetNewTransactionId may cause a
* WAL record to be written while holding XidGenLock, and
* GetSnapshotData needs to get XidGenLock while holding SInvalLock,
* so there's a risk of deadlock. Need to find a better solution. See
* pgsql-hackers discussion of 17-Dec-01.
*/
#ifdef NOT_USED
checkPoint.undo = GetUndoRecPtr();
@@ -3298,9 +3299,7 @@ assign_xlog_sync_method(const char *method, bool doit, bool interactive)
}
#endif
else
{
return NULL;
}
if (!doit)
return method;

View File

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.140 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.141 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -235,9 +235,7 @@ BootstrapMain(int argc, char *argv[])
* If we are running under the postmaster, this is done already.
*/
if (!IsUnderPostmaster)
{
MemoryContextInit();
}
/*
* process command arguments
@@ -263,6 +261,7 @@ BootstrapMain(int argc, char *argv[])
{
/* Turn on debugging for the bootstrap process. */
char *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);
sprintf(debugstr, "debug%s", optarg);
SetConfigOption("server_min_messages", debugstr,
PGC_POSTMASTER, PGC_S_ARGV);
@@ -391,7 +390,8 @@ BootstrapMain(int argc, char *argv[])
InitDummyProcess(); /* needed to get LWLocks */
CreateDummyCaches();
CreateCheckPoint(false);
SetSavedRedoRecPtr(); /* pass redo ptr back to postmaster */
SetSavedRedoRecPtr(); /* pass redo ptr back to
* postmaster */
proc_exit(0); /* done */
case BS_XLOG_STARTUP:
@@ -640,10 +640,11 @@ DefineAttr(char *name, char *type, int attnum)
}
attrtypes[attnum]->attcacheoff = -1;
attrtypes[attnum]->atttypmod = -1;
/*
* Mark as "not null" if type is fixed-width and prior columns are too.
* This corresponds to case where column can be accessed directly via
* C struct declaration.
* Mark as "not null" if type is fixed-width and prior columns are
* too. This corresponds to case where column can be accessed directly
* via C struct declaration.
*/
if (attlen > 0)
{

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.76 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.77 2002/09/04 20:31:13 momjian Exp $
*
* NOTES
* See acl.h.
@@ -97,16 +97,19 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
if (grantee->username)
{
aclitem. ai_id = get_usesysid(grantee->username);
idtype = ACL_IDTYPE_UID;
}
else if (grantee->groupname)
{
aclitem. ai_id = get_grosysid(grantee->groupname);
idtype = ACL_IDTYPE_GID;
}
else
{
aclitem. ai_id = ACL_ID_WORLD;
idtype = ACL_IDTYPE_WORLD;
}
@@ -569,8 +572,8 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
aclcheck_error(ACLCHECK_NOT_OWNER, nspname);
/*
* If there's no ACL, create a default using the pg_namespace.nspowner
* field.
* If there's no ACL, create a default using the
* pg_namespace.nspowner field.
*/
aclDatum = SysCacheGetAttr(NAMESPACENAME, tuple,
Anum_pg_namespace_nspacl,
@@ -1163,8 +1166,8 @@ pg_namespace_aclcheck(Oid nsp_oid, Oid userid, AclMode mode)
Acl *acl;
/*
* If we have been assigned this namespace as a temp namespace,
* assume we have all grantable privileges on it.
* If we have been assigned this namespace as a temp namespace, assume
* we have all grantable privileges on it.
*/
if (isTempNamespace(nsp_oid))
return ACLCHECK_OK;

View File

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.47 2002/06/20 20:29:26 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.48 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.8 2002/08/02 18:15:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.9 2002/09/04 20:31:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -135,8 +135,8 @@ performDeletion(const ObjectAddress *object,
Relation depRel;
/*
* Get object description for possible use in failure message.
* Must do this before deleting it ...
* Get object description for possible use in failure message. Must do
* this before deleting it ...
*/
objDescription = getObjectDescription(object);
@@ -231,8 +231,8 @@ recursiveDeletion(const ObjectAddress *object,
* ensures that we avoid infinite recursion in the case of cycles.
* Also, some dependency types require extra processing here.
*
* When dropping a whole object (subId = 0), remove all pg_depend
* records for its sub-objects too.
* When dropping a whole object (subId = 0), remove all pg_depend records
* for its sub-objects too.
*/
ScanKeyEntryInitialize(&key[0], 0x0,
Anum_pg_depend_classid, F_OIDEQ,
@@ -268,9 +268,10 @@ recursiveDeletion(const ObjectAddress *object,
/* no problem */
break;
case DEPENDENCY_INTERNAL:
/*
* This object is part of the internal implementation
* of another object. We have three cases:
* This object is part of the internal implementation of
* another object. We have three cases:
*
* 1. At the outermost recursion level, disallow the DROP.
* (We just elog here, rather than considering this drop
@@ -285,24 +286,26 @@ recursiveDeletion(const ObjectAddress *object,
"\n\tYou may drop %s instead",
objDescription, otherObjDesc, otherObjDesc);
}
/*
* 2. When recursing from the other end of this dependency,
* it's okay to continue with the deletion. This holds when
* recursing from a whole object that includes the nominal
* other end as a component, too.
* 2. When recursing from the other end of this
* dependency, it's okay to continue with the deletion.
* This holds when recursing from a whole object that
* includes the nominal other end as a component, too.
*/
if (callingObject->classId == otherObject.classId &&
callingObject->objectId == otherObject.objectId &&
(callingObject->objectSubId == otherObject.objectSubId ||
callingObject->objectSubId == 0))
break;
/*
* 3. When recursing from anyplace else, transform this
* deletion request into a delete of the other object.
* (This will be an error condition iff RESTRICT mode.)
* In this case we finish deleting my dependencies except
* for the INTERNAL link, which will be needed to cause
* the owning object to recurse back to me.
* (This will be an error condition iff RESTRICT mode.) In
* this case we finish deleting my dependencies except for
* the INTERNAL link, which will be needed to cause the
* owning object to recurse back to me.
*/
if (amOwned) /* shouldn't happen */
elog(ERROR, "recursiveDeletion: multiple INTERNAL dependencies for %s",
@@ -312,6 +315,7 @@ recursiveDeletion(const ObjectAddress *object,
/* "continue" bypasses the simple_heap_delete call below */
continue;
case DEPENDENCY_PIN:
/*
* Should not happen; PIN dependencies should have zeroes
* in the depender fields...
@@ -331,10 +335,10 @@ recursiveDeletion(const ObjectAddress *object,
systable_endscan(scan);
/*
* CommandCounterIncrement here to ensure that preceding changes
* are all visible; in particular, that the above deletions of pg_depend
* entries are visible. That prevents infinite recursion in case of
* a dependency loop (which is perfectly legal).
* CommandCounterIncrement here to ensure that preceding changes are
* all visible; in particular, that the above deletions of pg_depend
* entries are visible. That prevents infinite recursion in case of a
* dependency loop (which is perfectly legal).
*/
CommandCounterIncrement();
@@ -368,21 +372,21 @@ recursiveDeletion(const ObjectAddress *object,
/*
* Step 2: scan pg_depend records that link to this object, showing
* the things that depend on it. Recursively delete those things.
* (We don't delete the pg_depend records here, as the recursive call
* will do that.) Note it's important to delete the dependent objects
* the things that depend on it. Recursively delete those things. (We
* don't delete the pg_depend records here, as the recursive call will
* do that.) Note it's important to delete the dependent objects
* before the referenced one, since the deletion routines might do
* things like try to update the pg_class record when deleting a
* check constraint.
* things like try to update the pg_class record when deleting a check
* constraint.
*
* Again, when dropping a whole object (subId = 0), find pg_depend
* records for its sub-objects too.
*
* NOTE: because we are using SnapshotNow, if a recursive call deletes
* any pg_depend tuples that our scan hasn't yet visited, we will not see
* them as good when we do visit them. This is essential for correct
* behavior if there are multiple dependency paths between two objects
* --- else we might try to delete an already-deleted object.
* any pg_depend tuples that our scan hasn't yet visited, we will not
* see them as good when we do visit them. This is essential for
* correct behavior if there are multiple dependency paths between two
* objects --- else we might try to delete an already-deleted object.
*/
ScanKeyEntryInitialize(&key[0], 0x0,
Anum_pg_depend_refclassid, F_OIDEQ,
@@ -418,9 +422,9 @@ recursiveDeletion(const ObjectAddress *object,
{
/*
* We've found a restricted object (or at least one
* that's not deletable along this path). Log for later
* processing. (Note it's okay if the same object gets
* into mypending multiple times.)
* that's not deletable along this path). Log for
* later processing. (Note it's okay if the same
* object gets into mypending multiple times.)
*/
add_exact_object_address(&otherObject, &mypending);
}
@@ -437,6 +441,7 @@ recursiveDeletion(const ObjectAddress *object,
break;
case DEPENDENCY_AUTO:
case DEPENDENCY_INTERNAL:
/*
* We propagate the DROP without complaint even in the
* RESTRICT case. (However, normal dependencies on the
@@ -451,6 +456,7 @@ recursiveDeletion(const ObjectAddress *object,
ok = false;
break;
case DEPENDENCY_PIN:
/*
* For a PIN dependency we just elog immediately; there
* won't be any others to report.
@@ -469,14 +475,14 @@ recursiveDeletion(const ObjectAddress *object,
/*
* If we found no restricted objects, or got rid of them all via other
* paths, we're in good shape. Otherwise continue step 2 by processing
* the remaining restricted objects.
* paths, we're in good shape. Otherwise continue step 2 by
* processing the remaining restricted objects.
*/
if (mypending.numrefs > 0)
{
/*
* Successively extract and delete each remaining object.
* Note that the right things will happen if some of these objects
* Successively extract and delete each remaining object. Note
* that the right things will happen if some of these objects
* depend on others: we'll report/delete each one exactly once.
*/
while (mypending.numrefs > 0)
@@ -508,19 +514,21 @@ recursiveDeletion(const ObjectAddress *object,
doDeletion(object);
/*
* Delete any comments associated with this object. (This is a convenient
* place to do it instead of having every object type know to do it.)
* Delete any comments associated with this object. (This is a
* convenient place to do it instead of having every object type know
* to do it.)
*/
DeleteComments(object->objectId, object->classId, object->objectSubId);
/*
* If this object is mentioned in any caller's pending list, remove it.
* If this object is mentioned in any caller's pending list, remove
* it.
*/
del_object_address(object, pending);
/*
* CommandCounterIncrement here to ensure that preceding changes
* are all visible.
* CommandCounterIncrement here to ensure that preceding changes are
* all visible.
*/
CommandCounterIncrement();
@@ -755,8 +763,8 @@ find_expr_references_walker(Node *node,
bool result;
/*
* Add whole-relation refs for each plain relation mentioned in the
* subquery's rtable. (Note: query_tree_walker takes care of
* Add whole-relation refs for each plain relation mentioned in
* the subquery's rtable. (Note: query_tree_walker takes care of
* recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need
* to do that here.)
*/
@@ -810,6 +818,7 @@ eliminate_duplicate_dependencies(ObjectAddresses *addrs)
{
if (priorobj->objectSubId == thisobj->objectSubId)
continue; /* identical, so drop thisobj */
/*
* If we have a whole-object reference and a reference to a
* part of the same object, we don't need the whole-object
@@ -852,9 +861,10 @@ object_address_comparator(const void *a, const void *b)
return -1;
if (obja->objectId > objb->objectId)
return 1;
/*
* We sort the subId as an unsigned int so that 0 will come first.
* See logic in eliminate_duplicate_dependencies.
* We sort the subId as an unsigned int so that 0 will come first. See
* logic in eliminate_duplicate_dependencies.
*/
if ((unsigned int) obja->objectSubId < (unsigned int) objb->objectSubId)
return -1;
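
The comparator hunk above orders object addresses by (classId, objectId, subId), comparing subId as unsigned so that 0 (the whole-object reference) sorts ahead of its sub-objects for the duplicate-elimination pass. A compact standalone qsort comparator in the same three-key shape; the struct layout is an assumed illustration, not the backend's ObjectAddress:

#include <stdlib.h>

typedef struct ObjAddr
{
    unsigned int classId;
    unsigned int objectId;
    int         objectSubId;    /* 0 means "whole object" */
} ObjAddr;

static int
objaddr_cmp(const void *a, const void *b)
{
    const ObjAddr *x = (const ObjAddr *) a;
    const ObjAddr *y = (const ObjAddr *) b;

    if (x->classId != y->classId)
        return (x->classId < y->classId) ? -1 : 1;
    if (x->objectId != y->objectId)
        return (x->objectId < y->objectId) ? -1 : 1;
    /* compare subId as unsigned so that 0 (whole object) sorts first */
    if ((unsigned int) x->objectSubId != (unsigned int) y->objectSubId)
        return ((unsigned int) x->objectSubId <
                (unsigned int) y->objectSubId) ? -1 : 1;
    return 0;
}

/* usage: qsort(addrs, naddrs, sizeof(ObjAddr), objaddr_cmp); */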

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.224 2002/09/02 01:05:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.225 2002/09/04 20:31:13 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -256,22 +256,14 @@ heap_create(const char *relname,
relid = RelOid_pg_class;
}
else if (strcmp(ShadowRelationName, relname) == 0)
{
relid = RelOid_pg_shadow;
}
else if (strcmp(GroupRelationName, relname) == 0)
{
relid = RelOid_pg_group;
}
else if (strcmp(DatabaseRelationName, relname) == 0)
{
relid = RelOid_pg_database;
}
else
{
relid = newoid();
}
}
else
relid = newoid();
@@ -293,7 +285,8 @@ heap_create(const char *relname,
nailme);
/*
* have the storage manager create the relation's disk file, if wanted.
* have the storage manager create the relation's disk file, if
* wanted.
*/
if (storage_create)
heap_storage_create(rel);
@@ -475,7 +468,8 @@ AddNewAttributeTuples(Oid new_rel_oid,
/*
* Next we add the system attributes. Skip OID if rel has no OIDs.
* Skip all for a view or type relation. We don't bother with making
* datatype dependencies here, since presumably all these types are pinned.
* datatype dependencies here, since presumably all these types are
* pinned.
*/
if (relkind != RELKIND_VIEW && relkind != RELKIND_COMPOSITE_TYPE)
{
@@ -628,13 +622,13 @@ AddNewRelationType(const char *typeName,
*
* OLD and probably obsolete comments:
*
* The sizes are set to oid size because it makes implementing sets
* MUCH easier, and no one (we hope) uses these fields to figure out
* how much space to allocate for the type. An oid is the type used
* for a set definition. When a user requests a set, what they
* actually get is the oid of a tuple in the pg_proc catalog, so the
* size of the "set" is the size of an oid. Similarly, byval being
* true makes sets much easier, and it isn't used by anything else.
* The sizes are set to oid size because it makes implementing sets MUCH
* easier, and no one (we hope) uses these fields to figure out how
* much space to allocate for the type. An oid is the type used for a
* set definition. When a user requests a set, what they actually get
* is the oid of a tuple in the pg_proc catalog, so the size of the
* "set" is the size of an oid. Similarly, byval being true makes sets
* much easier, and it isn't used by anything else.
*/
TypeCreate(typeName, /* type name */
typeNamespace, /* type namespace */
@@ -746,9 +740,9 @@ heap_create_with_catalog(const char *relname,
AddNewAttributeTuples(new_rel_oid, new_rel_desc->rd_att, relkind);
/*
* make a dependency link to force the relation to be deleted if
* its namespace is. Skip this in bootstrap mode, since we don't
* make dependencies while bootstrapping.
* make a dependency link to force the relation to be deleted if its
* namespace is. Skip this in bootstrap mode, since we don't make
* dependencies while bootstrapping.
*/
if (!IsBootstrapProcessingMode())
{
@@ -768,8 +762,9 @@ heap_create_with_catalog(const char *relname,
* store constraints and defaults passed in the tupdesc, if any.
*
* NB: this may do a CommandCounterIncrement and rebuild the relcache
* entry, so the relation must be valid and self-consistent at this point.
* In particular, there are not yet constraints and defaults anywhere.
* entry, so the relation must be valid and self-consistent at this
* point. In particular, there are not yet constraints and defaults
* anywhere.
*/
StoreConstraints(new_rel_desc, tupdesc);
@@ -811,9 +806,7 @@ RelationRemoveInheritance(Relation relation)
SnapshotNow, 1, &key);
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
simple_heap_delete(catalogRelation, &tuple->t_self);
}
systable_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
@@ -880,9 +873,7 @@ DeleteAttributeTuples(Oid relid)
/* Delete all the matching tuples */
while ((atttup = systable_getnext(scan)) != NULL)
{
simple_heap_delete(attrel, &atttup->t_self);
}
/* Clean up after the scan */
systable_endscan(scan);
@@ -907,10 +898,10 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
/*
* Grab an exclusive lock on the target table, which we will NOT
* release until end of transaction. (In the simple case where
* we are directly dropping this column, AlterTableDropColumn already
* did this ... but when cascading from a drop of some other object,
* we may not have any lock.)
* release until end of transaction. (In the simple case where we are
* directly dropping this column, AlterTableDropColumn already did
* this ... but when cascading from a drop of some other object, we
* may not have any lock.)
*/
rel = relation_open(relid, AccessExclusiveLock);
@@ -1366,8 +1357,8 @@ StoreConstraints(Relation rel, TupleDesc tupdesc)
/*
* Deparsing of constraint expressions will fail unless the
* just-created pg_attribute tuples for this relation are made
* visible. So, bump the command counter. CAUTION: this will
* cause a relcache entry rebuild.
* visible. So, bump the command counter. CAUTION: this will cause a
* relcache entry rebuild.
*/
CommandCounterIncrement();
@@ -1513,12 +1504,14 @@ AddRelationRawConstraints(Relation rel,
List *listptr2;
/*
* Generate a name that does not conflict with pre-existing
* constraints, nor with any auto-generated names so far.
* Generate a name that does not conflict with
* pre-existing constraints, nor with any auto-generated
* names so far.
*/
ccname = GenerateConstraintName(RelationGetRelid(rel),
RelationGetNamespace(rel),
&constr_name_ctr);
/*
* Check against other new constraints, in case the user
* has specified a name that looks like an auto-generated
@@ -1699,14 +1692,14 @@ cookDefault(ParseState *pstate,
/*
* Check that it will be possible to coerce the expression to the
* column's type. We store the expression without coercion,
* however, to avoid premature coercion in cases like
* column's type. We store the expression without coercion, however,
* to avoid premature coercion in cases like
*
* CREATE TABLE tbl (fld timestamp DEFAULT 'now'::text);
*
* NB: this should match the code in optimizer/prep/preptlist.c that
* will actually do the coercion, to ensure we don't accept an
* unusable default expression.
* NB: this should match the code in optimizer/prep/preptlist.c that will
* actually do the coercion, to ensure we don't accept an unusable
* default expression.
*/
if (OidIsValid(atttypid))
{
@@ -1815,9 +1808,7 @@ RemoveStatistics(Relation rel)
SnapshotNow, 1, &key);
while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
simple_heap_delete(pgstatistic, &tuple->t_self);
}
systable_endscan(scan);
heap_close(pgstatistic, RowExclusiveLock);

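The AddRelationRawConstraints hunk above threads a counter (constr_name_ctr) through constraint-name generation so that a generated name conflicts neither with constraints already on the relation nor with names generated earlier in the same statement. A rough standalone sketch of that probing loop follows; the "$N" naming scheme and the name_in_use() helper are assumptions made for illustration only.

#include <stdio.h>
#include <string.h>

/* stand-in for "does this name already exist on the relation?";
 * hypothetical helper, not a backend function */
static int
name_in_use(const char *name, const char *existing[], int nexisting)
{
	int			i;

	for (i = 0; i < nexisting; i++)
		if (strcmp(name, existing[i]) == 0)
			return 1;
	return 0;
}

/* generate the next free "$N"-style name, advancing the caller's counter so
 * later constraints in the same command keep probing from where we stopped */
static void
generate_constraint_name(char *buf, size_t buflen,
						 const char *existing[], int nexisting,
						 int *counter)
{
	do
	{
		(*counter)++;
		snprintf(buf, buflen, "$%d", *counter);
	} while (name_in_use(buf, existing, nexisting));
}

int
main(void)
{
	const char *existing[] = {"$1", "mytable_pkey"};	/* pre-existing names */
	char		name[32];
	int			counter = 0;

	generate_constraint_name(name, sizeof(name), existing, 2, &counter);
	printf("first generated name:  %s\n", name);	/* $2 */
	generate_constraint_name(name, sizeof(name), existing, 2, &counter);
	printf("second generated name: %s\n", name);	/* $3 */
	return 0;
}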

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.195 2002/09/03 16:00:02 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.196 2002/09/04 20:31:14 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -325,8 +325,8 @@ UpdateRelationRelation(Relation indexRelation)
(void *) indexRelation->rd_rel);
/*
* the new tuple must have the oid already chosen for the index.
* sure would be embarrassing to do this sort of thing in polite company.
* the new tuple must have the oid already chosen for the index. sure
* would be embarrassing to do this sort of thing in polite company.
*/
HeapTupleSetOid(tuple, RelationGetRelid(indexRelation));
simple_heap_insert(pg_class, tuple);
@@ -532,8 +532,8 @@ index_create(Oid heapRelationId,
heapRelation = heap_open(heapRelationId, ShareLock);
/*
* The index will be in the same namespace as its parent table,
* and is shared across databases if and only if the parent is.
* The index will be in the same namespace as its parent table, and is
* shared across databases if and only if the parent is.
*/
namespaceId = RelationGetNamespace(heapRelation);
shared_relation = heapRelation->rd_rel->relisshared;
@@ -577,6 +577,7 @@ index_create(Oid heapRelationId,
classObjectId);
indexTupDesc->tdhasoid = false;
/*
* create the index relation's relcache entry and physical disk file.
* (If we fail further down, it's the smgr's responsibility to remove
@@ -643,15 +644,15 @@ index_create(Oid heapRelationId,
* Register constraint and dependencies for the index.
*
* If the index is from a CONSTRAINT clause, construct a pg_constraint
* entry. The index is then linked to the constraint, which in turn is
* linked to the table. If it's not a CONSTRAINT, make the dependency
* directly on the table.
* entry. The index is then linked to the constraint, which in turn
* is linked to the table. If it's not a CONSTRAINT, make the
* dependency directly on the table.
*
* We don't need a dependency on the namespace, because there'll be
* an indirect dependency via our parent table.
* We don't need a dependency on the namespace, because there'll be an
* indirect dependency via our parent table.
*
* During bootstrap we can't register any dependencies, and we don't
* try to make a constraint either.
* During bootstrap we can't register any dependencies, and we don't try
* to make a constraint either.
*/
if (!IsBootstrapProcessingMode())
{
@@ -807,6 +808,7 @@ index_drop(Oid indexId)
* fix RELATION relation
*/
DeleteRelationTuple(indexId);
/*
* fix ATTRIBUTE relation
*/
@@ -839,11 +841,12 @@ index_drop(Oid indexId)
smgrunlink(DEFAULT_SMGR, userIndexRelation);
/*
* We are presently too lazy to attempt to compute the new correct value
* of relhasindex (the next VACUUM will fix it if necessary). So there is
* no need to update the pg_class tuple for the owning relation.
* But we must send out a shared-cache-inval notice on the owning relation
* to ensure other backends update their relcache lists of indexes.
* We are presently too lazy to attempt to compute the new correct
* value of relhasindex (the next VACUUM will fix it if necessary).
* So there is no need to update the pg_class tuple for the owning
* relation. But we must send out a shared-cache-inval notice on the
* owning relation to ensure other backends update their relcache
* lists of indexes.
*/
CacheInvalidateRelcache(heapId);
@@ -1798,12 +1801,12 @@ reindex_index(Oid indexId, bool force, bool inplace)
/*
* Open our index relation and get an exclusive lock on it.
*
* Note: doing this before opening the parent heap relation means
* there's a possibility for deadlock failure against another xact
* that is doing normal accesses to the heap and index. However,
* it's not real clear why you'd be needing to do REINDEX on a table
* that's in active use, so I'd rather have the protection of making
* sure the index is locked down.
* Note: doing this before opening the parent heap relation means there's
* a possibility for deadlock failure against another xact that is
* doing normal accesses to the heap and index. However, it's not
* real clear why you'd be needing to do REINDEX on a table that's in
* active use, so I'd rather have the protection of making sure the
* index is locked down.
*/
iRel = index_open(indexId);
if (iRel == NULL)


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.101 2002/08/06 02:36:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.102 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/


@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.34 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.35 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -312,10 +312,10 @@ RelationIsVisible(Oid relid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another relation of the same name earlier in the path.
* So we must do a slow check to see if this rel would be found by
* RelnameGetRelid.
* If it is in the path, it might still not be visible; it could
* be hidden by another relation of the same name earlier in the
* path. So we must do a slow check to see if this rel would be
* found by RelnameGetRelid.
*/
char *relname = NameStr(relform->relname);
@@ -394,10 +394,10 @@ TypeIsVisible(Oid typid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another type of the same name earlier in the path.
* So we must do a slow check to see if this type would be found by
* TypenameGetTypid.
* If it is in the path, it might still not be visible; it could
* be hidden by another type of the same name earlier in the path.
* So we must do a slow check to see if this type would be found
* by TypenameGetTypid.
*/
char *typname = NameStr(typform->typname);
@@ -495,11 +495,11 @@ FuncnameGetCandidates(List *names, int nargs)
* arguments as something we already accepted? If so, keep
* only the one that appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the
* normal case), then any conflicting proc must immediately
* adjoin this one in the list, so we only need to look at
* the newest result item. If we have an unordered list,
* we have to scan the whole result list.
* If we have an ordered list from SearchSysCacheList (the normal
* case), then any conflicting proc must immediately adjoin
* this one in the list, so we only need to look at the newest
* result item. If we have an unordered list, we have to scan
* the whole result list.
*/
if (resultList)
{
@@ -595,10 +595,10 @@ FunctionIsVisible(Oid funcid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another proc of the same name and arguments earlier
* in the path. So we must do a slow check to see if this is the
* same proc that would be found by FuncnameGetCandidates.
* If it is in the path, it might still not be visible; it could
* be hidden by another proc of the same name and arguments
* earlier in the path. So we must do a slow check to see if this
* is the same proc that would be found by FuncnameGetCandidates.
*/
char *proname = NameStr(procform->proname);
int nargs = procform->pronargs;
@@ -710,11 +710,11 @@ OpernameGetCandidates(List *names, char oprkind)
* arguments as something we already accepted? If so, keep
* only the one that appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the
* normal case), then any conflicting oper must immediately
* adjoin this one in the list, so we only need to look at
* the newest result item. If we have an unordered list,
* we have to scan the whole result list.
* If we have an ordered list from SearchSysCacheList (the normal
* case), then any conflicting oper must immediately adjoin
* this one in the list, so we only need to look at the newest
* result item. If we have an unordered list, we have to scan
* the whole result list.
*/
if (resultList)
{
@@ -807,10 +807,11 @@ OperatorIsVisible(Oid oprid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another operator of the same name and arguments earlier
* in the path. So we must do a slow check to see if this is the
* same operator that would be found by OpernameGetCandidates.
* If it is in the path, it might still not be visible; it could
* be hidden by another operator of the same name and arguments
* earlier in the path. So we must do a slow check to see if this
* is the same operator that would be found by
* OpernameGetCandidates.
*/
char *oprname = NameStr(oprform->oprname);
FuncCandidateList clist;
@@ -882,14 +883,14 @@ OpclassGetCandidates(Oid amid)
/*
* Okay, it's in the search path, but does it have the same name
* as something we already accepted? If so, keep
* only the one that appears earlier in the search path.
* as something we already accepted? If so, keep only the one
* that appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the
* normal case), then any conflicting opclass must immediately
* adjoin this one in the list, so we only need to look at
* the newest result item. If we have an unordered list,
* we have to scan the whole result list.
* If we have an ordered list from SearchSysCacheList (the normal
* case), then any conflicting opclass must immediately adjoin
* this one in the list, so we only need to look at the newest
* result item. If we have an unordered list, we have to scan the
* whole result list.
*/
if (resultList)
{
@@ -1019,10 +1020,10 @@ OpclassIsVisible(Oid opcid)
else
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another opclass of the same name earlier in the path.
* So we must do a slow check to see if this opclass would be found by
* OpclassnameGetOpcid.
* If it is in the path, it might still not be visible; it could
* be hidden by another opclass of the same name earlier in the
* path. So we must do a slow check to see if this opclass would
* be found by OpclassnameGetOpcid.
*/
char *opcname = NameStr(opcform->opcname);
@@ -1063,6 +1064,7 @@ DeconstructQualifiedName(List *names,
catalogname = strVal(lfirst(names));
schemaname = strVal(lsecond(names));
objname = strVal(lfirst(lnext(lnext(names))));
/*
* We check the catalog name and then ignore it.
*/
@@ -1248,7 +1250,8 @@ PopSpecialNamespace(Oid namespaceId)
/*
* FindConversionByName - find a conversion by possibly qualified name
*/
Oid FindConversionByName(List *name)
Oid
FindConversionByName(List *name)
{
char *conversion_name;
Oid namespaceId;
@@ -1285,7 +1288,8 @@ Oid FindConversionByName(List *name)
/*
 * FindDefaultConversionProc - find default encoding conversion proc
*/
Oid FindDefaultConversionProc(int4 for_encoding, int4 to_encoding)
Oid
FindDefaultConversionProc(int4 for_encoding, int4 to_encoding)
{
Oid proc;
List *lptr;
@@ -1396,9 +1400,9 @@ recomputeNamespacePath(void)
firstNS = (Oid) lfirsti(oidlist);
/*
* Add any implicitly-searched namespaces to the list. Note these
* go on the front, not the back; also notice that we do not check
* USAGE permissions for these.
* Add any implicitly-searched namespaces to the list. Note these go
* on the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
if (!intMember(PG_CATALOG_NAMESPACE, oidlist))
oidlist = lconsi(PG_CATALOG_NAMESPACE, oidlist);
@@ -1453,13 +1457,13 @@ InitTempTableNamespace(void)
Oid namespaceId;
/*
* First, do permission check to see if we are authorized to make
* temp tables. We use a nonstandard error message here since
* First, do permission check to see if we are authorized to make temp
* tables. We use a nonstandard error message here since
* "databasename: permission denied" might be a tad cryptic.
*
* Note we apply the check to the session user, not the currently
* active userid, since we are not going to change our minds about
* temp table availability during the session.
* Note we apply the check to the session user, not the currently active
* userid, since we are not going to change our minds about temp table
* availability during the session.
*/
if (pg_database_aclcheck(MyDatabaseId, GetSessionUserId(),
ACL_CREATE_TEMP) != ACLCHECK_OK)
@@ -1476,11 +1480,11 @@ InitTempTableNamespace(void)
/*
* First use of this temp namespace in this database; create it.
* The temp namespaces are always owned by the superuser. We
* leave their permissions at default --- i.e., no access except to
* superuser --- to ensure that unprivileged users can't peek
* leave their permissions at default --- i.e., no access except
* to superuser --- to ensure that unprivileged users can't peek
* at other backends' temp tables. This works because the places
* that access the temp namespace for my own backend skip permissions
* checks on it.
* that access the temp namespace for my own backend skip
* permissions checks on it.
*/
namespaceId = NamespaceCreate(namespaceName, BOOTSTRAP_USESYSID);
/* Advance command counter to make namespace visible */
@@ -1532,6 +1536,7 @@ AtEOXact_Namespace(bool isCommit)
}
firstTempTransaction = false;
}
/*
* Clean up if someone failed to do PopSpecialNamespace
*/
@@ -1561,14 +1566,14 @@ RemoveTempRelations(Oid tempNamespaceId)
/*
* Scan pg_class to find all the relations in the target namespace.
* Ignore indexes, though, on the assumption that they'll go away
* when their tables are deleted.
* Ignore indexes, though, on the assumption that they'll go away when
* their tables are deleted.
*
* NOTE: if there are deletion constraints between temp relations,
* then our CASCADE delete call may cause as-yet-unvisited objects
* to go away. This is okay because we are using SnapshotNow; when
* the scan does reach those pg_class tuples, they'll be ignored as
* already deleted.
* NOTE: if there are deletion constraints between temp relations, then
* our CASCADE delete call may cause as-yet-unvisited objects to go
* away. This is okay because we are using SnapshotNow; when the scan
* does reach those pg_class tuples, they'll be ignored as already
* deleted.
*/
ScanKeyEntryInitialize(&key, 0x0,
Anum_pg_class_relnamespace,
@@ -1651,7 +1656,8 @@ assign_search_path(const char *newval, bool doit, bool interactive)
/*
* Verify that all the names are either valid namespace names or
* "$user". We do not require $user to correspond to a valid
* namespace. We do not check for USAGE rights, either; should we?
* namespace. We do not check for USAGE rights, either; should
* we?
*/
foreach(l, namelist)
{
@@ -1670,9 +1676,9 @@ assign_search_path(const char *newval, bool doit, bool interactive)
freeList(namelist);
/*
* We mark the path as needing recomputation, but don't do anything until
* it's needed. This avoids trying to do database access during GUC
* initialization.
* We mark the path as needing recomputation, but don't do anything
* until it's needed. This avoids trying to do database access during
* GUC initialization.
*/
if (doit)
namespaceSearchPathValid = false;
@@ -1692,7 +1698,8 @@ InitializeSearchPath(void)
{
/*
* In bootstrap mode, the search path must be 'pg_catalog' so that
* tables are created in the proper namespace; ignore the GUC setting.
* tables are created in the proper namespace; ignore the GUC
* setting.
*/
MemoryContext oldcxt;
@@ -1707,8 +1714,8 @@ InitializeSearchPath(void)
else
{
/*
* In normal mode, arrange for a callback on any syscache invalidation
* of pg_namespace rows.
* In normal mode, arrange for a callback on any syscache
* invalidation of pg_namespace rows.
*/
CacheRegisterSyscacheCallback(NAMESPACEOID,
NamespaceCallback,

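Several namespace.c hunks above repeat the same rule: an object whose namespace is on the search path can still be invisible if an earlier namespace holds another object of the same name, so the IsVisible functions redo the by-name lookup and compare results. A small standalone sketch of that rule, with toy lookup tables standing in for the catalog searches (none of these names are backend APIs):

#include <stdio.h>
#include <string.h>

typedef struct
{
	unsigned int oid;
	unsigned int namespaceId;
	const char *name;
} Obj;

/* stand-in for a RelnameGetRelid-style lookup: walk the search path in
 * order and return the first object with the requested name */
static unsigned int
lookup_by_name(const char *name,
			   const unsigned int *path, int pathlen,
			   const Obj *objs, int nobjs)
{
	int			p,
				i;

	for (p = 0; p < pathlen; p++)
		for (i = 0; i < nobjs; i++)
			if (objs[i].namespaceId == path[p] &&
				strcmp(objs[i].name, name) == 0)
				return objs[i].oid;
	return 0;					/* not found */
}

/* an object is visible iff the bare-name lookup would find *this* object */
static int
is_visible(const Obj *obj,
		   const unsigned int *path, int pathlen,
		   const Obj *objs, int nobjs)
{
	return lookup_by_name(obj->name, path, pathlen, objs, nobjs) == obj->oid;
}

int
main(void)
{
	/* search_path: myschema, public */
	unsigned int path[] = {200, 100};
	Obj			objs[] = {
		{5001, 100, "t"},		/* public.t */
		{5002, 200, "t"},		/* myschema.t */
	};

	/* public.t is on the path but hidden by myschema.t */
	printf("public.t visible?   %d\n", is_visible(&objs[0], path, 2, objs, 2));
	printf("myschema.t visible? %d\n", is_visible(&objs[1], path, 2, objs, 2));
	return 0;
}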

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.54 2002/08/22 00:01:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.55 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -89,8 +89,8 @@ AggregateCreate(const char *aggName,
/*
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary-compatible),
* so that it's OK to use the first input value as the initial
* type and transtype are the same (or at least binary-compatible), so
* that it's OK to use the first input value as the initial
* transValue.
*/
if (proc->proisstrict && agginitval == NULL)
@@ -128,7 +128,8 @@ AggregateCreate(const char *aggName,
/*
* Everything looks okay. Try to create the pg_proc entry for the
* aggregate. (This could fail if there's already a conflicting entry.)
* aggregate. (This could fail if there's already a conflicting
* entry.)
*/
MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid));
fnArgs[0] = aggBaseType;
@@ -143,9 +144,11 @@ AggregateCreate(const char *aggName,
"aggregate_dummy", /* placeholder proc */
"-", /* probin */
true, /* isAgg */
false, /* security invoker (currently not definable for agg) */
false, /* security invoker (currently not
* definable for agg) */
false, /* isStrict (not needed for agg) */
PROVOLATILE_IMMUTABLE, /* volatility (not needed for agg) */
PROVOLATILE_IMMUTABLE, /* volatility (not
* needed for agg) */
1, /* parameterCount */
fnArgs); /* parameterTypes */

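The AggregateCreate hunk above insists that, when the transition function is strict and there is no initial value, the input type and transition type must be binary-compatible, because the first non-null input is then used directly as the starting transition value. A toy standalone illustration of that behaviour, using integer max as a stand-in strict transition function (not the backend's aggregate machinery):

#include <stdio.h>

/* a "strict" transition function: never called with a null state or input */
static int
int_larger(int state, int input)
{
	return (input > state) ? input : state;
}

int
main(void)
{
	int			inputs[] = {4, 9, 2, 7};
	int			n = 4;
	int			state = 0;
	int			state_is_null = 1;	/* no initval: the state starts out null */
	int			i;

	for (i = 0; i < n; i++)
	{
		if (state_is_null)
		{
			/*
			 * strict transfn + null state: the first input value simply
			 * becomes the transition value, which is only type-safe if the
			 * input type and transition type are (binary) compatible
			 */
			state = inputs[i];
			state_is_null = 0;
		}
		else
			state = int_larger(state, inputs[i]);
	}
	printf("max = %d\n", state);	/* 9 */
	return 0;
}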

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.5 2002/08/26 17:53:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.6 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -165,8 +165,8 @@ CreateConstraintEntry(const char *constraintName,
if (OidIsValid(relId))
{
/*
* Register auto dependency from constraint to owning relation,
* or to specific column(s) if any are mentioned.
* Register auto dependency from constraint to owning relation, or
* to specific column(s) if any are mentioned.
*/
ObjectAddress relobject;
@@ -219,9 +219,9 @@ CreateConstraintEntry(const char *constraintName,
if (conExpr != NULL)
{
/*
* Register dependencies from constraint to objects mentioned
* in CHECK expression. We gin up a rather bogus rangetable
* list to handle any Vars in the constraint.
* Register dependencies from constraint to objects mentioned in
* CHECK expression. We gin up a rather bogus rangetable list to
* handle any Vars in the constraint.
*/
RangeTblEntry rte;
@@ -399,8 +399,8 @@ RemoveConstraintById(Oid conId)
con = (Form_pg_constraint) GETSTRUCT(tup);
/*
* If the constraint is for a relation, open and exclusive-lock
* the relation it's for.
* If the constraint is for a relation, open and exclusive-lock the
* relation it's for.
*
* XXX not clear what we should lock, if anything, for other constraints.
*/
@@ -411,9 +411,9 @@ RemoveConstraintById(Oid conId)
rel = heap_open(con->conrelid, AccessExclusiveLock);
/*
* We need to update the relcheck count if it is a check constraint
* being dropped. This update will force backends to rebuild
* relcache entries when we commit.
* We need to update the relcheck count if it is a check
* constraint being dropped. This update will force backends to
* rebuild relcache entries when we commit.
*/
if (con->contype == CONSTRAINT_CHECK)
{


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.5 2002/08/06 05:40:45 ishii Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.6 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,7 +36,8 @@
 * Add a new tuple to pg_conversion.
* ---------------
*/
Oid ConversionCreate(const char *conname, Oid connamespace,
Oid
ConversionCreate(const char *conname, Oid connamespace,
int32 conowner,
int4 conforencoding, int4 contoencoding,
Oid conproc, bool def)
@@ -65,8 +66,10 @@ Oid ConversionCreate(const char *conname, Oid connamespace,
if (def)
{
/* make sure there is no existing default
<for encoding><to encoding> pair in this name space */
/*
* make sure there is no existing default <for encoding><to
* encoding> pair in this name space
*/
if (FindDefaultConversion(connamespace,
conforencoding,
contoencoding))
@@ -129,7 +132,8 @@ Oid ConversionCreate(const char *conname, Oid connamespace,
* Drop a conversion and do dependency check.
* ---------------
*/
void ConversionDrop(const char *conname, Oid connamespace,
void
ConversionDrop(const char *conname, Oid connamespace,
int32 conowner, DropBehavior behavior)
{
Relation rel;
@@ -233,7 +237,8 @@ RemoveConversionById(Oid conversionOid)
* If found, returns the procedure's oid, otherwise InvalidOid.
* ---------------
*/
Oid FindDefaultConversion(Oid name_space, int4 for_encoding, int4 to_encoding)
Oid
FindDefaultConversion(Oid name_space, int4 for_encoding, int4 to_encoding)
{
CatCList *catlist;
HeapTuple tuple;
@@ -272,7 +277,8 @@ Oid FindDefaultConversion(Oid name_space, int4 for_encoding, int4 to_encoding)
* Returns conversion oid.
* ---------------
*/
Oid FindConversion(const char *conname, Oid connamespace)
Oid
FindConversion(const char *conname, Oid connamespace)
{
HeapTuple tuple;
Oid procoid;
@@ -347,9 +353,11 @@ pg_convert3(PG_FUNCTION_ARGS)
ReleaseSysCache(tuple);
/* build text data type structure. we cannot use textin() here,
since textin assumes that input string encoding is same as
database encoding. */
/*
 * build text data type structure. we cannot use textin() here, since
* textin assumes that input string encoding is same as database
* encoding.
*/
len = strlen(result) + VARHDRSZ;
retval = palloc(len);
VARATT_SIZEP(retval) = len;

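The pg_convert3 hunk above assembles its text result by hand (palloc, VARATT_SIZEP, then copying the payload) because textin() would assume the bytes are already in the database encoding. The standalone sketch below imitates that varlena layout with plain malloc; the struct and macro names are invented for the example and are not backend definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* imitation of the backend's variable-length datum header: a 4-byte total
 * length (header included) followed by the payload bytes */
typedef struct
{
	int32_t		vl_len;
	char		vl_dat[1];		/* actually variable-sized */
} my_varlena;

#define MY_VARHDRSZ ((int32_t) sizeof(int32_t))

static my_varlena *
make_text(const char *converted)
{
	int32_t		len = (int32_t) strlen(converted) + MY_VARHDRSZ;
	my_varlena *result = malloc(len);

	result->vl_len = len;		/* the VARATT_SIZEP(retval) = len step above */
	memcpy(result->vl_dat, converted, len - MY_VARHDRSZ);	/* no NUL stored */
	return result;
}

int
main(void)
{
	my_varlena *t = make_text("already converted bytes");

	printf("total size %d, payload \"%.*s\"\n",
		   t->vl_len, t->vl_len - MY_VARHDRSZ, t->vl_dat);
	free(t);
	return 0;
}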

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_depend.c,v 1.5 2002/08/11 21:17:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_depend.c,v 1.6 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,9 +79,9 @@ recordMultipleDependencies(const ObjectAddress *depender,
for (i = 0; i < nreferenced; i++, referenced++)
{
/*
* If the referenced object is pinned by the system, there's no real
* need to record dependencies on it. This saves lots of space in
* pg_depend, so it's worth the time taken to check.
* If the referenced object is pinned by the system, there's no
* real need to record dependencies on it. This saves lots of
* space in pg_depend, so it's worth the time taken to check.
*/
if (!isObjectPinned(referenced, dependDesc))
{


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.76 2002/08/22 00:01:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.77 2002/09/04 20:31:14 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -528,9 +528,10 @@ OperatorCreate(const char *operatorName,
operatorName, operatorNamespace,
leftTypeId, rightTypeId,
true);
/*
* self-linkage to this operator; will fix below. Note
* that only self-linkage for commutation makes sense.
* self-linkage to this operator; will fix below. Note that only
* self-linkage for commutation makes sense.
*/
if (!OidIsValid(commutatorId))
selfCommutator = true;
@@ -703,8 +704,8 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId,
otherRightTypeId == rightTypeId)
{
/*
* self-linkage to this operator; caller will fix later. Note
* that only self-linkage for commutation makes sense.
* self-linkage to this operator; caller will fix later. Note that
* only self-linkage for commutation makes sense.
*/
if (!isCommutator)
elog(ERROR, "operator cannot be its own negator or sort operator");
@@ -918,11 +919,11 @@ makeOperatorDependencies(HeapTuple tuple, Oid pg_operator_relid)
/*
* NOTE: we do not consider the operator to depend on the associated
* operators oprcom, oprnegate, oprlsortop, oprrsortop, oprltcmpop,
* oprgtcmpop. We would not want to delete this operator if those
* go away, but only reset the link fields; which is not a function
* that the dependency code can presently handle. (Something could
* perhaps be done with objectSubId though.) For now, it's okay to
* let those links dangle if a referenced operator is removed.
* oprgtcmpop. We would not want to delete this operator if those go
* away, but only reset the link fields; which is not a function that
* the dependency code can presently handle. (Something could perhaps
* be done with objectSubId though.) For now, it's okay to let those
* links dangle if a referenced operator is removed.
*/
/* Dependency on implementation function */


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.92 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.93 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -332,7 +332,8 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
tlist = parse->targetList;
/*
* The last query must be a SELECT if and only if return type isn't VOID.
* The last query must be a SELECT if and only if return type isn't
* VOID.
*/
if (rettype == VOIDOID)
{
@@ -360,8 +361,8 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
/*
* For base-type returns, the target list should have exactly one
* entry, and its type should agree with what the user declared. (As
* of Postgres 7.2, we accept binary-compatible types too.)
* entry, and its type should agree with what the user declared.
* (As of Postgres 7.2, we accept binary-compatible types too.)
*/
if (tlistlen != 1)
elog(ERROR, "function declared to return %s returns multiple columns in final SELECT",
@@ -378,11 +379,11 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
Assert(typerelid != InvalidOid);
/*
* If the target list is of length 1, and the type of the varnode in
* the target list matches the declared return type, this is okay.
* This can happen, for example, where the body of the function is
* 'SELECT func2()', where func2 has the same return type as the
* function that's calling it.
* If the target list is of length 1, and the type of the varnode
* in the target list matches the declared return type, this is
* okay. This can happen, for example, where the body of the
* function is 'SELECT func2()', where func2 has the same return
* type as the function that's calling it.
*/
if (tlistlen == 1)
{
@@ -392,11 +393,11 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
}
/*
* Otherwise verify that the targetlist matches the return tuple type.
* This part of the typechecking is a hack. We look up the relation
* that is the declared return type, and scan the non-deleted
* attributes to ensure that they match the datatypes of the
* non-resjunk columns.
* Otherwise verify that the targetlist matches the return tuple
* type. This part of the typechecking is a hack. We look up the
* relation that is the declared return type, and scan the
* non-deleted attributes to ensure that they match the datatypes
* of the non-resjunk columns.
*/
reln = relation_open(typerelid, AccessShareLock);
relnatts = reln->rd_rel->relnatts;
@@ -413,7 +414,8 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
if (tle->resdom->resjunk)
continue;
do {
do
{
colindex++;
if (colindex > relnatts)
elog(ERROR, "function declared to return %s does not SELECT the right number of columns (%d)",
@@ -453,8 +455,8 @@ checkretval(Oid rettype, char fn_typtype, List *queryTreeList)
Assert(typerelid == InvalidOid);
/*
* For RECORD return type, defer this check until we get the
* first tuple.
* For RECORD return type, defer this check until we get the first
* tuple.
*/
}
else


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.81 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.82 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -158,8 +158,8 @@ TypeCreate(const char *typeName,
int i;
/*
* We assume that the caller validated the arguments individually,
* but did not check for bad combinations.
* We assume that the caller validated the arguments individually, but
* did not check for bad combinations.
*
* Validate size specifications: either positive (fixed-length) or -1
* (varlena) or -2 (cstring). Pass-by-value types must have a fixed
@@ -214,8 +214,8 @@ TypeCreate(const char *typeName,
values[i++] = Int32GetDatum(typNDims); /* typndims */
/*
* initialize the default binary value for this type. Check for
* nulls of course.
* initialize the default binary value for this type. Check for nulls
* of course.
*/
if (defaultTypeBin)
values[i] = DirectFunctionCall1(textin,
@@ -321,13 +321,13 @@ TypeCreate(const char *typeName,
/*
* If the type is a rowtype for a relation, mark it as internally
* dependent on the relation, *unless* it is a stand-alone composite
* type relation. For the latter case, we have to reverse the
* dependency.
* dependent on the relation, *unless* it is a stand-alone
* composite type relation. For the latter case, we have to
* reverse the dependency.
*
* In the former case, this allows the type to be auto-dropped
* when the relation is, and not otherwise. And in the latter,
* of course we get the opposite effect.
* In the former case, this allows the type to be auto-dropped when
* the relation is, and not otherwise. And in the latter, of
* course we get the opposite effect.
*/
if (OidIsValid(relationOid))
{
@@ -342,11 +342,11 @@ TypeCreate(const char *typeName,
}
/*
* If the type is an array type, mark it auto-dependent on the base
* type. (This is a compromise between the typical case where the
* array type is automatically generated and the case where it is
* manually created: we'd prefer INTERNAL for the former case and
* NORMAL for the latter.)
* If the type is an array type, mark it auto-dependent on the
* base type. (This is a compromise between the typical case
* where the array type is automatically generated and the case
* where it is manually created: we'd prefer INTERNAL for the
* former case and NORMAL for the latter.)
*/
if (OidIsValid(elementType))
{


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.4 2002/08/22 00:01:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.5 2002/09/04 20:31:14 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -110,8 +110,8 @@ DefineAggregate(List *names, List *parameters)
* We have historically allowed the command to look like basetype = 'ANY'
* so we must do a case-insensitive comparison for the name ANY. Ugh.
*
* basetype can be a pseudo-type, but transtype can't, since we need
* to be able to store values of the transtype.
* basetype can be a pseudo-type, but transtype can't, since we need to
* be able to store values of the transtype.
*/
if (strcasecmp(TypeNameToString(baseType), "ANY") == 0)
baseTypeId = ANYOID;
@@ -154,8 +154,8 @@ RemoveAggregate(RemoveAggrStmt *stmt)
* if a basetype is passed in, then attempt to find an aggregate for
* that specific type.
*
* else attempt to find an aggregate with a basetype of ANYOID.
* This means that the aggregate is to apply to all basetypes (eg, COUNT).
* else attempt to find an aggregate with a basetype of ANYOID. This
* means that the aggregate is to apply to all basetypes (eg, COUNT).
*/
if (aggType)
basetypeID = typenameTypeId(aggType);


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.45 2002/08/26 18:45:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.46 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -163,9 +163,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
elevel = DEBUG1;
/*
* Use the current context for storing analysis info. vacuum.c ensures
* that this context will be cleared when I return, thus releasing the
* memory allocated here.
* Use the current context for storing analysis info. vacuum.c
* ensures that this context will be cleared when I return, thus
* releasing the memory allocated here.
*/
anl_context = CurrentMemoryContext;
@@ -1042,11 +1042,15 @@ compute_minimal_stats(VacAttrStats *stats,
*/
int f1 = nonnull_cnt - summultiple;
int d = f1 + nmultiple;
double numer, denom, stadistinct;
double numer,
denom,
stadistinct;
numer = (double) numrows *(double) d;
denom = (double) (numrows - f1) +
(double) f1 *(double) numrows / totalrows;
stadistinct = numer / denom;
/* Clamp to sane range in case of roundoff error */
if (stadistinct < (double) d)
@@ -1361,11 +1365,15 @@ compute_scalar_stats(VacAttrStats *stats,
*/
int f1 = ndistinct - nmultiple + toowide_cnt;
int d = f1 + nmultiple;
double numer, denom, stadistinct;
double numer,
denom,
stadistinct;
numer = (double) numrows *(double) d;
denom = (double) (numrows - f1) +
(double) f1 *(double) numrows / totalrows;
stadistinct = numer / denom;
/* Clamp to sane range in case of roundoff error */
if (stadistinct < (double) d)

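Both ANALYZE hunks above compute the same distinct-value estimate from a sample: with d distinct sampled values and f1 of them seen exactly once, stadistinct = (numrows * d) / ((numrows - f1) + f1 * numrows / totalrows), then clamped. A standalone version of that arithmetic with a worked example; only the lower clamp is visible in the hunks, so the upper clamp at totalrows is an assumption.

#include <stdio.h>

/* numrows:   rows actually sampled
 * totalrows: estimated rows in the whole table
 * d:         distinct values observed in the sample
 * f1:        values observed exactly once in the sample */
static double
estimate_ndistinct(double numrows, double totalrows, double d, double f1)
{
	double		numer = numrows * d;
	double		denom = (numrows - f1) + f1 * numrows / totalrows;
	double		stadistinct = numer / denom;

	/*
	 * Clamp to a sane range in case of roundoff error; the lower bound
	 * matches the hunks above, the upper bound is assumed to be the table
	 * size.
	 */
	if (stadistinct < d)
		stadistinct = d;
	if (stadistinct > totalrows)
		stadistinct = totalrows;
	return stadistinct;
}

int
main(void)
{
	/* sample of 3000 rows from a ~1,000,000-row table: 1200 distinct values,
	 * 800 of them seen only once; prints roughly 1635 */
	printf("estimated ndistinct = %.0f\n",
		   estimate_ndistinct(3000.0, 1000000.0, 1200.0, 800.0));
	return 0;
}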

@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.89 2002/09/03 01:04:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.90 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -111,11 +111,11 @@ cluster(RangeVar *oldrelation, char *oldindexname)
RelationGetRelationName(OldHeap));
/*
* Disallow clustering system relations. This will definitely NOT work
* for shared relations (we have no way to update pg_class rows in other
* databases), nor for nailed-in-cache relations (the relfilenode values
* for those are hardwired, see relcache.c). It might work for other
* system relations, but I ain't gonna risk it.
* Disallow clustering system relations. This will definitely NOT
* work for shared relations (we have no way to update pg_class rows
* in other databases), nor for nailed-in-cache relations (the
* relfilenode values for those are hardwired, see relcache.c). It
* might work for other system relations, but I ain't gonna risk it.
*/
if (IsSystemRelation(OldHeap))
elog(ERROR, "CLUSTER: cannot cluster system relation \"%s\"",
@@ -130,16 +130,20 @@ cluster(RangeVar *oldrelation, char *oldindexname)
/*
* Create the new heap, using a temporary name in the same namespace
* as the existing table. NOTE: there is some risk of collision with user
* relnames. Working around this seems more trouble than it's worth; in
* particular, we can't create the new heap in a different namespace from
* the old, or we will have problems with the TEMP status of temp tables.
* as the existing table. NOTE: there is some risk of collision with
* user relnames. Working around this seems more trouble than it's
* worth; in particular, we can't create the new heap in a different
* namespace from the old, or we will have problems with the TEMP
* status of temp tables.
*/
snprintf(NewHeapName, NAMEDATALEN, "pg_temp_%u", OIDOldHeap);
OIDNewHeap = make_new_heap(OIDOldHeap, NewHeapName);
/* We don't need CommandCounterIncrement() because make_new_heap did it. */
/*
* We don't need CommandCounterIncrement() because make_new_heap did
* it.
*/
/*
* Copy the heap data into the new table in the desired order.
@@ -244,14 +248,14 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
while ((tuple = index_getnext(scan, ForwardScanDirection)) != NULL)
{
/*
* We must copy the tuple because heap_insert() will overwrite
* the commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus,
* the source relation would get trashed, which is bad news if
* we abort later on. (This was a bug in releases thru 7.0)
* We must copy the tuple because heap_insert() will overwrite the
* commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus, the
* source relation would get trashed, which is bad news if we
* abort later on. (This was a bug in releases thru 7.0)
*
* Note that the copied tuple will have the original OID, if any,
* so this does preserve OIDs.
* Note that the copied tuple will have the original OID, if any, so
* this does preserve OIDs.
*/
HeapTuple copiedTuple = heap_copytuple(tuple);
@@ -320,7 +324,8 @@ get_indexattr_list(Relation OldHeap, Oid OldIndex)
ReleaseSysCache(classTuple);
ReleaseSysCache(indexTuple);
/* Cons the gathered data into the list. We do not care about
/*
* Cons the gathered data into the list. We do not care about
* ordering, and this is more efficient than append.
*/
indexes = lcons(attrs, indexes);
@@ -352,10 +357,10 @@ recreate_indexattr(Oid OIDOldHeap, List *indexes)
snprintf(newIndexName, NAMEDATALEN, "pg_temp_%u", attrs->indexOID);
/*
* The new index will have primary and constraint status set to false,
* but since we will only use its filenode it doesn't matter:
* after the filenode swap the index will keep the constraint
* status of the old index.
* The new index will have primary and constraint status set to
* false, but since we will only use its filenode it doesn't
* matter: after the filenode swap the index will keep the
* constraint status of the old index.
*/
newIndexOID = index_create(OIDOldHeap, newIndexName,
attrs->indexInfo, attrs->accessMethodOID,
@@ -369,8 +374,8 @@ recreate_indexattr(Oid OIDOldHeap, List *indexes)
CommandCounterIncrement();
/*
* Make sure that indisclustered is correct: it should be set
* only for the index we just clustered on.
* Make sure that indisclustered is correct: it should be set only
* for the index we just clustered on.
*/
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(INDEXRELID,
@@ -394,8 +399,8 @@ recreate_indexattr(Oid OIDOldHeap, List *indexes)
object.objectSubId = 0;
/*
* The relation is local to our transaction and we know
* nothing depends on it, so DROP_RESTRICT should be OK.
* The relation is local to our transaction and we know nothing
* depends on it, so DROP_RESTRICT should be OK.
*/
performDeletion(&object, DROP_RESTRICT);
@@ -481,16 +486,17 @@ swap_relfilenodes(Oid r1, Oid r2)
CatalogCloseIndexes(indstate);
/*
* If we have toast tables associated with the relations being swapped,
* change their dependency links to re-associate them with their new
* owning relations. Otherwise the wrong one will get dropped ...
* If we have toast tables associated with the relations being
* swapped, change their dependency links to re-associate them with
* their new owning relations. Otherwise the wrong one will get
* dropped ...
*
* NOTE: for now, we can assume the new table will have a TOAST table
* if and only if the old one does. This logic might need work if we
* get smarter about dropped columns.
* NOTE: for now, we can assume the new table will have a TOAST table if
* and only if the old one does. This logic might need work if we get
* smarter about dropped columns.
*
* NOTE: at present, a TOAST table's only dependency is the one on
* its owning table. If more are ever created, we'd need to use something
* NOTE: at present, a TOAST table's only dependency is the one on its
* owning table. If more are ever created, we'd need to use something
* more selective than deleteDependencyRecordsFor() to get rid of only
* the link we want.
*/
@@ -533,11 +539,11 @@ swap_relfilenodes(Oid r1, Oid r2)
/*
* Blow away the old relcache entries now. We need this kluge because
* relcache.c indexes relcache entries by rd_node as well as OID.
* It will get confused if it is asked to (re)build an entry with a new
* relcache.c indexes relcache entries by rd_node as well as OID. It
* will get confused if it is asked to (re)build an entry with a new
* rd_node value when there is still another entry laying about with
* that same rd_node value. (Fortunately, since one of the entries
* is local in our transaction, it's sufficient to clear out our own
* that same rd_node value. (Fortunately, since one of the entries is
* local in our transaction, it's sufficient to clear out our own
* relcache this way; the problem cannot arise for other backends when
* they see our update on the non-local relation.)
*/


@@ -7,7 +7,7 @@
* Copyright (c) 1996-2001, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.59 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.60 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -260,9 +260,7 @@ DeleteComments(Oid oid, Oid classoid, int32 subid)
SnapshotNow, nkeys, skey);
while ((oldtuple = systable_getnext(sd)) != NULL)
{
simple_heap_delete(description, &oldtuple->t_self);
}
/* Done */
@@ -820,9 +818,9 @@ CommentConstraint(List *qualname, char *comment)
aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(relation));
/*
* Fetch the constraint tuple from pg_constraint. There may be more than
* one match, because constraints are not required to have unique names;
* if so, error out.
* Fetch the constraint tuple from pg_constraint. There may be more
* than one match, because constraints are not required to have unique
* names; if so, error out.
*/
pg_constraint = heap_openr(ConstraintRelationName, AccessShareLock);


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.3 2002/08/22 00:01:42 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.4 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,8 +64,9 @@ CreateConversionCommand(CreateConversionStmt *stmt)
if (to_encoding < 0)
elog(ERROR, "Invalid to encoding name: %s", to_encoding_name);
/* Check the existence of the conversion function.
* Function name could be a qualified name.
/*
* Check the existence of the conversion function. Function name could
* be a qualified name.
*/
funcoid = LookupFuncName(func_name, sizeof(funcargs) / sizeof(Oid), funcargs);
if (!OidIsValid(funcoid))
@@ -81,7 +82,10 @@ CreateConversionCommand(CreateConversionStmt *stmt)
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, get_namespace_name(funcnamespace));
/* All seem ok, go ahead (possible failure would be a duplicate conversion name) */
/*
* All seem ok, go ahead (possible failure would be a duplicate
* conversion name)
*/
ConversionCreate(conversion_name, namespaceId, GetUserId(),
for_encoding, to_encoding, funcoid, stmt->def);
}
@@ -104,9 +108,9 @@ DropConversionCommand(List *name, DropBehavior behavior)
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, get_namespace_name(namespaceId));
/* Go ahead (possible failure would be:
 * non-existent conversion
 * not owner of this conversion
/*
 * Go ahead (possible failure would be: non-existent conversion or not
 * owner of this conversion
*/
ConversionDrop(conversion_name, namespaceId, GetUserId(), behavior);
}


@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.170 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.171 2002/09/04 20:31:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -537,8 +537,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Get info about the columns we need to process.
*
* For binary copy we really only need isvarlena, but compute it
* all...
* For binary copy we really only need isvarlena, but compute it all...
*/
out_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo));
elements = (Oid *) palloc(num_phys_attrs * sizeof(Oid));
@@ -740,7 +739,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
HeapTuple tuple;
TupleDesc tupDesc;
Form_pg_attribute *attr;
AttrNumber num_phys_attrs, attr_count, num_defaults;
AttrNumber num_phys_attrs,
attr_count,
num_defaults;
FmgrInfo *in_functions;
Oid *elements;
int i;
@@ -787,9 +788,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
ExecSetSlotDescriptor(slot, tupDesc, false);
/*
* pick up the input function and default expression (if any) for
* each attribute in the relation. (We don't actually use the
* input function if it's a binary copy.)
* pick up the input function and default expression (if any) for each
* attribute in the relation. (We don't actually use the input
* function if it's a binary copy.)
*/
defmap = (int *) palloc(sizeof(int) * num_phys_attrs);
defexprs = (Node **) palloc(sizeof(Node *) * num_phys_attrs);
@@ -1059,9 +1060,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
Datum datumBuf;
/*
* We need this horsing around because we don't
* know how shorter data values are aligned within
* a Datum.
* We need this horsing around because we don't know
* how shorter data values are aligned within a Datum.
*/
Assert(fld_size > 0 && fld_size <= sizeof(Datum));
CopyGetData(&datumBuf, fld_size, fp);
@@ -1075,9 +1075,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
}
/*
* Now compute and insert any defaults available for the
* columns not provided by the input data. Anything not
* processed here or above will remain NULL.
* Now compute and insert any defaults available for the columns
* not provided by the input data. Anything not processed here or
* above will remain NULL.
*/
for (i = 0; i < num_defaults; i++)
{

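The binary COPY hunk above reads each short field into a Datum-sized local precisely because the code cannot assume how a two- or four-byte value sits inside a Datum; copying into a properly aligned, Datum-sized buffer and reading back from its start avoids the question. A rough standalone sketch of that trick, with Datum approximated by uintptr_t (an assumption for the example):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef uintptr_t Datum;		/* stand-in for the backend's Datum */

int
main(void)
{
	int16_t		fld = 1234;		/* a 2-byte field as read from the COPY file */
	int			fld_size = sizeof(fld);
	Datum		datumBuf = 0;	/* Datum-sized, properly aligned scratch space */
	int16_t		value;

	/*
	 * Copy exactly fld_size bytes into the buffer, then pull the value back
	 * out from its start; no assumption about where a short value "lives"
	 * inside a Datum is needed.
	 */
	memcpy(&datumBuf, &fld, fld_size);
	memcpy(&value, &datumBuf, sizeof(value));

	printf("read field of %d bytes: %d\n", fld_size, value);
	return 0;
}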

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.104 2002/09/03 22:17:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.105 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -226,10 +226,10 @@ createdb(const CreatedbStmt *stmt)
* database), and resolve alternate physical location if one is
* specified.
*
* If an alternate location is specified but is the same as the
* normal path, just drop the alternate-location spec (this seems
* friendlier than erroring out). We must test this case to avoid
* creating a circular symlink below.
* If an alternate location is specified but is the same as the normal
* path, just drop the alternate-location spec (this seems friendlier
* than erroring out). We must test this case to avoid creating a
* circular symlink below.
*/
nominal_loc = GetDatabasePath(dboid);
alt_loc = resolve_alt_dbpath(dbpath, dboid);
@@ -328,11 +328,12 @@ createdb(const CreatedbStmt *stmt)
/* do not set datpath to null, GetRawDatabaseInfo won't cope */
new_record[Anum_pg_database_datpath - 1] =
DirectFunctionCall1(textin, CStringGetDatum(dbpath ? dbpath : ""));
/*
* We deliberately set datconfig and datacl to defaults (NULL), rather
* than copying them from the template database. Copying datacl would
* be a bad idea when the owner is not the same as the template's owner.
* It's more debatable whether datconfig should be copied.
* be a bad idea when the owner is not the same as the template's
* owner. It's more debatable whether datconfig should be copied.
*/
new_record_nulls[Anum_pg_database_datconfig - 1] = 'n';
new_record_nulls[Anum_pg_database_datacl - 1] = 'n';


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.79 2002/08/10 19:01:53 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.80 2002/09/04 20:31:15 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -132,10 +132,11 @@ defGetInt64(DefElem *def)
case T_Integer:
return (int64) intVal(def->arg);
case T_Float:
/*
* Values too large for int4 will be represented as Float
* constants by the lexer. Accept these if they are valid int8
* strings.
* constants by the lexer. Accept these if they are valid
* int8 strings.
*/
return DatumGetInt64(DirectFunctionCall1(int8in,
CStringGetDatum(strVal(def->arg))));

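The defGetInt64 hunk above explains that integer literals too wide for int4 arrive from the lexer as Float (string) constants and are accepted only if they parse as valid int8 strings. A standalone sketch of that fallback using strtoll; the function and variable names here are illustrative, not the backend's.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

/* parse a string the lexer classified as "Float" and accept it only if it
 * is in fact a valid 64-bit integer, mirroring the int8-string fallback
 * described above */
static int
parse_int8_string(const char *s, int64_t *result)
{
	char	   *end;

	errno = 0;
	*result = strtoll(s, &end, 10);
	if (errno != 0 || end == s || *end != '\0')
		return 0;				/* overflow, or not a plain integer literal */
	return 1;
}

int
main(void)
{
	int64_t		v;
	const char *ok = "90000000000";		/* too big for int4, fine for int8 */
	const char *bad = "1.5e10";			/* a real float: rejected */

	printf("%s -> %s\n", ok, parse_int8_string(ok, &v) ? "accepted" : "rejected");
	printf("%s -> %s\n", bad, parse_int8_string(bad, &v) ? "accepted" : "rejected");
	return 0;
}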

@@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.86 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.87 2002/09/04 20:31:15 momjian Exp $
*
*/
@@ -662,8 +662,8 @@ show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
/*
* If we have an outer plan that is referenced by the qual, add it to
* the deparse context. If not, don't (so that we don't force prefixes
* unnecessarily).
* the deparse context. If not, don't (so that we don't force
* prefixes unnecessarily).
*/
if (outer_plan)
{
@@ -760,10 +760,11 @@ show_sort_keys(List *tlist, int nkeys, const char *qlabel,
/*
* In this routine we expect that the plan node's tlist has not been
* processed by set_plan_references(). Normally, any Vars will contain
* valid varnos referencing the actual rtable. But we might instead be
* looking at a dummy tlist generated by prepunion.c; if there are
* Vars with zero varno, use the tlist itself to determine their names.
* processed by set_plan_references(). Normally, any Vars will
* contain valid varnos referencing the actual rtable. But we might
* instead be looking at a dummy tlist generated by prepunion.c; if
* there are Vars with zero varno, use the tlist itself to determine
* their names.
*/
if (intMember(0, pull_varnos((Node *) tlist)))
{
@@ -827,9 +828,7 @@ make_ors_ands_explicit(List *orclauses)
List *orptr;
foreach(orptr, orclauses)
{
args = lappend(args, make_ands_explicit(lfirst(orptr)));
}
return (Node *) make_orclause(args);
}


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.18 2002/08/22 00:01:42 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.19 2002/09/04 20:31:15 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -439,13 +439,12 @@ CreateFunction(CreateFunctionStmt *stmt)
if (languageOid == INTERNALlanguageId)
{
/*
* In PostgreSQL versions before 6.5, the SQL name of the
* created function could not be different from the internal
* name, and "prosrc" wasn't used. So there is code out there
* that does CREATE FUNCTION xyz AS '' LANGUAGE 'internal'.
* To preserve some modicum of backwards compatibility, accept
* an empty "prosrc" value as meaning the supplied SQL
* function name.
* In PostgreSQL versions before 6.5, the SQL name of the created
* function could not be different from the internal name, and
* "prosrc" wasn't used. So there is code out there that does
* CREATE FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some
* modicum of backwards compatibility, accept an empty "prosrc"
* value as meaning the supplied SQL function name.
*/
if (strlen(prosrc_str) == 0)
prosrc_str = funcname;


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.86 2002/08/30 22:18:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.87 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -117,9 +117,9 @@ DefineIndex(RangeVar *heapRelation,
/*
* Verify we (still) have CREATE rights in the rel's namespace.
* (Presumably we did when the rel was created, but maybe not anymore.)
* Skip check if bootstrapping, since permissions machinery may not
* be working yet.
* (Presumably we did when the rel was created, but maybe not
* anymore.) Skip check if bootstrapping, since permissions machinery
* may not be working yet.
*/
if (!IsBootstrapProcessingMode())
{
@@ -254,8 +254,8 @@ CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid)
elog(ERROR, "Cannot use aggregate in index predicate");
/*
* A predicate using mutable functions is probably wrong, for the
* same reasons that we don't allow a functional index to use one.
* A predicate using mutable functions is probably wrong, for the same
* reasons that we don't allow a functional index to use one.
*/
if (contain_mutable_functions((Node *) predList))
elog(ERROR, "Functions in index predicate must be marked isImmutable");
@@ -458,8 +458,8 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
NameListToString(attribute->opclass), accessMethodName);
/*
* Verify that the index operator class accepts this
* datatype. Note we will accept binary compatibility.
* Verify that the index operator class accepts this datatype. Note
* we will accept binary compatibility.
*/
opClassId = HeapTupleGetOid(tuple);
opInputType = ((Form_pg_opclass) GETSTRUCT(tuple))->opcintype;


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/lockcmds.c,v 1.3 2002/06/20 20:29:27 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/lockcmds.c,v 1.4 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,8 +43,8 @@ LockTableCommand(LockStmt *lockstmt)
Relation rel;
/*
* We don't want to open the relation until we've checked privilege.
* So, manually get the relation OID.
* We don't want to open the relation until we've checked
* privilege. So, manually get the relation OID.
*/
reloid = RangeVarGetRelid(relation, false);


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.4 2002/08/22 00:01:42 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.5 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -107,9 +107,9 @@ DefineOpClass(CreateOpClassStmt *stmt)
storageoid = InvalidOid;
/*
* Create work arrays to hold info about operators and procedures.
* We do this mainly so that we can detect duplicate strategy
* numbers and support-proc numbers.
* Create work arrays to hold info about operators and procedures. We
* do this mainly so that we can detect duplicate strategy numbers and
* support-proc numbers.
*/
operators = (Oid *) palloc(sizeof(Oid) * numOperators);
MemSet(operators, 0, sizeof(Oid) * numOperators);
@@ -221,8 +221,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
rel = heap_openr(OperatorClassRelationName, RowExclusiveLock);
/*
* Make sure there is no existing opclass of this name (this is
* just to give a more friendly error message than "duplicate key").
* Make sure there is no existing opclass of this name (this is just
* to give a more friendly error message than "duplicate key").
*/
if (SearchSysCacheExists(CLAAMNAMENSP,
ObjectIdGetDatum(amoid),
@@ -233,8 +233,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
opcname, stmt->amname);
/*
* If we are creating a default opclass, check there isn't one already.
* (XXX should we restrict this test to visible opclasses?)
* If we are creating a default opclass, check there isn't one
* already. (XXX should we restrict this test to visible opclasses?)
*/
if (stmt->isDefault)
{
@@ -291,8 +291,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
heap_freetuple(tup);
/*
* Now add tuples to pg_amop and pg_amproc tying in the
* operators and functions.
* Now add tuples to pg_amop and pg_amproc tying in the operators and
* functions.
*/
storeOperators(opclassoid, numOperators, operators, recheck);
storeProcedures(opclassoid, numProcs, procedures);
@@ -362,7 +362,8 @@ storeOperators(Oid opclassoid, int numOperators,
Datum values[Natts_pg_amop];
char nulls[Natts_pg_amop];
HeapTuple tup;
int i, j;
int i,
j;
rel = heap_openr(AccessMethodOperatorRelationName, RowExclusiveLock);
@@ -405,7 +406,8 @@ storeProcedures(Oid opclassoid, int numProcs, Oid *procedures)
Datum values[Natts_pg_amproc];
char nulls[Natts_pg_amproc];
HeapTuple tup;
int i, j;
int i,
j;
rel = heap_openr(AccessMethodProcedureRelationName, RowExclusiveLock);
@@ -445,7 +447,8 @@ storeProcedures(Oid opclassoid, int numProcs, Oid *procedures)
void
RemoveOpClass(RemoveOpClassStmt *stmt)
{
Oid amID, opcID;
Oid amID,
opcID;
char *schemaname;
char *opcname;
HeapTuple tuple;
@@ -559,9 +562,7 @@ RemoveOpClassById(Oid opclassOid)
SnapshotNow, 1, skey);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
simple_heap_delete(rel, &tup->t_self);
}
systable_endscan(scan);
heap_close(rel, RowExclusiveLock);
@@ -579,9 +580,7 @@ RemoveOpClassById(Oid opclassOid)
SnapshotNow, 1, skey);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
simple_heap_delete(rel, &tup->t_self);
}
systable_endscan(scan);
heap_close(rel, RowExclusiveLock);


@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.6 2002/07/24 19:11:09 petere Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.7 2002/09/04 20:31:15 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.2 2002/05/21 22:05:54 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.3 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -134,12 +134,12 @@ PerformPortalFetch(char *name,
* Determine which direction to go in, and check to see if we're
* already at the end of the available tuples in that direction. If
* so, set the direction to NoMovement to avoid trying to fetch any
* tuples. (This check exists because not all plan node types
* are robust about being called again if they've already returned
* NULL once.) Then call the executor (we must not skip this, because
* the destination needs to see a setup and shutdown even if no tuples
* are available). Finally, update the atStart/atEnd state depending
* on the number of tuples that were retrieved.
* tuples. (This check exists because not all plan node types are
* robust about being called again if they've already returned NULL
* once.) Then call the executor (we must not skip this, because the
* destination needs to see a setup and shutdown even if no tuples are
* available). Finally, update the atStart/atEnd state depending on
* the number of tuples that were retrieved.
*/
if (forward)
{


@@ -6,7 +6,7 @@
* Copyright (c) 2002, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.1 2002/08/27 04:55:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.2 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -185,9 +185,9 @@ ExecuteQuery(ExecuteStmt *stmt, CommandDest outputDest)
}
/*
* If we're processing multiple queries, we need to increment
* the command counter between them. For the last query,
* there's no need to do this, it's done automatically.
* If we're processing multiple queries, we need to increment the
* command counter between them. For the last query, there's no
* need to do this, it's done automatically.
*/
if (!is_last_query)
CommandCounterIncrement();
@@ -258,10 +258,10 @@ StoreQuery(const char *stmt_name, List *query_list, List *plan_list,
oldcxt = MemoryContextSwitchTo(entrycxt);
/*
* We need to copy the data so that it is stored in the correct
* memory context. Do this before making hashtable entry, so that
* an out-of-memory failure only wastes memory and doesn't leave us
* with an incomplete (ie corrupt) hashtable entry.
* We need to copy the data so that it is stored in the correct memory
* context. Do this before making hashtable entry, so that an
* out-of-memory failure only wastes memory and doesn't leave us with
* an incomplete (ie corrupt) hashtable entry.
*/
query_list = (List *) copyObject(query_list);
plan_list = (List *) copyObject(plan_list);
@@ -306,8 +306,8 @@ FetchQuery(const char *plan_name)
/*
* We can't just use the statement name as supplied by the user: the
* hash package is picky enough that it needs to be NULL-padded out
* to the appropriate length to work correctly.
* hash package is picky enough that it needs to be NULL-padded out to
* the appropriate length to work correctly.
*/
MemSet(key, 0, sizeof(key));
strncpy(key, plan_name, sizeof(key));
@@ -376,18 +376,18 @@ DeallocateQuery(DeallocateStmt *stmt)
/*
* We can't just use the statement name as supplied by the user: the
* hash package is picky enough that it needs to be NULL-padded out
* to the appropriate length to work correctly.
* hash package is picky enough that it needs to be NULL-padded out to
* the appropriate length to work correctly.
*/
MemSet(key, 0, sizeof(key));
strncpy(key, stmt->name, sizeof(key));
/*
* First lookup the entry, so we can release all the subsidiary memory
* it has allocated (when it's removed, hash_search() will return
* a dangling pointer, so it needs to be done prior to HASH_REMOVE).
* This requires an extra hash-table lookup, but DEALLOCATE
* isn't exactly a performance bottleneck.
* it has allocated (when it's removed, hash_search() will return a
* dangling pointer, so it needs to be done prior to HASH_REMOVE).
* This requires an extra hash-table lookup, but DEALLOCATE isn't
* exactly a performance bottleneck.
*/
entry = (QueryHashEntry *) hash_search(prepared_queries,
key,


@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.41 2002/08/22 00:01:42 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.42 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,8 @@ void
CreateProceduralLanguage(CreatePLangStmt *stmt)
{
char languageName[NAMEDATALEN];
Oid procOid, valProcOid;
Oid procOid,
valProcOid;
Oid typev[FUNC_MAX_ARGS];
char nulls[Natts_pg_language];
Datum values[Natts_pg_language];


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.5 2002/07/18 16:47:24 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.6 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,15 +61,17 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
owner_name = authId;
/* The following will error out if user does not exist */
owner_userid = get_usesysid(owner_name);
/*
* Set the current user to the requested authorization so
* that objects created in the statement have the requested
* owner. (This will revert to session user on error or at
* the end of this routine.)
* Set the current user to the requested authorization so that
* objects created in the statement have the requested owner.
* (This will revert to session user on error or at the end of
* this routine.)
*/
SetUserId(owner_userid);
}
else /* not superuser */
else
/* not superuser */
{
owner_userid = saved_userid;
owner_name = GetUserNameFromId(owner_userid);
@@ -98,8 +100,8 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
/*
* Temporarily make the new namespace be the front of the search path,
* as well as the default creation target namespace. This will be undone
* at the end of this routine, or upon error.
* as well as the default creation target namespace. This will be
* undone at the end of this routine, or upon error.
*/
PushSpecialNamespace(namespaceId);
@@ -107,8 +109,8 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
* Examine the list of commands embedded in the CREATE SCHEMA command,
* and reorganize them into a sequentially executable order with no
* forward references. Note that the result is still a list of raw
* parsetrees in need of parse analysis --- we cannot, in general,
* run analyze.c on one statement until we have actually executed the
* parsetrees in need of parse analysis --- we cannot, in general, run
* analyze.c on one statement until we have actually executed the
* prior ones.
*/
parsetree_list = analyzeCreateSchemaStmt(stmt);
@@ -171,12 +173,12 @@ RemoveSchema(List *names, DropBehavior behavior)
aclcheck_error(ACLCHECK_NOT_OWNER, namespaceName);
/*
* Do the deletion. Objects contained in the schema are removed
* by means of their dependency links to the schema.
* Do the deletion. Objects contained in the schema are removed by
* means of their dependency links to the schema.
*
* XXX currently, index opclasses don't have creation/deletion
* commands, so they will not get removed when the containing
* schema is removed. This is annoying but not fatal.
* XXX currently, index opclasses don't have creation/deletion commands,
* so they will not get removed when the containing schema is removed.
* This is annoying but not fatal.
*/
object.classId = get_system_catalog_relid(NamespaceRelationName);
object.objectId = namespaceId;


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.86 2002/09/03 18:50:54 petere Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.87 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -243,11 +243,12 @@ DefineSequence(CreateSeqStmt *seq)
{
/*
* Note that the "tuple" structure is still just a local tuple record
* created by heap_formtuple; its t_data pointer doesn't point at the
* disk buffer. To scribble on the disk buffer we need to fetch the
* item pointer. But do the same to the local tuple, since that will
* be the source for the WAL log record, below.
* Note that the "tuple" structure is still just a local tuple
* record created by heap_formtuple; its t_data pointer doesn't
* point at the disk buffer. To scribble on the disk buffer we
* need to fetch the item pointer. But do the same to the local
* tuple, since that will be the source for the WAL log record,
* below.
*/
ItemId itemId;
Item item;
@@ -362,8 +363,8 @@ nextval(PG_FUNCTION_ARGS)
* the fetch count to grab SEQ_LOG_VALS more values than we actually
* need to cache. (These will then be usable without logging.)
*
* If this is the first nextval after a checkpoint, we must force
* a new WAL record to be written anyway, else replay starting from the
* If this is the first nextval after a checkpoint, we must force a new
* WAL record to be written anyway, else replay starting from the
* checkpoint would fail to advance the sequence past the logged
* values. In this case we may as well fetch extra values.
*/
@@ -402,6 +403,7 @@ nextval(PG_FUNCTION_ARGS)
if (!seq->is_cycled)
{
char buf[100];
snprintf(buf, 100, INT64_FORMAT, maxv);
elog(ERROR, "%s.nextval: reached MAXVALUE (%s)",
sequence->relname, buf);
@@ -422,6 +424,7 @@ nextval(PG_FUNCTION_ARGS)
if (!seq->is_cycled)
{
char buf[100];
snprintf(buf, 100, INT64_FORMAT, minv);
elog(ERROR, "%s.nextval: reached MINVALUE (%s)",
sequence->relname, buf);
@@ -560,7 +563,10 @@ do_setval(RangeVar *sequence, int64 next, bool iscalled)
if ((next < seq->min_value) || (next > seq->max_value))
{
char bufv[100], bufm[100], bufx[100];
char bufv[100],
bufm[100],
bufx[100];
snprintf(bufv, 100, INT64_FORMAT, next);
snprintf(bufm, 100, INT64_FORMAT, seq->min_value);
snprintf(bufx, 100, INT64_FORMAT, seq->max_value);
@@ -697,8 +703,8 @@ init_sequence(const char *caller, RangeVar *relation,
* Allocate new seqtable entry if we didn't find one.
*
* NOTE: seqtable entries remain in the list for the life of a backend.
* If the sequence itself is deleted then the entry becomes wasted memory,
* but it's small enough that this should not matter.
* If the sequence itself is deleted then the entry becomes wasted
* memory, but it's small enough that this should not matter.
*/
if (elm == NULL)
{
@@ -828,7 +834,9 @@ init_params(CreateSeqStmt *seq, Form_pg_sequence new)
if (new->min_value >= new->max_value)
{
char bufm[100], bufx[100];
char bufm[100],
bufx[100];
snprintf(bufm, 100, INT64_FORMAT, new->min_value);
snprintf(bufx, 100, INT64_FORMAT, new->max_value);
elog(ERROR, "DefineSequence: MINVALUE (%s) must be less than MAXVALUE (%s)",
@@ -847,7 +855,9 @@ init_params(CreateSeqStmt *seq, Form_pg_sequence new)
if (new->last_value < new->min_value)
{
char bufs[100], bufm[100];
char bufs[100],
bufm[100];
snprintf(bufs, 100, INT64_FORMAT, new->last_value);
snprintf(bufm, 100, INT64_FORMAT, new->min_value);
elog(ERROR, "DefineSequence: START value (%s) can't be less than MINVALUE (%s)",
@@ -855,7 +865,9 @@ init_params(CreateSeqStmt *seq, Form_pg_sequence new)
}
if (new->last_value > new->max_value)
{
char bufs[100], bufm[100];
char bufs[100],
bufm[100];
snprintf(bufs, 100, INT64_FORMAT, new->last_value);
snprintf(bufm, 100, INT64_FORMAT, new->max_value);
elog(ERROR, "DefineSequence: START value (%s) can't be greater than MAXVALUE (%s)",
@@ -867,6 +879,7 @@ init_params(CreateSeqStmt *seq, Form_pg_sequence new)
else if ((new->cache_value = defGetInt64(cache_value)) <= 0)
{
char buf[100];
snprintf(buf, 100, INT64_FORMAT, new->cache_value);
elog(ERROR, "DefineSequence: CACHE (%s) can't be <= 0",
buf);


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.38 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.39 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -114,9 +114,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
/*
* Look up the namespace in which we are supposed to create the
* relation. Check we have permission to create there.
* Skip check if bootstrapping, since permissions machinery may not
* be working yet.
* relation. Check we have permission to create there. Skip check if
* bootstrapping, since permissions machinery may not be working yet.
*/
namespaceId = RangeVarGetCreationNamespace(stmt->relation);
@@ -182,8 +181,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
/*
* Generate a constraint name. NB: this should match the
* form of names that GenerateConstraintName() may produce
* for names added later. We are assured that there is
* no name conflict, because MergeAttributes() did not pass
* for names added later. We are assured that there is no
* name conflict, because MergeAttributes() did not pass
* back any names of this form.
*/
check[ncheck].ccname = (char *) palloc(NAMEDATALEN);
@@ -242,8 +241,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
* CREATE TABLE.
*
* Another task that's conveniently done at this step is to add
* dependency links between columns and supporting relations (such
* as SERIAL sequences).
* dependency links between columns and supporting relations (such as
* SERIAL sequences).
*
* First, scan schema to find new column defaults.
*/
@@ -366,8 +365,7 @@ TruncateRelation(const RangeVar *relation)
aclcheck_error(ACLCHECK_NOT_OWNER, RelationGetRelationName(rel));
/*
* Don't allow truncate on tables which are referenced
* by foreign keys
* Don't allow truncate on tables which are referenced by foreign keys
*/
fkeyRel = heap_openr(ConstraintRelationName, AccessShareLock);
@@ -380,8 +378,8 @@ TruncateRelation(const RangeVar *relation)
SnapshotNow, 1, &key);
/*
* First foreign key found with us as the reference
* should throw an error.
* First foreign key found with us as the reference should throw an
* error.
*/
while (HeapTupleIsValid(tuple = systable_getnext(fkeyScan)))
{
@@ -554,7 +552,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/*
* newattno[] will contain the child-table attribute numbers for
* the attributes of this parent table. (They are not the same
* for parents after the first one, nor if we have dropped columns.)
* for parents after the first one, nor if we have dropped
* columns.)
*/
newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
@@ -573,8 +572,9 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (attribute->attisdropped)
{
/*
* change_varattnos_of_a_node asserts that this is greater than
* zero, so if anything tries to use it, we should find out.
* change_varattnos_of_a_node asserts that this is greater
* than zero, so if anything tries to use it, we should
* find out.
*/
newattno[parent_attno - 1] = 0;
continue;
@@ -684,6 +684,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
Node *expr;
cdef->contype = CONSTR_CHECK;
/*
* Do not inherit generated constraint names, since they
* might conflict across multiple inheritance parents.
@@ -857,8 +858,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
return;
/*
* Store INHERITS information in pg_inherits using direct ancestors only.
* Also enter dependencies on the direct ancestors.
* Store INHERITS information in pg_inherits using direct ancestors
* only. Also enter dependencies on the direct ancestors.
*/
relation = heap_openr(InheritsRelationName, RowExclusiveLock);
desc = RelationGetDescr(relation);
@@ -1141,8 +1142,8 @@ renameatt(Oid myrelid,
oldattname);
/*
* if the attribute is inherited, forbid the renaming, unless we
* are already inside a recursive rename.
* if the attribute is inherited, forbid the renaming, unless we are
* already inside a recursive rename.
*/
if (attform->attisinherited && !recursing)
elog(ERROR, "renameatt: inherited attribute \"%s\" may not be renamed",
@@ -1233,7 +1234,8 @@ renameatt(Oid myrelid,
true, false);
}
relation_close(targetrelation, NoLock); /* close rel but keep lock! */
relation_close(targetrelation, NoLock); /* close rel but keep
* lock! */
}
/*
@@ -1678,8 +1680,9 @@ AlterTableAddColumn(Oid myrelid,
RelationGetRelationName(rel));
/*
* this test is deliberately not attisdropped-aware, since if one tries
* to add a column matching a dropped column name, it's gonna fail anyway.
* this test is deliberately not attisdropped-aware, since if one
* tries to add a column matching a dropped column name, it's gonna
* fail anyway.
*/
if (SearchSysCacheExists(ATTNAME,
ObjectIdGetDatum(myrelid),
@@ -1891,8 +1894,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (indexStruct->indisprimary)
{
/*
* Loop over each attribute in the primary key and
* see if it matches the to-be-altered attribute
* Loop over each attribute in the primary key and see if it
* matches the to-be-altered attribute
*/
for (i = 0; i < INDEX_MAX_KEYS &&
indexStruct->indkey[i] != InvalidAttrNumber; i++)
@@ -2000,8 +2003,8 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
colName);
/*
* Perform a scan to ensure that there are no NULL
* values already in the relation
* Perform a scan to ensure that there are no NULL values already in
* the relation
*/
tupdesc = RelationGetDescr(rel);
@@ -2263,6 +2266,7 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
if (attrtuple->attnum < 0)
elog(ERROR, "ALTER TABLE: cannot change system attribute \"%s\"",
colName);
/*
* Now change the appropriate field
*/
@@ -2336,8 +2340,9 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName);
/*
* Make sure there will be at least one user column left in the relation
* after we drop this one. Zero-length tuples tend to confuse us.
* Make sure there will be at least one user column left in the
* relation after we drop this one. Zero-length tuples tend to
* confuse us.
*/
tupleDesc = RelationGetDescr(rel);
@@ -2363,8 +2368,8 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName);
/*
* If we are asked to drop ONLY in this table (no recursion),
* we need to mark the inheritors' attribute as non-inherited.
* If we are asked to drop ONLY in this table (no recursion), we need
* to mark the inheritors' attribute as non-inherited.
*/
if (!recurse && !recursing)
{
@@ -2495,8 +2500,8 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
foreach(listptr, newConstraints)
{
/*
* copy is because we may destructively alter the node below
* by inserting a generated name; this name is not necessarily
* copy is because we may destructively alter the node below by
* inserting a generated name; this name is not necessarily
* correct for children or parents.
*/
Node *newConstraint = copyObject(lfirst(listptr));
@@ -2672,8 +2677,8 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
* Grab an exclusive lock on the pk table, so that
* someone doesn't delete rows out from under us.
* (Although a lesser lock would do for that purpose,
* we'll need exclusive lock anyway to add triggers
* to the pk table; trying to start with a lesser lock
* we'll need exclusive lock anyway to add triggers to
* the pk table; trying to start with a lesser lock
* will just create a risk of deadlock.)
*/
pkrel = heap_openrv(fkconstraint->pktable,
@@ -2716,12 +2721,14 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
fkconstraint);
/*
* Create the triggers that will enforce the constraint.
* Create the triggers that will enforce the
* constraint.
*/
createForeignKeyTriggers(rel, fkconstraint, constrOid);
/*
* Close pk table, but keep lock until we've committed.
* Close pk table, but keep lock until we've
* committed.
*/
heap_close(pkrel, NoLock);
@@ -2754,10 +2761,9 @@ validateForeignKeyConstraint(FkConstraint *fkconstraint,
int count;
/*
* Scan through each tuple, calling RI_FKey_check_ins
* (insert trigger) as if that tuple had just been
* inserted. If any of those fail, it should
* elog(ERROR) and that's that.
* Scan through each tuple, calling RI_FKey_check_ins (insert trigger)
* as if that tuple had just been inserted. If any of those fail, it
* should elog(ERROR) and that's that.
*/
MemSet(&trig, 0, sizeof(trig));
trig.tgoid = InvalidOid;
@@ -2910,7 +2916,8 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
constrobj;
/*
* Reconstruct a RangeVar for my relation (not passed in, unfortunately).
* Reconstruct a RangeVar for my relation (not passed in,
* unfortunately).
*/
myRel = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)),
RelationGetRelationName(rel));
@@ -2983,8 +2990,8 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
CommandCounterIncrement();
/*
* Build and execute a CREATE CONSTRAINT TRIGGER statement for the
* ON DELETE action on the referenced table.
* Build and execute a CREATE CONSTRAINT TRIGGER statement for the ON
* DELETE action on the referenced table.
*/
fk_trigger = makeNode(CreateTrigStmt);
fk_trigger->trigname = fkconstraint->constr_name;
@@ -3054,8 +3061,8 @@ createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
CommandCounterIncrement();
/*
* Build and execute a CREATE CONSTRAINT TRIGGER statement for the
* ON UPDATE action on the referenced table.
* Build and execute a CREATE CONSTRAINT TRIGGER statement for the ON
* UPDATE action on the referenced table.
*/
fk_trigger = makeNode(CreateTrigStmt);
fk_trigger->trigname = fkconstraint->constr_name;
@@ -3250,8 +3257,8 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
CheckTupleType(tuple_class);
/*
* Okay, this is a valid tuple: change its ownership and
* write to the heap.
* Okay, this is a valid tuple: change its ownership and write to the
* heap.
*/
tuple_class->relowner = newOwnerSysId;
simple_heap_update(class_rel, &tuple->t_self, tuple);
@@ -3267,16 +3274,15 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
if (tuple_class->relkind == RELKIND_RELATION ||
tuple_class->relkind == RELKIND_TOASTVALUE)
{
List *index_oid_list, *i;
List *index_oid_list,
*i;
/* Find all the indexes belonging to this relation */
index_oid_list = RelationGetIndexList(target_rel);
/* For each index, recursively change its ownership */
foreach(i, index_oid_list)
{
AlterTableOwner(lfirsti(i), newOwnerSysId);
}
freeList(index_oid_list);
}
@@ -3285,10 +3291,8 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
{
/* If it has a toast table, recurse to change its ownership */
if (tuple_class->reltoastrelid != InvalidOid)
{
AlterTableOwner(tuple_class->reltoastrelid, newOwnerSysId);
}
}
heap_freetuple(tuple);
heap_close(class_rel, RowExclusiveLock);
@@ -3453,10 +3457,11 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
tupdesc->attrs[2]->attstorage = 'p';
/*
* Note: the toast relation is placed in the regular pg_toast namespace
* even if its master relation is a temp table. There cannot be any
* naming collision, and the toast rel will be destroyed when its master
* is, so there's no need to handle the toast rel as temp.
* Note: the toast relation is placed in the regular pg_toast
* namespace even if its master relation is a temp table. There
* cannot be any naming collision, and the toast rel will be destroyed
* when its master is, so there's no need to handle the toast rel as
* temp.
*/
toast_relid = heap_create_with_catalog(toast_relname,
PG_TOAST_NAMESPACE,
@@ -3471,8 +3476,8 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
/*
* Create unique index on chunk_id, chunk_seq.
*
* NOTE: the normal TOAST access routines could actually function with
* a single-column index on chunk_id only. However, the slice access
* NOTE: the normal TOAST access routines could actually function with a
* single-column index on chunk_id only. However, the slice access
* routines use both columns for faster access to an individual chunk.
* In addition, we want it to be unique as a check against the
* possibility of duplicate TOAST chunk OIDs. The index might also be
@@ -3516,8 +3521,8 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
heap_freetuple(reltup);
/*
* Register dependency from the toast table to the master, so that
* the toast table will be deleted if the master is.
* Register dependency from the toast table to the master, so that the
* toast table will be deleted if the master is.
*/
baseobject.classId = RelOid_pg_class;
baseobject.objectId = relOid;


@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.130 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.131 2002/09/04 20:31:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -132,9 +132,9 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
/*
* If trigger is an RI constraint, use specified trigger name as
* constraint name and build a unique trigger name instead.
* This is mainly for backwards compatibility with CREATE CONSTRAINT
* TRIGGER commands.
* constraint name and build a unique trigger name instead. This is
* mainly for backwards compatibility with CREATE CONSTRAINT TRIGGER
* commands.
*/
if (stmt->isconstraint)
{
@@ -183,10 +183,10 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
}
/*
* Scan pg_trigger for existing triggers on relation. We do this mainly
* because we must count them; a secondary benefit is to give a nice
* error message if there's already a trigger of the same name. (The
* unique index on tgrelid/tgname would complain anyway.)
* Scan pg_trigger for existing triggers on relation. We do this
* mainly because we must count them; a secondary benefit is to give a
* nice error message if there's already a trigger of the same name.
* (The unique index on tgrelid/tgname would complain anyway.)
*
* NOTE that this is cool only because we have AccessExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
@@ -354,8 +354,9 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
* CREATE TRIGGER command, also make trigger be auto-dropped if its
* relation is dropped or if the FK relation is dropped. (Auto drop
* is compatible with our pre-7.3 behavior.) If the trigger is being
* made for a constraint, we can skip the relation links; the dependency
* on the constraint will indirectly depend on the relations.
* made for a constraint, we can skip the relation links; the
* dependency on the constraint will indirectly depend on the
* relations.
*/
referenced.classId = RelOid_pg_proc;
referenced.objectId = funcoid;
@@ -495,8 +496,8 @@ RemoveTriggerById(Oid trigOid)
* rebuild relcache entries.
*
* Note this is OK only because we have AccessExclusiveLock on the rel,
* so no one else is creating/deleting triggers on this rel at the same
* time.
* so no one else is creating/deleting triggers on this rel at the
* same time.
*/
pgrel = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -555,10 +556,10 @@ renametrig(Oid relid,
targetrel = heap_open(relid, AccessExclusiveLock);
/*
* Scan pg_trigger twice for existing triggers on relation. We do this in
* order to ensure a trigger does not exist with newname (The unique index
* on tgrelid/tgname would complain anyway) and to ensure a trigger does
* exist with oldname.
* Scan pg_trigger twice for existing triggers on relation. We do
* this in order to ensure a trigger does not exist with newname (The
* unique index on tgrelid/tgname would complain anyway) and to ensure
* a trigger does exist with oldname.
*
* NOTE that this is cool only because we have AccessExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
@@ -611,9 +612,10 @@ renametrig(Oid relid,
CatalogUpdateIndexes(tgrel, tuple);
/*
* Invalidate relation's relcache entry so that other backends (and
* this one too!) are sent SI message to make them rebuild relcache
* entries. (Ideally this should happen automatically...)
* Invalidate relation's relcache entry so that other backends
* (and this one too!) are sent SI message to make them rebuild
* relcache entries. (Ideally this should happen
* automatically...)
*/
CacheInvalidateRelcache(relid);
}
@@ -656,10 +658,10 @@ RelationBuildTriggers(Relation relation)
ntrigs * sizeof(Trigger));
/*
* Note: since we scan the triggers using TriggerRelidNameIndex,
* we will be reading the triggers in name order, except possibly
* during emergency-recovery operations (ie, IsIgnoringSystemIndexes).
* This in turn ensures that triggers will be fired in name order.
* Note: since we scan the triggers using TriggerRelidNameIndex, we
* will be reading the triggers in name order, except possibly during
* emergency-recovery operations (ie, IsIgnoringSystemIndexes). This
* in turn ensures that triggers will be fired in name order.
*/
ScanKeyEntryInitialize(&skey,
(bits16) 0x0,
@@ -1528,17 +1530,17 @@ deferredTriggerInvokeEvents(bool immediate_only)
/*
* If immediate_only is true, we remove fully-processed events from
* the event queue to recycle space. If immediate_only is false,
* we are going to discard the whole event queue on return anyway,
* so no need to bother with "retail" pfree's.
* the event queue to recycle space. If immediate_only is false, we
* are going to discard the whole event queue on return anyway, so no
* need to bother with "retail" pfree's.
*
* In a scenario with many commands in a transaction and many
* deferred-to-end-of-transaction triggers, it could get annoying
* to rescan all the deferred triggers at each command end.
* To speed this up, we could remember the actual end of the queue at
* EndQuery and examine only events that are newer. On state changes
* we simply reset the saved position to the beginning of the queue
* and process all events once with the new states.
* deferred-to-end-of-transaction triggers, it could get annoying to
* rescan all the deferred triggers at each command end. To speed this
* up, we could remember the actual end of the queue at EndQuery and
* examine only events that are newer. On state changes we simply
* reset the saved position to the beginning of the queue and process
* all events once with the new states.
*/
/* Make a per-tuple memory context for trigger function calls */
@@ -1585,8 +1587,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
}
/*
* So let's fire it... but first, open the correct relation
* if this is not the same relation as before.
* So let's fire it... but first, open the correct
* relation if this is not the same relation as before.
*/
if (rel == NULL || rel->rd_id != event->dte_relid)
{
@@ -1596,14 +1598,14 @@ deferredTriggerInvokeEvents(bool immediate_only)
pfree(finfo);
/*
* We assume that an appropriate lock is still held by the
* executor, so grab no new lock here.
* We assume that an appropriate lock is still held by
* the executor, so grab no new lock here.
*/
rel = heap_open(event->dte_relid, NoLock);
/*
* Allocate space to cache fmgr lookup info for triggers
* of this relation.
* Allocate space to cache fmgr lookup info for
* triggers of this relation.
*/
finfo = (FmgrInfo *)
palloc(rel->trigdesc->numtriggers * sizeof(FmgrInfo));
@@ -1622,8 +1624,8 @@ deferredTriggerInvokeEvents(bool immediate_only)
* If it's now completely done, throw it away.
*
* NB: it's possible the trigger calls above added more events to the
* queue, or that calls we will do later will want to add more,
* so we have to be careful about maintaining list validity here.
* queue, or that calls we will do later will want to add more, so
* we have to be careful about maintaining list validity here.
*/
next_event = event->dte_next;
@@ -1724,6 +1726,7 @@ DeferredTriggerBeginXact(void)
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
deftrig_all_isset = false;
/*
* If unspecified, constraints default to IMMEDIATE, per SQL
*/
@@ -1827,8 +1830,8 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
/*
* If called outside a transaction block, we can safely return: this
* command cannot effect any subsequent transactions, and there
* are no "session-level" trigger settings.
* command cannot effect any subsequent transactions, and there are no
* "session-level" trigger settings.
*/
if (!IsTransactionBlock())
return;
@@ -1910,9 +1913,9 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
Oid constr_oid;
/*
* If we found some, check that they fit the deferrability but
* skip ON <event> RESTRICT ones, since they are silently
* never deferrable.
* If we found some, check that they fit the deferrability
* but skip ON <event> RESTRICT ones, since they are
* silently never deferrable.
*/
if (stmt->deferred && !pg_trigger->tgdeferrable &&
pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
@@ -1971,11 +1974,11 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
/*
* SQL99 requires that when a constraint is set to IMMEDIATE, any
* deferred checks against that constraint must be made when the
* SET CONSTRAINTS command is executed -- i.e. the effects of the
* SET CONSTRAINTS command applies retroactively. This happens "for
* free" since we have already made the necessary modifications to
* the constraints, and deferredTriggerEndQuery() is called by
* deferred checks against that constraint must be made when the SET
* CONSTRAINTS command is executed -- i.e. the effects of the SET
* CONSTRAINTS command applies retroactively. This happens "for free"
* since we have already made the necessary modifications to the
* constraints, and deferredTriggerEndQuery() is called by
* finish_xact_command().
*/
}
@@ -2062,6 +2065,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event,
break;
case TRIGGER_EVENT_UPDATE:
/*
* Check if one of the referenced keys is changed.
*/


@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.12 2002/08/29 00:17:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.13 2002/09/04 20:31:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -203,8 +203,9 @@ DefineType(List *names, List *parameters)
outputOid = findTypeIOFunction(outputName, typoid, true);
/*
* Verify that I/O procs return the expected thing. OPAQUE is an allowed,
* but deprecated, alternative to the fully type-safe choices.
* Verify that I/O procs return the expected thing. OPAQUE is an
* allowed, but deprecated, alternative to the fully type-safe
* choices.
*/
resulttype = get_func_rettype(inputOid);
if (!(OidIsValid(typoid) && resulttype == typoid))
@@ -421,9 +422,10 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid = HeapTupleGetOid(typeTup);
/*
* Base type must be a plain base type. Domains over pseudo types would
* create a security hole. Domains of domains might be made to work in
* the future, but not today. Ditto for domains over complex types.
* Base type must be a plain base type. Domains over pseudo types
* would create a security hole. Domains of domains might be made to
* work in the future, but not today. Ditto for domains over complex
* types.
*/
typtype = baseType->typtype;
if (typtype != 'b')
@@ -469,11 +471,11 @@ DefineDomain(CreateDomainStmt *stmt)
basetypelem = baseType->typelem;
/*
* Run through constraints manually to avoid the additional
* processing conducted by DefineRelation() and friends.
* Run through constraints manually to avoid the additional processing
* conducted by DefineRelation() and friends.
*
* Besides, we don't want any constraints to be cooked. We'll
* do that when the table is created via MergeDomainAttributes().
* Besides, we don't want any constraints to be cooked. We'll do that
* when the table is created via MergeDomainAttributes().
*/
foreach(listptr, schema)
{
@@ -483,29 +485,31 @@ DefineDomain(CreateDomainStmt *stmt)
switch (colDef->contype)
{
/*
* The inherited default value may be overridden by the user
* with the DEFAULT <expr> statement.
* The inherited default value may be overridden by the
* user with the DEFAULT <expr> statement.
*
* We have to search the entire constraint tree returned as we
* don't want to cook or fiddle too much.
* We have to search the entire constraint tree returned as
* we don't want to cook or fiddle too much.
*/
case CONSTR_DEFAULT:
if (defaultExpr)
elog(ERROR, "CREATE DOMAIN has multiple DEFAULT expressions");
/* Create a dummy ParseState for transformExpr */
pstate = make_parsestate(NULL);
/*
* Cook the colDef->raw_expr into an expression.
* Note: Name is strictly for error message
* Cook the colDef->raw_expr into an expression. Note:
* Name is strictly for error message
*/
defaultExpr = cookDefault(pstate, colDef->raw_expr,
basetypeoid,
stmt->typename->typmod,
domainName);
/*
* Expression must be stored as a nodeToString result,
* but we also require a valid textual representation
* (mainly to make life easier for pg_dump).
* Expression must be stored as a nodeToString result, but
* we also require a valid textual representation (mainly
* to make life easier for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
deparse_context_for(domainName,
@@ -678,10 +682,10 @@ findTypeIOFunction(List *procname, Oid typeOid, bool isOutput)
if (isOutput)
{
/*
* Output functions can take a single argument of the type,
* or two arguments (data value, element OID). The signature
* may use OPAQUE in place of the actual type name; this is the
* only possibility if the type doesn't yet exist as a shell.
* Output functions can take a single argument of the type, or two
* arguments (data value, element OID). The signature may use
* OPAQUE in place of the actual type name; this is the only
* possibility if the type doesn't yet exist as a shell.
*
* Note: although we could throw a NOTICE in this routine if OPAQUE
* is used, we do not because of the probability that it'd be
@@ -728,8 +732,8 @@ findTypeIOFunction(List *procname, Oid typeOid, bool isOutput)
else
{
/*
* Input functions can take a single argument of type CSTRING,
* or three arguments (string, element OID, typmod). The signature
* Input functions can take a single argument of type CSTRING, or
* three arguments (string, element OID, typmod). The signature
* may use OPAQUE in place of CSTRING.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));


@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.110 2002/09/02 02:47:01 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.111 2002/09/04 20:31:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,8 @@ static List *IdArrayToList(IdList *oldarray);
* Outputs string in quotes, with double-quotes duplicated.
* We could use quote_ident(), but that expects a TEXT argument.
*/
static void fputs_quote(char *str, FILE *fp)
static void
fputs_quote(char *str, FILE *fp)
{
fputc('"', fp);
while (*str)
@@ -125,8 +126,8 @@ write_group_file(Relation urel, Relation grel)
/*
* Create a temporary filename to be renamed later. This prevents the
* backend from clobbering the pg_group file while the postmaster might
* be reading from it.
* backend from clobbering the pg_group file while the postmaster
* might be reading from it.
*/
filename = group_getfilename();
bufsize = strlen(filename) + 12;
@@ -143,12 +144,14 @@ write_group_file(Relation urel, Relation grel)
scan = heap_beginscan(grel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Datum datum, grolist_datum;
Datum datum,
grolist_datum;
bool isnull;
char *groname;
IdList *grolist_p;
AclId *aidp;
int i, j,
int i,
j,
num;
char *usename;
bool first_user = true;
@@ -199,8 +202,8 @@ write_group_file(Relation urel, Relation grel)
continue;
}
/* File format is:
* "dbname" "user1" "user2" "user3"
/*
* File format is: "dbname" "user1" "user2" "user3"
*/
if (first_user)
{
@@ -833,8 +836,8 @@ AlterUserSet(AlterUserSetStmt *stmt)
valuestr = flatten_set_variable_args(stmt->variable, stmt->value);
/*
* RowExclusiveLock is sufficient, because we don't need to update
* the flat password file.
* RowExclusiveLock is sufficient, because we don't need to update the
* flat password file.
*/
rel = heap_openr(ShadowRelationName, RowExclusiveLock);
oldtuple = SearchSysCache(SHADOWNAME,
@@ -1253,8 +1256,8 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
* create user */
{
/*
* convert the to be added usernames to sysids and add them to
* the list
* convert the to be added usernames to sysids and add them to the
* list
*/
foreach(item, stmt->listUsers)
{
@@ -1282,6 +1285,7 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
if (!intMember(sysid, newlist))
newlist = lappendi(newlist, sysid);
else
/*
* we silently assume here that this error will only come
* up in a ALTER GROUP statement
@@ -1306,8 +1310,8 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
else
{
/*
* convert the to be dropped usernames to sysids and
* remove them from the list
* convert the to be dropped usernames to sysids and remove
* them from the list
*/
foreach(item, stmt->listUsers)
{
@@ -1404,9 +1408,7 @@ IdListToArray(List *members)
ARR_DIMS(newarray)[0] = nmembers; /* axis is this long */
i = 0;
foreach(item, members)
{
((int *) ARR_DATA_PTR(newarray))[i++] = lfirsti(item);
}
return newarray;
}


@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.236 2002/09/02 01:05:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.237 2002/09/04 20:31:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -204,8 +204,9 @@ vacuum(VacuumStmt *vacstmt)
ALLOCSET_DEFAULT_MAXSIZE);
/*
* If we are running only ANALYZE, we don't need per-table transactions,
* but we still need a memory context with table lifetime.
* If we are running only ANALYZE, we don't need per-table
* transactions, but we still need a memory context with table
* lifetime.
*/
if (vacstmt->analyze && !vacstmt->vacuum)
anl_context = AllocSetContextCreate(QueryContext,
@@ -221,29 +222,29 @@ vacuum(VacuumStmt *vacstmt)
* Formerly, there was code here to prevent more than one VACUUM from
* executing concurrently in the same database. However, there's no
* good reason to prevent that, and manually removing lockfiles after
* a vacuum crash was a pain for dbadmins. So, forget about lockfiles,
* and just rely on the locks we grab on each target table
* a vacuum crash was a pain for dbadmins. So, forget about
* lockfiles, and just rely on the locks we grab on each target table
* to ensure that there aren't two VACUUMs running on the same table
* at the same time.
*/
/*
* The strangeness with committing and starting transactions here is due
* to wanting to run each table's VACUUM as a separate transaction, so
* that we don't hold locks unnecessarily long. Also, if we are doing
* VACUUM ANALYZE, the ANALYZE part runs as a separate transaction from
* the VACUUM to further reduce locking.
* The strangeness with committing and starting transactions here is
* due to wanting to run each table's VACUUM as a separate
* transaction, so that we don't hold locks unnecessarily long. Also,
* if we are doing VACUUM ANALYZE, the ANALYZE part runs as a separate
* transaction from the VACUUM to further reduce locking.
*
* vacuum_rel expects to be entered with no transaction active; it will
* start and commit its own transaction. But we are called by an SQL
* command, and so we are executing inside a transaction already. We
* commit the transaction started in PostgresMain() here, and start
* another one before exiting to match the commit waiting for us back in
* PostgresMain().
* another one before exiting to match the commit waiting for us back
* in PostgresMain().
*
* In the case of an ANALYZE statement (no vacuum, just analyze) it's
* okay to run the whole thing in the outer transaction, and so we skip
* transaction start/stop operations.
* okay to run the whole thing in the outer transaction, and so we
* skip transaction start/stop operations.
*/
if (vacstmt->vacuum)
{
@@ -254,19 +255,20 @@ vacuum(VacuumStmt *vacstmt)
*
* Compute the initially applicable OldestXmin and FreezeLimit
* XIDs, so that we can record these values at the end of the
* VACUUM. Note that individual tables may well be processed with
* newer values, but we can guarantee that no (non-shared)
* relations are processed with older ones.
* VACUUM. Note that individual tables may well be processed
* with newer values, but we can guarantee that no
* (non-shared) relations are processed with older ones.
*
* It is okay to record non-shared values in pg_database, even though
* we may vacuum shared relations with older cutoffs, because only
* the minimum of the values present in pg_database matters. We
* can be sure that shared relations have at some time been
* vacuumed with cutoffs no worse than the global minimum; for, if
* there is a backend in some other DB with xmin = OLDXMIN that's
* determining the cutoff with which we vacuum shared relations,
* it is not possible for that database to have a cutoff newer
* than OLDXMIN recorded in pg_database.
* It is okay to record non-shared values in pg_database, even
* though we may vacuum shared relations with older cutoffs,
* because only the minimum of the values present in
* pg_database matters. We can be sure that shared relations
* have at some time been vacuumed with cutoffs no worse than
* the global minimum; for, if there is a backend in some
* other DB with xmin = OLDXMIN that's determining the cutoff
* with which we vacuum shared relations, it is not possible
* for that database to have a cutoff newer than OLDXMIN
* recorded in pg_database.
*/
vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin, &initialFreezeLimit);
@@ -290,10 +292,11 @@ vacuum(VacuumStmt *vacstmt)
MemoryContext old_context = NULL;
/*
* If we vacuumed, use new transaction for analyze. Otherwise,
* we can use the outer transaction, but we still need to call
* analyze_rel in a memory context that will be cleaned up on
* return (else we leak memory while processing multiple tables).
* If we vacuumed, use new transaction for analyze.
* Otherwise, we can use the outer transaction, but we still
* need to call analyze_rel in a memory context that will be
* cleaned up on return (else we leak memory while processing
* multiple tables).
*/
if (vacstmt->vacuum)
StartTransactionCommand(true);
@@ -320,16 +323,17 @@ vacuum(VacuumStmt *vacstmt)
/* here, we are not in a transaction */
/*
* This matches the CommitTransaction waiting for us in PostgresMain().
* We tell xact.c not to chain the upcoming commit, so that a VACUUM
* doesn't start a transaction block, even when autocommit is off.
* This matches the CommitTransaction waiting for us in
* PostgresMain(). We tell xact.c not to chain the upcoming
* commit, so that a VACUUM doesn't start a transaction block,
* even when autocommit is off.
*/
StartTransactionCommand(true);
/*
* If we did a database-wide VACUUM, update the database's pg_database
* row with info about the transaction IDs used, and try to truncate
* pg_clog.
* If we did a database-wide VACUUM, update the database's
* pg_database row with info about the transaction IDs used, and
* try to truncate pg_clog.
*/
if (vacstmt->relation == NULL)
{
@@ -517,9 +521,9 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
/*
* Invalidate the tuple in the catcaches; this also arranges to flush
* the relation's relcache entry. (If we fail to commit for some reason,
* no flush will occur, but no great harm is done since there are no
* noncritical state updates here.)
* the relation's relcache entry. (If we fail to commit for some
* reason, no flush will occur, but no great harm is done since there
* are no noncritical state updates here.)
*/
CacheInvalidateHeapTuple(rd, &rtup);
@@ -647,8 +651,8 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
heap_close(relation, AccessShareLock);
/*
* Do not truncate CLOG if we seem to have suffered wraparound already;
* the computed minimum XID might be bogus.
* Do not truncate CLOG if we seem to have suffered wraparound
* already; the computed minimum XID might be bogus.
*/
if (vacuumAlreadyWrapped)
{
@@ -740,7 +744,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
*
* We allow the user to vacuum a table if he is superuser, the table
* owner, or the database owner (but in the latter case, only if it's
* not a shared relation). pg_class_ownercheck includes the superuser case.
* not a shared relation). pg_class_ownercheck includes the superuser
* case.
*
* Note we choose to treat permissions failure as a WARNING and keep
* trying to vacuum the rest of the DB --- is this appropriate?
@@ -1581,17 +1586,19 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* by "recent" transactions then we have to move all chain of
* tuples to another places.
*
* NOTE: this test is not 100% accurate: it is possible for
* a tuple to be an updated one with recent xmin, and yet not
* NOTE: this test is not 100% accurate: it is possible for a
* tuple to be an updated one with recent xmin, and yet not
* have a corresponding tuple in the vtlinks list. Presumably
* there was once a parent tuple with xmax matching the xmin,
* but it's possible that that tuple has been removed --- for
* example, if it had xmin = xmax then HeapTupleSatisfiesVacuum
* would deem it removable as soon as the xmin xact completes.
* example, if it had xmin = xmax then
* HeapTupleSatisfiesVacuum would deem it removable as soon as
* the xmin xact completes.
*
* To be on the safe side, we abandon the repair_frag process if
* we cannot find the parent tuple in vtlinks. This may be overly
* conservative; AFAICS it would be safe to move the chain.
* we cannot find the parent tuple in vtlinks. This may be
* overly conservative; AFAICS it would be safe to move the
* chain.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
@@ -1768,17 +1775,16 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
&(Ptp.t_data->t_ctid)));
/*
* Read above about cases when
* !ItemIdIsUsed(Citemid) (child item is
* removed)... Due to the fact that at the moment
* we don't remove unuseful part of update-chain,
* it's possible to get too old parent row here.
* Like as in the case which caused this problem,
* we stop shrinking here. I could try to find
* real parent row but want not to do it because
* of real solution will be implemented anyway,
* later, and we are too close to 6.5 release. -
* vadim 06/11/99
* Read above about cases when !ItemIdIsUsed(Citemid)
* (child item is removed)... Due to the fact that at
* the moment we don't remove unuseful part of
* update-chain, it's possible to get too old parent
* row here. Like as in the case which caused this
* problem, we stop shrinking here. I could try to
* find real parent row but want not to do it because
* of real solution will be implemented anyway, later,
* and we are too close to 6.5 release. - vadim
* 06/11/99
*/
if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data),
HeapTupleHeaderGetXmin(tp.t_data))))
@@ -1804,9 +1810,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (chain_move_failed)
{
/*
* Undo changes to offsets_used state. We don't bother
* cleaning up the amount-free state, since we're not
* going to do any further tuple motion.
* Undo changes to offsets_used state. We don't
* bother cleaning up the amount-free state, since
* we're not going to do any further tuple motion.
*/
for (i = 0; i < num_vtmove; i++)
{
@@ -1939,7 +1945,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
else
{
/* No XLOG record, but still need to flag that XID exists on disk */
/*
* No XLOG record, but still need to flag that XID
* exists on disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -2031,10 +2040,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* register invalidation of source tuple in catcaches.
*
* (Note: we do not need to register the copied tuple,
* because we are not changing the tuple contents and
* so there cannot be any need to flush negative
* catcache entries.)
* (Note: we do not need to register the copied tuple, because we
* are not changing the tuple contents and so there cannot be
* any need to flush negative catcache entries.)
*/
CacheInvalidateHeapTuple(onerel, &tuple);
@@ -2090,7 +2098,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
else
{
/* No XLOG record, but still need to flag that XID exists on disk */
/*
* No XLOG record, but still need to flag that XID exists
* on disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -2116,8 +2127,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} /* walk along page */
/*
* If we broke out of the walk-along-page loop early (ie, still have
* offnum <= maxoff), then we failed to move some tuple off
* If we broke out of the walk-along-page loop early (ie, still
* have offnum <= maxoff), then we failed to move some tuple off
* this page. No point in shrinking any more, so clean up and
* exit the per-page loop.
*/
@@ -2126,7 +2137,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
OffsetNumber off;
/*
* Fix vacpage state for any unvisited tuples remaining on page
* Fix vacpage state for any unvisited tuples remaining on
* page
*/
for (off = OffsetNumberNext(offnum);
off <= maxoff;
@@ -2389,7 +2401,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
else
{
/* No XLOG record, but still need to flag that XID exists on disk */
/*
* No XLOG record, but still need to flag that XID exists
* on disk
*/
MyXactMadeTempRelUpdate = true;
}

View File

@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.18 2002/08/06 02:36:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.19 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.70 2002/07/18 02:02:29 ishii Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.71 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -111,8 +111,8 @@ assign_datestyle(const char *value, bool doit, bool interactive)
* Easiest way to get the current DEFAULT state is to fetch
* the DEFAULT string from guc.c and recursively parse it.
*
* We can't simply "return assign_datestyle(...)" because we
* need to handle constructs like "DEFAULT, ISO".
* We can't simply "return assign_datestyle(...)" because we need
* to handle constructs like "DEFAULT, ISO".
*/
int saveDateStyle = DateStyle;
bool saveEuroDates = EuroDates;
@@ -188,8 +188,8 @@ assign_datestyle(const char *value, bool doit, bool interactive)
strcat(result, newEuroDates ? ", EURO" : ", US");
/*
* Finally, it's safe to assign to the global variables;
* the assignment cannot fail now.
* Finally, it's safe to assign to the global variables; the
* assignment cannot fail now.
*/
DateStyle = newDateStyle;
EuroDates = newEuroDates;
@@ -270,6 +270,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
return NULL;
}
*endptr = '\0';
/*
* Try to parse it. XXX an invalid interval format will result in
* elog, which is not desirable for GUC. We did what we could to
@@ -318,8 +319,8 @@ assign_timezone(const char *value, bool doit, bool interactive)
* available under Solaris, among others. Apparently putenv()
* called as below clears the process-specific environment
* variables. Other reasonable arguments to putenv() (e.g.
* "TZ=", "TZ", "") result in a core dump (under Linux anyway).
* - thomas 1998-01-26
* "TZ=", "TZ", "") result in a core dump (under Linux
* anyway). - thomas 1998-01-26
*/
if (doit)
{
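
The rewrapped comment above is the whole reason the code hands putenv() a complete "TZ=<value>" string held in long-lived storage: the comment reports that shorter forms such as "TZ=" or "" crash on some platforms, and putenv() retains the pointer it is given. A minimal sketch of that pattern; the helper name and the 64-byte buffer are illustrative assumptions, not the actual backend code:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Hypothetical helper: install a TZ setting the way the comment recommends. */
    static int
    set_tz_env(const char *value)
    {
        static char tzbuf[64];      /* must stay valid: putenv() keeps the pointer */

        snprintf(tzbuf, sizeof(tzbuf), "TZ=%s", value);
        if (putenv(tzbuf) != 0)
            return -1;
        tzset();                    /* let the C library re-read the new zone */
        return 0;
    }
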
@@ -339,7 +340,8 @@ assign_timezone(const char *value, bool doit, bool interactive)
* Otherwise assume it is a timezone name.
*
* XXX unfortunately we have no reasonable way to check whether a
* timezone name is good, so we have to just assume that it is.
* timezone name is good, so we have to just assume that it
* is.
*/
if (doit)
{
@@ -372,13 +374,9 @@ assign_timezone(const char *value, bool doit, bool interactive)
(double) CTimeZone / 3600.0);
}
else if (tzbuf[0] == 'T')
{
strcpy(result, tzbuf + 3);
}
else
{
strcpy(result, "UNKNOWN");
}
return result;
}
@@ -422,11 +420,20 @@ assign_XactIsoLevel(const char *value, bool doit, bool interactive)
elog(ERROR, "SET TRANSACTION ISOLATION LEVEL must be called before any query");
if (strcmp(value, "serializable") == 0)
{ if (doit) XactIsoLevel = XACT_SERIALIZABLE; }
{
if (doit)
XactIsoLevel = XACT_SERIALIZABLE;
}
else if (strcmp(value, "read committed") == 0)
{ if (doit) XactIsoLevel = XACT_READ_COMMITTED; }
{
if (doit)
XactIsoLevel = XACT_READ_COMMITTED;
}
else if (strcmp(value, "default") == 0)
{ if (doit) XactIsoLevel = DefaultXactIsoLevel; }
{
if (doit)
XactIsoLevel = DefaultXactIsoLevel;
}
else
return NULL;
@@ -476,10 +483,11 @@ assign_client_encoding(const char *value, bool doit, bool interactive)
if (encoding < 0)
return NULL;
/* XXX SetClientEncoding depends on namespace functions which are
* not available at startup time. So we accept requested client
* encoding anyway which might not be valid (e.g. no conversion
* procs available).
/*
* XXX SetClientEncoding depends on namespace functions which are not
* available at startup time. So we accept requested client encoding
* anyway which might not be valid (e.g. no conversion procs
* available).
*/
if (SetClientEncoding(encoding, doit) < 0)
{

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.70 2002/09/02 20:04:40 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.71 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -128,8 +128,8 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
else
{
/*
* now create the parameters for keys/inheritance etc. All of them are
* nil...
* now create the parameters for keys/inheritance etc. All of them
* are nil...
*/
createStmt->relation = (RangeVar *) relation;
createStmt->tableElts = attrList;
@@ -138,9 +138,9 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
createStmt->hasoids = false;
/*
* finally create the relation (this will error out if there's
* an existing view, so we don't need more code to complain
* if "replace" is false).
* finally create the relation (this will error out if there's an
* existing view, so we don't need more code to complain if
* "replace" is false).
*/
return DefineRelation(createStmt, RELKIND_VIEW);
}
@@ -179,6 +179,7 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
NameStr(oldattr->attname));
/* We can ignore the remaining attributes of an attribute... */
}
/*
* We ignore the constraint fields. The new view desc can't have any
* constraints, and the only ones that could be on the old view are
@@ -316,8 +317,8 @@ DefineView(const RangeVar *view, Query *viewParse, bool replace)
/*
* Create the view relation
*
* NOTE: if it already exists and replace is false, the xact will
* be aborted.
* NOTE: if it already exists and replace is false, the xact will be
* aborted.
*/
viewOid = DefineVirtualRelation(view, viewParse->targetList, replace);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.31 2002/07/20 05:16:57 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.32 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -383,8 +383,8 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
* information for the new "clean" tuple.
*
* Note: we use memory on the stack to optimize things when we are
* dealing with a small number of attributes. for large tuples we
* just use palloc.
* dealing with a small number of attributes. for large tuples we just
* use palloc.
*/
if (cleanLength > 64)
{

View File

@@ -27,7 +27,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.177 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.178 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -116,9 +116,9 @@ ExecutorStart(QueryDesc *queryDesc, EState *estate)
/*
* Make our own private copy of the current query snapshot data.
*
* This "freezes" our idea of which tuples are good and which are not
* for the life of this query, even if it outlives the current command
* and current snapshot.
* This "freezes" our idea of which tuples are good and which are not for
* the life of this query, even if it outlives the current command and
* current snapshot.
*/
estate->es_snapshot = CopyQuerySnapshot();
@@ -355,9 +355,10 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
/*
* Only plain-relation RTEs need to be checked here. Subquery RTEs
* will be checked when ExecCheckPlanPerms finds the SubqueryScan node,
* and function RTEs are checked by init_fcache when the function is
* prepared for execution. Join and special RTEs need no checks.
* will be checked when ExecCheckPlanPerms finds the SubqueryScan
* node, and function RTEs are checked by init_fcache when the
* function is prepared for execution. Join and special RTEs need no
* checks.
*/
if (rte->rtekind != RTE_RELATION)
return;
@@ -1071,7 +1072,8 @@ lnext: ;
slot = ExecStoreTuple(newTuple, /* tuple to store */
junkfilter->jf_resultSlot, /* dest slot */
InvalidBuffer, /* this tuple has no buffer */
InvalidBuffer, /* this tuple has no
* buffer */
true); /* tuple should be pfreed */
}
@@ -1084,7 +1086,8 @@ lnext: ;
{
case CMD_SELECT:
ExecSelect(slot, /* slot containing tuple */
destfunc, /* destination's tuple-receiver obj */
destfunc, /* destination's tuple-receiver
* obj */
estate);
result = slot;
break;

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.107 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.108 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -877,7 +877,8 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
argList = funcexpr->args;
/*
* get the fcache from the Func node. If it is NULL, then initialize it
* get the fcache from the Func node. If it is NULL, then initialize
* it
*/
fcache = func->func_fcache;
if (fcache == NULL)
@@ -921,8 +922,9 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
}
/*
* Prepare a resultinfo node for communication. We always do this even
* if not expecting a set result, so that we can pass expectedDesc.
* Prepare a resultinfo node for communication. We always do this
* even if not expecting a set result, so that we can pass
* expectedDesc.
*/
fcinfo.resultinfo = (Node *) &rsinfo;
rsinfo.type = T_ReturnSetInfo;
@@ -948,8 +950,9 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
HeapTuple tuple;
/*
* reset per-tuple memory context before each call of the function.
* This cleans up any local memory the function may leak when called.
* reset per-tuple memory context before each call of the
* function. This cleans up any local memory the function may leak
* when called.
*/
ResetExprContext(econtext);
@@ -970,8 +973,10 @@ ExecMakeTableFunctionResult(Expr *funcexpr,
*/
if (rsinfo.isDone == ExprEndResult)
break;
/*
* If first time through, build tupdesc and tuplestore for result
* If first time through, build tupdesc and tuplestore for
* result
*/
if (first_time)
{
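
Taken together, the hunks above describe the caller side of running a table function: hand it a ReturnSetInfo so expectedDesc can be passed, reset per-tuple memory before every call, and stop once the function reports ExprEndResult. A compressed sketch of that loop, using only the fields the hunks mention and eliding the first-time tupdesc/tuplestore setup; fcinfo, rsinfo, econtext and expectedDesc are assumed to be set up beforehand:

    rsinfo.type = T_ReturnSetInfo;
    rsinfo.expectedDesc = expectedDesc;
    fcinfo.resultinfo = (Node *) &rsinfo;

    for (;;)
    {
        Datum   result;

        /* free whatever the previous call leaked into per-tuple memory */
        ResetExprContext(econtext);

        result = FunctionCallInvoke(&fcinfo);

        /* a set-returning function signals exhaustion through rsinfo.isDone */
        if (rsinfo.isDone == ExprEndResult)
            break;

        /* ... on the first pass, build the tupdesc and tuplestore, then stash
         *     'result' as one row of the output ... */
    }
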

View File

@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.58 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.59 2002/09/04 20:31:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -713,7 +713,8 @@ TupleDescGetAttInMetadata(TupleDesc tupdesc)
attinmeta = (AttInMetadata *) palloc(sizeof(AttInMetadata));
/*
* Gather info needed later to call the "in" function for each attribute
* Gather info needed later to call the "in" function for each
* attribute
*/
attinfuncinfo = (FmgrInfo *) palloc(natts * sizeof(FmgrInfo));
attelems = (Oid *) palloc(natts * sizeof(Oid));
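
The hunk above concerns TupleDescGetAttInMetadata, which caches each column's "in" function so rows can later be built from plain C strings. A small sketch of the usual consumer of that metadata; the two-column layout and the literal values are assumptions for illustration only:

    /* Hypothetical consumer: build a heap tuple from C-string column values,
     * letting each attribute's cached "in" function do the conversion. */
    static HeapTuple
    build_row_from_strings(TupleDesc tupdesc)
    {
        AttInMetadata *attinmeta = TupleDescGetAttInMetadata(tupdesc);
        char          *values[2];

        values[0] = "42";           /* assumes column 1 can parse this literal */
        values[1] = "hello";        /* assumes column 2 is a text-like type */

        return BuildTupleFromCStrings(attinmeta, values);
    }
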
@@ -850,6 +851,7 @@ do_text_output_multiline(TupOutputState *tstate, char *text)
*eol++ = '\0';
else
eol = text +strlen(text);
do_tup_output(tstate, &text);
text = eol;
}

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.89 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.90 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -297,19 +297,19 @@ ExecAssignResultTypeFromTL(Plan *node, CommonState *commonstate)
/*
* This is pretty grotty: we need to ensure that result tuples have
* space for an OID iff they are going to be stored into a relation
* that has OIDs. We assume that estate->es_result_relation_info
* is already set up to describe the target relation. One reason
* this is ugly is that all plan nodes in the plan tree will emit
* tuples with space for an OID, though we really only need the topmost
* plan to do so.
* that has OIDs. We assume that estate->es_result_relation_info is
* already set up to describe the target relation. One reason this is
* ugly is that all plan nodes in the plan tree will emit tuples with
* space for an OID, though we really only need the topmost plan to do
* so.
*
* It would be better to have InitPlan adjust the topmost plan node's
* output descriptor after plan tree initialization. However, that
* doesn't quite work because in an UPDATE that spans an inheritance
* tree, some of the target relations may have OIDs and some not.
* We have to make the decision on a per-relation basis as we initialize
* each of the child plans of the topmost Append plan. So, this is ugly
* but it works, for now ...
* tree, some of the target relations may have OIDs and some not. We
* have to make the decision on a per-relation basis as we initialize
* each of the child plans of the topmost Append plan. So, this is
* ugly but it works, for now ...
*/
ri = node->state->es_result_relation_info;
if (ri != NULL)
@@ -792,11 +792,9 @@ UnregisterExprContextCallback(ExprContext *econtext,
pfree(ecxt_callback);
}
else
{
prev_callback = &ecxt_callback->next;
}
}
}
/*
* Call all the shutdown callbacks registered in an ExprContext.

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.56 2002/08/29 00:17:04 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.57 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -566,8 +566,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
elog(ERROR, "Set-valued function called in context that cannot accept a set");
/*
* Ensure we will get shut down cleanly if the exprcontext is
* not run to completion.
* Ensure we will get shut down cleanly if the exprcontext is not
* run to completion.
*/
if (!fcache->shutdown_reg)
{
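
This hunk is the registration side of the UnregisterExprContextCallback change shown earlier in execUtils.c: a function that may not be run to completion registers a shutdown callback on its expression context exactly once. A sketch of that register-once pattern; the callback name, the econtext variable, and the fcache argument are illustrative assumptions:

    if (!fcache->shutdown_reg)
    {
        /* arrange for cleanup even if the result set is never read to the end */
        RegisterExprContextCallback(econtext,
                                    ShutdownSQLFunction,
                                    PointerGetDatum(fcache));
        fcache->shutdown_reg = true;
    }
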

View File

@@ -46,7 +46,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.85 2002/06/20 20:29:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.86 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -877,8 +877,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
&peraggstate->transtypeByVal);
/*
* initval is potentially null, so don't try to access it as a struct
* field. Must do it the hard way with SysCacheGetAttr.
* initval is potentially null, so don't try to access it as a
* struct field. Must do it the hard way with SysCacheGetAttr.
*/
textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
Anum_pg_aggregate_agginitval,
@@ -907,8 +907,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
{
/*
* Note: use the type from the input expression here, not
* from pg_proc.proargtypes, because the latter might be 0.
* Note: use the type from the input expression here, not from
* pg_proc.proargtypes, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);
@@ -921,8 +921,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
if (aggref->aggdistinct)
{
/*
* Note: use the type from the input expression here, not
* from pg_proc.proargtypes, because the latter might be 0.
* Note: use the type from the input expression here, not from
* pg_proc.proargtypes, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);
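
The first hunk exists because agginitval may be NULL, so it has to be fetched with SysCacheGetAttr rather than read as a struct field. The shape of that null-safe fetch, following the call visible in the diff (aggTuple is assumed to be a valid pg_aggregate syscache entry):

    bool    initValueIsNull;
    Datum   textInitVal;

    textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
                                  Anum_pg_aggregate_agginitval,
                                  &initValueIsNull);
    if (!initValueIsNull)
    {
        /* only now is it safe to convert the text datum to the transition type */
    }

The same caution applies to any nullable catalog column: direct struct-field access is only valid for the fixed, never-null part of the row.
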

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeFunctionscan.c,v 1.11 2002/09/02 01:05:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeFunctionscan.c,v 1.12 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,7 +69,8 @@ FunctionNext(FunctionScan *node)
/*
* If first time through, read all tuples from function and put them
* in a tuplestore. Subsequent calls just fetch tuples from tuplestore.
* in a tuplestore. Subsequent calls just fetch tuples from
* tuplestore.
*/
if (tuplestorestate == NULL)
{
@@ -84,8 +85,8 @@ FunctionNext(FunctionScan *node)
/*
* If function provided a tupdesc, cross-check it. We only really
* need to do this for functions returning RECORD, but might as well
* do it always.
* need to do this for functions returning RECORD, but might as
* well do it always.
*/
if (funcTupdesc &&
tupledesc_mismatch(scanstate->tupdesc, funcTupdesc))
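
The first hunk sums up this node's strategy: on the first call, materialize the complete function result into a tuplestore; afterwards, every call just reads the next stored row. A sketch of that fetch path, assuming tuplestorestate has already been filled; the tuplestore_gettuple signature shown here is an assumption and worth checking against tuplestore.h:

    bool        should_free;
    HeapTuple   tuple;

    /* later calls skip the function entirely and read the next stored row */
    tuple = (HeapTuple) tuplestore_gettuple(tuplestorestate, true, &should_free);
    if (tuple == NULL)
        return ExecClearTuple(slot);    /* function output exhausted */
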

View File

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* $Id: nodeHash.c,v 1.65 2002/09/02 02:47:02 momjian Exp $
* $Id: nodeHash.c,v 1.66 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -639,11 +639,11 @@ hashFunc(Datum key, int typLen, bool byVal)
{
/*
* If it's a by-value data type, just hash the whole Datum value.
* This assumes that datatypes narrower than Datum are consistently
* padded (either zero-extended or sign-extended, but not random
* bits) to fill Datum; see the XXXGetDatum macros in postgres.h.
* NOTE: it would not work to do hash_any(&key, len) since this
* would get the wrong bytes on a big-endian machine.
* This assumes that datatypes narrower than Datum are
* consistently padded (either zero-extended or sign-extended, but
* not random bits) to fill Datum; see the XXXGetDatum macros in
* postgres.h. NOTE: it would not work to do hash_any(&key, len)
* since this would get the wrong bytes on a big-endian machine.
*/
k = (unsigned char *) &key;
typLen = sizeof(Datum);
@@ -658,14 +658,14 @@ hashFunc(Datum key, int typLen, bool byVal)
else if (typLen == -1)
{
/*
* It's a varlena type, so 'key' points to a
* "struct varlena". NOTE: VARSIZE returns the
* "real" data length plus the sizeof the "vl_len" attribute of
* varlena (the length information). 'key' points to the beginning
* of the varlena struct, so we have to use "VARDATA" to find the
* beginning of the "real" data. Also, we have to be careful to
* detoast the datum if it's toasted. (We don't worry about
* freeing the detoasted copy; that happens for free when the
* It's a varlena type, so 'key' points to a "struct varlena".
* NOTE: VARSIZE returns the "real" data length plus the
* sizeof the "vl_len" attribute of varlena (the length
* information). 'key' points to the beginning of the varlena
* struct, so we have to use "VARDATA" to find the beginning
* of the "real" data. Also, we have to be careful to detoast
* the datum if it's toasted. (We don't worry about freeing
* the detoasted copy; that happens for free when the
* per-tuple memory context is reset in ExecHashGetBucket.)
*/
struct varlena *vkey = PG_DETOAST_DATUM(key);

View File

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.70 2002/06/23 21:29:32 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.71 2002/09/04 20:31:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -165,10 +165,10 @@ IndexNext(IndexScan *node)
while ((tuple = index_getnext(scandesc, direction)) != NULL)
{
/*
* store the scanned tuple in the scan tuple slot of the
* scan state. Note: we pass 'false' because tuples
* returned by amgetnext are pointers onto disk pages and
* must not be pfree()'d.
* store the scanned tuple in the scan tuple slot of the scan
* state. Note: we pass 'false' because tuples returned by
* amgetnext are pointers onto disk pages and must not be
* pfree()'d.
*/
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
@@ -177,10 +177,9 @@ IndexNext(IndexScan *node)
/*
* We must check to see if the current tuple was already
* matched by an earlier index, so we don't double-report
* it. We do this by passing the tuple through ExecQual
* and checking for failure with all previous
* qualifications.
* matched by an earlier index, so we don't double-report it.
* We do this by passing the tuple through ExecQual and
* checking for failure with all previous qualifications.
*/
if (indexstate->iss_IndexPtr > 0)
{
@@ -485,8 +484,9 @@ ExecEndIndexScan(IndexScan *node)
* close the heap relation.
*
* Currently, we do not release the AccessShareLock acquired by
* ExecInitIndexScan. This lock should be held till end of transaction.
* (There is a faction that considers this too much locking, however.)
* ExecInitIndexScan. This lock should be held till end of
* transaction. (There is a faction that considers this too much
* locking, however.)
*/
heap_close(relation, NoLock);
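
The hunks above walk through IndexNext's inner loop: fetch tuples from the index, store each one with shouldFree = false because it lives on a disk page, and skip rows an earlier index in the same scan already reported. A sketch of that loop; scandesc, direction and slot are assumed to come from the scan state, and the buffer argument is shown as InvalidBuffer only for brevity:

    while ((tuple = index_getnext(scandesc, direction)) != NULL)
    {
        /* 'false': the tuple points onto a disk page and must not be pfree'd */
        ExecStoreTuple(tuple, slot, InvalidBuffer, false);

        /* ... if an earlier index already matched this tuple, continue; ... */

        return slot;
    }
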

Some files were not shown because too many files have changed in this diff.