Mirror of https://github.com/postgres/postgres.git, synced 2025-07-28 23:42:10 +03:00

Massive commit to run PGINDENT on all *.c and *.h files.

Bruce Momjian
1997-09-07 05:04:48 +00:00
parent 8fecd4febf
commit 1ccd423235
687 changed files with 150775 additions and 136888 deletions
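
For reference, every hunk below makes the same kind of mechanical change: opening braces move onto their own lines, multi-variable declarations are split into one declarator per line, "else" and switch labels get their own lines, and long comments are re-wrapped. A minimal sketch of the resulting layout, using a made-up helper (classify() is not code from this commit, just an illustration of the style pgindent produces):

	/*
	 * Hypothetical example, not taken from the commit: a small function
	 * laid out the way pgindent formats code.  Braces sit on their own
	 * lines, declarators are listed one per line, and switch bodies are
	 * indented one extra stop.
	 */
	static int
	classify(int *v, int n)
	{
		int			i,
					positive = 0;

		for (i = 0; i < n; i++)
		{
			switch (v[i])
			{
				case 0:
					/* zeros are ignored */
					break;
				default:
					if (v[i] > 0)
					{
						positive++;
					}
					else
					{
						positive--;
					}
					break;
			}
		}
		return (positive);
	}

The hunks that follow apply this treatment to the existing sources; the reformatting is not intended to change behavior.
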

View File

@ -49,27 +49,33 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
int typlen;
func_ptr proc_fn;
int pronargs;
int nitems, i, result;
int ndim, *dim;
int nitems,
i,
result;
int ndim,
*dim;
char *p;
/* Sanity checks */
if ((array == (ArrayType *) NULL)
|| (ARR_IS_LO(array) == true)) {
|| (ARR_IS_LO(array) == true))
{
/* elog(NOTICE, "array_iterator: array is null"); */
return (0);
}
ndim = ARR_NDIM(array);
dim = ARR_DIMS(array);
nitems = getNitems(ndim, dim);
if (nitems == 0) {
if (nitems == 0)
{
/* elog(NOTICE, "array_iterator: nitems = 0"); */
return (0);
}
/* Lookup element type information */
typ_tuple = SearchSysCacheTuple(TYPOID, ObjectIdGetDatum(elemtype), 0, 0, 0);
if (!HeapTupleIsValid(typ_tuple)) {
if (!HeapTupleIsValid(typ_tuple))
{
elog(WARN, "array_iterator: cache lookup failed for type %d", elemtype);
return 0;
}
@ -80,7 +86,8 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
/* Lookup the function entry point */
proc_fn == (func_ptr) NULL;
fmgr_info(proc, &proc_fn, &pronargs);
if ((proc_fn == NULL) || (pronargs != 2)) {
if ((proc_fn == NULL) || (pronargs != 2))
{
elog(WARN, "array_iterator: fmgr_info lookup failed for oid %d", proc);
return (0);
}
@ -88,9 +95,12 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
/* Scan the array and apply the operator to each element */
result = 0;
p = ARR_DATA_PTR(array);
for (i = 0; i < nitems; i++) {
if (typbyval) {
switch(typlen) {
for (i = 0; i < nitems; i++)
{
if (typbyval)
{
switch (typlen)
{
case 1:
result = (int) (*proc_fn) (*p, value);
break;
@ -103,28 +113,41 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
break;
}
p += typlen;
} else {
}
else
{
result = (int) (*proc_fn) (p, value);
if (typlen > 0) {
if (typlen > 0)
{
p += typlen;
} else {
}
else
{
p += INTALIGN(*(int32 *) p);
}
}
if (result) {
if (!and) {
if (result)
{
if (!and)
{
return (1);
}
} else {
if (and) {
}
else
{
if (and)
{
return (0);
}
}
}
if (and && result) {
if (and && result)
{
return (1);
} else {
}
else
{
return (0);
}
}

View File

@ -13,14 +13,17 @@
#include "utils/datetime.h"
TimeADT *time_difference(TimeADT * time1, TimeADT * time2)
TimeADT *
time_difference(TimeADT * time1, TimeADT * time2)
{
TimeADT *result = (TimeADT *) palloc(sizeof(TimeADT));
*result = *time1 - *time2;
return (result);
}
TimeADT *currenttime()
TimeADT *
currenttime()
{
time_t current_time;
struct tm *tm;
@ -31,11 +34,13 @@ TimeADT *currenttime()
*result = ((((tm->tm_hour * 60) + tm->tm_min) * 60) + tm->tm_sec);
return (result);
}
DateADT currentdate()
DateADT
currentdate()
{
time_t current_time;
struct tm *tm;
DateADT result;
current_time = time(NULL);
tm = localtime(&current_time);
@ -43,21 +48,25 @@ DateADT currentdate()
date2j(100, 1, 1);
return (result);
}
int4 hours(TimeADT * time)
int4
hours(TimeADT * time)
{
return (*time / (60 * 60));
}
int4 minutes(TimeADT * time)
int4
minutes(TimeADT * time)
{
return (((int) (*time / 60)) % 60);
}
int4 seconds(TimeADT * time)
int4
seconds(TimeADT * time)
{
return (((int) *time) % 60);
}
int4 day(DateADT *date)
int4
day(DateADT * date)
{
struct tm tm;
@ -66,7 +75,8 @@ int4 day(DateADT *date)
return (tm.tm_mday);
}
int4 month(DateADT *date)
int4
month(DateADT * date)
{
struct tm tm;
@ -75,7 +85,8 @@ int4 month(DateADT *date)
return (tm.tm_mon);
}
int4 year(DateADT *date)
int4
year(DateADT * date)
{
struct tm tm;
@ -84,13 +95,15 @@ int4 year(DateADT *date)
return (tm.tm_year);
}
int4 asminutes(TimeADT * time)
int4
asminutes(TimeADT * time)
{
int seconds = (int) *time;
return (seconds / 60);
}
int4 asseconds(TimeADT * time)
int4
asseconds(TimeADT * time)
{
int seconds = (int) *time;

View File

@ -30,14 +30,17 @@ typedef char[8] int64;
#elif defined(__alpha)
typedef long int int64;
#define INT64_FORMAT "%ld"
#elif defined(__GNUC__)
typedef long long int int64;
#define INT64_FORMAT "%Ld"
#else
typedef long int int64;
#define INT64_FORMAT "%ld"
#endif
@ -66,9 +69,11 @@ int64 *int8div(int64 *val1, int64 *val2);
int64 *int48(int32 val);
int32 int84(int64 * val);
#if FALSE
int64 *int28(int16 val);
int16 int82(int64 * val);
#endif
float64 i8tod(int64 * val);
@ -98,7 +103,8 @@ int64 *dtoi8(float64 val);
/* int8in()
*/
int64 *int8in(char *str)
int64 *
int8in(char *str)
{
int64 *result = PALLOCTYPE(int64);
@ -120,7 +126,8 @@ int64 *int8in(char *str)
/* int8out()
*/
char *int8out(int64 *val)
char *
int8out(int64 * val)
{
char *result;
@ -154,32 +161,38 @@ char *int8out(int64 *val)
/* int8relop()
* Is val1 relop val2?
*/
bool int8eq(int64 *val1, int64 *val2)
bool
int8eq(int64 * val1, int64 * val2)
{
return (*val1 == *val2);
} /* int8eq() */
bool int8ne(int64 *val1, int64 *val2)
bool
int8ne(int64 * val1, int64 * val2)
{
return (*val1 != *val2);
} /* int8ne() */
bool int8lt(int64 *val1, int64 *val2)
bool
int8lt(int64 * val1, int64 * val2)
{
return (*val1 < *val2);
} /* int8lt() */
bool int8gt(int64 *val1, int64 *val2)
bool
int8gt(int64 * val1, int64 * val2)
{
return (*val1 > *val2);
} /* int8gt() */
bool int8le(int64 *val1, int64 *val2)
bool
int8le(int64 * val1, int64 * val2)
{
return (*val1 <= *val2);
} /* int8le() */
bool int8ge(int64 *val1, int64 *val2)
bool
int8ge(int64 * val1, int64 * val2)
{
return (*val1 >= *val2);
} /* int8ge() */
@ -188,32 +201,38 @@ bool int8ge(int64 *val1, int64 *val2)
/* int84relop()
* Is 64-bit val1 relop 32-bit val2?
*/
bool int84eq(int64 *val1, int32 val2)
bool
int84eq(int64 * val1, int32 val2)
{
return (*val1 == val2);
} /* int84eq() */
bool int84ne(int64 *val1, int32 val2)
bool
int84ne(int64 * val1, int32 val2)
{
return (*val1 != val2);
} /* int84ne() */
bool int84lt(int64 *val1, int32 val2)
bool
int84lt(int64 * val1, int32 val2)
{
return (*val1 < val2);
} /* int84lt() */
bool int84gt(int64 *val1, int32 val2)
bool
int84gt(int64 * val1, int32 val2)
{
return (*val1 > val2);
} /* int84gt() */
bool int84le(int64 *val1, int32 val2)
bool
int84le(int64 * val1, int32 val2)
{
return (*val1 <= val2);
} /* int84le() */
bool int84ge(int64 *val1, int32 val2)
bool
int84ge(int64 * val1, int32 val2)
{
return (*val1 >= val2);
} /* int84ge() */
@ -223,7 +242,8 @@ bool int84ge(int64 *val1, int32 val2)
* Arithmetic operators on 64-bit integers.
*---------------------------------------------------------*/
int64 *int8um(int64 *val)
int64 *
int8um(int64 * val)
{
int64 *result = PALLOCTYPE(int64);
@ -235,7 +255,8 @@ int64 *int8um(int64 *val)
return (result);
} /* int8um() */
int64 *int8pl(int64 *val1, int64 *val2)
int64 *
int8pl(int64 * val1, int64 * val2)
{
int64 *result = PALLOCTYPE(int64);
@ -247,7 +268,8 @@ int64 *int8pl(int64 *val1, int64 *val2)
return (result);
} /* int8pl() */
int64 *int8mi(int64 *val1, int64 *val2)
int64 *
int8mi(int64 * val1, int64 * val2)
{
int64 *result = PALLOCTYPE(int64);
@ -259,7 +281,8 @@ int64 *int8mi(int64 *val1, int64 *val2)
return (result);
} /* int8mi() */
int64 *int8mul(int64 *val1, int64 *val2)
int64 *
int8mul(int64 * val1, int64 * val2)
{
int64 *result = PALLOCTYPE(int64);
@ -271,7 +294,8 @@ int64 *int8mul(int64 *val1, int64 *val2)
return (result);
} /* int8mul() */
int64 *int8div(int64 *val1, int64 *val2)
int64 *
int8div(int64 * val1, int64 * val2)
{
int64 *result = PALLOCTYPE(int64);
@ -288,7 +312,8 @@ int64 *int8div(int64 *val1, int64 *val2)
* Conversion operators.
*---------------------------------------------------------*/
int64 *int48(int32 val)
int64 *
int48(int32 val)
{
int64 *result = PALLOCTYPE(int64);
@ -297,7 +322,8 @@ int64 *int48(int32 val)
return (result);
} /* int48() */
int32 int84(int64 *val)
int32
int84(int64 * val)
{
int32 result;
@ -313,7 +339,8 @@ int32 int84(int64 *val)
} /* int84() */
#if FALSE
int64 *int28(int16 val)
int64 *
int28(int16 val)
{
int64 *result;
@ -325,7 +352,8 @@ int64 *int28(int16 val)
return (result);
} /* int28() */
int16 int82(int64 *val)
int16
int82(int64 * val)
{
int16 result;
@ -336,9 +364,11 @@ int16 int82(int64 *val)
return (result);
} /* int82() */
#endif
float64 i8tod(int64 *val)
float64
i8tod(int64 * val)
{
float64 result = PALLOCTYPE(float64data);
@ -347,7 +377,8 @@ float64 i8tod(int64 *val)
return (result);
} /* i8tod() */
int64 *dtoi8(float64 val)
int64 *
dtoi8(float64 val)
{
int64 *result = PALLOCTYPE(int64);
@ -358,4 +389,3 @@ int64 *dtoi8(float64 val)
return (result);
} /* dtoi8() */

View File

@ -20,11 +20,13 @@
**------------------------------------------------------------------------*/
/*VARARGS*/
void halt(va_alist)
void
halt(va_alist)
va_dcl
{
va_list arg_ptr;
char *format, *pstr;
char *format,
*pstr;
void (*sig_func) ();
va_start(arg_ptr);

View File

@ -4,4 +4,3 @@
*/
void halt();

View File

@ -10,14 +10,19 @@
#include "halt.h"
#include "pginterface.h"
int main(int argc, char **argv)
int
main(int argc, char **argv)
{
char query[4000];
int row = 1;
int aint;
float afloat;
double adouble;
char achar[11], achar16[17], abpchar[11], avarchar[51], atext[51];
char achar[11],
achar16[17],
abpchar[11],
avarchar[51],
atext[51];
time_t aabstime;
if (argc != 2)
@ -95,4 +100,3 @@ bpchar %s\nvarchar %s\ntext %s\nabstime %s",
disconnectdb();
return 0;
}

View File

@ -27,7 +27,8 @@ static PGresult* res = NULL;
static int on_error_state = ON_ERROR_STOP;
/* LOCAL VARIABLES */
static sigset_t block_sigs, unblock_sigs;
static sigset_t block_sigs,
unblock_sigs;
static int tuple;
/*
@ -35,7 +36,8 @@ static int tuple;
** connectdb - returns PGconn structure
**
*/
PGconn *connectdb( char *dbName,
PGconn *
connectdb(char *dbName,
char *pghost,
char *pgport,
char *pgoptions,
@ -55,7 +57,8 @@ PGconn *connectdb( char *dbName,
** disconnectdb
**
*/
void disconnectdb()
void
disconnectdb()
{
PQfinish(conn);
}
@ -65,7 +68,8 @@ void disconnectdb()
** doquery - returns PGresult structure
**
*/
PGresult *doquery(char *query)
PGresult *
doquery(char *query)
{
if (res != NULL)
PQclear(res);
@ -82,7 +86,8 @@ PGresult *doquery(char *query)
{
if (res != NULL)
fprintf(stderr, "query error: %s\n", PQcmdStatus(res));
else fprintf(stderr,"connection error: %s\n",PQerrorMessage(conn));
else
fprintf(stderr, "connection error: %s\n", PQerrorMessage(conn));
PQfinish(conn);
halt("failed request: %s\n", query);
}
@ -96,10 +101,12 @@ PGresult *doquery(char *query)
** NULL pointers are skipped
**
*/
int fetch(void *param, ...)
int
fetch(void *param,...)
{
va_list ap;
int arg, num_fields;
int arg,
num_fields;
num_fields = PQnfields(res);
@ -132,10 +139,12 @@ int fetch(void *param, ...)
** Returns true or false into null indicator variables
** NULL pointers are skipped
*/
int fetchwithnulls(void *param, ...)
int
fetchwithnulls(void *param,...)
{
va_list ap;
int arg, num_fields;
int arg,
num_fields;
num_fields = PQnfields(res);
@ -171,7 +180,8 @@ int fetchwithnulls(void *param, ...)
** on_error_stop
**
*/
void on_error_stop()
void
on_error_stop()
{
on_error_state = ON_ERROR_STOP;
}
@ -181,7 +191,8 @@ void on_error_stop()
** on_error_continue
**
*/
void on_error_continue()
void
on_error_continue()
{
on_error_state = ON_ERROR_CONTINUE;
}
@ -191,7 +202,8 @@ void on_error_continue()
** sig_disconnect
**
*/
static void sig_disconnect()
static void
sig_disconnect()
{
fprintf(stderr, "exiting...\n");
PQfinish(conn);
@ -203,7 +215,8 @@ static void sig_disconnect()
** set_signals
**
*/
static void set_signals()
static void
set_signals()
{
sigemptyset(&block_sigs);
sigemptyset(&unblock_sigs);

View File

@ -12,14 +12,19 @@
#include <libpq-fe.h>
#include <pginterface.h>
int main(int argc, char **argv)
int
main(int argc, char **argv)
{
char query[4000];
int row = 1;
int aint;
float afloat;
double adouble;
char achar[11], achar16[17], abpchar[11], avarchar[51], atext[51];
char achar[11],
achar16[17],
abpchar[11],
avarchar[51],
atext[51];
time_t aabstime;
int aint_null,
afloat_null,
@ -136,4 +141,3 @@ bpchar %d\nvarchar %d\ntext %d\nabstime %d\n",
disconnectdb();
return 0;
}

View File

@ -10,7 +10,8 @@
#include <libpq-fe.h>
#include "pginterface.h"
int main(int argc, char **argv)
int
main(int argc, char **argv)
{
char query[4000];
int row = 0;
@ -69,4 +70,3 @@ int main(int argc, char **argv)
disconnectdb();
return 0;
}

View File

@ -13,7 +13,8 @@
/* prototype for soundex function */
char *soundex(char *instr, char *outstr);
text *text_soundex(text *t)
text *
text_soundex(text * t)
{
/* ABCDEFGHIJKLMNOPQRSTUVWXYZ */
char *table = "01230120022455012623010202";
@ -46,7 +47,8 @@ text *text_soundex(text *t)
return (new_t);
}
char *soundex(char *instr, char *outstr)
char *
soundex(char *instr, char *outstr)
{ /* ABCDEFGHIJKLMNOPQRSTUVWXYZ */
char *table = "01230120022455012623010202";
int count = 0;
@ -54,22 +56,27 @@ char *soundex(char *instr, char *outstr)
while (!isalpha(instr[0]) && instr[0])
++instr;
if(!instr[0]) { /* Hey! Where'd the string go? */
if (!instr[0])
{ /* Hey! Where'd the string go? */
outstr[0] = (char) 0;
return outstr;
}
if(toupper(instr[0]) == 'P' && toupper(instr[1]) == 'H') {
if (toupper(instr[0]) == 'P' && toupper(instr[1]) == 'H')
{
instr[0] = 'F';
instr[1] = 'A';
}
*outstr++ = (char) toupper(*instr++);
while(*instr && count < 5) {
if(isalpha(*instr) && *instr != *(instr-1)) {
while (*instr && count < 5)
{
if (isalpha(*instr) && *instr != *(instr - 1))
{
*outstr = table[toupper(instr[0]) - 'A'];
if(*outstr != '0') {
if (*outstr != '0')
{
++outstr;
++count;
}
@ -80,4 +87,3 @@ char *soundex(char *instr, char *outstr)
*outstr = '\0';
return (outstr);
}

View File

@ -48,24 +48,32 @@
char *
string_output(char *data, int size)
{
register unsigned char c, *p, *r, *result;
register int l, len;
register unsigned char c,
*p,
*r,
*result;
register int l,
len;
if (data == NULL) {
if (data == NULL)
{
result = (char *) palloc(2);
result[0] = '-';
result[1] = '\0';
return (result);
}
if (size < 0) {
if (size < 0)
{
size = strlen(data);
}
/* adjust string length for escapes */
len = size;
for (p=data,l=size; l>0; p++,l--) {
switch (*p) {
for (p = data, l = size; l > 0; p++, l--)
{
switch (*p)
{
case '\\':
case '"':
case '{':
@ -79,7 +87,8 @@ string_output(char *data, int size)
len++;
break;
default:
if (NOTPRINTABLE(*p)) {
if (NOTPRINTABLE(*p))
{
len += 3;
}
}
@ -88,8 +97,10 @@ string_output(char *data, int size)
result = (char *) palloc(len);
for (p=data,r=result,l=size; (l > 0) && (c = *p); p++,l--) {
switch (c) {
for (p = data, r = result, l = size; (l > 0) && (c = *p); p++, l--)
{
switch (c)
{
case '\\':
case '"':
case '{':
@ -122,7 +133,8 @@ string_output(char *data, int size)
*r++ = 'v';
break;
default:
if (NOTPRINTABLE(c)) {
if (NOTPRINTABLE(c))
{
*r = '\\';
r += 3;
*r-- = DIGIT(c & 07);
@ -131,7 +143,9 @@ string_output(char *data, int size)
c >>= 3;
*r = DIGIT(c & 03);
r += 3;
} else {
}
else
{
*r++ = c;
}
}
@ -170,54 +184,69 @@ string_output(char *data, int size)
char *
string_input(char *str, int size, int hdrsize, int *rtn_size)
{
register unsigned char *p, *r;
register unsigned char *p,
*r;
unsigned char *result;
int len;
if ((str == NULL) || (hdrsize < 0)) {
if ((str == NULL) || (hdrsize < 0))
{
return (char *) NULL;
}
/* Compute result size */
len = strlen(str);
for (p=str; *p; ) {
if (*p++ == '\\') {
if (ISOCTAL(*p)) {
if (ISOCTAL(*(p+1))) {
for (p = str; *p;)
{
if (*p++ == '\\')
{
if (ISOCTAL(*p))
{
if (ISOCTAL(*(p + 1)))
{
p++;
len--;
}
if (ISOCTAL(*(p+1))) {
if (ISOCTAL(*(p + 1)))
{
p++;
len--;
}
}
if (*p) p++;
if (*p)
p++;
len--;
}
}
/* result has variable length */
if (size == 0) {
if (size == 0)
{
size = len + 1;
} else
}
else
/* result has variable length with maximum size */
if (size < 0) {
if (size < 0)
{
size = MIN(len, -size) + 1;
}
result = (char *) palloc(hdrsize + size);
memset(result, 0, hdrsize + size);
if (rtn_size) {
if (rtn_size)
{
*rtn_size = size;
}
r = result + hdrsize;
for (p=str; *p; ) {
for (p = str; *p;)
{
register unsigned char c;
if ((c = *p++) == '\\') {
switch (c = *p++) {
if ((c = *p++) == '\\')
{
switch (c = *p++)
{
case '\0':
p--;
break;
@ -230,10 +259,12 @@ string_input(char *str, int size, int hdrsize, int *rtn_size)
case '6':
case '7':
c = VALUE(c);
if (isdigit(*p)) {
if (isdigit(*p))
{
c = (c << 3) + VALUE(*p++);
}
if (isdigit(*p)) {
if (isdigit(*p))
{
c = (c << 3) + VALUE(*p++);
}
*r++ = c;
@ -259,7 +290,9 @@ string_input(char *str, int size, int hdrsize, int *rtn_size)
default:
*r++ = c;
}
} else {
}
else
{
*r++ = c;
}
}
@ -312,7 +345,8 @@ c_textout(struct varlena *vlena)
int len = 0;
char *s = NULL;
if (vlena) {
if (vlena)
{
len = VARSIZE(vlena) - VARHDRSZ;
s = VARDATA(vlena);
}
@ -328,7 +362,8 @@ c_varcharout(char *s)
{
int len;
if (s) {
if (s)
{
len = *(int32 *) s - 4;
s += 4;
}
@ -342,7 +377,8 @@ c_textin(char *str)
struct varlena *result;
int len;
if (str == NULL) {
if (str == NULL)
{
return ((struct varlena *) NULL);
}
@ -357,5 +393,5 @@ c_char16in(char *str)
{
return (string_input(str, 16, 0, NULL));
}
#endif
#endif

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.21 1997/08/26 23:31:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.22 1997/09/07 04:37:30 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@ -58,19 +58,26 @@ ComputeDataSize(TupleDesc tupleDesc,
int numberOfAttributes = tupleDesc->natts;
AttributeTupleForm *att = tupleDesc->attrs;
for (data_length = 0, i = 0; i < numberOfAttributes; i++) {
if (nulls[i] != ' ') continue;
for (data_length = 0, i = 0; i < numberOfAttributes; i++)
{
if (nulls[i] != ' ')
continue;
switch (att[i]->attlen) {
switch (att[i]->attlen)
{
case -1:
/*
* This is the size of the disk representation and so
* must include the additional sizeof long.
* This is the size of the disk representation and so must
* include the additional sizeof long.
*/
if (att[i]->attalign == 'd') {
if (att[i]->attalign == 'd')
{
data_length = DOUBLEALIGN(data_length)
+ VARSIZE(DatumGetPointer(value[i]));
} else {
}
else
{
data_length = INTALIGN(data_length)
+ VARSIZE(DatumGetPointer(value[i]));
}
@ -118,24 +125,31 @@ DataFill(char *data,
int numberOfAttributes = tupleDesc->natts;
AttributeTupleForm *att = tupleDesc->attrs;
if (bit != NULL) {
if (bit != NULL)
{
bitP = &bit[-1];
bitmask = CSIGNBIT;
}
*infomask = 0;
for (i = 0; i < numberOfAttributes; i++) {
if (bit != NULL) {
if (bitmask != CSIGNBIT) {
for (i = 0; i < numberOfAttributes; i++)
{
if (bit != NULL)
{
if (bitmask != CSIGNBIT)
{
bitmask <<= 1;
} else {
}
else
{
bitP += 1;
*bitP = 0x0;
bitmask = 1;
}
if (nulls[i] == 'n') {
if (nulls[i] == 'n')
{
*infomask |= HEAP_HASNULL;
continue;
}
@ -143,12 +157,16 @@ DataFill(char *data,
*bitP |= bitmask;
}
switch (att[i]->attlen) {
switch (att[i]->attlen)
{
case -1:
*infomask |= HEAP_HASVARLENA;
if (att[i]->attalign=='d') {
if (att[i]->attalign == 'd')
{
data = (char *) DOUBLEALIGN(data);
} else {
}
else
{
data = (char *) INTALIGN(data);
}
data_length = VARSIZE(DatumGetPointer(value[i]));
@ -178,12 +196,15 @@ DataFill(char *data,
if (att[i]->attlen < sizeof(int32))
elog(WARN, "DataFill: attribute %d has len %d",
i, att[i]->attlen);
if (att[i]->attalign == 'd') {
if (att[i]->attalign == 'd')
{
data = (char *) DOUBLEALIGN(data);
memmove(data, DatumGetPointer(value[i]),
att[i]->attlen);
data += att[i]->attlen;
} else {
}
else
{
data = (char *) LONGALIGN(data);
memmove(data, DatumGetPointer(value[i]),
att[i]->attlen);
@ -209,12 +230,16 @@ heap_attisnull(HeapTuple tup, int attnum)
if (attnum > (int) tup->t_natts)
return (1);
if (HeapTupleNoNulls(tup)) return(0);
if (HeapTupleNoNulls(tup))
return (0);
if (attnum > 0) {
if (attnum > 0)
{
return (att_isnull(attnum - 1, tup->t_bits));
} else
switch (attnum) {
}
else
switch (attnum)
{
case SelfItemPointerAttributeNumber:
case ObjectIdAttributeNumber:
case MinTransactionIdAttributeNumber:
@ -254,17 +279,28 @@ heap_sysattrlen(AttrNumber attno)
{
HeapTupleData *f = NULL;
switch (attno) {
case SelfItemPointerAttributeNumber: return sizeof f->t_ctid;
case ObjectIdAttributeNumber: return sizeof f->t_oid;
case MinTransactionIdAttributeNumber: return sizeof f->t_xmin;
case MinCommandIdAttributeNumber: return sizeof f->t_cmin;
case MaxTransactionIdAttributeNumber: return sizeof f->t_xmax;
case MaxCommandIdAttributeNumber: return sizeof f->t_cmax;
case ChainItemPointerAttributeNumber: return sizeof f->t_chain;
case MinAbsoluteTimeAttributeNumber: return sizeof f->t_tmin;
case MaxAbsoluteTimeAttributeNumber: return sizeof f->t_tmax;
case VersionTypeAttributeNumber: return sizeof f->t_vtype;
switch (attno)
{
case SelfItemPointerAttributeNumber:
return sizeof f->t_ctid;
case ObjectIdAttributeNumber:
return sizeof f->t_oid;
case MinTransactionIdAttributeNumber:
return sizeof f->t_xmin;
case MinCommandIdAttributeNumber:
return sizeof f->t_cmin;
case MaxTransactionIdAttributeNumber:
return sizeof f->t_xmax;
case MaxCommandIdAttributeNumber:
return sizeof f->t_cmax;
case ChainItemPointerAttributeNumber:
return sizeof f->t_chain;
case MinAbsoluteTimeAttributeNumber:
return sizeof f->t_tmin;
case MaxAbsoluteTimeAttributeNumber:
return sizeof f->t_tmax;
case VersionTypeAttributeNumber:
return sizeof f->t_vtype;
case AnchorItemPointerAttributeNumber:
elog(WARN, "heap_sysattrlen: field t_anchor does not exist!");
@ -287,7 +323,8 @@ heap_sysattrbyval(AttrNumber attno)
{
bool byval;
switch (attno) {
switch (attno)
{
case SelfItemPointerAttributeNumber:
byval = false;
break;
@ -338,7 +375,8 @@ heap_sysattrbyval(AttrNumber attno)
char *
heap_getsysattr(HeapTuple tup, Buffer b, int attnum)
{
switch (attnum) {
switch (attnum)
{
case SelfItemPointerAttributeNumber:
return ((char *) &tup->t_ctid);
case ObjectIdAttributeNumber:
@ -359,12 +397,12 @@ heap_getsysattr(HeapTuple tup, Buffer b, int attnum)
/*
* For tmin and tmax, we need to do some extra work. These don't
* get filled in until the vacuum cleaner runs (or we manage to flush
* a page after setting the value correctly below). If the vacuum
* cleaner hasn't run yet, then the times stored in the tuple are
* wrong, and we need to look up the commit time of the transaction.
* We cache this value in the tuple to avoid doing the work more than
* once.
* get filled in until the vacuum cleaner runs (or we manage to
* flush a page after setting the value correctly below). If the
* vacuum cleaner hasn't run yet, then the times stored in the
* tuple are wrong, and we need to look up the commit time of the
* transaction. We cache this value in the tuple to avoid doing
* the work more than once.
*/
case MinAbsoluteTimeAttributeNumber:
@ -373,7 +411,8 @@ heap_getsysattr(HeapTuple tup, Buffer b, int attnum)
tup->t_tmin = TransactionIdGetCommitTime(tup->t_xmin);
return ((char *) (long) tup->t_tmin);
case MaxAbsoluteTimeAttributeNumber:
if (!AbsoluteTimeIsBackwardCompatiblyReal(tup->t_tmax)) {
if (!AbsoluteTimeIsBackwardCompatiblyReal(tup->t_tmax))
{
if (TransactionIdDidCommit(tup->t_xmax))
tup->t_tmax = TransactionIdGetCommitTime(tup->t_xmax);
else
@ -435,13 +474,18 @@ fastgetattr(HeapTuple tup,
if (isnull)
*isnull = false;
if (HeapTupleNoNulls(tup)) {
if (HeapTupleNoNulls(tup))
{
attnum--;
if (att[attnum]->attcacheoff > 0) {
if (att[attnum]->attcacheoff > 0)
{
return (char *)
fetchatt(&(att[attnum]),
(char *) tup + tup->t_hoff + att[attnum]->attcacheoff);
} else if (attnum == 0) {
}
else if (attnum == 0)
{
/*
* first attribute is always at position zero
*/
@ -451,7 +495,10 @@ fastgetattr(HeapTuple tup,
tp = (char *) tup + tup->t_hoff;
slow = 0;
} else {
}
else
{
/*
* there's a null somewhere in the tuple
*/
@ -466,7 +513,8 @@ fastgetattr(HeapTuple tup,
* ----------------
*/
if (att_isnull(attnum, bp)) {
if (att_isnull(attnum, bp))
{
if (isnull)
*isnull = true;
return NULL;
@ -480,8 +528,10 @@ fastgetattr(HeapTuple tup,
{
register int i = 0; /* current offset in bp */
for (i = 0; i < attnum && !slow; i++) {
if (att_isnull(i, bp)) slow = 1;
for (i = 0; i < attnum && !slow; i++)
{
if (att_isnull(i, bp))
slow = 1;
}
}
}
@ -489,28 +539,36 @@ fastgetattr(HeapTuple tup,
/*
* now check for any non-fixed length attrs before our attribute
*/
if (!slow) {
if (att[attnum]->attcacheoff > 0) {
if (!slow)
{
if (att[attnum]->attcacheoff > 0)
{
return (char *)
fetchatt(&(att[attnum]),
tp + att[attnum]->attcacheoff);
} else if (attnum == 0) {
}
else if (attnum == 0)
{
return (char *)
fetchatt(&(att[0]), (char *) tup + tup->t_hoff);
} else if (!HeapTupleAllFixed(tup)) {
}
else if (!HeapTupleAllFixed(tup))
{
register int j = 0;
for (j = 0; j < attnum && !slow; j++)
if (att[j]->attlen < 1) slow = 1;
if (att[j]->attlen < 1)
slow = 1;
}
}
/*
* if slow is zero, and we got here, we know that we have a tuple with
* no nulls. We also have to initialize the remainder of
* the attribute cached offset values.
* no nulls. We also have to initialize the remainder of the
* attribute cached offset values.
*/
if (!slow) {
if (!slow)
{
register int j = 1;
register long off;
@ -520,12 +578,15 @@ fastgetattr(HeapTuple tup,
att[0]->attcacheoff = 0;
while (att[j]->attcacheoff > 0) j++;
while (att[j]->attcacheoff > 0)
j++;
off = att[j - 1]->attcacheoff + att[j - 1]->attlen;
for (; j < attnum + 1; j++) {
switch(att[j]->attlen) {
for (; j < attnum + 1; j++)
{
switch (att[j]->attlen)
{
case -1:
off = (att[j]->attalign == 'd') ?
DOUBLEALIGN(off) : INTALIGN(off);
@ -539,7 +600,8 @@ fastgetattr(HeapTuple tup,
off = INTALIGN(off);
break;
default:
if (att[j]->attlen < sizeof(int32)) {
if (att[j]->attlen < sizeof(int32))
{
elog(WARN,
"fastgetattr: attribute %d has len %d",
j, att[j]->attlen);
@ -557,7 +619,9 @@ fastgetattr(HeapTuple tup,
return
(char *) fetchatt(&(att[attnum]), tp + att[attnum]->attcacheoff);
} else {
}
else
{
register bool usecache = true;
register int off = 0;
register int i;
@ -565,21 +629,25 @@ fastgetattr(HeapTuple tup,
/*
* Now we know that we have to walk the tuple CAREFULLY.
*
* Note - This loop is a little tricky. On iteration i we
* first set the offset for attribute i and figure out how much
* the offset should be incremented. Finally, we need to align the
* offset based on the size of attribute i+1 (for which the offset
* has been computed). -mer 12 Dec 1991
* Note - This loop is a little tricky. On iteration i we first set
* the offset for attribute i and figure out how much the offset
* should be incremented. Finally, we need to align the offset
* based on the size of attribute i+1 (for which the offset has
* been computed). -mer 12 Dec 1991
*/
for (i = 0; i < attnum; i++) {
if (!HeapTupleNoNulls(tup)) {
if (att_isnull(i, bp)) {
for (i = 0; i < attnum; i++)
{
if (!HeapTupleNoNulls(tup))
{
if (att_isnull(i, bp))
{
usecache = false;
continue;
}
}
switch (att[i]->attlen) {
switch (att[i]->attlen)
{
case -1:
off = (att[i]->attalign == 'd') ?
DOUBLEALIGN(off) : INTALIGN(off);
@ -603,16 +671,22 @@ fastgetattr(HeapTuple tup,
off = LONGALIGN(off);
break;
}
if (usecache && att[i]->attcacheoff > 0) {
if (usecache && att[i]->attcacheoff > 0)
{
off = att[i]->attcacheoff;
if (att[i]->attlen == -1) {
if (att[i]->attlen == -1)
{
usecache = false;
}
} else {
if (usecache) att[i]->attcacheoff = off;
}
else
{
if (usecache)
att[i]->attcacheoff = off;
}
switch(att[i]->attlen) {
switch (att[i]->attlen)
{
case sizeof(char):
off++;
break;
@ -631,7 +705,8 @@ fastgetattr(HeapTuple tup,
break;
}
}
switch (att[attnum]->attlen) {
switch (att[attnum]->attlen)
{
case -1:
off = (att[attnum]->attalign == 'd') ?
DOUBLEALIGN(off) : INTALIGN(off);
@ -673,7 +748,8 @@ heap_copytuple(HeapTuple tuple)
return (NULL);
/* XXX For now, just prevent an undetectable executor related error */
if (tuple->t_len > MAXTUPLEN) {
if (tuple->t_len > MAXTUPLEN)
{
elog(WARN, "palloctup: cannot handle length %d tuples",
tuple->t_len);
}
@ -702,7 +778,8 @@ heap_deformtuple(HeapTuple tuple,
Assert(HeapTupleIsValid(tuple));
natts = tuple->t_natts;
for (i = 0; i<natts; i++) {
for (i = 0; i < natts; i++)
{
bool isnull;
values[i] = (Datum) heap_getattr(tuple,
@ -716,6 +793,7 @@ heap_deformtuple(HeapTuple tuple,
nulls[i] = ' ';
}
}
#endif
/* ----------------
@ -752,15 +830,18 @@ heap_formtuple(TupleDesc tupleDescriptor,
len = sizeof *tuple - sizeof tuple->t_bits;
for (i = 0; i < numberOfAttributes && !hasnull; i++) {
if (nulls[i] != ' ') hasnull = true;
for (i = 0; i < numberOfAttributes && !hasnull; i++)
{
if (nulls[i] != ' ')
hasnull = true;
}
if (numberOfAttributes > MaxHeapAttributeNumber)
elog(WARN, "heap_formtuple: numberOfAttributes of %d > %d",
numberOfAttributes, MaxHeapAttributeNumber);
if (hasnull) {
if (hasnull)
{
bitmaplen = BITMAPLEN(numberOfAttributes);
len += bitmaplen;
}
@ -831,7 +912,8 @@ heap_modifytuple(HeapTuple tuple,
* ----------------
*/
madecopy = 0;
if (BufferIsValid(buffer) == true) {
if (BufferIsValid(buffer) == true)
{
relation = (Relation) BufferGetRelation(buffer);
tuple = heap_copytuple(tuple);
madecopy = 1;
@ -849,9 +931,11 @@ heap_modifytuple(HeapTuple tuple,
for (attoff = 0;
attoff < numberOfAttributes;
attoff += 1) {
attoff += 1)
{
if (repl[attoff] == ' ') {
if (repl[attoff] == ' ')
{
char *attr;
attr =
@ -863,10 +947,14 @@ heap_modifytuple(HeapTuple tuple,
value[attoff] = PointerGetDatum(attr);
nulls[attoff] = (isNull) ? 'n' : ' ';
} else if (repl[attoff] != 'r') {
}
else if (repl[attoff] != 'r')
{
elog(WARN, "heap_modifytuple: repl is \\%3d", repl[attoff]);
} else { /* == 'r' */
}
else
{ /* == 'r' */
value[attoff] = replValue[attoff];
nulls[attoff] = replNull[attoff];
}
@ -889,7 +977,8 @@ heap_modifytuple(HeapTuple tuple,
(char *) &tuple->t_ctid,
((char *) &tuple->t_hoff - (char *) &tuple->t_ctid)); /* XXX */
newTuple->t_infomask = infomask;
newTuple->t_natts = numberOfAttributes; /* fix t_natts just in case */
newTuple->t_natts = numberOfAttributes; /* fix t_natts just in
* case */
/* ----------------
* if we made a copy of the tuple, then free it.

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/Attic/heapvalid.c,v 1.16 1997/08/29 09:12:20 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/Attic/heapvalid.c,v 1.17 1997/09/07 04:37:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -40,7 +40,8 @@ heap_keytest(HeapTuple t,
Datum atp;
int test;
for (; nkeys--; keys++) {
for (; nkeys--; keys++)
{
atp = (Datum) heap_getattr(t, InvalidBuffer,
keys->sk_attno,
tupdesc,
@ -50,7 +51,8 @@ heap_keytest(HeapTuple t,
/* XXX eventually should check if SK_ISNULL */
return false;
if (keys->sk_flags & SK_ISNULL) {
if (keys->sk_flags & SK_ISNULL)
{
return (false);
}
@ -99,9 +101,11 @@ heap_tuple_satisfies(ItemId itemId,
int nKeys,
ScanKey key)
{
HeapTuple tuple, result;
HeapTuple tuple,
result;
bool res;
TransactionId old_tmin, old_tmax;
TransactionId old_tmin,
old_tmax;
if (!ItemIdIsUsed(itemId))
return NULL;
@ -115,18 +119,24 @@ heap_tuple_satisfies(ItemId itemId,
res = TRUE;
result = (HeapTuple) NULL;
if (res) {
if(relation->rd_rel->relkind == RELKIND_UNCATALOGED) {
if (res)
{
if (relation->rd_rel->relkind == RELKIND_UNCATALOGED)
{
result = tuple;
} else {
}
else
{
old_tmin = tuple->t_tmin;
old_tmax = tuple->t_tmax;
res = HeapTupleSatisfiesTimeQual(tuple, qual);
if (tuple->t_tmin != old_tmin ||
tuple->t_tmax != old_tmax) {
tuple->t_tmax != old_tmax)
{
SetBufferCommitInfoNeedsSave(buffer);
}
if(res) {
if (res)
{
result = tuple;
}
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.15 1997/08/19 21:28:50 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.16 1997/09/07 04:37:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -27,7 +27,8 @@
#endif
static Size IndexInfoFindDataOffset(unsigned short t_info);
static char *fastgetiattr(IndexTuple tup, int attnum,
static char *
fastgetiattr(IndexTuple tup, int attnum,
TupleDesc att, bool * isnull);
/* ----------------------------------------------------------------
@ -46,7 +47,8 @@ index_formtuple(TupleDesc tupleDescriptor,
{
register char *tp; /* tuple pointer */
IndexTuple tuple; /* return tuple */
Size size, hoff;
Size size,
hoff;
int i;
unsigned short infomask = 0;
bool hasnull = false;
@ -58,11 +60,14 @@ index_formtuple(TupleDesc tupleDescriptor,
numberOfAttributes, MaxIndexAttributeNumber);
for (i = 0; i < numberOfAttributes && !hasnull; i++) {
if (null[i] != ' ') hasnull = true;
for (i = 0; i < numberOfAttributes && !hasnull; i++)
{
if (null[i] != ' ')
hasnull = true;
}
if (hasnull) infomask |= INDEX_NULL_MASK;
if (hasnull)
infomask |= INDEX_NULL_MASK;
hoff = IndexInfoFindDataOffset(infomask);
size = hoff
@ -83,12 +88,13 @@ index_formtuple(TupleDesc tupleDescriptor,
/*
* We do this because DataFill wants to initialize a "tupmask" which
* is used for HeapTuples, but we want an indextuple infomask. The only
* "relevent" info is the "has variable attributes" field, which is in
* mask position 0x02. We have already set the null mask above.
* is used for HeapTuples, but we want an indextuple infomask. The
* only "relevent" info is the "has variable attributes" field, which
* is in mask position 0x02. We have already set the null mask above.
*/
if (tupmask & 0x02) infomask |= INDEX_VAR_MASK;
if (tupmask & 0x02)
infomask |= INDEX_VAR_MASK;
/*
* Here we make sure that we can actually hold the size. We also want
@ -160,16 +166,19 @@ fastgetiattr(IndexTuple tup,
data_off = IndexTupleHasMinHeader(tup) ? sizeof *tup :
IndexInfoFindDataOffset(tup->t_info);
if (IndexTupleNoNulls(tup)) {
if (IndexTupleNoNulls(tup))
{
/* first attribute is always at position zero */
if (attnum == 1) {
if (attnum == 1)
{
return (fetchatt(&(att[0]), (char *) tup + data_off));
}
attnum--;
if (att[attnum]->attcacheoff > 0) {
if (att[attnum]->attcacheoff > 0)
{
return (fetchatt(&(att[attnum]),
(char *) tup + data_off +
att[attnum]->attcacheoff));
@ -178,9 +187,12 @@ fastgetiattr(IndexTuple tup,
tp = (char *) tup + data_off;
slow = 0;
}else { /* there's a null somewhere in the tuple */
}
else
{ /* there's a null somewhere in the tuple */
bp = (char *) tup + sizeof(*tup); /* "knows" t_bits are here! */
bp = (char *) tup + sizeof(*tup); /* "knows" t_bits are
* here! */
slow = 0;
/* ----------------
* check to see if desired att is null
@ -189,7 +201,8 @@ fastgetiattr(IndexTuple tup,
attnum--;
{
if (att_isnull(attnum, bp)) {
if (att_isnull(attnum, bp))
{
*isnull = true;
return NULL;
}
@ -202,20 +215,26 @@ fastgetiattr(IndexTuple tup,
register int i = 0; /* current offset in bp */
register int mask; /* bit in byte we're looking at */
register char n; /* current byte in bp */
register int byte, finalbit;
register int byte,
finalbit;
byte = attnum >> 3;
finalbit = attnum & 0x07;
for (; i <= byte; i++) {
for (; i <= byte; i++)
{
n = bp[i];
if (i < byte) {
if (i < byte)
{
/* check for nulls in any "earlier" bytes */
if ((~n) != 0) {
if ((~n) != 0)
{
slow++;
break;
}
} else {
}
else
{
/* check for nulls "before" final bit of last byte */
mask = (finalbit << 1) - 1;
if ((~n) & mask)
@ -228,15 +247,20 @@ fastgetiattr(IndexTuple tup,
/* now check for any non-fixed length attrs before our attribute */
if (!slow) {
if (att[attnum]->attcacheoff > 0) {
if (!slow)
{
if (att[attnum]->attcacheoff > 0)
{
return (fetchatt(&(att[attnum]),
tp + att[attnum]->attcacheoff));
}else if (!IndexTupleAllFixed(tup)) {
}
else if (!IndexTupleAllFixed(tup))
{
register int j = 0;
for (j = 0; j < attnum && !slow; j++)
if (att[j]->attlen < 1) slow = 1;
if (att[j]->attlen < 1)
slow = 1;
}
}
@ -246,7 +270,8 @@ fastgetiattr(IndexTuple tup,
* the attribute cached offset values.
*/
if (!slow) {
if (!slow)
{
register int j = 1;
register long off;
@ -256,12 +281,15 @@ fastgetiattr(IndexTuple tup,
att[0]->attcacheoff = 0;
while (att[j]->attcacheoff > 0) j++;
while (att[j]->attcacheoff > 0)
j++;
off = att[j - 1]->attcacheoff +
att[j - 1]->attlen;
for (; j < attnum + 1; j++) {
for (; j < attnum + 1; j++)
{
/*
* Fix me when going to a machine with more than a four-byte
* word!
@ -298,7 +326,9 @@ fastgetiattr(IndexTuple tup,
return (fetchatt(&(att[attnum]),
tp + att[attnum]->attcacheoff));
}else {
}
else
{
register bool usecache = true;
register int off = 0;
register int i;
@ -307,15 +337,19 @@ fastgetiattr(IndexTuple tup,
* Now we know that we have to walk the tuple CAREFULLY.
*/
for (i = 0; i < attnum; i++) {
if (!IndexTupleNoNulls(tup)) {
if (att_isnull(i, bp)) {
for (i = 0; i < attnum; i++)
{
if (!IndexTupleNoNulls(tup))
{
if (att_isnull(i, bp))
{
usecache = false;
continue;
}
}
if (usecache && att[i]->attcacheoff > 0) {
if (usecache && att[i]->attcacheoff > 0)
{
off = att[i]->attcacheoff;
if (att[i]->attlen == -1)
usecache = false;
@ -323,7 +357,8 @@ fastgetiattr(IndexTuple tup,
continue;
}
if (usecache) att[i]->attcacheoff = off;
if (usecache)
att[i]->attcacheoff = off;
switch (att[i]->attlen)
{
case sizeof(char):
@ -353,12 +388,13 @@ fastgetiattr(IndexTuple tup,
break;
}
}
/*
* I don't know why this code was missed here!
* I've got it from heaptuple.c:fastgetattr().
* - vadim 06/12/97
* I don't know why this code was missed here! I've got it from
* heaptuple.c:fastgetattr(). - vadim 06/12/97
*/
switch (att[attnum]->attlen) {
switch (att[attnum]->attlen)
{
case -1:
off = (att[attnum]->attalign == 'd') ?
DOUBLEALIGN(off) : INTALIGN(off);
@ -430,10 +466,12 @@ IndexInfoFindDataOffset(unsigned short t_info)
{
if (!(t_info & INDEX_NULL_MASK))
return ((Size) sizeof(IndexTupleData));
else {
else
{
Size size = sizeof(IndexTupleData);
if (t_info & INDEX_NULL_MASK) {
if (t_info & INDEX_NULL_MASK)
{
size += sizeof(IndexAttributeBitMapData);
}
return DOUBLEALIGN(size); /* be conservative */
@ -451,11 +489,11 @@ CopyIndexTuple(IndexTuple source, IndexTuple *target)
IndexTuple ret;
size = IndexTupleSize(source);
if (*target == NULL) {
if (*target == NULL)
{
*target = (IndexTuple) palloc(size);
}
ret = *target;
memmove((char *) ret, (char *) source, size);
}

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/Attic/indexvalid.c,v 1.14 1997/03/18 18:38:19 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/Attic/indexvalid.c,v 1.15 1997/09/07 04:37:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -46,32 +46,39 @@ index_keytest(IndexTuple tuple,
IncrIndexProcessed();
while (scanKeySize > 0) {
while (scanKeySize > 0)
{
datum = index_getattr(tuple,
key[0].sk_attno,
tupdesc,
&isNull);
if (isNull) {
if (isNull)
{
/* XXX eventually should check if SK_ISNULL */
return (false);
}
if (key[0].sk_flags & SK_ISNULL) {
if (key[0].sk_flags & SK_ISNULL)
{
return (false);
}
if (key[0].sk_flags & SK_COMMUTE) {
if (key[0].sk_flags & SK_COMMUTE)
{
test = (*(key[0].sk_func))
(DatumGetPointer(key[0].sk_argument),
datum) ? 1 : 0;
} else {
}
else
{
test = (*(key[0].sk_func))
(datum,
DatumGetPointer(key[0].sk_argument)) ? 1 : 0;
}
if (!test == !(key[0].sk_flags & SK_NEGATE)) {
if (!test == !(key[0].sk_flags & SK_NEGATE))
{
return (false);
}
@ -81,4 +88,3 @@ index_keytest(IndexTuple tuple,
return (true);
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.15 1997/08/26 23:31:23 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.16 1997/09/07 04:37:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -73,8 +73,11 @@ gettypelem(Oid type)
void
printtup(HeapTuple tuple, TupleDesc typeinfo)
{
int i, j, k;
char *outputstr, *attr;
int i,
j,
k;
char *outputstr,
*attr;
bool isnull;
Oid typoutput;
@ -90,13 +93,16 @@ printtup(HeapTuple tuple, TupleDesc typeinfo)
*/
j = 0;
k = 1 << 7;
for (i = 0; i < tuple->t_natts; ) {
i++; /* heap_getattr is a macro, so no increment */
for (i = 0; i < tuple->t_natts;)
{
i++; /* heap_getattr is a macro, so no
* increment */
attr = heap_getattr(tuple, InvalidBuffer, i, typeinfo, &isnull);
if (!isnull)
j |= k;
k >>= 1;
if (!(i & 7)) {
if (!(i & 7))
{
pq_putint(j, 1);
j = 0;
k = 1 << 7;
@ -109,11 +115,13 @@ printtup(HeapTuple tuple, TupleDesc typeinfo)
* send the attributes of this tuple
* ----------------
*/
for (i = 0; i < tuple->t_natts; ++i) {
for (i = 0; i < tuple->t_natts; ++i)
{
attr = heap_getattr(tuple, InvalidBuffer, i + 1, typeinfo, &isnull);
typoutput = typtoout((Oid) typeinfo->attrs[i]->atttypid);
if (!isnull && OidIsValid(typoutput)) {
if (!isnull && OidIsValid(typoutput))
{
outputstr = fmgr(typoutput, attr,
gettypelem(typeinfo->attrs[i]->atttypid));
pq_putint(strlen(outputstr) + 4, 4);
@ -168,15 +176,18 @@ void
debugtup(HeapTuple tuple, TupleDesc typeinfo)
{
register int i;
char *attr, *value;
char *attr,
*value;
bool isnull;
Oid typoutput;
for (i = 0; i < tuple->t_natts; ++i) {
for (i = 0; i < tuple->t_natts; ++i)
{
attr = heap_getattr(tuple, InvalidBuffer, i + 1, typeinfo, &isnull);
typoutput = typtoout((Oid) typeinfo->attrs[i]->atttypid);
if (!isnull && OidIsValid(typoutput)) {
if (!isnull && OidIsValid(typoutput))
{
value = fmgr(typoutput, attr,
gettypelem(typeinfo->attrs[i]->atttypid));
printatt((unsigned) i + 1, typeinfo->attrs[i], value);
@ -198,7 +209,9 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo)
void
printtup_internal(HeapTuple tuple, TupleDesc typeinfo)
{
int i, j, k;
int i,
j,
k;
char *attr;
bool isnull;
@ -214,13 +227,16 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo)
*/
j = 0;
k = 1 << 7;
for (i = 0; i < tuple->t_natts; ) {
i++; /* heap_getattr is a macro, so no increment */
for (i = 0; i < tuple->t_natts;)
{
i++; /* heap_getattr is a macro, so no
* increment */
attr = heap_getattr(tuple, InvalidBuffer, i, typeinfo, &isnull);
if (!isnull)
j |= k;
k >>= 1;
if (!(i & 7)) {
if (!(i & 7))
{
pq_putint(j, 1);
j = 0;
k = 1 << 7;
@ -236,13 +252,16 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo)
#ifdef IPORTAL_DEBUG
fprintf(stderr, "sending tuple with %d atts\n", tuple->t_natts);
#endif
for (i = 0; i < tuple->t_natts; ++i) {
for (i = 0; i < tuple->t_natts; ++i)
{
int32 len = typeinfo->attrs[i]->attlen;
attr = heap_getattr(tuple, InvalidBuffer, i + 1, typeinfo, &isnull);
if (!isnull) {
if (!isnull)
{
/* # of bytes, and opaque data */
if (len == -1) {
if (len == -1)
{
/* variable length, assume a varlena structure */
len = VARSIZE(attr) - VARHDRSZ;
@ -256,15 +275,19 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo)
len, *d, *(d + 1), *(d + 2), *(d + 3));
}
#endif
} else {
}
else
{
/* fixed size */
if (typeinfo->attrs[i]->attbyval) {
if (typeinfo->attrs[i]->attbyval)
{
int8 i8;
int16 i16;
int32 i32;
pq_putint(len, sizeof(int32));
switch (len) {
switch (len)
{
case sizeof(int8):
i8 = DatumGetChar(attr);
pq_putnchar((char *) &i8, len);
@ -281,7 +304,9 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo)
#ifdef IPORTAL_DEBUG
fprintf(stderr, "byval length %d data %d\n", len, attr);
#endif
} else {
}
else
{
pq_putint(len, sizeof(int32));
pq_putnchar(attr, len);
#ifdef IPORTAL_DEBUG

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.9 1996/11/05 07:42:45 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.10 1997/09/07 04:37:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.19 1997/08/22 02:55:39 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.20 1997/09/07 04:37:41 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@ -106,13 +106,15 @@ TupleDesc
CreateTupleDescCopy(TupleDesc tupdesc)
{
TupleDesc desc;
int i, size;
int i,
size;
desc = (TupleDesc) palloc(sizeof(struct tupleDesc));
desc->natts = tupdesc->natts;
size = desc->natts * sizeof(AttributeTupleForm);
desc->attrs = (AttributeTupleForm *) palloc(size);
for (i=0;i<desc->natts;i++) {
for (i = 0; i < desc->natts; i++)
{
desc->attrs[i] =
(AttributeTupleForm) palloc(ATTRIBUTE_TUPLE_SIZE);
memmove(desc->attrs[i],
@ -139,13 +141,15 @@ CreateTupleDescCopyConstr(TupleDesc tupdesc)
{
TupleDesc desc;
TupleConstr *constr = tupdesc->constr;
int i, size;
int i,
size;
desc = (TupleDesc) palloc(sizeof(struct tupleDesc));
desc->natts = tupdesc->natts;
size = desc->natts * sizeof(AttributeTupleForm);
desc->attrs = (AttributeTupleForm *) palloc(size);
for (i=0;i<desc->natts;i++) {
for (i = 0; i < desc->natts; i++)
{
desc->attrs[i] =
(AttributeTupleForm) palloc(ATTRIBUTE_TUPLE_SIZE);
memmove(desc->attrs[i],
@ -264,8 +268,11 @@ TupleDescInitEntry(TupleDesc desc,
*/
AssertArg(PointerIsValid(desc));
AssertArg(attributeNumber >= 1);
/* attributeName's are sometimes NULL,
from resdom's. I don't know why that is, though -- Jolly */
/*
* attributeName's are sometimes NULL, from resdom's. I don't know
* why that is, though -- Jolly
*/
/* AssertArg(NameIsValid(attributeName));*/
/* AssertArg(NameIsValid(typeName));*/
@ -321,7 +328,8 @@ TupleDescInitEntry(TupleDesc desc,
*/
tuple = SearchSysCacheTuple(TYPNAME, PointerGetDatum(typeName),
0, 0, 0);
if (! HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
/* ----------------
* here type info does not exist yet so we just fill
* the attribute with dummy information and return false.
@ -368,11 +376,15 @@ TupleDescInitEntry(TupleDesc desc,
are considered, etc. Ugh.
-----------------------------------------
*/
if (attisset) {
if (attisset)
{
Type t = type("oid");
att->attlen = tlen(t);
att->attbyval = tbyval(t);
} else {
}
else
{
att->attlen = typeForm->typlen;
att->attbyval = typeForm->typbyval;
}
@ -451,7 +463,8 @@ BuildDescForRelation(List *schema, char *relname)
typename = palloc(NAMEDATALEN);
foreach(p, schema) {
foreach(p, schema)
{
ColumnDef *entry;
List *arry;
@ -475,7 +488,8 @@ BuildDescForRelation(List *schema, char *relname)
attdim = 0;
if (!TupleDescInitEntry(desc, attnum, attname,
typename, attdim, attisset)) {
typename, attdim, attisset))
{
/* ----------------
* if TupleDescInitEntry() fails, it means there is
* no type in the system catalogs. So now we check if
@ -483,9 +497,11 @@ BuildDescForRelation(List *schema, char *relname)
* have a self reference, otherwise it's an error.
* ----------------
*/
if (!strcmp(typename, relname)) {
if (!strcmp(typename, relname))
{
TupleDescMakeSelfReference(desc, attnum, relname);
} else
}
else
elog(WARN, "DefineRelation: no such type %s",
typename);
}
@ -494,10 +510,11 @@ BuildDescForRelation(List *schema, char *relname)
* this is for char() and varchar(). When an entry is of type
* char() or varchar(), typlen is set to the appropriate length,
* which we'll use here instead. (The catalog lookup only returns
* the length of bpchar and varchar which is not what we want!)
* - ay 6/95
* the length of bpchar and varchar which is not what we want!) -
* ay 6/95
*/
if (entry->typename->typlen > 0) {
if (entry->typename->typlen > 0)
{
desc->attrs[attnum - 1]->attlen = entry->typename->typlen;
}
@ -542,4 +559,3 @@ BuildDescForRelation(List *schema, char *relname)
}
return desc;
}

View File

@ -32,32 +32,42 @@
#endif
/* non-export function prototypes */
static InsertIndexResult gistdoinsert(Relation r, IndexTuple itup,
static InsertIndexResult
gistdoinsert(Relation r, IndexTuple itup,
GISTSTATE * GISTstate);
static InsertIndexResult gistentryinsert(Relation r, GISTSTACK *stk,
static InsertIndexResult
gistentryinsert(Relation r, GISTSTACK * stk,
IndexTuple tup,
GISTSTATE * giststate);
static void gistentryinserttwo(Relation r, GISTSTACK *stk, IndexTuple ltup,
static void
gistentryinserttwo(Relation r, GISTSTACK * stk, IndexTuple ltup,
IndexTuple rtup, GISTSTATE * giststate);
static void gistAdjustKeys(Relation r, GISTSTACK *stk, BlockNumber blk,
static void
gistAdjustKeys(Relation r, GISTSTACK * stk, BlockNumber blk,
char *datum, int att_size, GISTSTATE * giststate);
static void gistintinsert(Relation r, GISTSTACK *stk, IndexTuple ltup,
static void
gistintinsert(Relation r, GISTSTACK * stk, IndexTuple ltup,
IndexTuple rtup, GISTSTATE * giststate);
static InsertIndexResult gistSplit(Relation r, Buffer buffer,
static InsertIndexResult
gistSplit(Relation r, Buffer buffer,
GISTSTACK * stack, IndexTuple itup,
GISTSTATE * giststate);
static void gistnewroot(GISTSTATE *giststate, Relation r, IndexTuple lt,
static void
gistnewroot(GISTSTATE * giststate, Relation r, IndexTuple lt,
IndexTuple rt);
static void GISTInitBuffer(Buffer b, uint32 f);
static BlockNumber gistChooseSubtree(Relation r, IndexTuple itup, int level,
static BlockNumber
gistChooseSubtree(Relation r, IndexTuple itup, int level,
GISTSTATE * giststate,
GISTSTACK ** retstack, Buffer * leafbuf);
static OffsetNumber gistchoose(Relation r, Page p, IndexTuple it,
static OffsetNumber
gistchoose(Relation r, Page p, IndexTuple it,
GISTSTATE * giststate);
static int gistnospace(Page p, IndexTuple it);
void gistdelete(Relation r, ItemPointer tid);
static IndexTuple gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t);
static void gistcentryinit(GISTSTATE *giststate, GISTENTRY *e, char *pr,
static void
gistcentryinit(GISTSTATE * giststate, GISTENTRY * e, char *pr,
Relation r, Page pg, OffsetNumber o, int b, bool l);
static char *int_range_out(INTRANGE * r);
@ -80,18 +90,25 @@ gistbuild(Relation heap,
AttrNumber i;
HeapTuple htup;
IndexTuple itup;
TupleDesc hd, id;
TupleDesc hd,
id;
InsertIndexResult res;
Datum *d;
bool *nulls;
int nb, nh, ni;
int nb,
nh,
ni;
#ifndef OMIT_PARTIAL_INDEX
ExprContext *econtext;
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
Oid hrelid, irelid;
Node *pred, *oldPred;
Oid hrelid,
irelid;
Node *pred,
*oldPred;
GISTSTATE giststate;
GISTENTRY tmpcentry;
bool *compvec;
@ -107,15 +124,16 @@ gistbuild(Relation heap,
oldPred = predInfo->oldPred;
/*
* We expect to be called exactly once for any index relation.
* If that's not the case, big trouble's what we have.
* We expect to be called exactly once for any index relation. If
* that's not the case, big trouble's what we have.
*/
if (oldPred == NULL && (nb = RelationGetNumberOfBlocks(index)) != 0)
elog(WARN, "%.16s already contains data", &(index->rd_rel->relname.data[0]));
/* initialize the root page (if this is a new index) */
if (oldPred == NULL) {
if (oldPred == NULL)
{
buffer = ReadBuffer(index, P_NEW);
GISTInitBuffer(buffer, F_LEAF);
WriteBuffer(buffer);
@ -128,20 +146,23 @@ gistbuild(Relation heap,
nulls = (bool *) palloc(natts * sizeof(*nulls));
/*
* If this is a predicate (partial) index, we will need to evaluate the
* predicate using ExecQual, which requires the current tuple to be in a
* slot of a TupleTable. In addition, ExecQual must have an ExprContext
* referring to that slot. Here, we initialize dummy TupleTable and
* ExprContext objects for this purpose. --Nels, Feb '92
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
* be in a slot of a TupleTable. In addition, ExecQual must have an
* ExprContext referring to that slot. Here, we initialize dummy
* TupleTable and ExprContext objects for this purpose. --Nels, Feb
* '92
*/
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL) {
if (pred != NULL || oldPred != NULL)
{
tupleTable = ExecCreateTupleTable(1);
slot = ExecAllocTableSlot(tupleTable);
econtext = makeNode(ExprContext);
FillDummyExprContext(econtext, slot, hd, buffer);
}
else /* shut the compiler up */
else
/* shut the compiler up */
{
tupleTable = NULL;
slot = NULL;
@ -154,7 +175,8 @@ gistbuild(Relation heap,
/* int the tuples as we insert them */
nh = ni = 0;
for (; HeapTupleIsValid(htup); htup = heap_getnext(scan, 0, &buffer)) {
for (; HeapTupleIsValid(htup); htup = heap_getnext(scan, 0, &buffer))
{
nh++;
@ -162,19 +184,25 @@ gistbuild(Relation heap,
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
*/
if (oldPred != NULL) {
if (oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
if (ExecQual((List*)oldPred, econtext) == true) {
if (ExecQual((List *) oldPred, econtext) == true)
{
ni++;
continue;
}
#endif /* OMIT_PARTIAL_INDEX */
}
/* Skip this tuple if it doesn't satisfy the partial-index predicate */
if (pred != NULL) {
/*
* Skip this tuple if it doesn't satisfy the partial-index
* predicate
*/
if (pred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
@ -186,23 +214,25 @@ gistbuild(Relation heap,
ni++;
/*
* For the current heap tuple, extract all the attributes
* we use in this index, and note which are null.
* For the current heap tuple, extract all the attributes we use
* in this index, and note which are null.
*/
for (i = 1; i <= natts; i++) {
for (i = 1; i <= natts; i++)
{
int attoff;
bool attnull;
/*
* Offsets are from the start of the tuple, and are
* zero-based; indices are one-based. The next call
* returns i - 1. That's data hiding for you.
* zero-based; indices are one-based. The next call returns i
* - 1. That's data hiding for you.
*/
attoff = AttrNumberGetAttrOffset(i);
/*
d[attoff] = HeapTupleGetAttributeValue(htup, buffer,
* d[attoff] = HeapTupleGetAttributeValue(htup, buffer,
*/
d[attoff] = GetIndexValue(htup,
hd,
@ -216,13 +246,15 @@ gistbuild(Relation heap,
/* immediately compress keys to normalize */
compvec = (bool *) palloc(sizeof(bool) * natts);
for (i = 0; i < natts; i++) {
for (i = 0; i < natts; i++)
{
gistcentryinit(&giststate, &tmpcentry, (char *) d[i],
(Relation) NULL, (Page) NULL, (OffsetNumber) 0,
-1 /* size is currently bogus */ , TRUE);
if (d[i] != (Datum) tmpcentry.pred && !(giststate.keytypbyval))
compvec[i] = TRUE;
else compvec[i] = FALSE;
else
compvec[i] = FALSE;
d[i] = (Datum) tmpcentry.pred;
}
@ -231,17 +263,17 @@ gistbuild(Relation heap,
itup->t_tid = htup->t_ctid;
/*
* Since we already have the index relation locked, we
* call gistdoinsert directly. Normal access method calls
* dispatch through gistinsert, which locks the relation
* for write. This is the right thing to do if you're
* inserting single tups, but not when you're initializing
* the whole index at once.
* Since we already have the index relation locked, we call
* gistdoinsert directly. Normal access method calls dispatch
* through gistinsert, which locks the relation for write. This
* is the right thing to do if you're inserting single tups, but
* not when you're initializing the whole index at once.
*/
res = gistdoinsert(index, itup, &giststate);
for (i = 0; i < natts; i++)
if (compvec[i] == TRUE) pfree((char *)d[i]);
if (compvec[i] == TRUE)
pfree((char *) d[i]);
pfree(itup);
pfree(res);
pfree(compvec);
@ -251,7 +283,8 @@ gistbuild(Relation heap,
heap_endscan(scan);
RelationUnsetLockForWrite(index);
if (pred != NULL || oldPred != NULL) {
if (pred != NULL || oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
ExecDestroyTupleTable(tupleTable, true);
pfree(econtext);
@ -259,14 +292,13 @@ gistbuild(Relation heap,
}
/*
* Since we just inted the tuples in the heap, we update its
* stats in pg_relation to guarantee that the planner takes
* advantage of the index we just created. UpdateStats() does a
* CommandinterIncrement(), which flushes changed entries from
* the system relcache. The act of constructing an index changes
* these heap and index tuples in the system catalogs, so they
* need to be flushed. We close them to guarantee that they
* will be.
* Since we just inted the tuples in the heap, we update its stats in
* pg_relation to guarantee that the planner takes advantage of the
* index we just created. UpdateStats() does a
* CommandinterIncrement(), which flushes changed entries from the
* system relcache. The act of constructing an index changes these
* heap and index tuples in the system catalogs, so they need to be
* flushed. We close them to guarantee that they will be.
*/
hrelid = heap->rd_id;
@ -277,8 +309,10 @@ gistbuild(Relation heap,
UpdateStats(hrelid, nh, true);
UpdateStats(irelid, ni, false);
if (oldPred != NULL) {
if (ni == nh) pred = NULL;
if (oldPred != NULL)
{
if (ni == nh)
pred = NULL;
UpdateIndexPredicate(irelid, oldPred, pred);
}
@ -307,13 +341,15 @@ gistinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation
/* immediately compress keys to normalize */
compvec = (bool *) palloc(sizeof(bool) * r->rd_att->natts);
for (i = 0; i < r->rd_att->natts; i++) {
for (i = 0; i < r->rd_att->natts; i++)
{
gistcentryinit(&giststate, &tmpentry, (char *) datum[i],
(Relation) NULL, (Page) NULL, (OffsetNumber) 0,
-1 /* size is currently bogus */ , TRUE);
if (datum[i] != (Datum) tmpentry.pred && !(giststate.keytypbyval))
compvec[i] = TRUE;
else compvec[i] = FALSE;
else
compvec[i] = FALSE;
datum[i] = (Datum) tmpentry.pred;
}
itup = index_formtuple(RelationGetTupleDescriptor(r), datum, nulls);
@ -322,7 +358,8 @@ gistinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation
RelationSetLockForWrite(r);
res = gistdoinsert(r, itup, &giststate);
for (i = 0; i < r->rd_att->natts; i++)
if (compvec[i] == TRUE) pfree((char *)datum[i]);
if (compvec[i] == TRUE)
pfree((char *) datum[i]);
pfree(itup);
pfree(compvec);
@ -350,8 +387,10 @@ gistPageAddItem(GISTSTATE *giststate,
GISTENTRY tmpcentry;
IndexTuple itup = (IndexTuple) item;
/* recompress the item given that we now know the exact page and
offset for insertion */
/*
* recompress the item given that we now know the exact page and
* offset for insertion
*/
gistdentryinit(giststate, dentry,
(((char *) itup) + sizeof(IndexTupleData)),
(Relation) 0, (Page) 0, (OffsetNumber) InvalidOffsetNumber,
@ -388,7 +427,8 @@ gistdoinsert(Relation r,
blk = gistChooseSubtree(r, itup, 0, giststate, &stack, &buffer);
page = (Page) BufferGetPage(buffer);
if (gistnospace(page, itup)) {
if (gistnospace(page, itup))
{
/* need to do a split */
res = gistSplit(r, buffer, stack, itup, giststate);
gistfreestack(stack);
@ -425,7 +465,8 @@ gistdoinsert(Relation r,
static BlockNumber
gistChooseSubtree(Relation r, IndexTuple itup, /* itup has compressed entry */
gistChooseSubtree(Relation r, IndexTuple itup, /* itup has compressed
* entry */
int level,
GISTSTATE * giststate,
GISTSTACK ** retstack /* out */ ,
@ -442,7 +483,8 @@ gistChooseSubtree(Relation r, IndexTuple itup, /* itup has compressed entry */
buffer = InvalidBuffer;
stack = (GISTSTACK *) NULL;
do {
do
{
/* let go of current buffer before getting next */
if (buffer != InvalidBuffer)
ReleaseBuffer(buffer);
@ -452,7 +494,8 @@ gistChooseSubtree(Relation r, IndexTuple itup, /* itup has compressed entry */
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
if (!(opaque->flags & F_LEAF)) {
if (!(opaque->flags & F_LEAF))
{
GISTSTACK *n;
ItemId iid;
@ -488,8 +531,11 @@ gistAdjustKeys(Relation r,
Buffer b;
bool result;
bytea *evec;
GISTENTRY centry, *ev0p, *ev1p;
int size, datumsize;
GISTENTRY centry,
*ev0p,
*ev1p;
int size,
datumsize;
IndexTuple tid;
if (stk == (GISTSTACK *) NULL)
@ -522,27 +568,32 @@ gistAdjustKeys(Relation r,
/* did union leave decompressed version of oldud unchanged? */
(giststate->equalFn) (ev0p->pred, datum, &result);
if (!result) {
if (!result)
{
TupleDesc td = RelationGetTupleDescriptor(r);
/* compress datum for storage on page */
gistcentryinit(giststate, &centry, datum, ev0p->rel, ev0p->page,
ev0p->offset, datumsize, FALSE);
if (td->attrs[0]->attlen >= 0) {
if (td->attrs[0]->attlen >= 0)
{
memmove(oldud, centry.pred, att_size);
gistAdjustKeys(r, stk->gs_parent, stk->gs_blk, datum, att_size,
giststate);
}
else if (VARSIZE(centry.pred) == VARSIZE(oldud)) {
else if (VARSIZE(centry.pred) == VARSIZE(oldud))
{
memmove(oldud, centry.pred, VARSIZE(centry.pred));
gistAdjustKeys(r, stk->gs_parent, stk->gs_blk, datum, att_size,
giststate);
}
else {
else
{
/*
** new datum is not the same size as the old.
** We have to delete the old entry and insert the new
** one. Note that this may cause a split here!
* new datum is not the same size as the old. We have to
* delete the old entry and insert the new one. Note that
* this may cause a split here!
*/
IndexTuple newtup;
ItemPointerData oldtid;
@ -579,7 +630,8 @@ gistAdjustKeys(Relation r,
if (centry.pred != datum)
pfree(datum);
}
else {
else
{
ReleaseBuffer(b);
}
pfree(evec);
@ -597,15 +649,21 @@ gistSplit(Relation r,
GISTSTATE * giststate)
{
Page p;
Buffer leftbuf, rightbuf;
Page left, right;
Buffer leftbuf,
rightbuf;
Page left,
right;
ItemId itemid;
IndexTuple item;
IndexTuple ltup, rtup, newtup;
IndexTuple ltup,
rtup,
newtup;
OffsetNumber maxoff;
OffsetNumber i;
OffsetNumber leftoff, rightoff;
BlockNumber lbknum, rbknum;
OffsetNumber leftoff,
rightoff;
BlockNumber lbknum,
rbknum;
BlockNumber bufblock;
GISTPageOpaque opaque;
int blank;
@ -616,7 +674,8 @@ gistSplit(Relation r,
bytea *entryvec;
bool *decompvec;
IndexTuple item_1;
GISTENTRY tmpdentry, tmpentry;
GISTENTRY tmpdentry,
tmpentry;
isnull = (char *) palloc(r->rd_rel->relnatts);
for (blank = 0; blank < r->rd_rel->relnatts; blank++)
@ -626,17 +685,20 @@ gistSplit(Relation r,
/*
* The root of the tree is the first block in the relation. If
* we're about to split the root, we need to do some hocus-pocus
* to enforce this guarantee.
* The root of the tree is the first block in the relation. If we're
* about to split the root, we need to do some hocus-pocus to enforce
* this guarantee.
*/
if (BufferGetBlockNumber(buffer) == GISTP_ROOT) {
if (BufferGetBlockNumber(buffer) == GISTP_ROOT)
{
leftbuf = ReadBuffer(r, P_NEW);
GISTInitBuffer(leftbuf, opaque->flags);
lbknum = BufferGetBlockNumber(leftbuf);
left = (Page) BufferGetPage(leftbuf);
} else {
}
else
{
leftbuf = buffer;
IncrBufferRefCount(buffer);
lbknum = BufferGetBlockNumber(buffer);
@ -652,7 +714,8 @@ gistSplit(Relation r,
maxoff = PageGetMaxOffsetNumber(p);
entryvec = (bytea *) palloc(VARHDRSZ + (maxoff + 2) * sizeof(GISTENTRY));
decompvec = (bool *) palloc(VARHDRSZ + (maxoff + 2) * sizeof(bool));
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
item_1 = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(entryvec))[i],
(((char *) item_1) + sizeof(IndexTupleData)),
@ -661,7 +724,8 @@ gistSplit(Relation r,
if ((char *) (((GISTENTRY *) VARDATA(entryvec))[i].pred)
== (((char *) item_1) + sizeof(IndexTupleData)))
decompvec[i] = FALSE;
else decompvec[i] = TRUE;
else
decompvec[i] = TRUE;
}
/* add the new datum as the last entry */
@ -672,7 +736,8 @@ gistSplit(Relation r,
if ((char *) (((GISTENTRY *) VARDATA(entryvec))[maxoff + 1]).pred !=
(((char *) itup) + sizeof(IndexTupleData)))
decompvec[maxoff + 1] = TRUE;
else decompvec[maxoff+1] = FALSE;
else
decompvec[maxoff + 1] = FALSE;
VARSIZE(entryvec) = (maxoff + 2) * sizeof(GISTENTRY) + VARHDRSZ;
@ -703,11 +768,13 @@ gistSplit(Relation r,
leftoff = rightoff = FirstOffsetNumber;
maxoff = PageGetMaxOffsetNumber(p);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
itemid = PageGetItemId(p, i);
item = (IndexTuple) PageGetItem(p, itemid);
if (i == *(v.spl_left)) {
if (i == *(v.spl_left))
{
gistPageAddItem(giststate, r, left, (Item) item,
IndexTupleSize(item),
leftoff, LP_USED, &tmpdentry, &newtup);
@ -719,7 +786,8 @@ gistSplit(Relation r,
if ((IndexTuple) item != newtup)
pfree(newtup);
}
else {
else
{
gistPageAddItem(giststate, r, right, (Item) item,
IndexTupleSize(item),
rightoff, LP_USED, &tmpdentry, &newtup);
@ -737,7 +805,8 @@ gistSplit(Relation r,
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
/* now insert the new index tuple */
if (*(v.spl_left) != FirstOffsetNumber) {
if (*(v.spl_left) != FirstOffsetNumber)
{
gistPageAddItem(giststate, r, left, (Item) itup,
IndexTupleSize(itup),
leftoff, LP_USED, &tmpdentry, &newtup);
@ -748,7 +817,9 @@ gistSplit(Relation r,
pfree(tmpdentry.pred);
if (itup != newtup)
pfree(newtup);
} else {
}
else
{
gistPageAddItem(giststate, r, right, (Item) itup,
IndexTupleSize(itup),
rightoff, LP_USED, &tmpdentry, &newtup);
@ -761,7 +832,8 @@ gistSplit(Relation r,
pfree(newtup);
}
if ((bufblock = BufferGetBlockNumber(buffer)) != GISTP_ROOT) {
if ((bufblock = BufferGetBlockNumber(buffer)) != GISTP_ROOT)
{
PageRestoreTempPage(left, p);
}
WriteBuffer(leftbuf);
@ -770,18 +842,17 @@ gistSplit(Relation r,
/*
* Okay, the page is split. We have three things left to do:
*
* 1) Adjust any active scans on this index to cope with changes
* we introduced in its structure by splitting this page.
* 1) Adjust any active scans on this index to cope with changes we
* introduced in its structure by splitting this page.
*
* 2) "Tighten" the bounding box of the pointer to the left
* page in the parent node in the tree, if any. Since we
* moved a bunch of stuff off the left page, we expect it
* to get smaller. This happens in the internal insertion
* routine.
* 2) "Tighten" the bounding box of the pointer to the left page in the
* parent node in the tree, if any. Since we moved a bunch of stuff
* off the left page, we expect it to get smaller. This happens in
* the internal insertion routine.
*
* 3) Insert a pointer to the right page in the parent. This
* may cause the parent to split. If it does, we need to
* repeat steps one and two for each split node in the tree.
* 3) Insert a pointer to the right page in the parent. This may cause
* the parent to split. If it does, we need to repeat steps one and
* two for each split node in the tree.
*/
/* adjust active scans */
@ -820,7 +891,8 @@ gistintinsert(Relation r,
{
ItemPointerData ltid;
if (stk == (GISTSTACK *) NULL) {
if (stk == (GISTSTACK *) NULL)
{
gistnewroot(giststate, r, ltup, rtup);
return;
}
@ -848,12 +920,16 @@ gistentryinserttwo(Relation r, GISTSTACK *stk, IndexTuple ltup,
b = ReadBuffer(r, stk->gs_blk);
p = BufferGetPage(b);
if (gistnospace(p, ltup)) {
if (gistnospace(p, ltup))
{
res = gistSplit(r, b, stk->gs_parent, ltup, giststate);
WriteBuffer(b); /* don't forget to release buffer! - 01/31/94 */
WriteBuffer(b); /* don't forget to release buffer! -
* 01/31/94 */
pfree(res);
gistdoinsert(r, rtup, giststate);
} else {
}
else
{
gistPageAddItem(giststate, r, p, (Item) ltup,
IndexTupleSize(ltup), InvalidOffsetNumber,
LP_USED, &tmpentry, &newtup);
@ -887,12 +963,15 @@ gistentryinsert(Relation r, GISTSTACK *stk, IndexTuple tup,
b = ReadBuffer(r, stk->gs_blk);
p = BufferGetPage(b);
if (gistnospace(p, tup)) {
if (gistnospace(p, tup))
{
res = gistSplit(r, b, stk->gs_parent, tup, giststate);
WriteBuffer(b); /* don't forget to release buffer! - 01/31/94 */
WriteBuffer(b); /* don't forget to release buffer! -
* 01/31/94 */
return (res);
}
else {
else
{
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
off = gistPageAddItem(giststate, r, p, (Item) tup, IndexTupleSize(tup),
InvalidOffsetNumber, LP_USED, &tmpentry, &newtup);
@ -972,8 +1051,10 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
float usize;
OffsetNumber which;
float which_grow;
GISTENTRY entry, identry;
int size, idsize;
GISTENTRY entry,
identry;
int size,
idsize;
idsize = IndexTupleSize(it) - sizeof(IndexTupleData);
id = ((char *) it) + sizeof(IndexTupleData);
@ -984,13 +1065,15 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
gistdentryinit(giststate, &identry, id, (Relation) NULL, (Page) NULL,
(OffsetNumber) 0, idsize, FALSE);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
datum = (char *) PageGetItem(p, PageGetItemId(p, i));
size = IndexTupleSize(datum) - sizeof(IndexTupleData);
datum += sizeof(IndexTupleData);
gistdentryinit(giststate, &entry, datum, r, p, i, size, FALSE);
(giststate->penaltyFn) (&entry, &identry, &usize);
if (which_grow < 0 || usize < which_grow) {
if (which_grow < 0 || usize < which_grow)
{
which = i;
which_grow = usize;
if (which_grow == 0)
@ -1016,7 +1099,8 @@ gistfreestack(GISTSTACK *s)
{
GISTSTACK *p;
while (s != (GISTSTACK *) NULL) {
while (s != (GISTSTACK *) NULL)
{
p = s->gs_parent;
pfree(s);
s = p;
@ -1058,8 +1142,13 @@ gistdelete(Relation r, ItemPointer tid)
void
initGISTstate(GISTSTATE * giststate, Relation index)
{
RegProcedure consistent_proc, union_proc, compress_proc, decompress_proc;
RegProcedure penalty_proc, picksplit_proc, equal_proc;
RegProcedure consistent_proc,
union_proc,
compress_proc,
decompress_proc;
RegProcedure penalty_proc,
picksplit_proc,
equal_proc;
func_ptr user_fn;
int pronargs;
HeapTuple htup;
@ -1094,13 +1183,15 @@ initGISTstate(GISTSTATE *giststate, Relation index)
if (!HeapTupleIsValid(htup))
elog(WARN, "initGISTstate: index %d not found", index->rd_id);
giststate->haskeytype = itupform->indhaskeytype;
if (giststate->haskeytype) {
if (giststate->haskeytype)
{
/* key type is different -- is it byval? */
htup = SearchSysCacheTuple(ATTNUM,
ObjectIdGetDatum(itupform->indexrelid),
UInt16GetDatum(FirstOffsetNumber),
0, 0);
if (!HeapTupleIsValid(htup)) {
if (!HeapTupleIsValid(htup))
{
elog(WARN, "initGISTstate: no attribute tuple %d %d",
itupform->indexrelid, FirstOffsetNumber);
return;
@ -1124,7 +1215,8 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
char *datum = (((char *) t) + sizeof(IndexTupleData));
/* if new entry fits in index tuple, copy it in */
if (entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData)) {
if (entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData))
{
memcpy(datum, entry.pred, entry.bytes);
/* clear out old size */
t->t_info &= 0xe000;
@ -1133,7 +1225,8 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
return (t);
}
else {
else
{
/* generate a new index tuple for the compressed entry */
TupleDesc tupDesc = r->rd_att;
IndexTuple newtup;
@ -1161,12 +1254,15 @@ gistdentryinit(GISTSTATE *giststate, GISTENTRY *e, char *pr, Relation r,
Page pg, OffsetNumber o, int b, bool l)
{
GISTENTRY *dep;
gistentryinit(*e, pr, r, pg, o, b, l);
if (giststate->haskeytype) {
if (giststate->haskeytype)
{
dep = (GISTENTRY *) ((giststate->decompressFn) (e));
gistentryinit(*e, dep->pred, dep->rel, dep->page, dep->offset, dep->bytes,
dep->leafkey);
if (dep != e) pfree(dep);
if (dep != e)
pfree(dep);
}
}
@ -1179,12 +1275,15 @@ gistcentryinit(GISTSTATE *giststate, GISTENTRY *e, char *pr, Relation r,
Page pg, OffsetNumber o, int b, bool l)
{
GISTENTRY *cep;
gistentryinit(*e, pr, r, pg, o, b, l);
if (giststate->haskeytype) {
if (giststate->haskeytype)
{
cep = (GISTENTRY *) ((giststate->compressFn) (e));
gistentryinit(*e, cep->pred, cep->rel, cep->page, cep->offset, cep->bytes,
cep->leafkey);
if (cep != e) pfree(cep);
if (cep != e)
pfree(cep);
}
}
@ -1202,7 +1301,8 @@ _gistdump(Relation r)
{
Buffer buf;
Page page;
OffsetNumber offnum, maxoff;
OffsetNumber offnum,
maxoff;
BlockNumber blkno;
BlockNumber nblocks;
GISTPageOpaque po;
@ -1213,7 +1313,8 @@ _gistdump(Relation r)
char *itkey;
nblocks = RelationGetNumberOfBlocks(r);
for (blkno = 0; blkno < nblocks; blkno++) {
for (blkno = 0; blkno < nblocks; blkno++)
{
buf = ReadBuffer(r, blkno);
page = BufferGetPage(buf);
po = (GISTPageOpaque) PageGetSpecialPointer(page);
@ -1221,14 +1322,16 @@ _gistdump(Relation r)
printf("Page %d maxoff %d <%s>\n", blkno, maxoff,
(po->flags & F_LEAF ? "LEAF" : "INTERNAL"));
if (PageIsEmpty(page)) {
if (PageIsEmpty(page))
{
ReleaseBuffer(buf);
continue;
}
for (offnum = FirstOffsetNumber;
offnum <= maxoff;
offnum = OffsetNumberNext(offnum)) {
offnum = OffsetNumberNext(offnum))
{
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
itblkno = ItemPointerGetBlockNumber(&(itup->t_tid));
itoffno = ItemPointerGetOffsetNumber(&(itup->t_tid));
@ -1247,10 +1350,12 @@ _gistdump(Relation r)
}
#ifdef NOT_USED
static char *text_range_out(TXTRANGE *r)
static char *
text_range_out(TXTRANGE * r)
{
char *result;
char *lower, *upper;
char *lower,
*upper;
if (r == NULL)
return (NULL);
@ -1269,6 +1374,7 @@ static char *text_range_out(TXTRANGE *r)
pfree(upper);
return (result);
}
#endif
static char *
@ -1285,4 +1391,3 @@ int_range_out(INTRANGE *r)
}
#endif /* defined GISTDEBUG */

View File
@ -28,13 +28,15 @@
#endif
static OffsetNumber gistfindnext(IndexScanDesc s, Page p, OffsetNumber n,
static OffsetNumber
gistfindnext(IndexScanDesc s, Page p, OffsetNumber n,
ScanDirection dir);
static RetrieveIndexResult gistscancache(IndexScanDesc s, ScanDirection dir);
static RetrieveIndexResult gistfirst(IndexScanDesc s, ScanDirection dir);
static RetrieveIndexResult gistnext(IndexScanDesc s, ScanDirection dir);
static ItemPointer gistheapptr(Relation r, ItemPointer itemp);
static bool gistindex_keytest(IndexTuple tuple, TupleDesc tupdesc,
static bool
gistindex_keytest(IndexTuple tuple, TupleDesc tupdesc,
int scanKeySize, ScanKey key, GISTSTATE * giststate,
Relation r, Page p, OffsetNumber offset);
@ -49,9 +51,12 @@ gistgettuple(IndexScanDesc s, ScanDirection dir)
return (res);
/* not cached, so we'll have to do some work */
if (ItemPointerIsValid(&(s->currentItemData))) {
if (ItemPointerIsValid(&(s->currentItemData)))
{
res = gistnext(s, dir);
} else {
}
else
{
res = gistfirst(s, dir);
}
return (res);
@ -76,14 +81,16 @@ gistfirst(IndexScanDesc s, ScanDirection dir)
po = (GISTPageOpaque) PageGetSpecialPointer(p);
so = (GISTScanOpaque) s->opaque;
for (;;) {
for (;;)
{
maxoff = PageGetMaxOffsetNumber(p);
if (ScanDirectionIsBackward(dir))
n = gistfindnext(s, p, maxoff, dir);
else
n = gistfindnext(s, p, FirstOffsetNumber, dir);
while (n < FirstOffsetNumber || n > maxoff) {
while (n < FirstOffsetNumber || n > maxoff)
{
ReleaseBuffer(b);
if (so->s_stack == (GISTSTACK *) NULL)
@ -95,9 +102,12 @@ gistfirst(IndexScanDesc s, ScanDirection dir)
po = (GISTPageOpaque) PageGetSpecialPointer(p);
maxoff = PageGetMaxOffsetNumber(p);
if (ScanDirectionIsBackward(dir)) {
if (ScanDirectionIsBackward(dir))
{
n = OffsetNumberPrev(stk->gs_child);
} else {
}
else
{
n = OffsetNumberNext(stk->gs_child);
}
so->s_stack = stk->gs_parent;
@ -105,7 +115,8 @@ gistfirst(IndexScanDesc s, ScanDirection dir)
n = gistfindnext(s, p, n, dir);
}
if (po->flags & F_LEAF) {
if (po->flags & F_LEAF)
{
ItemPointerSet(&(s->currentItemData), BufferGetBlockNumber(b), n);
it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n));
@ -114,7 +125,9 @@ gistfirst(IndexScanDesc s, ScanDirection dir)
ReleaseBuffer(b);
return (res);
} else {
}
else
{
stk = (GISTSTACK *) palloc(sizeof(GISTSTACK));
stk->gs_child = n;
stk->gs_blk = BufferGetBlockNumber(b);
@ -149,9 +162,12 @@ gistnext(IndexScanDesc s, ScanDirection dir)
blk = ItemPointerGetBlockNumber(&(s->currentItemData));
n = ItemPointerGetOffsetNumber(&(s->currentItemData));
if (ScanDirectionIsForward(dir)) {
if (ScanDirectionIsForward(dir))
{
n = OffsetNumberNext(n);
} else {
}
else
{
n = OffsetNumberPrev(n);
}
@ -160,11 +176,13 @@ gistnext(IndexScanDesc s, ScanDirection dir)
po = (GISTPageOpaque) PageGetSpecialPointer(p);
so = (GISTScanOpaque) s->opaque;
for (;;) {
for (;;)
{
maxoff = PageGetMaxOffsetNumber(p);
n = gistfindnext(s, p, n, dir);
while (n < FirstOffsetNumber || n > maxoff) {
while (n < FirstOffsetNumber || n > maxoff)
{
ReleaseBuffer(b);
if (so->s_stack == (GISTSTACK *) NULL)
@ -176,9 +194,12 @@ gistnext(IndexScanDesc s, ScanDirection dir)
maxoff = PageGetMaxOffsetNumber(p);
po = (GISTPageOpaque) PageGetSpecialPointer(p);
if (ScanDirectionIsBackward(dir)) {
if (ScanDirectionIsBackward(dir))
{
n = OffsetNumberPrev(stk->gs_child);
} else {
}
else
{
n = OffsetNumberNext(stk->gs_child);
}
so->s_stack = stk->gs_parent;
@ -186,7 +207,8 @@ gistnext(IndexScanDesc s, ScanDirection dir)
n = gistfindnext(s, p, n, dir);
}
if (po->flags & F_LEAF) {
if (po->flags & F_LEAF)
{
ItemPointerSet(&(s->currentItemData), BufferGetBlockNumber(b), n);
it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n));
@ -195,7 +217,9 @@ gistnext(IndexScanDesc s, ScanDirection dir)
ReleaseBuffer(b);
return (res);
} else {
}
else
{
stk = (GISTSTACK *) palloc(sizeof(GISTSTACK));
stk->gs_child = n;
stk->gs_blk = BufferGetBlockNumber(b);
@ -210,9 +234,12 @@ gistnext(IndexScanDesc s, ScanDirection dir)
p = BufferGetPage(b);
po = (GISTPageOpaque) PageGetSpecialPointer(p);
if (ScanDirectionIsBackward(dir)) {
if (ScanDirectionIsBackward(dir))
{
n = PageGetMaxOffsetNumber(p);
} else {
}
else
{
n = FirstOffsetNumber;
}
}
@ -238,7 +265,8 @@ gistindex_keytest(IndexTuple tuple,
IncrIndexProcessed();
while (scanKeySize > 0) {
while (scanKeySize > 0)
{
datum = index_getattr(tuple,
1,
tupdesc,
@ -247,23 +275,28 @@ gistindex_keytest(IndexTuple tuple,
IndexTupleSize(tuple) - sizeof(IndexTupleData),
FALSE);
if (isNull) {
if (isNull)
{
/* XXX eventually should check if SK_ISNULL */
return (false);
}
if (key[0].sk_flags & SK_COMMUTE) {
if (key[0].sk_flags & SK_COMMUTE)
{
test = (*(key[0].sk_func))
(DatumGetPointer(key[0].sk_argument),
&de, key[0].sk_procedure) ? 1 : 0;
} else {
}
else
{
test = (*(key[0].sk_func))
(&de,
DatumGetPointer(key[0].sk_argument),
key[0].sk_procedure) ? 1 : 0;
}
if (!test == !(key[0].sk_flags & SK_NEGATE)) {
if (!test == !(key[0].sk_flags & SK_NEGATE))
{
return (false);
}
@ -294,12 +327,14 @@ gistfindnext(IndexScanDesc s, Page p, OffsetNumber n, ScanDirection dir)
* a ghost tuple, before the scan. If this is the case, back up one.
*/
if (so->s_flags & GS_CURBEFORE) {
if (so->s_flags & GS_CURBEFORE)
{
so->s_flags &= ~GS_CURBEFORE;
n = OffsetNumberPrev(n);
}
while (n >= FirstOffsetNumber && n <= maxoff) {
while (n >= FirstOffsetNumber && n <= maxoff)
{
it = (char *) PageGetItem(p, PageGetItemId(p, n));
if (gistindex_keytest((IndexTuple) it,
RelationGetTupleDescriptor(s->relation),
@ -307,9 +342,12 @@ gistfindnext(IndexScanDesc s, Page p, OffsetNumber n, ScanDirection dir)
s->relation, p, n))
break;
if (ScanDirectionIsBackward(dir)) {
if (ScanDirectionIsBackward(dir))
{
n = OffsetNumberPrev(n);
} else {
}
else
{
n = OffsetNumberNext(n);
}
}
@ -324,7 +362,8 @@ gistscancache(IndexScanDesc s, ScanDirection dir)
ItemPointer ip;
if (!(ScanDirectionIsNoMovement(dir)
&& ItemPointerIsValid(&(s->currentItemData)))) {
&& ItemPointerIsValid(&(s->currentItemData))))
{
return ((RetrieveIndexResult) NULL);
}
@ -355,7 +394,8 @@ gistheapptr(Relation r, ItemPointer itemp)
OffsetNumber n;
ip = (ItemPointer) palloc(sizeof(ItemPointerData));
if (ItemPointerIsValid(itemp)) {
if (ItemPointerIsValid(itemp))
{
b = ReadBuffer(r, ItemPointerGetBlockNumber(itemp));
p = BufferGetPage(b);
n = ItemPointerGetOffsetNumber(itemp);
@ -363,7 +403,9 @@ gistheapptr(Relation r, ItemPointer itemp)
memmove((char *) ip, (char *) &(it->t_tid),
sizeof(ItemPointerData));
ReleaseBuffer(b);
} else {
}
else
{
ItemPointerSetInvalid(ip);
}

View File
@ -29,11 +29,14 @@
/* routines defined and used here */
static void gistregscan(IndexScanDesc s);
static void gistdropscan(IndexScanDesc s);
static void gistadjone(IndexScanDesc s, int op, BlockNumber blkno,
static void
gistadjone(IndexScanDesc s, int op, BlockNumber blkno,
OffsetNumber offnum);
static void adjuststack(GISTSTACK *stk, BlockNumber blkno,
static void
adjuststack(GISTSTACK * stk, BlockNumber blkno,
OffsetNumber offnum);
static void adjustiptr(IndexScanDesc s, ItemPointer iptr,
static void
adjustiptr(IndexScanDesc s, ItemPointer iptr,
int op, BlockNumber blkno, OffsetNumber offnum);
/*
@ -46,7 +49,8 @@ static void adjustiptr(IndexScanDesc s, ItemPointer iptr,
* locks on the same object, so that's why we need to handle this case.
*/
typedef struct GISTScanListData {
typedef struct GISTScanListData
{
IndexScanDesc gsl_scan;
struct GISTScanListData *gsl_next;
} GISTScanListData;
@ -77,7 +81,8 @@ gistrescan(IndexScanDesc s, bool fromEnd, ScanKey key)
GISTScanOpaque p;
int i;
if (!IndexScanIsValid(s)) {
if (!IndexScanIsValid(s))
{
elog(WARN, "gistrescan: invalid scan.");
return;
}
@ -96,24 +101,31 @@ gistrescan(IndexScanDesc s, bool fromEnd, ScanKey key)
/*
* Set flags.
*/
if (RelationGetNumberOfBlocks(s->relation) == 0) {
if (RelationGetNumberOfBlocks(s->relation) == 0)
{
s->flags = ScanUnmarked;
} else if (fromEnd) {
}
else if (fromEnd)
{
s->flags = ScanUnmarked | ScanUncheckedPrevious;
} else {
}
else
{
s->flags = ScanUnmarked | ScanUncheckedNext;
}
s->scanFromEnd = fromEnd;
if (s->numberOfKeys > 0) {
if (s->numberOfKeys > 0)
{
memmove(s->keyData,
key,
s->numberOfKeys * sizeof(ScanKeyData));
}
p = (GISTScanOpaque) s->opaque;
if (p != (GISTScanOpaque) NULL) {
if (p != (GISTScanOpaque) NULL)
{
gistfreestack(p->s_stack);
gistfreestack(p->s_markstk);
p->s_stack = p->s_markstk = (GISTSTACK *) NULL;
@ -125,7 +137,9 @@ gistrescan(IndexScanDesc s, bool fromEnd, ScanKey key)
s->keyData[i].sk_procedure);
s->keyData[i].sk_func = p->giststate->consistentFn;
}
} else {
}
else
{
/* initialize opaque data */
p = (GISTScanOpaque) palloc(sizeof(GISTScanOpaqueData));
p->s_stack = p->s_markstk = (GISTSTACK *) NULL;
@ -134,16 +148,20 @@ gistrescan(IndexScanDesc s, bool fromEnd, ScanKey key)
p->giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE));
initGISTstate(p->giststate, s->relation);
if (s->numberOfKeys > 0)
/*
** Play games here with the scan key to use the Consistent
** function for all comparisons:
** 1) the sk_procedure field will now be used to hold the
** strategy number
** 2) the sk_func field will point to the Consistent function
* Play games here with the scan key to use the Consistent
* function for all comparisons:
* 1) the sk_procedure field will now be used to hold the strategy number
* 2) the sk_func field will point to the Consistent function
*/
for (i = 0; i < s->numberOfKeys; i++)
{
/*
* s->keyData[i].sk_procedure =
* index_getprocid(s->relation, 1, GIST_CONSISTENT_PROC);
*/
for (i = 0; i < s->numberOfKeys; i++) {
/* s->keyData[i].sk_procedure
= index_getprocid(s->relation, 1, GIST_CONSISTENT_PROC); */
s->keyData[i].sk_procedure
= RelationGetGISTStrategy(s->relation, s->keyData[i].sk_attno,
s->keyData[i].sk_procedure);
@ -156,7 +174,9 @@ void
gistmarkpos(IndexScanDesc s)
{
GISTScanOpaque p;
GISTSTACK *o, *n, *tmp;
GISTSTACK *o,
*n,
*tmp;
s->currentMarkData = s->currentItemData;
p = (GISTScanOpaque) s->opaque;
@ -169,7 +189,8 @@ gistmarkpos(IndexScanDesc s)
n = p->s_stack;
/* copy the parent stack from the current item data */
while (n != (GISTSTACK *) NULL) {
while (n != (GISTSTACK *) NULL)
{
tmp = (GISTSTACK *) palloc(sizeof(GISTSTACK));
tmp->gs_child = n->gs_child;
tmp->gs_blk = n->gs_blk;
@ -186,7 +207,9 @@ void
gistrestrpos(IndexScanDesc s)
{
GISTScanOpaque p;
GISTSTACK *o, *n, *tmp;
GISTSTACK *o,
*n,
*tmp;
s->currentItemData = s->currentMarkData;
p = (GISTScanOpaque) s->opaque;
@ -199,7 +222,8 @@ gistrestrpos(IndexScanDesc s)
n = p->s_markstk;
/* copy the parent stack from the current item data */
while (n != (GISTSTACK *) NULL) {
while (n != (GISTSTACK *) NULL)
{
tmp = (GISTSTACK *) palloc(sizeof(GISTSTACK));
tmp->gs_child = n->gs_child;
tmp->gs_blk = n->gs_blk;
@ -219,7 +243,8 @@ gistendscan(IndexScanDesc s)
p = (GISTScanOpaque) s->opaque;
if (p != (GISTScanOpaque) NULL) {
if (p != (GISTScanOpaque) NULL)
{
gistfreestack(p->s_stack);
gistfreestack(p->s_markstk);
pfree(s->opaque);
@ -250,7 +275,8 @@ gistdropscan(IndexScanDesc s)
for (l = GISTScans;
l != (GISTScanList) NULL && l->gsl_scan != s;
l = l->gsl_next) {
l = l->gsl_next)
{
prev = l;
}
@ -272,7 +298,8 @@ gistadjscans(Relation r, int op, BlockNumber blkno, OffsetNumber offnum)
Oid relid;
relid = r->rd_id;
for (l = GISTScans; l != (GISTScanList) NULL; l = l->gsl_next) {
for (l = GISTScans; l != (GISTScanList) NULL; l = l->gsl_next)
{
if (l->gsl_scan->relation->rd_id == relid)
gistadjone(l->gsl_scan, op, blkno, offnum);
}
@ -301,7 +328,8 @@ gistadjone(IndexScanDesc s,
so = (GISTScanOpaque) s->opaque;
if (op == GISTOP_SPLIT) {
if (op == GISTOP_SPLIT)
{
adjuststack(so->s_stack, blkno, offnum);
adjuststack(so->s_markstk, blkno, offnum);
}
@ -324,20 +352,27 @@ adjustiptr(IndexScanDesc s,
OffsetNumber curoff;
GISTScanOpaque so;
if (ItemPointerIsValid(iptr)) {
if (ItemPointerGetBlockNumber(iptr) == blkno) {
if (ItemPointerIsValid(iptr))
{
if (ItemPointerGetBlockNumber(iptr) == blkno)
{
curoff = ItemPointerGetOffsetNumber(iptr);
so = (GISTScanOpaque) s->opaque;
switch (op) {
switch (op)
{
case GISTOP_DEL:
/* back up one if we need to */
if (curoff >= offnum) {
if (curoff >= offnum)
{
if (curoff > FirstOffsetNumber) {
if (curoff > FirstOffsetNumber)
{
/* just adjust the item pointer */
ItemPointerSet(iptr, blkno, OffsetNumberPrev(curoff));
} else {
}
else
{
/* remember that we're before the current tuple */
ItemPointerSet(iptr, blkno, FirstOffsetNumber);
if (iptr == &(s->currentItemData))
@ -383,7 +418,8 @@ adjuststack(GISTSTACK *stk,
BlockNumber blkno,
OffsetNumber offnum)
{
while (stk != (GISTSTACK *) NULL) {
while (stk != (GISTSTACK *) NULL)
{
if (stk->gs_blk == blkno)
stk->gs_child = FirstOffsetNumber;

View File
@ -113,4 +113,5 @@ RelationInvokeGISTStrategy(Relation r,
return (RelationInvokeStrategy(r, &GISTEvaluationData, attnum, s,
left, right));
}
#endif

View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.12 1997/01/10 09:46:13 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.13 1997/09/07 04:37:49 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -56,20 +56,26 @@ hashbuild(Relation heap,
Buffer buffer;
HeapTuple htup;
IndexTuple itup;
TupleDesc htupdesc, itupdesc;
TupleDesc htupdesc,
itupdesc;
Datum *attdata;
bool *nulls;
InsertIndexResult res;
int nhtups, nitups;
int nhtups,
nitups;
int i;
HashItem hitem;
#ifndef OMIT_PARTIAL_INDEX
ExprContext *econtext;
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
Oid hrelid, irelid;
Node *pred, *oldPred;
Oid hrelid,
irelid;
Node *pred,
*oldPred;
/* note that this is a new hash index */
BuildingHash = true;
@ -90,20 +96,23 @@ hashbuild(Relation heap,
nulls = (bool *) palloc(natts * sizeof(bool));
/*
* If this is a predicate (partial) index, we will need to evaluate the
* predicate using ExecQual, which requires the current tuple to be in a
* slot of a TupleTable. In addition, ExecQual must have an ExprContext
* referring to that slot. Here, we initialize dummy TupleTable and
* ExprContext objects for this purpose. --Nels, Feb '92
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
* be in a slot of a TupleTable. In addition, ExecQual must have an
* ExprContext referring to that slot. Here, we initialize dummy
* TupleTable and ExprContext objects for this purpose. --Nels, Feb
* '92
*/
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL) {
if (pred != NULL || oldPred != NULL)
{
tupleTable = ExecCreateTupleTable(1);
slot = ExecAllocTableSlot(tupleTable);
econtext = makeNode(ExprContext);
FillDummyExprContext(econtext, slot, htupdesc, buffer);
}
else /* quiet the compiler */
else
/* quiet the compiler */
{
econtext = NULL;
tupleTable = 0;
@ -118,7 +127,8 @@ hashbuild(Relation heap,
/* build the index */
nhtups = nitups = 0;
for (; HeapTupleIsValid(htup); htup = heap_getnext(hscan, 0, &buffer)) {
for (; HeapTupleIsValid(htup); htup = heap_getnext(hscan, 0, &buffer))
{
nhtups++;
@ -126,19 +136,25 @@ hashbuild(Relation heap,
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
*/
if (oldPred != NULL) {
if (oldPred != NULL)
{
/* SetSlotContents(slot, htup); */
#ifndef OMIT_PARTIAL_INDEX
slot->val = htup;
if (ExecQual((List*)oldPred, econtext) == true) {
if (ExecQual((List *) oldPred, econtext) == true)
{
nitups++;
continue;
}
#endif /* OMIT_PARTIAL_INDEX */
}
/* Skip this tuple if it doesn't satisfy the partial-index predicate */
if (pred != NULL) {
/*
* Skip this tuple if it doesn't satisfy the partial-index
* predicate
*/
if (pred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
@ -150,25 +166,27 @@ hashbuild(Relation heap,
nitups++;
/*
* For the current heap tuple, extract all the attributes
* we use in this index, and note which are null.
* For the current heap tuple, extract all the attributes we use
* in this index, and note which are null.
*/
for (i = 1; i <= natts; i++) {
for (i = 1; i <= natts; i++)
{
int attoff;
bool attnull;
/*
* Offsets are from the start of the tuple, and are
* zero-based; indices are one-based. The next call
* returns i - 1. That's data hiding for you.
* zero-based; indices are one-based. The next call returns i
* - 1. That's data hiding for you.
*/
/* attoff = i - 1 */
attoff = AttrNumberGetAttrOffset(i);
/* below, attdata[attoff] set to equal some datum &
* attnull is changed to indicate whether or not the attribute
* is null for this tuple
/*
* below, attdata[attoff] set to equal some datum & attnull is
* changed to indicate whether or not the attribute is null
* for this tuple
*/
attdata[attoff] = GetIndexValue(htup,
htupdesc,
@ -184,10 +202,9 @@ hashbuild(Relation heap,
itup = index_formtuple(itupdesc, attdata, nulls);
/*
* If the single index key is null, we don't insert it into
* the index. Hash tables support scans on '='.
* Relational algebra says that A = B
* returns null if either A or B is null. This
* If the single index key is null, we don't insert it into the
* index. Hash tables support scans on '='. Relational algebra
* says that A = B returns null if either A or B is null. This
* means that no qualification used in an index scan could ever
* return true on a null attribute. It also means that indices
* can't be used by ISNULL or NOTNULL scans, but that's an
@ -195,7 +212,8 @@ hashbuild(Relation heap,
* of the way nulls are handled here.
*/
if (itup->t_info & INDEX_NULL_MASK) {
if (itup->t_info & INDEX_NULL_MASK)
{
pfree(itup);
continue;
}
@ -211,7 +229,8 @@ hashbuild(Relation heap,
/* okay, all heap tuples are indexed */
heap_endscan(hscan);
if (pred != NULL || oldPred != NULL) {
if (pred != NULL || oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
ExecDestroyTupleTable(tupleTable, true);
pfree(econtext);
@ -219,10 +238,10 @@ hashbuild(Relation heap,
}
/*
* Since we just counted the tuples in the heap, we update its
* stats in pg_class to guarantee that the planner takes advantage
* of the index we just created. Finally, only update statistics
* during normal index definitions, not for indices on system catalogs
* Since we just counted the tuples in the heap, we update its stats
* in pg_class to guarantee that the planner takes advantage of the
* index we just created. Finally, only update statistics during
* normal index definitions, not for indices on system catalogs
* created during bootstrap processing. We must close the relations
* before updating statistics to guarantee that the relcache entries
* are flushed when we increment the command counter in UpdateStats().
@ -235,8 +254,10 @@ hashbuild(Relation heap,
index_close(index);
UpdateStats(hrelid, nhtups, true);
UpdateStats(irelid, nitups, false);
if (oldPred != NULL) {
if (nitups == nhtups) pred = NULL;
if (oldPred != NULL)
{
if (nitups == nhtups)
pred = NULL;
UpdateIndexPredicate(irelid, oldPred, pred);
}
}
@ -291,9 +312,9 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
RetrieveIndexResult res;
/*
* If we've already initialized this scan, we can just advance it
* in the appropriate direction. If we haven't done so yet, we
* call a routine to get the first item in the scan.
* If we've already initialized this scan, we can just advance it in
* the appropriate direction. If we haven't done so yet, we call a
* routine to get the first item in the scan.
*/
if (ItemPointerIsValid(&(scan->currentItemData)))
@ -341,19 +362,22 @@ hashrescan(IndexScanDesc scan, bool fromEnd, ScanKey scankey)
so = (HashScanOpaque) scan->opaque;
/* we hold a read lock on the current page in the scan */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
_hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ);
so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) {
if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
{
_hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ);
so->hashso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* reset the scan key */
if (scan->numberOfKeys > 0) {
if (scan->numberOfKeys > 0)
{
memmove(scan->keyData,
scankey,
scan->numberOfKeys * sizeof(ScanKeyData));
@ -373,13 +397,15 @@ hashendscan(IndexScanDesc scan)
so = (HashScanOpaque) scan->opaque;
/* release any locks we still hold */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
_hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ);
so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) {
if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
{
if (BufferIsValid(so->hashso_mrkbuf))
_hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ);
so->hashso_mrkbuf = InvalidBuffer;
@ -403,9 +429,10 @@ hashmarkpos(IndexScanDesc scan)
ItemPointer iptr;
HashScanOpaque so;
/* see if we ever call this code. if we do, then so_mrkbuf is a
* useful element in the scan->opaque structure. if this procedure
* is never called, so_mrkbuf should be removed from the scan->opaque
/*
* see if we ever call this code. if we do, then so_mrkbuf is a useful
* element in the scan->opaque structure. if this procedure is never
* called, so_mrkbuf should be removed from the scan->opaque
* structure.
*/
elog(NOTICE, "Hashmarkpos() called.");
@ -413,14 +440,16 @@ hashmarkpos(IndexScanDesc scan)
so = (HashScanOpaque) scan->opaque;
/* release lock on old marked data, if any */
if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) {
if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
{
_hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ);
so->hashso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* bump lock on currentItemData and copy to currentMarkData */
if (ItemPointerIsValid(&(scan->currentItemData))) {
if (ItemPointerIsValid(&(scan->currentItemData)))
{
so->hashso_mrkbuf = _hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_curbuf),
HASH_READ);
@ -437,9 +466,10 @@ hashrestrpos(IndexScanDesc scan)
ItemPointer iptr;
HashScanOpaque so;
/* see if we ever call this code. if we do, then so_mrkbuf is a
* useful element in the scan->opaque structure. if this procedure
* is never called, so_mrkbuf should be removed from the scan->opaque
/*
* see if we ever call this code. if we do, then so_mrkbuf is a useful
* element in the scan->opaque structure. if this procedure is never
* called, so_mrkbuf should be removed from the scan->opaque
* structure.
*/
elog(NOTICE, "Hashrestrpos() called.");
@ -447,14 +477,16 @@ hashrestrpos(IndexScanDesc scan)
so = (HashScanOpaque) scan->opaque;
/* release lock on current data, if any */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
_hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ);
so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* bump lock on currentMarkData and copy to currentItemData */
if (ItemPointerIsValid(&(scan->currentMarkData))) {
if (ItemPointerIsValid(&(scan->currentMarkData)))
{
so->hashso_curbuf =
_hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_mrkbuf),
@ -474,4 +506,3 @@ hashdelete(Relation rel, ItemPointer tid)
/* delete the data from the page */
_hash_pagedel(rel, tid);
}

View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.3 1996/11/10 02:57:40 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.4 1997/09/07 04:37:53 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -20,18 +20,21 @@
#include "access/hash.h"
uint32 hashint2(int16 key)
uint32
hashint2(int16 key)
{
return ((uint32) ~ key);
}
uint32 hashint4(uint32 key)
uint32
hashint4(uint32 key)
{
return (~key);
}
/* Hash function from Chris Torek. */
uint32 hashfloat4(float32 keyp)
uint32
hashfloat4(float32 keyp)
{
int len;
int loop;
@ -46,12 +49,15 @@ uint32 hashfloat4(float32 keyp)
h = 0;
if (len > 0) {
if (len > 0)
{
loop = (len + 8 - 1) >> 3;
switch (len & (8 - 1)) {
switch (len & (8 - 1))
{
case 0:
do { /* All fall throughs */
do
{ /* All fall throughs */
HASH4;
case 7:
HASH4;
@ -74,7 +80,8 @@ uint32 hashfloat4(float32 keyp)
}
uint32 hashfloat8(float64 keyp)
uint32
hashfloat8(float64 keyp)
{
int len;
int loop;
@ -89,12 +96,15 @@ uint32 hashfloat8(float64 keyp)
h = 0;
if (len > 0) {
if (len > 0)
{
loop = (len + 8 - 1) >> 3;
switch (len & (8 - 1)) {
switch (len & (8 - 1))
{
case 0:
do { /* All fall throughs */
do
{ /* All fall throughs */
HASH4;
case 7:
HASH4;
@ -117,13 +127,15 @@ uint32 hashfloat8(float64 keyp)
}
uint32 hashoid(Oid key)
uint32
hashoid(Oid key)
{
return ((uint32) ~ key);
}
uint32 hashchar(char key)
uint32
hashchar(char key)
{
int len;
uint32 h;
@ -141,7 +153,8 @@ uint32 hashchar(char key)
return (h);
}
uint32 hashchar2(uint16 intkey)
uint32
hashchar2(uint16 intkey)
{
uint32 h;
int len;
@ -157,7 +170,8 @@ uint32 hashchar2(uint16 intkey)
return (h);
}
uint32 hashchar4(uint32 intkey)
uint32
hashchar4(uint32 intkey)
{
uint32 h;
int len;
@ -173,7 +187,8 @@ uint32 hashchar4(uint32 intkey)
return (h);
}
uint32 hashchar8(char *key)
uint32
hashchar8(char *key)
{
uint32 h;
int len;
@ -188,7 +203,8 @@ uint32 hashchar8(char *key)
return (h);
}
uint32 hashname(NameData *n)
uint32
hashname(NameData * n)
{
uint32 h;
int len;
@ -207,7 +223,8 @@ uint32 hashname(NameData *n)
}
uint32 hashchar16(char *key)
uint32
hashchar16(char *key)
{
uint32 h;
int len;
@ -234,7 +251,8 @@ uint32 hashchar16(char *key)
*
* "OZ's original sdbm hash"
*/
uint32 hashtext(struct varlena *key)
uint32
hashtext(struct varlena * key)
{
int keylen;
char *keydata;
@ -250,12 +268,15 @@ uint32 hashtext(struct varlena *key)
#define HASHC n = *keydata++ + 65599 * n
n = 0;
if (keylen > 0) {
if (keylen > 0)
{
loop = (keylen + 8 - 1) >> 3;
switch (keylen & (8 - 1)) {
switch (keylen & (8 - 1))
{
case 0:
do { /* All fall throughs */
do
{ /* All fall throughs */
HASHC;
case 7:
HASHC;
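/*
 * Illustrative sketch only (not taken from hashfunc.c): the unrolled
 * do/switch above is a Duff's-device form of the sdbm-style recurrence
 * defined by HASHC.  Assuming "uint32" is the usual Postgres typedef for a
 * 32-bit unsigned integer, a plain-loop equivalent would be:
 */
static uint32
sdbm_hash_sketch(const char *keydata, int keylen)
{
	uint32		n = 0;

	while (keylen-- > 0)
		n = *keydata++ + 65599 * n;	/* HASHC applied once per byte */
	return (n);
}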

View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.8 1997/08/12 22:51:30 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.9 1997/09/07 04:37:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -70,12 +70,12 @@ _hash_doinsert(Relation rel, HashItem hitem)
/*
* XXX btree comment (haven't decided what to do in hash): don't
* think the bucket can be split while we're reading the metapage.
* XXX btree comment (haven't decided what to do in hash): don't think
* the bucket can be split while we're reading the metapage.
*
* If the page was split between the time that we surrendered our
* read lock and acquired our write lock, then this page may no
* longer be the right place for the key we want to insert.
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be
* the right place for the key we want to insert.
*/
/* do the insertion */
@ -132,11 +132,15 @@ _hash_insertonpg(Relation rel,
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
itemsz = DOUBLEALIGN(itemsz);
while (PageGetFreeSpace(page) < itemsz) {
while (PageGetFreeSpace(page) < itemsz)
{
/*
* no space on this page; check for an overflow page
*/
if (BlockNumberIsValid(pageopaque->hasho_nextblkno)) {
if (BlockNumberIsValid(pageopaque->hasho_nextblkno))
{
/*
* ovfl page exists; go get it. if it doesn't have room,
* we'll find out next pass through the loop test above.
@ -146,11 +150,13 @@ _hash_insertonpg(Relation rel,
_hash_relbuf(rel, buf, HASH_WRITE);
buf = ovflbuf;
page = BufferGetPage(buf);
} else {
}
else
{
/*
* we're at the end of the bucket chain and we haven't
* found a page with enough room. allocate a new overflow
* page.
* we're at the end of the bucket chain and we haven't found a
* page with enough room. allocate a new overflow page.
*/
do_expand = true;
ovflbuf = _hash_addovflpage(rel, &metabuf, buf);
@ -158,7 +164,8 @@ _hash_insertonpg(Relation rel,
buf = ovflbuf;
page = BufferGetPage(buf);
if (PageGetFreeSpace(page) < itemsz) {
if (PageGetFreeSpace(page) < itemsz)
{
/* it doesn't fit on an empty page -- give up */
elog(WARN, "hash item too large");
}
@ -176,11 +183,13 @@ _hash_insertonpg(Relation rel,
ItemPointerSet(&(res->pointerData), itup_blkno, itup_off);
if (res != NULL) {
if (res != NULL)
{
/*
* Increment the number of keys in the table.
* We switch lock access type just for a moment
* to allow greater accessibility to the metapage.
* Increment the number of keys in the table. We switch lock
* access type just for a moment to allow greater accessibility to
* the metapage.
*/
metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf,
HASH_READ, HASH_WRITE);
@ -194,7 +203,8 @@ _hash_insertonpg(Relation rel,
if (do_expand ||
(metap->hashm_nkeys / (metap->hashm_maxbucket + 1))
> metap->hashm_ffactor) {
> metap->hashm_ffactor)
{
_hash_expandtable(rel, metabuf);
}
_hash_relbuf(rel, metabuf, HASH_READ);

View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.9 1997/08/12 22:51:34 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.10 1997/09/07 04:37:57 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@ -63,7 +63,8 @@ _hash_addovflpage(Relation rel, Buffer *metabufp, Buffer buf)
/* allocate an empty overflow page */
oaddr = _hash_getovfladdr(rel, metabufp);
if (oaddr == InvalidOvflAddress) {
if (oaddr == InvalidOvflAddress)
{
elog(WARN, "_hash_addovflpage: problem with _hash_getovfladdr.");
}
ovflblkno = OADDR_TO_BLKNO(OADDR_OF(SPLITNUM(oaddr), OPAGENUM(oaddr)));
@ -114,7 +115,8 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
uint32 free_bit;
uint32 free_page;
uint32 in_use_bits;
uint32 i, j;
uint32 i,
j;
metap = (HashMetaPage) _hash_chgbufaccess(rel, metabufp, HASH_READ, HASH_WRITE);
@ -126,7 +128,8 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
/* Look through all the free maps to find the first free block */
first_page = metap->LAST_FREED >> (metap->BSHIFT + BYTE_TO_BIT);
for ( i = first_page; i <= free_page; i++ ) {
for (i = first_page; i <= free_page; i++)
{
Page mappage;
blkno = metap->hashm_mapp[i];
@ -141,11 +144,14 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
else
in_use_bits = BMPGSZ_BIT(metap) - 1;
if (i == first_page) {
if (i == first_page)
{
bit = metap->LAST_FREED & (BMPGSZ_BIT(metap) - 1);
j = bit / BITS_PER_MAP;
bit = bit & ~(BITS_PER_MAP - 1);
} else {
}
else
{
bit = 0;
j = 0;
}
@ -162,8 +168,10 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
#define OVMSG "HASH: Out of overflow pages. Out of luck.\n"
if (offset > SPLITMASK) {
if (++splitnum >= NCACHED) {
if (offset > SPLITMASK)
{
if (++splitnum >= NCACHED)
{
elog(WARN, OVMSG);
}
metap->OVFL_POINT = splitnum;
@ -173,35 +181,39 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
}
/* Check if we need to allocate a new bitmap page */
if (free_bit == BMPGSZ_BIT(metap) - 1) {
if (free_bit == BMPGSZ_BIT(metap) - 1)
{
/* won't be needing old map page */
_hash_relbuf(rel, mapbuf, HASH_WRITE);
free_page++;
if (free_page >= NCACHED) {
if (free_page >= NCACHED)
{
elog(WARN, OVMSG);
}
/*
* This is tricky. The 1 indicates that you want the new page
* allocated with 1 clear bit. Actually, you are going to
* allocate 2 pages from this map. The first is going to be
* the map page, the second is the overflow page we were
* looking for. The init_bitmap routine automatically, sets
* the first bit of itself to indicate that the bitmap itself
* is in use. We would explicitly set the second bit, but
* don't have to if we tell init_bitmap not to leave it clear
* in the first place.
* allocate 2 pages from this map. The first is going to be the
* map page, the second is the overflow page we were looking for.
* The init_bitmap routine automatically, sets the first bit of
* itself to indicate that the bitmap itself is in use. We would
* explicitly set the second bit, but don't have to if we tell
* init_bitmap not to leave it clear in the first place.
*/
if (_hash_initbitmap(rel, metap, OADDR_OF(splitnum, offset),
1, free_page)) {
1, free_page))
{
elog(WARN, "overflow_page: problem with _hash_initbitmap.");
}
metap->SPARES[splitnum]++;
offset++;
if (offset > SPLITMASK) {
if (++splitnum >= NCACHED) {
if (offset > SPLITMASK)
{
if (++splitnum >= NCACHED)
{
elog(WARN, OVMSG);
}
metap->OVFL_POINT = splitnum;
@ -209,11 +221,13 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
metap->SPARES[splitnum - 1]--;
offset = 0;
}
} else {
}
else
{
/*
* Free_bit addresses the last used bit. Bump it to address
* the first available bit.
* Free_bit addresses the last used bit. Bump it to address the
* first available bit.
*/
free_bit++;
SETBIT(freep, free_bit);
@ -231,13 +245,14 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
_hash_wrtbuf(rel, mapbuf);
/*
* Bits are addressed starting with 0, but overflow pages are addressed
* beginning at 1. Bit is a bit addressnumber, so we need to increment
* it to convert it to a page number.
* Bits are addressed starting with 0, but overflow pages are
* addressed beginning at 1. Bit is a bit addressnumber, so we need to
* increment it to convert it to a page number.
*/
bit = 1 + bit + (i * BMPGSZ_BIT(metap));
if (bit >= metap->LAST_FREED) {
if (bit >= metap->LAST_FREED)
{
metap->LAST_FREED = bit - 1;
}
@ -245,7 +260,8 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
for (i = 0; (i < splitnum) && (bit > metap->SPARES[i]); i++)
;
offset = (i ? bit - metap->SPARES[i - 1] : bit);
if (offset >= SPLITMASK) {
if (offset >= SPLITMASK)
{
elog(WARN, OVMSG);
}
@ -266,10 +282,12 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
static uint32
_hash_firstfreebit(uint32 map)
{
uint32 i, mask;
uint32 i,
mask;
mask = 0x1;
for (i = 0; i < BITS_PER_MAP; i++) {
for (i = 0; i < BITS_PER_MAP; i++)
{
if (!(mask & map))
return (i);
mask = mask << 1;
@ -301,7 +319,8 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
SplitNumber splitnum;
uint32 *freep;
uint32 ovflpgno;
int32 bitmappage, bitmapbit;
int32 bitmappage,
bitmapbit;
Bucket bucket;
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
@ -319,17 +338,16 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
_hash_wrtbuf(rel, ovflbuf);
/*
* fix up the bucket chain. this is a doubly-linked list, so we
* must fix up the bucket chain members behind and ahead of the
* overflow page being deleted.
* fix up the bucket chain. this is a doubly-linked list, so we must
* fix up the bucket chain members behind and ahead of the overflow
* page being deleted.
*
* XXX this should look like:
* - lock prev/next
* - modify/write prev/next (how to do write ordering with a
* doubly-linked list?)
* - unlock prev/next
* XXX this should look like: - lock prev/next - modify/write prev/next
* (how to do write ordering with a doubly-linked list?) - unlock
* prev/next
*/
if (BlockNumberIsValid(prevblkno)) {
if (BlockNumberIsValid(prevblkno))
{
Buffer prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE);
Page prevpage = BufferGetPage(prevbuf);
HashPageOpaque prevopaque =
@ -340,7 +358,8 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
prevopaque->hasho_nextblkno = nextblkno;
_hash_wrtbuf(rel, prevbuf);
}
if (BlockNumberIsValid(nextblkno)) {
if (BlockNumberIsValid(nextblkno))
{
Buffer nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
Page nextpage = BufferGetPage(nextbuf);
HashPageOpaque nextopaque =
@ -354,14 +373,15 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
/*
* Fix up the overflow page bitmap that tracks this particular
* overflow page. The bitmap can be found in the MetaPageData
* array element hashm_mapp[bitmappage].
* overflow page. The bitmap can be found in the MetaPageData array
* element hashm_mapp[bitmappage].
*/
splitnum = (addr >> SPLITSHIFT);
ovflpgno =
(splitnum ? metap->SPARES[splitnum - 1] : 0) + (addr & SPLITMASK) - 1;
if (ovflpgno < metap->LAST_FREED) {
if (ovflpgno < metap->LAST_FREED)
{
metap->LAST_FREED = ovflpgno;
}
@ -379,12 +399,15 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
_hash_relbuf(rel, metabuf, HASH_WRITE);
/*
* now instantiate the page that replaced this one,
* if it exists, and return that buffer with a write lock.
* now instantiate the page that replaced this one, if it exists, and
* return that buffer with a write lock.
*/
if (BlockNumberIsValid(nextblkno)) {
if (BlockNumberIsValid(nextblkno))
{
return (_hash_getbuf(rel, nextblkno, HASH_WRITE));
} else {
}
else
{
return (InvalidBuffer);
}
}
@ -418,7 +441,8 @@ _hash_initbitmap(Relation rel,
Page pg;
HashPageOpaque op;
uint32 *freep;
int clearbytes, clearints;
int clearbytes,
clearints;
blkno = OADDR_TO_BLKNO(pnum);
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
@ -499,7 +523,8 @@ _hash_squeezebucket(Relation rel,
/*
* if there aren't any overflow pages, there's nothing to squeeze.
*/
if (!BlockNumberIsValid(wopaque->hasho_nextblkno)) {
if (!BlockNumberIsValid(wopaque->hasho_nextblkno))
{
_hash_relbuf(rel, wbuf, HASH_WRITE);
return;
}
@ -508,16 +533,17 @@ _hash_squeezebucket(Relation rel,
* find the last page in the bucket chain by starting at the base
* bucket page and working forward.
*
* XXX if chains tend to be long, we should probably move forward
* using HASH_READ and then _hash_chgbufaccess to HASH_WRITE when
* we reach the end. if they are short we probably don't care
* very much. if the hash function is working at all, they had
* better be short..
* XXX if chains tend to be long, we should probably move forward using
* HASH_READ and then _hash_chgbufaccess to HASH_WRITE when we reach
* the end. if they are short we probably don't care very much. if
* the hash function is working at all, they had better be short..
*/
ropaque = wopaque;
do {
do
{
rblkno = ropaque->hasho_nextblkno;
if (ropaque != wopaque) {
if (ropaque != wopaque)
{
_hash_relbuf(rel, rbuf, HASH_WRITE);
}
rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
@ -532,7 +558,8 @@ _hash_squeezebucket(Relation rel,
* squeeze the tuples.
*/
roffnum = FirstOffsetNumber;
for(;;) {
for (;;)
{
hitem = (HashItem) PageGetItem(rpage, PageGetItemId(rpage, roffnum));
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
@ -542,12 +569,14 @@ _hash_squeezebucket(Relation rel,
* walk up the bucket chain, looking for a page big enough for
* this item.
*/
while (PageGetFreeSpace(wpage) < itemsz) {
while (PageGetFreeSpace(wpage) < itemsz)
{
wblkno = wopaque->hasho_nextblkno;
_hash_wrtbuf(rel, wbuf);
if (!BlockNumberIsValid(wblkno) || (rblkno == wblkno)) {
if (!BlockNumberIsValid(wblkno) || (rblkno == wblkno))
{
_hash_wrtbuf(rel, rbuf);
/* wbuf is already released */
return;
@ -569,33 +598,35 @@ _hash_squeezebucket(Relation rel,
PageAddItem(wpage, (Item) hitem, itemsz, woffnum, LP_USED);
/*
* delete the tuple from the "read" page.
* PageIndexTupleDelete repacks the ItemId array, so 'roffnum'
* will be "advanced" to the "next" ItemId.
* delete the tuple from the "read" page. PageIndexTupleDelete
* repacks the ItemId array, so 'roffnum' will be "advanced" to
* the "next" ItemId.
*/
PageIndexTupleDelete(rpage, roffnum);
_hash_wrtnorelbuf(rel, rbuf);
/*
* if the "read" page is now empty because of the deletion,
* free it.
* if the "read" page is now empty because of the deletion, free
* it.
*/
if (PageIsEmpty(rpage) && (ropaque->hasho_flag & LH_OVERFLOW_PAGE)) {
if (PageIsEmpty(rpage) && (ropaque->hasho_flag & LH_OVERFLOW_PAGE))
{
rblkno = ropaque->hasho_prevblkno;
Assert(BlockNumberIsValid(rblkno));
/*
* free this overflow page. the extra _hash_relbuf is
* because _hash_freeovflpage gratuitously returns the
* next page (we want the previous page and will get it
* ourselves later).
* free this overflow page. the extra _hash_relbuf is because
* _hash_freeovflpage gratuitously returns the next page (we
* want the previous page and will get it ourselves later).
*/
rbuf = _hash_freeovflpage(rel, rbuf);
if (BufferIsValid(rbuf)) {
if (BufferIsValid(rbuf))
{
_hash_relbuf(rel, rbuf, HASH_WRITE);
}
if (rblkno == wblkno) {
if (rblkno == wblkno)
{
/* rbuf is already released */
_hash_wrtbuf(rel, wbuf);
return;

View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.9 1997/08/18 20:51:34 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.10 1997/09/07 04:38:00 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@ -83,7 +83,8 @@ _hash_metapinit(Relation rel)
if (USELOCKING)
RelationSetLockForWrite(rel);
if ((nblocks = RelationGetNumberOfBlocks(rel)) != 0) {
if ((nblocks = RelationGetNumberOfBlocks(rel)) != 0)
{
elog(WARN, "Cannot initialize non-empty hash table %s",
RelationGetRelationName(rel));
}
@ -100,10 +101,12 @@ _hash_metapinit(Relation rel)
metap->hashm_ffactor = DEFAULT_FFACTOR;
metap->hashm_bsize = BufferGetPageSize(metabuf);
metap->hashm_bshift = _hash_log2(metap->hashm_bsize);
for (i = metap->hashm_bshift; i > 0; --i) {
for (i = metap->hashm_bshift; i > 0; --i)
{
if ((1 << i) < (metap->hashm_bsize -
(DOUBLEALIGN(sizeof(PageHeaderData)) +
DOUBLEALIGN(sizeof(HashPageOpaqueData))))) {
DOUBLEALIGN(sizeof(HashPageOpaqueData)))))
{
break;
}
}
@ -112,8 +115,8 @@ _hash_metapinit(Relation rel)
metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
/*
* Make nelem = 2 rather than 0 so that we end up allocating space
* for the next greater power of two number of buckets.
* Make nelem = 2 rather than 0 so that we end up allocating space for
* the next greater power of two number of buckets.
*/
nelem = 2;
lg2nelem = 1; /* _hash_log2(MAX(nelem, 2)) */
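/*
 * Illustrative sketch only (assumed helper name, not part of this file):
 * the "next greater power of two" bookkeeping above amounts to a ceiling
 * log2, which could be computed as below; e.g. ceil_log2_sketch(2) == 1
 * and ceil_log2_sketch(3) == 2.
 */
static uint32
ceil_log2_sketch(uint32 num)
{
	uint32		i,
				limit;

	for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
		;
	return (i);
}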
@ -139,8 +142,8 @@ _hash_metapinit(Relation rel)
/*
* First bitmap page is at: splitpoint lg2nelem page offset 1 which
* turns out to be page 3. Couldn't initialize page 3 until we created
* the first two buckets above.
* turns out to be page 3. Couldn't initialize page 3 until we
* created the first two buckets above.
*/
if (_hash_initbitmap(rel, metap, OADDR_OF(lg2nelem, 1), lg2nelem + 1, 0))
elog(WARN, "Problem with _hash_initbitmap.");
@ -151,7 +154,8 @@ _hash_metapinit(Relation rel)
/*
* initialize the first two buckets
*/
for (i = 0; i <= 1; i++) {
for (i = 0; i <= 1; i++)
{
buf = _hash_getbuf(rel, BUCKET_TO_BLKNO(i), HASH_WRITE);
pg = BufferGetPage(buf);
_hash_pageinit(pg, BufferGetPageSize(buf));
@ -186,10 +190,12 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access)
{
Buffer buf;
if (blkno == P_NEW) {
if (blkno == P_NEW)
{
elog(WARN, "_hash_getbuf: internal error: hash AM does not use P_NEW");
}
switch (access) {
switch (access)
{
case HASH_WRITE:
case HASH_READ:
_hash_setpagelock(rel, blkno, access);
@ -215,7 +221,8 @@ _hash_relbuf(Relation rel, Buffer buf, int access)
blkno = BufferGetBlockNumber(buf);
switch (access) {
switch (access)
{
case HASH_WRITE:
case HASH_READ:
_hash_unsetpagelock(rel, blkno, access);
@ -271,7 +278,8 @@ _hash_chgbufaccess(Relation rel,
blkno = BufferGetBlockNumber(*bufp);
switch (from_access) {
switch (from_access)
{
case HASH_WRITE:
_hash_wrtbuf(rel, *bufp);
break;
@ -314,10 +322,12 @@ _hash_setpagelock(Relation rel,
{
ItemPointerData iptr;
if (USELOCKING) {
if (USELOCKING)
{
ItemPointerSet(&iptr, blkno, 1);
switch (access) {
switch (access)
{
case HASH_WRITE:
RelationSetSingleWLockPage(rel, &iptr);
break;
@ -339,10 +349,12 @@ _hash_unsetpagelock(Relation rel,
{
ItemPointerData iptr;
if (USELOCKING) {
if (USELOCKING)
{
ItemPointerSet(&iptr, blkno, 1);
switch (access) {
switch (access)
{
case HASH_WRITE:
RelationUnsetSingleWLockPage(rel, &iptr);
break;
@ -379,12 +391,16 @@ _hash_pagedel(Relation rel, ItemPointer tid)
PageIndexTupleDelete(page, offno);
_hash_wrtnorelbuf(rel, buf);
if (PageIsEmpty(page) && (opaque->hasho_flag & LH_OVERFLOW_PAGE)) {
if (PageIsEmpty(page) && (opaque->hasho_flag & LH_OVERFLOW_PAGE))
{
buf = _hash_freeovflpage(rel, buf);
if (BufferIsValid(buf)) {
if (BufferIsValid(buf))
{
_hash_relbuf(rel, buf, HASH_WRITE);
}
} else {
}
else
{
_hash_relbuf(rel, buf, HASH_WRITE);
}
@ -414,12 +430,13 @@ _hash_expandtable(Relation rel, Buffer metabuf)
old_bucket = (metap->MAX_BUCKET & metap->LOW_MASK);
/*
* If the split point is increasing (MAX_BUCKET's log base 2
* * increases), we need to copy the current contents of the spare
* split bucket to the next bucket.
* If the split point is increasing (MAX_BUCKET's log base 2 *
* increases), we need to copy the current contents of the spare split
* bucket to the next bucket.
*/
spare_ndx = _hash_log2(metap->MAX_BUCKET + 1);
if (spare_ndx > metap->OVFL_POINT) {
if (spare_ndx > metap->OVFL_POINT)
{
metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_READ, HASH_WRITE);
metap->SPARES[spare_ndx] = metap->SPARES[metap->OVFL_POINT];
@ -427,7 +444,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_WRITE, HASH_READ);
}
if (new_bucket > metap->HIGH_MASK) {
if (new_bucket > metap->HIGH_MASK)
{
/* Starting a new doubling */
metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_READ, HASH_WRITE);
@ -500,23 +518,25 @@ _hash_splitpage(Relation rel,
_hash_wrtnorelbuf(rel, nbuf);
/*
* make sure the old bucket isn't empty. advance 'opage' and
* friends through the overflow bucket chain until we find a
* non-empty page.
* make sure the old bucket isn't empty. advance 'opage' and friends
* through the overflow bucket chain until we find a non-empty page.
*
* XXX we should only need this once, if we are careful to
* preserve the invariant that overflow pages are never empty.
* XXX we should only need this once, if we are careful to preserve the
* invariant that overflow pages are never empty.
*/
_hash_checkpage(opage, LH_BUCKET_PAGE);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
if (PageIsEmpty(opage)) {
if (PageIsEmpty(opage))
{
oblkno = oopaque->hasho_nextblkno;
_hash_relbuf(rel, obuf, HASH_WRITE);
if (!BlockNumberIsValid(oblkno)) {
if (!BlockNumberIsValid(oblkno))
{
/*
* the old bucket is completely empty; of course, the new
* bucket will be as well, but since it's a base bucket
* page we don't care.
* bucket will be as well, but since it's a base bucket page
* we don't care.
*/
_hash_relbuf(rel, nbuf, HASH_WRITE);
return;
@ -524,7 +544,8 @@ _hash_splitpage(Relation rel,
obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
opage = BufferGetPage(obuf);
_hash_checkpage(opage, LH_OVERFLOW_PAGE);
if (PageIsEmpty(opage)) {
if (PageIsEmpty(opage))
{
elog(WARN, "_hash_splitpage: empty overflow page %d", oblkno);
}
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
@ -532,26 +553,31 @@ _hash_splitpage(Relation rel,
/*
* we are now guaranteed that 'opage' is not empty. partition the
* tuples in the old bucket between the old bucket and the new
* bucket, advancing along their respective overflow bucket chains
* and adding overflow pages as needed.
* tuples in the old bucket between the old bucket and the new bucket,
* advancing along their respective overflow bucket chains and adding
* overflow pages as needed.
*/
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
for (;;) {
for (;;)
{
/*
* at each iteration through this loop, each of these variables
* should be up-to-date: obuf opage oopaque ooffnum omaxoffnum
*/
/* check if we're at the end of the page */
if (ooffnum > omaxoffnum) {
if (ooffnum > omaxoffnum)
{
/* at end of page, but check for overflow page */
oblkno = oopaque->hasho_nextblkno;
if (BlockNumberIsValid(oblkno)) {
if (BlockNumberIsValid(oblkno))
{
/*
* we ran out of tuples on this particular page, but
* we have more overflow pages; re-init values.
* we ran out of tuples on this particular page, but we
* have more overflow pages; re-init values.
*/
_hash_wrtbuf(rel, obuf);
obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
@ -560,19 +586,23 @@ _hash_splitpage(Relation rel,
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
/* we're guaranteed that an ovfl page has at least 1 tuple */
if (PageIsEmpty(opage)) {
if (PageIsEmpty(opage))
{
elog(WARN, "_hash_splitpage: empty ovfl page %d!",
oblkno);
}
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
} else {
}
else
{
/*
* we're at the end of the bucket chain, so now we're
* really done with everything. before quitting, call
* _hash_squeezebucket to ensure the tuples in the
* bucket (including the overflow pages) are packed as
* tightly as possible.
* _hash_squeezebucket to ensure the tuples in the bucket
* (including the overflow pages) are packed as tightly as
* possible.
*/
_hash_wrtbuf(rel, obuf);
_hash_wrtbuf(rel, nbuf);
@ -588,19 +618,21 @@ _hash_splitpage(Relation rel,
datum = index_getattr(itup, 1, itupdesc, &null);
bucket = _hash_call(rel, metap, datum);
if (bucket == nbucket) {
if (bucket == nbucket)
{
/*
* insert the tuple into the new bucket. if it doesn't
* fit on the current page in the new bucket, we must
* allocate a new overflow page and place the tuple on
* that page instead.
* insert the tuple into the new bucket. if it doesn't fit on
* the current page in the new bucket, we must allocate a new
* overflow page and place the tuple on that page instead.
*/
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
itemsz = DOUBLEALIGN(itemsz);
if (PageGetFreeSpace(npage) < itemsz) {
if (PageGetFreeSpace(npage) < itemsz)
{
ovflbuf = _hash_addovflpage(rel, &metabuf, nbuf);
_hash_wrtbuf(rel, nbuf);
nbuf = ovflbuf;
@ -615,53 +647,58 @@ _hash_splitpage(Relation rel,
/*
* now delete the tuple from the old bucket. after this
* section of code, 'ooffnum' will actually point to the
* ItemId to which we would point if we had advanced it
* before the deletion (PageIndexTupleDelete repacks the
* ItemId array). this also means that 'omaxoffnum' is
* exactly one less than it used to be, so we really can
* just decrement it instead of calling
* PageGetMaxOffsetNumber.
* ItemId to which we would point if we had advanced it before
* the deletion (PageIndexTupleDelete repacks the ItemId
* array). this also means that 'omaxoffnum' is exactly one
* less than it used to be, so we really can just decrement it
* instead of calling PageGetMaxOffsetNumber.
*/
PageIndexTupleDelete(opage, ooffnum);
_hash_wrtnorelbuf(rel, obuf);
omaxoffnum = OffsetNumberPrev(omaxoffnum);
/*
* tidy up. if the old page was an overflow page and it
* is now empty, we must free it (we want to preserve the
* tidy up. if the old page was an overflow page and it is
* now empty, we must free it (we want to preserve the
* invariant that overflow pages cannot be empty).
*/
if (PageIsEmpty(opage) &&
(oopaque->hasho_flag & LH_OVERFLOW_PAGE)) {
(oopaque->hasho_flag & LH_OVERFLOW_PAGE))
{
obuf = _hash_freeovflpage(rel, obuf);
/* check that we're not through the bucket chain */
if (BufferIsInvalid(obuf)) {
if (BufferIsInvalid(obuf))
{
_hash_wrtbuf(rel, nbuf);
_hash_squeezebucket(rel, metap, obucket);
return;
}
/*
* re-init. again, we're guaranteed that an ovfl page
* has at least one tuple.
* re-init. again, we're guaranteed that an ovfl page has
* at least one tuple.
*/
opage = BufferGetPage(obuf);
_hash_checkpage(opage, LH_OVERFLOW_PAGE);
oblkno = BufferGetBlockNumber(obuf);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
if (PageIsEmpty(opage)) {
if (PageIsEmpty(opage))
{
elog(WARN, "_hash_splitpage: empty overflow page %d",
oblkno);
}
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
}
} else {
}
else
{
/*
* the tuple stays on this page. we didn't move anything,
* so we didn't delete anything and therefore we don't
* have to change 'omaxoffnum'.
* the tuple stays on this page. we didn't move anything, so
* we didn't delete anything and therefore we don't have to
* change 'omaxoffnum'.
*
* XXX any hash value from [0, nbucket-1] will map to this
* bucket, which doesn't make sense to me.
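
Editor's aside (not part of this commit): the metapage-initialization hunk earlier in this file walks a shift count downward until 1 << i drops below the page size minus header and special-space overhead. Here is a small stand-alone sketch of that loop, with invented constants standing in for the real page layout.

#include <stdio.h>

/*
 * Find the largest power of two strictly smaller than the usable space on
 * a page.  The constants are illustrative, not the backend's real numbers.
 */
int
main(void)
{
	unsigned	page_size = 8192;	/* hypothetical block size */
	unsigned	overhead = 40;		/* hypothetical header + special space */
	unsigned	usable = page_size - overhead;
	int			shift;

	for (shift = 31; shift > 0; --shift)
		if ((1u << shift) < usable)
			break;

	printf("usable=%u, largest power of two below it = %u (shift %d)\n",
		   usable, 1u << shift, shift);
	return 0;
}
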

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.8 1996/11/15 18:36:31 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.9 1997/09/07 04:38:01 momjian Exp $
*
* NOTES
* Because we can be doing an index scan on a relation while we
@ -34,7 +34,8 @@
static void _hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno);
static bool _hash_scantouched(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno);
typedef struct HashScanListData {
typedef struct HashScanListData
{
IndexScanDesc hashsl_scan;
struct HashScanListData *hashsl_next;
} HashScanListData;
@ -63,12 +64,14 @@ _hash_regscan(IndexScanDesc scan)
void
_hash_dropscan(IndexScanDesc scan)
{
HashScanList chk, last;
HashScanList chk,
last;
last = (HashScanList) NULL;
for (chk = HashScans;
chk != (HashScanList) NULL && chk->hashsl_scan != scan;
chk = chk->hashsl_next) {
chk = chk->hashsl_next)
{
last = chk;
}
@ -90,7 +93,8 @@ _hash_adjscans(Relation rel, ItemPointer tid)
Oid relid;
relid = rel->rd_id;
for (l = HashScans; l != (HashScanList) NULL; l = l->hashsl_next) {
for (l = HashScans; l != (HashScanList) NULL; l = l->hashsl_next)
{
if (relid == l->hashsl_scan->relation->rd_id)
_hash_scandel(l->hashsl_scan, ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
@ -116,7 +120,8 @@ _hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
current = &(scan->currentItemData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno) {
&& ItemPointerGetOffsetNumber(current) >= offno)
{
_hash_step(scan, &buf, BackwardScanDirection, metabuf);
so->hashso_curbuf = buf;
}
@ -124,8 +129,10 @@ _hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
current = &(scan->currentMarkData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno) {
&& ItemPointerGetOffsetNumber(current) >= offno)
{
ItemPointerData tmp;
tmp = *current;
*current = scan->currentItemData;
scan->currentItemData = tmp;
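
Editor's aside (not part of this commit): _hash_dropscan above removes a scan from a global singly linked list by walking it with a trailing pointer. A minimal sketch of the same unlink pattern, using a made-up Node type in place of HashScanListData:

#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
	int			id;				/* stands in for the IndexScanDesc */
	struct Node *next;
} Node;

/* Remove the node whose id matches, keeping a trailing 'last' pointer. */
static Node *
drop(Node *head, int id)
{
	Node	   *last = NULL;
	Node	   *chk;

	for (chk = head; chk != NULL && chk->id != id; chk = chk->next)
		last = chk;

	if (chk == NULL)
		return head;			/* not found */
	if (last == NULL)
		head = chk->next;		/* removing the head */
	else
		last->next = chk->next;
	free(chk);
	return head;
}

int
main(void)
{
	Node	   *head = NULL;
	Node	   *p;
	int			i;

	for (i = 3; i >= 1; --i)	/* build list 1 -> 2 -> 3 */
	{
		Node	   *n = malloc(sizeof(Node));

		n->id = i;
		n->next = head;
		head = n;
	}
	head = drop(head, 2);
	for (p = head; p != NULL; p = p->next)
		printf("%d ", p->id);	/* prints: 1 3 */
	printf("\n");
	return 0;
}
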

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.10 1997/06/28 05:45:40 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.11 1997/09/07 04:38:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,14 +39,17 @@ _hash_search(Relation rel,
Bucket bucket;
if (scankey == (ScanKey) NULL ||
(keyDatum = scankey[0].sk_argument) == (Datum) NULL) {
(keyDatum = scankey[0].sk_argument) == (Datum) NULL)
{
/*
* If the scankey argument is NULL, all tuples will satisfy
* the scan so we start the scan at the first bucket (bucket
* 0).
* If the scankey argument is NULL, all tuples will satisfy the
* scan so we start the scan at the first bucket (bucket 0).
*/
bucket = 0;
} else {
}
else
{
bucket = _hash_call(rel, metap, keyDatum);
}
@ -86,11 +89,12 @@ _hash_next(IndexScanDesc scan, ScanDirection dir)
/*
* XXX 10 may 91: somewhere there's a bug in our management of the
* cached buffer for this scan. wei discovered it. the following
* is a workaround so he can work until i figure out what's going on.
* cached buffer for this scan. wei discovered it. the following is
* a workaround so he can work until i figure out what's going on.
*/
if (!BufferIsValid(so->hashso_curbuf)) {
if (!BufferIsValid(so->hashso_curbuf))
{
so->hashso_curbuf = _hash_getbuf(rel,
ItemPointerGetBlockNumber(current),
HASH_READ);
@ -100,11 +104,12 @@ _hash_next(IndexScanDesc scan, ScanDirection dir)
buf = so->hashso_curbuf;
/*
* step to next valid tuple. note that _hash_step releases our
* lock on 'metabuf'; if we switch to a new 'buf' while looking
* for the next tuple, we come back with a lock on that buffer.
* step to next valid tuple. note that _hash_step releases our lock
* on 'metabuf'; if we switch to a new 'buf' while looking for the
* next tuple, we come back with a lock on that buffer.
*/
if (!_hash_step(scan, &buf, dir, metabuf)) {
if (!_hash_step(scan, &buf, dir, metabuf))
{
return ((RetrieveIndexResult) NULL);
}
@ -129,7 +134,8 @@ _hash_readnext(Relation rel,
blkno = (*opaquep)->hasho_nextblkno;
_hash_relbuf(rel, *bufp, HASH_READ);
*bufp = InvalidBuffer;
if (BlockNumberIsValid(blkno)) {
if (BlockNumberIsValid(blkno))
{
*bufp = _hash_getbuf(rel, blkno, HASH_READ);
*pagep = BufferGetPage(*bufp);
_hash_checkpage(*pagep, LH_OVERFLOW_PAGE);
@ -147,12 +153,14 @@ _hash_readprev(Relation rel,
blkno = (*opaquep)->hasho_prevblkno;
_hash_relbuf(rel, *bufp, HASH_READ);
*bufp = InvalidBuffer;
if (BlockNumberIsValid(blkno)) {
if (BlockNumberIsValid(blkno))
{
*bufp = _hash_getbuf(rel, blkno, HASH_READ);
*pagep = BufferGetPage(*bufp);
_hash_checkpage(*pagep, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
if (PageIsEmpty(*pagep)) {
if (PageIsEmpty(*pagep))
{
Assert((*opaquep)->hasho_flag & LH_BUCKET_PAGE);
_hash_relbuf(rel, *bufp, HASH_READ);
*bufp = InvalidBuffer;
@ -194,10 +202,9 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
_hash_checkpage((Page) metap, LH_META_PAGE);
/*
* XXX -- The attribute number stored in the scan key is the attno
* in the heap relation. We need to transmogrify this into
* the index relation attno here. For the moment, we have
* hardwired attno == 1.
* XXX -- The attribute number stored in the scan key is the attno in
* the heap relation. We need to transmogrify this into the index
* relation attno here. For the moment, we have hardwired attno == 1.
*/
/* find the correct bucket page and load it into buf */
@ -208,23 +215,28 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
/*
* if we are scanning forward, we need to find the first non-empty
* page (if any) in the bucket chain. since overflow pages are
* never empty, this had better be either the bucket page or the
* first overflow page.
* page (if any) in the bucket chain. since overflow pages are never
* empty, this had better be either the bucket page or the first
* overflow page.
*
* if we are scanning backward, we always go all the way to the
* end of the bucket chain.
* if we are scanning backward, we always go all the way to the end of
* the bucket chain.
*/
if (PageIsEmpty(page)) {
if (BlockNumberIsValid(opaque->hasho_nextblkno)) {
if (PageIsEmpty(page))
{
if (BlockNumberIsValid(opaque->hasho_nextblkno))
{
_hash_readnext(rel, &buf, &page, &opaque);
} else {
}
else
{
ItemPointerSetInvalid(current);
so->hashso_curbuf = InvalidBuffer;
/*
* If there is no scankeys, all tuples will satisfy
* the scan - so we continue in _hash_step to get
* tuples from all buckets. - vadim 04/29/97
* If there is no scankeys, all tuples will satisfy the scan -
* so we continue in _hash_step to get tuples from all
* buckets. - vadim 04/29/97
*/
if (scan->numberOfKeys >= 1)
{
@ -234,13 +246,16 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
}
}
}
if (ScanDirectionIsBackward(dir)) {
while (BlockNumberIsValid(opaque->hasho_nextblkno)) {
if (ScanDirectionIsBackward(dir))
{
while (BlockNumberIsValid(opaque->hasho_nextblkno))
{
_hash_readnext(rel, &buf, &page, &opaque);
}
}
if (!_hash_step(scan, &buf, dir, metabuf)) {
if (!_hash_step(scan, &buf, dir, metabuf))
{
return ((RetrieveIndexResult) NULL);
}
@ -305,37 +320,48 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
* presumably want to start at the beginning/end of the page...
*/
maxoff = PageGetMaxOffsetNumber(page);
if (ItemPointerIsValid(current)) {
if (ItemPointerIsValid(current))
{
offnum = ItemPointerGetOffsetNumber(current);
} else {
}
else
{
offnum = InvalidOffsetNumber;
}
/*
* 'offnum' now points to the last tuple we have seen (if any).
*
* continue to step through tuples until:
* 1) we get to the end of the bucket chain or
* 2) we find a valid tuple.
* continue to step through tuples until: 1) we get to the end of the
* bucket chain or 2) we find a valid tuple.
*/
do {
do
{
bucket = opaque->hasho_bucket;
switch (dir) {
switch (dir)
{
case ForwardScanDirection:
if (offnum != InvalidOffsetNumber) {
if (offnum != InvalidOffsetNumber)
{
offnum = OffsetNumberNext(offnum); /* move forward */
} else {
}
else
{
offnum = FirstOffsetNumber; /* new page */
}
while (offnum > maxoff) {
while (offnum > maxoff)
{
/*
* either this page is empty (maxoff ==
* InvalidOffsetNumber) or we ran off the end.
*/
_hash_readnext(rel, &buf, &page, &opaque);
if (BufferIsInvalid(buf)) { /* end of chain */
if (allbuckets && bucket < metap->hashm_maxbucket) {
if (BufferIsInvalid(buf))
{ /* end of chain */
if (allbuckets && bucket < metap->hashm_maxbucket)
{
++bucket;
blkno = BUCKET_TO_BLKNO(bucket);
buf = _hash_getbuf(rel, blkno, HASH_READ);
@ -344,16 +370,21 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_bucket == bucket);
while (PageIsEmpty(page) &&
BlockNumberIsValid(opaque->hasho_nextblkno)) {
BlockNumberIsValid(opaque->hasho_nextblkno))
{
_hash_readnext(rel, &buf, &page, &opaque);
}
maxoff = PageGetMaxOffsetNumber(page);
offnum = FirstOffsetNumber;
} else {
}
else
{
maxoff = offnum = InvalidOffsetNumber;
break; /* while */
}
} else {
}
else
{
/* _hash_readnext never returns an empty page */
maxoff = PageGetMaxOffsetNumber(page);
offnum = FirstOffsetNumber;
@ -361,19 +392,26 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
}
break;
case BackwardScanDirection:
if (offnum != InvalidOffsetNumber) {
if (offnum != InvalidOffsetNumber)
{
offnum = OffsetNumberPrev(offnum); /* move back */
} else {
}
else
{
offnum = maxoff;/* new page */
}
while (offnum < FirstOffsetNumber) {
while (offnum < FirstOffsetNumber)
{
/*
* either this page is empty (offnum ==
* InvalidOffsetNumber) or we ran off the end.
*/
_hash_readprev(rel, &buf, &page, &opaque);
if (BufferIsInvalid(buf)) { /* end of chain */
if (allbuckets && bucket > 0) {
if (BufferIsInvalid(buf))
{ /* end of chain */
if (allbuckets && bucket > 0)
{
--bucket;
blkno = BUCKET_TO_BLKNO(bucket);
buf = _hash_getbuf(rel, blkno, HASH_READ);
@ -381,15 +419,20 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
_hash_checkpage(page, LH_BUCKET_PAGE);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_bucket == bucket);
while (BlockNumberIsValid(opaque->hasho_nextblkno)) {
while (BlockNumberIsValid(opaque->hasho_nextblkno))
{
_hash_readnext(rel, &buf, &page, &opaque);
}
maxoff = offnum = PageGetMaxOffsetNumber(page);
} else {
}
else
{
maxoff = offnum = InvalidOffsetNumber;
break; /* while */
}
} else {
}
else
{
/* _hash_readprev never returns an empty page */
maxoff = offnum = PageGetMaxOffsetNumber(page);
}
@ -402,7 +445,8 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
}
/* we ran off the end of the world without finding a match */
if (offnum == InvalidOffsetNumber) {
if (offnum == InvalidOffsetNumber)
{
_hash_relbuf(rel, metabuf, HASH_READ);
*bufP = so->hashso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(current);
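
Editor's aside (not part of this commit): the _hash_step hunks above advance an offset and hop to the next page of the chain whenever the offset runs off the end of the current page. Below is a toy model of the forward case only, using fixed-size arrays as stand-in pages; none of these names come from the file.

#include <stdio.h>

#define NPAGES   3
#define PAGESIZE 4

static int	pages[NPAGES][PAGESIZE] = {
	{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}
};

/*
 * Fetch the next item, moving to the next "page" when we run off the end
 * of the current one.  Returns 1 while items remain, 0 at end of chain.
 */
static int
step_forward(int *pageno, int *offnum, int *out)
{
	if (*offnum >= PAGESIZE)	/* ran off the end of this page */
	{
		if (*pageno + 1 >= NPAGES)
			return 0;			/* end of chain */
		(*pageno)++;
		*offnum = 0;
	}
	*out = pages[*pageno][(*offnum)++];
	return 1;
}

int
main(void)
{
	int			pageno = 0,
				offnum = 0,
				val;

	while (step_forward(&pageno, &offnum, &val))
		printf("%d ", val);		/* prints 1..12 */
	printf("\n");
	return 0;
}
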

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/Attic/hashstrat.c,v 1.9 1997/08/20 02:01:42 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/Attic/hashstrat.c,v 1.10 1997/09/07 04:38:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -43,6 +43,7 @@ static StrategyEvaluationData HTEvaluationData = {
(StrategyTransformMap) HTNegateCommute,
{NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL}
};
#endif
/* ----------------------------------------------------------------
@ -64,6 +65,7 @@ _hash_getstrat(Relation rel,
return (strat);
}
#endif
#ifdef NOT_USED
@ -77,4 +79,5 @@ _hash_invokestrat(Relation rel,
return (RelationInvokeStrategy(rel, &HTEvaluationData, attno, strat,
left, right));
}
#endif

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashutil.c,v 1.9 1997/08/14 05:01:32 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashutil.c,v 1.10 1997/09/07 04:38:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -41,7 +41,8 @@ _hash_mkscankey(Relation rel, IndexTuple itup, HashMetaPage metap)
skey = (ScanKey) palloc(natts * sizeof(ScanKeyData));
for (i = 0; i < natts; i++) {
for (i = 0; i < natts; i++)
{
arg = index_getattr(itup, i + 1, itupdesc, &null);
proc = metap->hashm_procid;
ScanKeyEntryInitialize(&skey[i],
@ -112,7 +113,8 @@ _hash_call(Relation rel, HashMetaPage metap, Datum key)
uint32
_hash_log2(uint32 num)
{
uint32 i, limit;
uint32 i,
limit;
limit = 1;
for (i = 0; limit < num; limit = limit << 1, i++)
@ -137,7 +139,8 @@ _hash_checkpage(Page page, int flags)
(BLCKSZ - DOUBLEALIGN(sizeof(HashPageOpaqueData))));
Assert(((PageHeader) (page))->pd_opaque.od_pagesize == BLCKSZ);
#endif
if (flags) {
if (flags)
{
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_flag & flags);
}
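
Editor's aside (not part of this commit): _hash_log2 above counts how many times 1 must be doubled to reach num, i.e. the smallest i with (1 << i) >= num. The same shift-and-count loop, isolated so it can be compiled and tested on its own:

#include <stdio.h>

static unsigned
ceil_log2(unsigned num)
{
	unsigned	i;
	unsigned	limit;

	for (i = 0, limit = 1; limit < num; limit <<= 1, i++)
		;
	return i;
}

int
main(void)
{
	printf("%u %u %u\n", ceil_log2(1), ceil_log2(2), ceil_log2(1000));
	/* prints: 0 1 10 */
	return 0;
}
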

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.15 1997/08/27 09:00:20 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.16 1997/09/07 04:38:09 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -114,14 +114,17 @@ initsdesc(HeapScanDesc sdesc,
unsigned nkeys,
ScanKey key)
{
if (!RelationGetNumberOfBlocks(relation)) {
if (!RelationGetNumberOfBlocks(relation))
{
/* ----------------
* relation is empty
* ----------------
*/
sdesc->rs_ntup = sdesc->rs_ctup = sdesc->rs_ptup = NULL;
sdesc->rs_nbuf = sdesc->rs_cbuf = sdesc->rs_pbuf = InvalidBuffer;
} else if (atend) {
}
else if (atend)
{
/* ----------------
* reverse scan
* ----------------
@ -130,7 +133,9 @@ initsdesc(HeapScanDesc sdesc,
sdesc->rs_nbuf = sdesc->rs_cbuf = InvalidBuffer;
sdesc->rs_ptup = NULL;
sdesc->rs_pbuf = UnknownBuffer;
} else {
}
else
{
/* ----------------
* forward scan
* ----------------
@ -162,7 +167,8 @@ initsdesc(HeapScanDesc sdesc,
static void
unpinsdesc(HeapScanDesc sdesc)
{
if (BufferIsValid(sdesc->rs_pbuf)) {
if (BufferIsValid(sdesc->rs_pbuf))
{
ReleaseBuffer(sdesc->rs_pbuf);
}
@ -172,11 +178,13 @@ unpinsdesc(HeapScanDesc sdesc)
* times.
* ------------------------------------
*/
if (BufferIsValid(sdesc->rs_cbuf)) {
if (BufferIsValid(sdesc->rs_cbuf))
{
ReleaseBuffer(sdesc->rs_cbuf);
}
if (BufferIsValid(sdesc->rs_nbuf)) {
if (BufferIsValid(sdesc->rs_nbuf))
{
ReleaseBuffer(sdesc->rs_nbuf);
}
}
@ -235,27 +243,34 @@ heapgettup(Relation relation,
* ----------------
*/
#ifdef HEAPDEBUGALL
if (ItemPointerIsValid(tid)) {
if (ItemPointerIsValid(tid))
{
elog(DEBUG, "heapgettup(%.16s, tid=0x%x[%d,%d], dir=%d, ...)",
RelationGetRelationName(relation), tid, tid->ip_blkid,
tid->ip_posid, dir);
} else {
}
else
{
elog(DEBUG, "heapgettup(%.16s, tid=0x%x, dir=%d, ...)",
RelationGetRelationName(relation), tid, dir);
}
elog(DEBUG, "heapgettup(..., b=0x%x, timeQ=0x%x, nkeys=%d, key=0x%x",
b, timeQual, nkeys, key);
if (timeQual == SelfTimeQual) {
if (timeQual == SelfTimeQual)
{
elog(DEBUG, "heapgettup: relation(%c)=`%.16s', SelfTimeQual",
relation->rd_rel->relkind, &relation->rd_rel->relname);
} else {
}
else
{
elog(DEBUG, "heapgettup: relation(%c)=`%.16s', timeQual=%d",
relation->rd_rel->relkind, &relation->rd_rel->relname,
timeQual);
}
#endif /* !defined(HEAPDEBUGALL) */
if (!ItemPointerIsValid(tid)) {
if (!ItemPointerIsValid(tid))
{
Assert(!PointerIsValid(tid));
}
@ -270,13 +285,15 @@ heapgettup(Relation relation,
* calculate next starting lineoff, given scan direction
* ----------------
*/
if (!dir) {
if (!dir)
{
/* ----------------
* ``no movement'' scan direction
* ----------------
*/
/* assume it is a valid TID XXX */
if (ItemPointerIsValid(tid) == false) {
if (ItemPointerIsValid(tid) == false)
{
*b = InvalidBuffer;
return (NULL);
}
@ -285,7 +302,8 @@ heapgettup(Relation relation,
*b);
#ifndef NO_BUFFERISVALID
if (!BufferIsValid(*b)) {
if (!BufferIsValid(*b))
{
elog(WARN, "heapgettup: failed ReadBuffer");
}
#endif
@ -297,56 +315,73 @@ heapgettup(Relation relation,
rtup = (HeapTuple) PageGetItem((Page) dp, lpp);
return (rtup);
} else if (dir < 0) {
}
else if (dir < 0)
{
/* ----------------
* reverse scan direction
* ----------------
*/
if (ItemPointerIsValid(tid) == false) {
if (ItemPointerIsValid(tid) == false)
{
tid = NULL;
}
if (tid == NULL) {
if (tid == NULL)
{
page = pages - 1; /* final page */
} else {
}
else
{
page = ItemPointerGetBlockNumber(tid); /* current page */
}
if (page < 0) {
if (page < 0)
{
*b = InvalidBuffer;
return (NULL);
}
*b = RelationGetBufferWithBuffer(relation, page, *b);
#ifndef NO_BUFFERISVALID
if (!BufferIsValid(*b)) {
if (!BufferIsValid(*b))
{
elog(WARN, "heapgettup: failed ReadBuffer");
}
#endif
dp = (Page) BufferGetPage(*b);
lines = PageGetMaxOffsetNumber(dp);
if (tid == NULL) {
if (tid == NULL)
{
lineoff = lines; /* final offnum */
} else {
}
else
{
lineoff = /* previous offnum */
OffsetNumberPrev(ItemPointerGetOffsetNumber(tid));
}
/* page and lineoff now reference the physically previous tid */
} else {
}
else
{
/* ----------------
* forward scan direction
* ----------------
*/
if (ItemPointerIsValid(tid) == false) {
if (ItemPointerIsValid(tid) == false)
{
page = 0; /* first page */
lineoff = FirstOffsetNumber; /* first offnum */
} else {
}
else
{
page = ItemPointerGetBlockNumber(tid); /* current page */
lineoff = /* next offnum */
OffsetNumberNext(ItemPointerGetOffsetNumber(tid));
}
if (page >= pages) {
if (page >= pages)
{
*b = InvalidBuffer;
return (NULL);
}
@ -354,7 +389,8 @@ heapgettup(Relation relation,
*b = RelationGetBufferWithBuffer(relation, page, *b);
#ifndef NO_BUFFERISVALID
if (!BufferIsValid(*b)) {
if (!BufferIsValid(*b))
{
elog(WARN, "heapgettup: failed ReadBuffer");
}
#endif
@ -371,9 +407,12 @@ heapgettup(Relation relation,
* ----------------
*/
lpp = PageGetItemId(dp, lineoff);
if (dir < 0) {
if (dir < 0)
{
linesleft = lineoff - 1;
} else {
}
else
{
linesleft = lines - lineoff;
}
@ -382,20 +421,25 @@ heapgettup(Relation relation,
* run out of stuff to scan
* ----------------
*/
for (;;) {
while (linesleft >= 0) {
for (;;)
{
while (linesleft >= 0)
{
/* ----------------
* if current tuple qualifies, return it.
* ----------------
*/
if ((rtup = heap_tuple_satisfies(lpp, relation, *b, (PageHeader) dp,
timeQual, nkeys, key)) != NULL) {
timeQual, nkeys, key)) != NULL)
{
ItemPointer iptr = &(rtup->t_ctid);
if (ItemPointerGetBlockNumber(iptr) != page) {
if (ItemPointerGetBlockNumber(iptr) != page)
{
/*
* set block id to the correct page number
* --- this is a hack to support the virtual fragment
* concept
* set block id to the correct page number --- this is
* a hack to support the virtual fragment concept
*/
ItemPointerSetBlockNumber(iptr, page);
}
@ -407,10 +451,14 @@ heapgettup(Relation relation,
* ----------------
*/
--linesleft;
if (dir < 0) {
if (dir < 0)
{
--lpp; /* move back in this page's ItemId array */
} else {
++lpp; /* move forward in this page's ItemId array */
}
else
{
++lpp; /* move forward in this page's ItemId
* array */
}
}
@ -425,7 +473,8 @@ heapgettup(Relation relation,
* return NULL if we've exhausted all the pages..
* ----------------
*/
if (page < 0 || page >= pages) {
if (page < 0 || page >= pages)
{
if (BufferIsValid(*b))
ReleaseBuffer(*b);
*b = InvalidBuffer;
@ -435,16 +484,20 @@ heapgettup(Relation relation,
*b = ReleaseAndReadBuffer(*b, relation, page);
#ifndef NO_BUFFERISVALID
if (!BufferIsValid(*b)) {
if (!BufferIsValid(*b))
{
elog(WARN, "heapgettup: failed ReadBuffer");
}
#endif
dp = (Page) BufferGetPage(*b);
lines = lineoff = PageGetMaxOffsetNumber((Page) dp);
linesleft = lines - 1;
if (dir < 0) {
if (dir < 0)
{
lpp = PageGetItemId(dp, lineoff);
} else {
}
else
{
lpp = PageGetItemId(dp, FirstOffsetNumber);
}
}
@ -471,6 +524,7 @@ SetHeapAccessMethodImmediateInvalidation(bool on)
{
ImmediateInvalidation = on;
}
#endif
/* ----------------------------------------------------------------
@ -498,7 +552,8 @@ heap_open(Oid relationId)
r = (Relation) RelationIdGetRelation(relationId);
if (RelationIsValid(r) && r->rd_rel->relkind == RELKIND_INDEX) {
if (RelationIsValid(r) && r->rd_rel->relkind == RELKIND_INDEX)
{
elog(WARN, "%s is an index relation", r->rd_rel->relname.data);
}
@ -526,7 +581,8 @@ heap_openr(char *relationName)
r = RelationNameGetRelation(relationName);
if (RelationIsValid(r) && r->rd_rel->relkind == RELKIND_INDEX) {
if (RelationIsValid(r) && r->rd_rel->relkind == RELKIND_INDEX)
{
elog(WARN, "%s is an index relation", r->rd_rel->relname.data);
}
@ -588,7 +644,8 @@ heap_beginscan(Relation relation,
RelationSetLockForRead(relation);
/* XXX someday assert SelfTimeQual if relkind == RELKIND_UNCATALOGED */
if (relation->rd_rel->relkind == RELKIND_UNCATALOGED) {
if (relation->rd_rel->relkind == RELKIND_UNCATALOGED)
{
timeQual = SelfTimeQual;
}
@ -607,13 +664,18 @@ heap_beginscan(Relation relation,
relation->rd_nblocks = smgrnblocks(relation->rd_rel->relsmgr, relation);
sdesc->rs_rd = relation;
if (nkeys) {
if (nkeys)
{
/*
* we do this here instead of in initsdesc() because heap_rescan also
* calls initsdesc() and we don't want to allocate memory again
* we do this here instead of in initsdesc() because heap_rescan
* also calls initsdesc() and we don't want to allocate memory
* again
*/
sdesc->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
} else {
}
else
{
sdesc->rs_key = NULL;
}
@ -768,12 +830,14 @@ heap_getnext(HeapScanDesc scandesc,
* initialize return buffer to InvalidBuffer
* ----------------
*/
if (! PointerIsValid(b)) b = &localb;
if (!PointerIsValid(b))
b = &localb;
(*b) = InvalidBuffer;
HEAPDEBUG_1; /* heap_getnext( info ) */
if (backw) {
if (backw)
{
/* ----------------
* handle reverse scan
* ----------------
@ -789,11 +853,11 @@ heap_getnext(HeapScanDesc scandesc,
}
/*
* Copy the "current" tuple/buffer
* to "next". Pin/unpin the buffers
* accordingly
* Copy the "current" tuple/buffer to "next". Pin/unpin the
* buffers accordingly
*/
if (sdesc->rs_nbuf != sdesc->rs_cbuf) {
if (sdesc->rs_nbuf != sdesc->rs_cbuf)
{
if (BufferIsValid(sdesc->rs_nbuf))
ReleaseBuffer(sdesc->rs_nbuf);
if (BufferIsValid(sdesc->rs_cbuf))
@ -802,8 +866,10 @@ heap_getnext(HeapScanDesc scandesc,
sdesc->rs_ntup = sdesc->rs_ctup;
sdesc->rs_nbuf = sdesc->rs_cbuf;
if (sdesc->rs_ptup != NULL) {
if (sdesc->rs_cbuf != sdesc->rs_pbuf) {
if (sdesc->rs_ptup != NULL)
{
if (sdesc->rs_cbuf != sdesc->rs_pbuf)
{
if (BufferIsValid(sdesc->rs_cbuf))
ReleaseBuffer(sdesc->rs_cbuf);
if (BufferIsValid(sdesc->rs_pbuf))
@ -811,18 +877,22 @@ heap_getnext(HeapScanDesc scandesc,
}
sdesc->rs_ctup = sdesc->rs_ptup;
sdesc->rs_cbuf = sdesc->rs_pbuf;
} else { /* NONTUP */
}
else
{ /* NONTUP */
ItemPointer iptr;
iptr = (sdesc->rs_ctup != NULL) ?
&(sdesc->rs_ctup->t_ctid) : (ItemPointer) NULL;
/* Don't release sdesc->rs_cbuf at this point, because
heapgettup doesn't increase PrivateRefCount if it
is already set. On a backward scan, both rs_ctup and rs_ntup
usually point to the same buffer page, so
PrivateRefCount[rs_cbuf] should be 2 (or more, if for instance
ctup is stored in a TupleTableSlot). - 01/09/94 */
/*
* Don't release sdesc->rs_cbuf at this point, because
* heapgettup doesn't increase PrivateRefCount if it is
* already set. On a backward scan, both rs_ctup and rs_ntup
* usually point to the same buffer page, so
* PrivateRefCount[rs_cbuf] should be 2 (or more, if for
* instance ctup is stored in a TupleTableSlot). - 01/09/94
*/
sdesc->rs_ctup = (HeapTuple)
heapgettup(sdesc->rs_rd,
@ -852,13 +922,16 @@ heap_getnext(HeapScanDesc scandesc,
sdesc->rs_ptup = NULL;
sdesc->rs_pbuf = UnknownBuffer;
} else {
}
else
{
/* ----------------
* handle forward scan
* ----------------
*/
if (sdesc->rs_ctup == sdesc->rs_ntup &&
BufferIsInvalid(sdesc->rs_nbuf)) {
BufferIsInvalid(sdesc->rs_nbuf))
{
if (BufferIsValid(sdesc->rs_pbuf))
ReleaseBuffer(sdesc->rs_pbuf);
HEAPDEBUG_3; /* heap_getnext returns NULL at end */
@ -866,11 +939,11 @@ heap_getnext(HeapScanDesc scandesc,
}
/*
* Copy the "current" tuple/buffer
* to "previous". Pin/unpin the buffers
* accordingly
* Copy the "current" tuple/buffer to "previous". Pin/unpin the
* buffers accordingly
*/
if (sdesc->rs_pbuf != sdesc->rs_cbuf) {
if (sdesc->rs_pbuf != sdesc->rs_cbuf)
{
if (BufferIsValid(sdesc->rs_pbuf))
ReleaseBuffer(sdesc->rs_pbuf);
if (BufferIsValid(sdesc->rs_cbuf))
@ -879,8 +952,10 @@ heap_getnext(HeapScanDesc scandesc,
sdesc->rs_ptup = sdesc->rs_ctup;
sdesc->rs_pbuf = sdesc->rs_cbuf;
if (sdesc->rs_ntup != NULL) {
if (sdesc->rs_cbuf != sdesc->rs_nbuf) {
if (sdesc->rs_ntup != NULL)
{
if (sdesc->rs_cbuf != sdesc->rs_nbuf)
{
if (BufferIsValid(sdesc->rs_cbuf))
ReleaseBuffer(sdesc->rs_cbuf);
if (BufferIsValid(sdesc->rs_nbuf))
@ -889,18 +964,22 @@ heap_getnext(HeapScanDesc scandesc,
sdesc->rs_ctup = sdesc->rs_ntup;
sdesc->rs_cbuf = sdesc->rs_nbuf;
HEAPDEBUG_5; /* heap_getnext next tuple was cached */
} else { /* NONTUP */
}
else
{ /* NONTUP */
ItemPointer iptr;
iptr = (sdesc->rs_ctup != NULL) ?
&sdesc->rs_ctup->t_ctid : (ItemPointer) NULL;
/* Don't release sdesc->rs_cbuf at this point, because
heapgettup doesn't increase PrivateRefCount if it
is already set. On a forward scan, both rs_ctup and rs_ptup
usually point to the same buffer page, so
PrivateRefCount[rs_cbuf] should be 2 (or more, if for instance
ctup is stored in a TupleTableSlot). - 01/09/93 */
/*
* Don't release sdesc->rs_cbuf at this point, because
* heapgettup doesn't increase PrivateRefCount if it is
* already set. On a forward scan, both rs_ctup and rs_ptup
* usually point to the same buffer page, so
* PrivateRefCount[rs_cbuf] should be 2 (or more, if for
* instance ctup is stored in a TupleTableSlot). - 01/09/93
*/
sdesc->rs_ctup = (HeapTuple)
heapgettup(sdesc->rs_rd,
@ -912,7 +991,8 @@ heap_getnext(HeapScanDesc scandesc,
sdesc->rs_key);
}
if (sdesc->rs_ctup == NULL && !BufferIsValid(sdesc->rs_cbuf)) {
if (sdesc->rs_ctup == NULL && !BufferIsValid(sdesc->rs_cbuf))
{
if (BufferIsValid(sdesc->rs_nbuf))
ReleaseBuffer(sdesc->rs_nbuf);
sdesc->rs_ntup = NULL;
@ -970,8 +1050,8 @@ heap_fetch(Relation relation,
/*
* Note: This is collosally expensive - does two system calls per
* indexscan tuple fetch. Not good, and since we should be doing
* page level locking by the scanner anyway, it is commented out.
* indexscan tuple fetch. Not good, and since we should be doing page
* level locking by the scanner anyway, it is commented out.
*/
/* RelationSetLockForTupleRead(relation, tid); */
@ -985,7 +1065,8 @@ heap_fetch(Relation relation,
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
#ifndef NO_BUFFERISVALID
if (!BufferIsValid(buffer)) {
if (!BufferIsValid(buffer))
{
elog(WARN, "heap_fetch: %s relation: ReadBuffer(%lx) failed",
&relation->rd_rel->relname, (long) tid);
}
@ -1027,9 +1108,12 @@ heap_fetch(Relation relation,
* ----------------
*/
if (PointerIsValid(b)) {
if (PointerIsValid(b))
{
*b = buffer;
} else {
}
else
{
tuple = heap_copytuple(tuple);
ReleaseBuffer(buffer);
}
@ -1078,7 +1162,8 @@ heap_insert(Relation relation, HeapTuple tup)
* another).
* ----------------
*/
if (!OidIsValid(tup->t_oid)) {
if (!OidIsValid(tup->t_oid))
{
tup->t_oid = newoid();
LastOidProcessed = tup->t_oid;
}
@ -1093,7 +1178,8 @@ heap_insert(Relation relation, HeapTuple tup)
doinsert(relation, tup);
if ( IsSystemRelationName(RelationGetRelationName(relation)->data)) {
if (IsSystemRelationName(RelationGetRelationName(relation)->data))
{
RelationUnsetLockForWrite(relation);
/* ----------------
@ -1144,7 +1230,8 @@ heap_delete(Relation relation, ItemPointer tid)
b = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
#ifndef NO_BUFFERISVALID
if (!BufferIsValid(b)) { /* XXX L_SH better ??? */
if (!BufferIsValid(b))
{ /* XXX L_SH better ??? */
elog(WARN, "heap_delete: failed ReadBuffer");
}
#endif /* NO_BUFFERISVALID */
@ -1158,7 +1245,8 @@ heap_delete(Relation relation, ItemPointer tid)
*/
tp = (HeapTuple) PageGetItem((Page) dp, lp);
Assert(HeapTupleIsValid(tp));
if (TupleUpdatedByCurXactAndCmd(tp)) {
if (TupleUpdatedByCurXactAndCmd(tp))
{
elog(NOTICE, "Non-functional delete, tuple already deleted");
if (IsSystemRelationName(RelationGetRelationName(relation)->data))
RelationUnsetLockForWrite(relation);
@ -1170,7 +1258,8 @@ heap_delete(Relation relation, ItemPointer tid)
* ----------------
*/
if (!(tp = heap_tuple_satisfies(lp, relation, b, dp,
NowTimeQual, 0, (ScanKey) NULL))) {
NowTimeQual, 0, (ScanKey) NULL)))
{
/* XXX call something else */
ReleaseBuffer(b);
@ -1251,7 +1340,8 @@ heap_replace(Relation relation, ItemPointer otid, HeapTuple tup)
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid));
#ifndef NO_BUFFERISVALID
if (!BufferIsValid(buffer)) {
if (!BufferIsValid(buffer))
{
/* XXX L_SH better ??? */
elog(WARN, "amreplace: failed ReadBuffer");
}
@ -1279,7 +1369,8 @@ heap_replace(Relation relation, ItemPointer otid, HeapTuple tup)
* -----------------
*/
if (TupleUpdatedByCurXactAndCmd(tp)) {
if (TupleUpdatedByCurXactAndCmd(tp))
{
elog(NOTICE, "Non-functional update, only first update is performed");
if (IsSystemRelationName(RelationGetRelationName(relation)->data))
RelationUnsetLockForWrite(relation);
@ -1322,9 +1413,12 @@ heap_replace(Relation relation, ItemPointer otid, HeapTuple tup)
* insert new item
* ----------------
*/
if ((unsigned)DOUBLEALIGN(tup->t_len) <= PageGetFreeSpace((Page) dp)) {
if ((unsigned) DOUBLEALIGN(tup->t_len) <= PageGetFreeSpace((Page) dp))
{
RelationPutHeapTuple(relation, BufferGetBlockNumber(buffer), tup);
} else {
}
else
{
/* ----------------
* new item won't fit on same page as old item, have to look
* for a new place to put it.
@ -1386,7 +1480,8 @@ heap_markpos(HeapScanDesc sdesc)
/* Note: no locking manipulations needed */
if (sdesc->rs_ptup == NULL &&
BufferIsUnknown(sdesc->rs_pbuf)) { /* == NONTUP */
BufferIsUnknown(sdesc->rs_pbuf))
{ /* == NONTUP */
sdesc->rs_ptup = (HeapTuple)
heapgettup(sdesc->rs_rd,
(sdesc->rs_ctup == NULL) ?
@ -1397,8 +1492,10 @@ heap_markpos(HeapScanDesc sdesc)
sdesc->rs_nkeys,
sdesc->rs_key);
} else if (sdesc->rs_ntup == NULL &&
BufferIsUnknown(sdesc->rs_nbuf)) { /* == NONTUP */
}
else if (sdesc->rs_ntup == NULL &&
BufferIsUnknown(sdesc->rs_nbuf))
{ /* == NONTUP */
sdesc->rs_ntup = (HeapTuple)
heapgettup(sdesc->rs_rd,
(sdesc->rs_ctup == NULL) ?
@ -1414,19 +1511,28 @@ heap_markpos(HeapScanDesc sdesc)
* Should not unpin the buffer pages. They may still be in use.
* ----------------
*/
if (sdesc->rs_ptup != NULL) {
if (sdesc->rs_ptup != NULL)
{
sdesc->rs_mptid = sdesc->rs_ptup->t_ctid;
} else {
}
else
{
ItemPointerSetInvalid(&sdesc->rs_mptid);
}
if (sdesc->rs_ctup != NULL) {
if (sdesc->rs_ctup != NULL)
{
sdesc->rs_mctid = sdesc->rs_ctup->t_ctid;
} else {
}
else
{
ItemPointerSetInvalid(&sdesc->rs_mctid);
}
if (sdesc->rs_ntup != NULL) {
if (sdesc->rs_ntup != NULL)
{
sdesc->rs_mntid = sdesc->rs_ntup->t_ctid;
} else {
}
else
{
ItemPointerSetInvalid(&sdesc->rs_mntid);
}
}
@ -1473,9 +1579,12 @@ heap_restrpos(HeapScanDesc sdesc)
sdesc->rs_cbuf = InvalidBuffer;
sdesc->rs_nbuf = InvalidBuffer;
if (!ItemPointerIsValid(&sdesc->rs_mptid)) {
if (!ItemPointerIsValid(&sdesc->rs_mptid))
{
sdesc->rs_ptup = NULL;
} else {
}
else
{
sdesc->rs_ptup = (HeapTuple)
heapgettup(sdesc->rs_rd,
&sdesc->rs_mptid,
@ -1486,9 +1595,12 @@ heap_restrpos(HeapScanDesc sdesc)
(ScanKey) NULL);
}
if (!ItemPointerIsValid(&sdesc->rs_mctid)) {
if (!ItemPointerIsValid(&sdesc->rs_mctid))
{
sdesc->rs_ctup = NULL;
} else {
}
else
{
sdesc->rs_ctup = (HeapTuple)
heapgettup(sdesc->rs_rd,
&sdesc->rs_mctid,
@ -1499,9 +1611,12 @@ heap_restrpos(HeapScanDesc sdesc)
(ScanKey) NULL);
}
if (!ItemPointerIsValid(&sdesc->rs_mntid)) {
if (!ItemPointerIsValid(&sdesc->rs_mntid))
{
sdesc->rs_ntup = NULL;
} else {
}
else
{
sdesc->rs_ntup = (HeapTuple)
heapgettup(sdesc->rs_rd,
&sdesc->rs_mntid,
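
Editor's aside (not part of this commit): heapgettup above picks its starting page and line offset from the scan direction: forward starts at the first page and first offset, reverse at the last page and last offset, and the "no movement" case relies on a caller-supplied TID. A stripped-down sketch of just that triage; the 1-based offsets mirror offset numbers, everything else is invented.

#include <stdio.h>

static void
scan_start(int dir, int pages, int lines, int *page, int *lineoff)
{
	if (dir > 0)				/* forward: first page, first offset */
	{
		*page = 0;
		*lineoff = 1;
	}
	else if (dir < 0)			/* reverse: last page, last offset */
	{
		*page = pages - 1;
		*lineoff = lines;
	}
	else						/* "no movement": caller must supply a TID */
	{
		*page = -1;
		*lineoff = -1;
	}
}

int
main(void)
{
	int			page,
				lineoff;

	scan_start(1, 10, 50, &page, &lineoff);
	printf("forward:  page %d, offset %d\n", page, lineoff);
	scan_start(-1, 10, 50, &page, &lineoff);
	printf("backward: page %d, offset %d\n", page, lineoff);
	return 0;
}
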

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Id: hio.c,v 1.9 1996/11/05 09:53:02 scrappy Exp $
* $Id: hio.c,v 1.10 1997/09/07 04:38:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -58,7 +58,8 @@ RelationPutHeapTuple(Relation relation,
buffer = ReadBuffer(relation, blockIndex);
#ifndef NO_BUFFERISVALID
if (!BufferIsValid(buffer)) {
if (!BufferIsValid(buffer))
{
elog(WARN, "RelationPutHeapTuple: no buffer for %ld in %s",
blockIndex, &relation->rd_rel->relname);
}
@ -119,9 +120,9 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
Assert(HeapTupleIsValid(tuple));
/*
* XXX This does an lseek - VERY expensive - but at the moment it
* is the only way to accurately determine how many blocks are in
* a relation. A good optimization would be to get this to actually
* XXX This does an lseek - VERY expensive - but at the moment it is
* the only way to accurately determine how many blocks are in a
* relation. A good optimization would be to get this to actually
* work properly.
*/
@ -145,8 +146,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
len = (unsigned) DOUBLEALIGN(tuple->t_len); /* be conservative */
/*
* Note that this is true if the above returned a bogus page, which
* it will do for a completely empty relation.
* Note that this is true if the above returned a bogus page, which it
* will do for a completely empty relation.
*/
if (len > PageGetFreeSpace(pageHeader))
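
Editor's aside (not part of this commit): the routine above tries the last page of the relation and falls back to extending it when the conservatively aligned tuple does not fit. Below is a toy fit-or-extend loop with in-memory "pages", ignoring buffers, locking and alignment; all sizes and names are made up.

#include <stdio.h>
#include <string.h>

#define PAGESIZE 64

typedef struct
{
	char		data[PAGESIZE];
	int			used;
} Page;

/* Append len bytes to p if they fit; return 0 when the caller must extend. */
static int
page_add(Page *p, const char *rec, int len)
{
	if (PAGESIZE - p->used < len)
		return 0;
	memcpy(p->data + p->used, rec, len);
	p->used += len;
	return 1;
}

int
main(void)
{
	Page		pages[8];
	int			npages = 1;
	int			i;

	memset(pages, 0, sizeof(pages));
	for (i = 0; i < 10; i++)
	{
		if (!page_add(&pages[npages - 1], "0123456789012345678901234", 25))
		{
			npages++;			/* "extend the relation" with a fresh page */
			page_add(&pages[npages - 1], "0123456789012345678901234", 25);
		}
	}
	printf("records spread over %d pages\n", npages);	/* prints 5 */
	return 0;
}
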

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/stats.c,v 1.11 1997/08/19 21:29:21 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/stats.c,v 1.12 1997/09/07 04:38:13 momjian Exp $
*
* NOTES
* initam should be moved someplace else.
@ -174,6 +174,7 @@ ResetHeapAccessStatistics()
time(&stats->local_reset_timestamp);
time(&stats->last_request_timestamp);
}
#endif
#ifdef NOT_USED
@ -181,7 +182,8 @@ ResetHeapAccessStatistics()
* GetHeapAccessStatistics
* ----------------
*/
HeapAccessStatistics GetHeapAccessStatistics()
HeapAccessStatistics
GetHeapAccessStatistics()
{
HeapAccessStatistics stats;
@ -211,6 +213,7 @@ HeapAccessStatistics GetHeapAccessStatistics()
return stats;
}
#endif
#ifdef NOT_USED
@ -309,6 +312,7 @@ PrintHeapAccessStatistics(HeapAccessStatistics stats)
printf("\n");
}
#endif
#ifdef NOT_USED
@ -323,6 +327,7 @@ PrintAndFreeHeapAccessStatistics(HeapAccessStatistics stats)
if (stats != NULL)
pfree(stats);
}
#endif
/* ----------------------------------------------------------------

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.7 1997/08/19 21:29:26 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.8 1997/09/07 04:38:17 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@ -116,9 +116,12 @@ RelationGetIndexScan(Relation relation,
ItemPointerSetInvalid(&scan->currentMarkData);
ItemPointerSetInvalid(&scan->nextMarkData);
if (numberOfKeys > 0) {
if (numberOfKeys > 0)
{
scan->keyData = (ScanKey) palloc(sizeof(ScanKeyData) * numberOfKeys);
} else {
}
else
{
scan->keyData = NULL;
}
@ -167,6 +170,7 @@ IndexScanRestart(IndexScanDesc scan,
key,
scan->numberOfKeys * sizeof(ScanKeyData));
}
#endif
#ifdef NOT_USED
@ -191,6 +195,7 @@ IndexScanEnd(IndexScanDesc scan)
pfree(scan);
}
#endif
/* ----------------
@ -213,23 +218,32 @@ IndexScanMarkPosition(IndexScanDesc scan)
{
RetrieveIndexResult result;
if (scan->flags & ScanUncheckedPrevious) {
if (scan->flags & ScanUncheckedPrevious)
{
result =
index_getnext(scan, BackwardScanDirection);
if (result != NULL) {
if (result != NULL)
{
scan->previousItemData = result->index_iptr;
} else {
}
else
{
ItemPointerSetInvalid(&scan->previousItemData);
}
} else if (scan->flags & ScanUncheckedNext) {
}
else if (scan->flags & ScanUncheckedNext)
{
result = (RetrieveIndexResult)
index_getnext(scan, ForwardScanDirection);
if (result != NULL) {
if (result != NULL)
{
scan->nextItemData = result->index_iptr;
} else {
}
else
{
ItemPointerSetInvalid(&scan->nextItemData);
}
}

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.13 1997/08/26 23:31:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.14 1997/09/07 04:38:26 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relationId
@ -286,6 +286,7 @@ index_markpos(IndexScanDesc scan)
fmgr(procedure, scan);
}
#endif
#ifdef NOT_USED
@ -303,6 +304,7 @@ index_restrpos(IndexScanDesc scan)
fmgr(procedure, scan);
}
#endif
/* ----------------
@ -376,11 +378,13 @@ GetIndexValue(HeapTuple tuple,
Datum returnVal;
bool isNull;
if (PointerIsValid(fInfo) && FIgetProcOid(fInfo) != InvalidOid) {
if (PointerIsValid(fInfo) && FIgetProcOid(fInfo) != InvalidOid)
{
int i;
Datum *attData = (Datum *) palloc(FIgetnArgs(fInfo) * sizeof(Datum));
for (i = 0; i < FIgetnArgs(fInfo); i++) {
for (i = 0; i < FIgetnArgs(fInfo); i++)
{
attData[i] = (Datum) heap_getattr(tuple,
buffer,
attrNums[i],
@ -393,7 +397,9 @@ GetIndexValue(HeapTuple tuple,
&isNull);
pfree(attData);
*attNull = FALSE;
}else {
}
else
{
returnVal = (Datum) heap_getattr(tuple, buffer, attrNums[attOff],
hTupDesc, attNull);
}
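
Editor's aside (not part of this commit): GetIndexValue above either applies the functional-index procedure to the collected column values or fetches the indexed column directly. A small sketch of that two-way branch; the Row type and sum_of_cols function are invented for illustration.

#include <stdio.h>

typedef struct
{
	int			first;
	int			second;
} Row;

static int
sum_of_cols(const Row *row)		/* hypothetical index function */
{
	return row->first + row->second;
}

static int
get_index_value(const Row *row, int (*func) (const Row *), int attno)
{
	if (func != NULL)
		return func(row);		/* functional index: apply the procedure */
	return attno == 1 ? row->first : row->second;	/* plain column */
}

int
main(void)
{
	Row			r = {3, 4};

	printf("plain column 1: %d\n", get_index_value(&r, NULL, 1));
	printf("functional:     %d\n", get_index_value(&r, sum_of_cols, 0));
	return 0;
}
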

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.9 1997/08/22 16:48:14 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.10 1997/09/07 04:38:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -28,14 +28,19 @@
#ifndef NO_ASSERT_CHECKING
static bool StrategyEvaluationIsValid(StrategyEvaluation evaluation);
static bool StrategyExpressionIsValid(StrategyExpression expression,
static bool
StrategyExpressionIsValid(StrategyExpression expression,
StrategyNumber maxStrategy);
static ScanKey StrategyMapGetScanKeyEntry(StrategyMap map,
static ScanKey
StrategyMapGetScanKeyEntry(StrategyMap map,
StrategyNumber strategyNumber);
static bool StrategyOperatorIsValid(StrategyOperator operator,
static bool
StrategyOperatorIsValid(StrategyOperator operator,
StrategyNumber maxStrategy);
static bool StrategyTermIsValid(StrategyTerm term,
static bool
StrategyTermIsValid(StrategyTerm term,
StrategyNumber maxStrategy);
#endif
@ -140,9 +145,11 @@ StrategyTermIsValid(StrategyTerm term,
if (!PointerIsValid(term) || term->degree == 0)
return false;
for (index = 0; index < term->degree; index += 1) {
for (index = 0; index < term->degree; index += 1)
{
if (!StrategyOperatorIsValid(&term->operatorData[index],
maxStrategy)) {
maxStrategy))
{
return false;
}
@ -188,20 +195,24 @@ StrategyEvaluationIsValid(StrategyEvaluation evaluation)
!StrategyNumberIsValid(evaluation->maxStrategy) ||
!StrategyTransformMapIsValid(evaluation->negateTransform) ||
!StrategyTransformMapIsValid(evaluation->commuteTransform) ||
! StrategyTransformMapIsValid(evaluation->negateCommuteTransform)) {
!StrategyTransformMapIsValid(evaluation->negateCommuteTransform))
{
return false;
}
for (index = 0; index < evaluation->maxStrategy; index += 1) {
for (index = 0; index < evaluation->maxStrategy; index += 1)
{
if (!StrategyExpressionIsValid(evaluation->expression[index],
evaluation->maxStrategy)) {
evaluation->maxStrategy))
{
return false;
}
}
return true;
}
#endif
/* ----------------
@ -221,13 +232,15 @@ StrategyTermEvaluate(StrategyTerm term,
ScanKey entry;
for (index = 0, operator = &term->operatorData[0];
index < term->degree; index += 1, operator += 1) {
index < term->degree; index += 1, operator += 1)
{
entry = &map->entry[operator->strategy - 1];
Assert(RegProcedureIsValid(entry->sk_procedure));
switch (operator->flags ^ entry->sk_flags) {
switch (operator->flags ^ entry->sk_flags)
{
case 0x0:
tmpres = (long) FMGR_PTR2(entry->sk_func, entry->sk_procedure,
left, right);
@ -294,8 +307,10 @@ RelationGetStrategy(Relation relation,
attributeNumber);
/* get a strategy number for the procedure ignoring flags for now */
for (index = 0; index < evaluation->maxStrategy; index += 1) {
if (strategyMap->entry[index].sk_procedure == procedure) {
for (index = 0; index < evaluation->maxStrategy; index += 1)
{
if (strategyMap->entry[index].sk_procedure == procedure)
{
break;
}
}
@ -308,7 +323,8 @@ RelationGetStrategy(Relation relation,
Assert(!(entry->sk_flags & ~(SK_NEGATE | SK_COMMUTE)));
switch (entry->sk_flags & (SK_NEGATE | SK_COMMUTE)) {
switch (entry->sk_flags & (SK_NEGATE | SK_COMMUTE))
{
case 0x0:
return strategy;
@ -329,8 +345,10 @@ RelationGetStrategy(Relation relation,
}
if (! StrategyNumberIsInBounds(strategy, evaluation->maxStrategy)) {
if (! StrategyNumberIsValid(strategy)) {
if (!StrategyNumberIsInBounds(strategy, evaluation->maxStrategy))
{
if (!StrategyNumberIsValid(strategy))
{
elog(WARN, "RelationGetStrategy: corrupted evaluation");
}
}
@ -375,7 +393,8 @@ RelationInvokeStrategy(Relation relation,
entry = StrategyMapGetScanKeyEntry(strategyMap, strategy);
if (RegProcedureIsValid(entry->sk_procedure)) {
if (RegProcedureIsValid(entry->sk_procedure))
{
termData.operatorData[0].strategy = strategy;
termData.operatorData[0].flags = 0x0;
@ -385,11 +404,13 @@ RelationInvokeStrategy(Relation relation,
newStrategy = evaluation->negateTransform->strategy[strategy - 1];
if (newStrategy != strategy && StrategyNumberIsValid(newStrategy)) {
if (newStrategy != strategy && StrategyNumberIsValid(newStrategy))
{
entry = StrategyMapGetScanKeyEntry(strategyMap, newStrategy);
if (RegProcedureIsValid(entry->sk_procedure)) {
if (RegProcedureIsValid(entry->sk_procedure))
{
termData.operatorData[0].strategy = newStrategy;
termData.operatorData[0].flags = SK_NEGATE;
@ -399,11 +420,13 @@ RelationInvokeStrategy(Relation relation,
}
newStrategy = evaluation->commuteTransform->strategy[strategy - 1];
if (newStrategy != strategy && StrategyNumberIsValid(newStrategy)) {
if (newStrategy != strategy && StrategyNumberIsValid(newStrategy))
{
entry = StrategyMapGetScanKeyEntry(strategyMap, newStrategy);
if (RegProcedureIsValid(entry->sk_procedure)) {
if (RegProcedureIsValid(entry->sk_procedure))
{
termData.operatorData[0].strategy = newStrategy;
termData.operatorData[0].flags = SK_COMMUTE;
@ -413,11 +436,13 @@ RelationInvokeStrategy(Relation relation,
}
newStrategy = evaluation->negateCommuteTransform->strategy[strategy - 1];
if (newStrategy != strategy && StrategyNumberIsValid(newStrategy)) {
if (newStrategy != strategy && StrategyNumberIsValid(newStrategy))
{
entry = StrategyMapGetScanKeyEntry(strategyMap, newStrategy);
if (RegProcedureIsValid(entry->sk_procedure)) {
if (RegProcedureIsValid(entry->sk_procedure))
{
termData.operatorData[0].strategy = newStrategy;
termData.operatorData[0].flags = SK_NEGATE | SK_COMMUTE;
@ -426,23 +451,28 @@ RelationInvokeStrategy(Relation relation,
}
}
if (PointerIsValid(evaluation->expression[strategy - 1])) {
if (PointerIsValid(evaluation->expression[strategy - 1]))
{
StrategyTerm *termP;
termP = &evaluation->expression[strategy - 1]->term[0];
while (PointerIsValid(*termP)) {
while (PointerIsValid(*termP))
{
Index index;
for (index = 0; index < (*termP)->degree; index += 1) {
for (index = 0; index < (*termP)->degree; index += 1)
{
entry = StrategyMapGetScanKeyEntry(strategyMap,
(*termP)->operatorData[index].strategy);
if (! RegProcedureIsValid(entry->sk_procedure)) {
if (!RegProcedureIsValid(entry->sk_procedure))
{
break;
}
}
if (index == (*termP)->degree) {
if (index == (*termP)->degree)
{
return
StrategyTermEvaluate(*termP, strategyMap, left, right);
}
@ -482,7 +512,8 @@ OperatorRelationFillScanKeyEntry(Relation operatorRelation,
1, &scanKeyData);
tuple = heap_getnext(scan, false, (Buffer *) NULL);
if (! HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "OperatorObjectIdFillScanKeyEntry: unknown operator %lu",
(uint32) operatorObjectId);
}
@ -492,7 +523,8 @@ OperatorRelationFillScanKeyEntry(Relation operatorRelation,
((OperatorTupleForm) GETSTRUCT(tuple))->oprcode;
fmgr_info(entry->sk_procedure, &entry->sk_func, &entry->sk_nargs);
if (! RegProcedureIsValid(entry->sk_procedure)) {
if (!RegProcedureIsValid(entry->sk_procedure))
{
elog(WARN,
"OperatorObjectIdFillScanKeyEntry: no procedure for operator %lu",
(uint32) operatorObjectId);
@ -538,8 +570,8 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
elog(WARN, "IndexSupportInitialize: corrupted catalogs");
/*
* XXX note that the following assumes the INDEX tuple is well formed and
* that the key[] and class[] are 0 terminated.
* XXX note that the following assumes the INDEX tuple is well formed
* and that the key[] and class[] are 0 terminated.
*/
for (attributeIndex = 0; attributeIndex < maxAttributeNumber; attributeIndex++)
{
@ -547,8 +579,10 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
iform = (IndexTupleForm) GETSTRUCT(tuple);
if (!OidIsValid(iform->indkey[attributeIndex])) {
if (attributeIndex == 0) {
if (!OidIsValid(iform->indkey[attributeIndex]))
{
if (attributeIndex == 0)
{
elog(WARN, "IndexSupportInitialize: no pg_index tuple");
}
break;
@ -562,7 +596,8 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
heap_close(relation);
/* if support routines exist for this access method, load them */
if (maxSupportNumber > 0) {
if (maxSupportNumber > 0)
{
ScanKeyEntryInitialize(&entry[0], 0, Anum_pg_amproc_amid,
ObjectIdEqualRegProcedure,
@ -576,7 +611,8 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
for (attributeNumber = maxAttributeNumber; attributeNumber > 0;
attributeNumber--) {
attributeNumber--)
{
int16 support;
Form_pg_amproc form;
@ -584,7 +620,8 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
loc = &indexSupport[((attributeNumber - 1) * maxSupportNumber)];
for (support = maxSupportNumber; --support >= 0; ) {
for (support = maxSupportNumber; --support >= 0;)
{
loc[support] = InvalidOid;
}
@ -594,7 +631,8 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
scan = heap_beginscan(relation, false, NowTimeQual, 2, entry);
while (tuple = heap_getnext(scan, 0, (Buffer *) NULL),
HeapTupleIsValid(tuple)) {
HeapTupleIsValid(tuple))
{
form = (Form_pg_amproc) GETSTRUCT(tuple);
loc[(form->amprocnum - 1)] = form->amproc;
@ -618,7 +656,8 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
operatorRelation = heap_openr(OperatorRelationName);
for (attributeNumber = maxAttributeNumber; attributeNumber > 0;
attributeNumber--) {
attributeNumber--)
{
StrategyNumber strategy;
@ -635,7 +674,8 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
scan = heap_beginscan(relation, false, NowTimeQual, 2, entry);
while (tuple = heap_getnext(scan, 0, (Buffer *) NULL),
HeapTupleIsValid(tuple)) {
HeapTupleIsValid(tuple))
{
Form_pg_amop form;
form = (Form_pg_amop) GETSTRUCT(tuple);
@ -667,7 +707,8 @@ IndexStrategyDisplay(IndexStrategy indexStrategy,
StrategyNumber strategyNumber;
for (attributeNumber = 1; attributeNumber <= numberOfAttributes;
attributeNumber += 1) {
attributeNumber += 1)
{
strategyMap = IndexStrategyGetStrategyMap(indexStrategy,
numberOfStrategies,
@ -675,7 +716,8 @@ IndexStrategyDisplay(IndexStrategy indexStrategy,
for (strategyNumber = 1;
strategyNumber <= AMStrategies(numberOfStrategies);
strategyNumber += 1) {
strategyNumber += 1)
{
printf(":att %d\t:str %d\t:opr 0x%x(%d)\n",
attributeNumber, strategyNumber,
@ -684,6 +726,5 @@ IndexStrategyDisplay(IndexStrategy indexStrategy,
}
}
}
#endif /* defined(ISTRATDEBUG) */


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.10 1997/06/11 05:20:05 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.11 1997/09/07 04:38:39 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -135,7 +135,8 @@ int32
bttextcmp(struct varlena * a, struct varlena * b)
{
int res;
unsigned char *ap, *bp;
unsigned char *ap,
*bp;
#ifdef USE_LOCALE
int la = VARSIZE(a) - VARHDRSZ;
@ -167,14 +168,16 @@ bttextcmp(struct varlena *a, struct varlena *b)
bp = (unsigned char *) VARDATA(b);
/*
* If the two strings differ in the first len bytes, or if they're
* the same in the first len bytes and they're both len bytes long,
* we're done.
* If the two strings differ in the first len bytes, or if they're the
* same in the first len bytes and they're both len bytes long, we're
* done.
*/
res = 0;
if (len > 0) {
do {
if (len > 0)
{
do
{
res = (int) (*ap++ - *bp++);
len--;
} while (res == 0 && len != 0);
@ -186,8 +189,8 @@ bttextcmp(struct varlena *a, struct varlena *b)
return (res);
/*
* The two strings are the same in the first len bytes, and they
* are of different lengths.
* The two strings are the same in the first len bytes, and they are
* of different lengths.
*/
if (VARSIZE(a) < VARSIZE(b))
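The rule spelled out in the two comments above can be restated on plain byte buffers. This is only a sketch of the comparison rule, not the varlena-based bttextcmp: USE_LOCALE is ignored and the name is invented.

#include <string.h>

/*
 * Compare the first MIN(la, lb) bytes; if those match, equal lengths mean
 * equal strings, otherwise the shorter string sorts first.
 */
int
bytes_cmp(const unsigned char *a, int la, const unsigned char *b, int lb)
{
    int         len = (la < lb) ? la : lb;
    int         res = (len > 0) ? memcmp(a, b, len) : 0;

    if (res != 0)
        return res;             /* differ within the common prefix */
    if (la == lb)
        return 0;               /* same bytes, same length */
    return (la < lb) ? -1 : 1;  /* common prefix only: shorter sorts first */
}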


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.17 1997/08/20 14:53:15 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.18 1997/09/07 04:38:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -69,11 +69,11 @@ _bt_doinsert(Relation rel, BTItem btitem, bool index_is_unique, Relation heapRel
buf = _bt_getbuf(rel, blkno, BT_WRITE);
/*
* If the page was split between the time that we surrendered our
* read lock and acquired our write lock, then this page may no
* longer be the right place for the key we want to insert. In this
* case, we need to move right in the tree. See Lehman and Yao for
* an excruciatingly precise description.
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be
* the right place for the key we want to insert. In this case, we
* need to move right in the tree. See Lehman and Yao for an
* excruciatingly precise description.
*/
buf = _bt_moveright(rel, buf, natts, itup_scankey, BT_WRITE);
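The Lehman and Yao step referred to above amounts to following right-links while the search key is still greater than the page's high key. A minimal in-memory sketch, with ToyPage and integer keys standing in for real pages and scan keys:

/*
 * A concurrent split can leave us on a page whose key range no longer
 * covers scankey, so chase right-links until it fits.  Illustrative only.
 */
typedef struct ToyPage
{
    int         hikey;          /* upper bound of keys on this page */
    int         is_rightmost;   /* nonzero if there is no right sibling */
    struct ToyPage *right;      /* right sibling link */
} ToyPage;

ToyPage *
toy_moveright(ToyPage *page, int scankey)
{
    while (!page->is_rightmost && scankey > page->hikey)
        page = page->right;
    return page;
}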
@ -82,7 +82,8 @@ _bt_doinsert(Relation rel, BTItem btitem, bool index_is_unique, Relation heapRel
/* already in the node */
if (index_is_unique)
{
OffsetNumber offset, maxoff;
OffsetNumber offset,
maxoff;
Page page;
page = BufferGetPage(buf);
@ -105,14 +106,15 @@ _bt_doinsert(Relation rel, BTItem btitem, bool index_is_unique, Relation heapRel
itupdesc = RelationGetTupleDescriptor(rel);
nbuf = InvalidBuffer;
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) -
* this's how we handling NULLs - and so we must not use
* _bt_compare in real comparison, but only for
* ordering/finding items on pages. - vadim 03/24/97
while ( !_bt_compare (rel, itupdesc, page,
natts, itup_scankey, offset) )
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
* how we handling NULLs - and so we must not use _bt_compare
* in real comparison, but only for ordering/finding items on
* pages. - vadim 03/24/97
*
* while ( !_bt_compare (rel, itupdesc, page, natts,
* itup_scankey, offset) )
*/
while (_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
{ /* they're equal */
@ -135,9 +137,10 @@ _bt_doinsert(Relation rel, BTItem btitem, bool index_is_unique, Relation heapRel
if (!_bt_isequal(itupdesc, page, P_HIKEY,
natts, itup_scankey))
break;
/*
* min key of the right page is the same,
* ooh - so many dead duplicates...
* min key of the right page is the same, ooh - so
* many dead duplicates...
*/
blkno = opaque->btpo_next;
if (nbuf != InvalidBuffer)
@ -235,15 +238,15 @@ _bt_insertonpg(Relation rel,
itemsz = IndexTupleDSize(btitem->bti_itup)
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
itemsz = DOUBLEALIGN(itemsz); /* be safe, PageAddItem will do this
but we need to be consistent */
itemsz = DOUBLEALIGN(itemsz); /* be safe, PageAddItem will do
* this but we need to be
* consistent */
/*
* If we have to insert item on the leftmost page which is the first
* page in the chain of duplicates then:
* 1. if scankey == hikey (i.e. - new duplicate item) then
* insert it here;
* 2. if scankey < hikey then:
* 2.a if there is duplicate key(s) here - we force splitting;
* page in the chain of duplicates then: 1. if scankey == hikey (i.e.
* - new duplicate item) then insert it here; 2. if scankey < hikey
* then: 2.a if there is duplicate key(s) here - we force splitting;
* 2.b else - we may "eat" this page from duplicates chain.
*/
if (lpageop->btpo_flags & BTP_CHAIN)
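The three-way rule in the comment above (insert here, force a split, or "eat" the page) reduces to a small decision table. The enum and boolean arguments below are invented for illustration; the real code tests BTP_CHAIN flags and compares scan keys against the high key.

typedef enum
{
    CHAIN_INSERT_HERE,          /* rule 1: scankey == hikey, new duplicate */
    CHAIN_FORCE_SPLIT,          /* rule 2.a: scankey < hikey, duplicates here */
    CHAIN_EAT_PAGE              /* rule 2.b: scankey < hikey, no duplicates */
} ChainAction;

ChainAction
chain_insert_action(int scankey_equals_hikey, int page_has_duplicates)
{
    if (scankey_equals_hikey)
        return CHAIN_INSERT_HERE;
    if (page_has_duplicates)
        return CHAIN_FORCE_SPLIT;
    return CHAIN_EAT_PAGE;
}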
@ -274,7 +277,8 @@ _bt_insertonpg(Relation rel,
firstright = P_FIRSTKEY;
do_split = true;
}
else /* "eat" page */
else
/* "eat" page */
{
Buffer pbuf;
Page ppage;
@ -314,7 +318,8 @@ _bt_insertonpg(Relation rel,
OffsetNumber offnum = (P_RIGHTMOST(lpageop)) ? P_HIKEY : P_FIRSTKEY;
OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
ItemId itid;
BTItem previtem, chkitem;
BTItem previtem,
chkitem;
Size maxsize;
Size currsize;
@ -361,8 +366,8 @@ _bt_insertonpg(Relation rel,
bool left_chained = (lpageop->btpo_flags & BTP_CHAIN) ? true : false;
/*
* If we have to split leaf page in the chain of duplicates by
* new duplicate then we try to look at our right sibling first.
* If we have to split leaf page in the chain of duplicates by new
* duplicate then we try to look at our right sibling first.
*/
if ((lpageop->btpo_flags & BTP_CHAIN) &&
(lpageop->btpo_flags & BTP_LEAF) && keys_equal)
@ -373,10 +378,8 @@ _bt_insertonpg(Relation rel,
rpage = BufferGetPage(rbuf);
rpageop = (BTPageOpaque) PageGetSpecialPointer(rpage);
if (!P_RIGHTMOST(rpageop)) /* non-rightmost page */
{ /*
* If we have the same hikey here then it's
* yet another page in chain.
*/
{ /* If we have the same hikey here then
* it's yet another page in chain. */
if (_bt_skeycmp(rel, keysz, scankey, rpage,
PageGetItemId(rpage, P_HIKEY),
BTEqualStrategyNumber))
@ -389,6 +392,7 @@ _bt_insertonpg(Relation rel,
BTGreaterStrategyNumber))
elog(FATAL, "btree: hikey is out of order");
else if (rpageop->btpo_flags & BTP_CHAIN)
/*
* If hikey > scankey then it's last page in chain and
* BTP_CHAIN must be OFF
@ -399,7 +403,8 @@ _bt_insertonpg(Relation rel,
if (PageGetFreeSpace(rpage) > itemsz)
use_left = false;
}
else /* rightmost page */
else
/* rightmost page */
{
Assert(!(rpageop->btpo_flags & BTP_CHAIN));
/* if there is room here then we use this page. */
@ -414,13 +419,13 @@ _bt_insertonpg(Relation rel,
}
_bt_relbuf(rel, rbuf, BT_WRITE);
}
/*
* If after splitting un-chained page we'll got chain of pages
* with duplicates then we want to know
* 1. on which of two pages new btitem will go (current
* _bt_findsplitloc is quite bad);
* 2. what parent (if there's one) thinking about it
* (remember about deletions)
* with duplicates then we want to know 1. on which of two pages
* new btitem will go (current _bt_findsplitloc is quite bad); 2.
* what parent (if there's one) thinking about it (remember about
* deletions)
*/
else if (!(lpageop->btpo_flags & BTP_CHAIN))
{
@ -442,18 +447,20 @@ _bt_insertonpg(Relation rel,
if (_bt_skeycmp(rel, keysz, scankey, page,
PageGetItemId(page, firstright),
BTLessStrategyNumber))
/*
* force moving current items to the new page:
* new item will go on the current page.
* force moving current items to the new page: new
* item will go on the current page.
*/
firstright = start;
else
/*
* new btitem >= firstright, start item == firstright -
* new chain of duplicates: if this non-leftmost leaf
* page and parent item < start item then force moving
* all items to the new page - current page will be
* "empty" after it.
* new btitem >= firstright, start item == firstright
* - new chain of duplicates: if this non-leftmost
* leaf page and parent item < start item then force
* moving all items to the new page - current page
* will be "empty" after it.
*/
{
if (!P_LEFTMOST(lpageop) &&
@ -473,19 +480,23 @@ _bt_insertonpg(Relation rel,
_bt_relbuf(rel, pbuf, BT_WRITE);
}
}
} /* else - no new chain if start item < firstright one */
} /* else - no new chain if start item <
* firstright one */
}
/* split the buffer into left and right halves */
rbuf = _bt_split(rel, buf, firstright);
/* which new page (left half or right half) gets the tuple? */
if (_bt_goesonpg(rel, buf, keysz, scankey, afteritem)) {
if (_bt_goesonpg(rel, buf, keysz, scankey, afteritem))
{
/* left page */
itup_off = _bt_pgaddtup(rel, buf, keysz, scankey,
itemsz, btitem, afteritem);
itup_blkno = BufferGetBlockNumber(buf);
} else {
}
else
{
/* right page */
itup_off = _bt_pgaddtup(rel, rbuf, keysz, scankey,
itemsz, btitem, afteritem);
@ -514,26 +525,28 @@ _bt_insertonpg(Relation rel,
/*
* By here,
*
* + our target page has been split;
* + the original tuple has been inserted;
* + we have write locks on both the old (left half) and new
* (right half) buffers, after the split; and
* + we have the key we want to insert into the parent.
* + our target page has been split; + the original tuple has been
* inserted; + we have write locks on both the old (left half)
* and new (right half) buffers, after the split; and + we have
* the key we want to insert into the parent.
*
* Do the parent insertion. We need to hold onto the locks for
* the child pages until we locate the parent, but we can release
* them before doing the actual insertion (see Lehman and Yao for
* the reasoning).
* Do the parent insertion. We need to hold onto the locks for the
* child pages until we locate the parent, but we can release them
* before doing the actual insertion (see Lehman and Yao for the
* reasoning).
*/
if (stack == (BTStack) NULL) {
if (stack == (BTStack) NULL)
{
/* create a new root node and release the split buffers */
_bt_newroot(rel, buf, rbuf);
_bt_relbuf(rel, buf, BT_WRITE);
_bt_relbuf(rel, rbuf, BT_WRITE);
} else {
}
else
{
ScanKey newskey;
InsertIndexResult newres;
BTItem new_item;
@ -548,10 +561,10 @@ _bt_insertonpg(Relation rel,
rpageop = (BTPageOpaque) PageGetSpecialPointer(rpage);
/*
* By convention, the first entry (1) on every
* non-rightmost page is the high key for that page. In
* order to get the lowest key on the new right page, we
* actually look at its second (2) entry.
* By convention, the first entry (1) on every non-rightmost
* page is the high key for that page. In order to get the
* lowest key on the new right page, we actually look at its
* second (2) entry.
*/
if (!P_RIGHTMOST(rpageop))
@ -576,9 +589,9 @@ _bt_insertonpg(Relation rel,
/*
* Find the parent buffer and get the parent page.
*
* Oops - if we were moved right then we need to
* change stack item! We want to find parent pointing to
* where we are, right ? - vadim 05/27/97
* Oops - if we were moved right then we need to change stack
* item! We want to find parent pointing to where we are,
* right ? - vadim 05/27/97
*/
ItemPointerSet(&(stack->bts_btitem->bti_itup.t_tid),
bknum, P_HIKEY);
@ -591,24 +604,22 @@ _bt_insertonpg(Relation rel,
elog(FATAL, "nbtree: unexpected chained parent of unchained page");
/*
* If the key of new_item is < than the key of the item
* in the parent page pointing to the left page
* (stack->bts_btitem), we have to update the latter key;
* otherwise the keys on the parent page wouldn't be
* monotonically increasing after we inserted the new
* pointer to the right page (new_item). This only
* happens if our left page is the leftmost page and a
* If the key of new_item is < than the key of the item in the
* parent page pointing to the left page (stack->bts_btitem),
* we have to update the latter key; otherwise the keys on the
* parent page wouldn't be monotonically increasing after we
* inserted the new pointer to the right page (new_item). This
* only happens if our left page is the leftmost page and a
* new minimum key had been inserted before, which is not
* reflected in the parent page but didn't matter so
* far. If there are duplicate keys and this new minimum
* key spills over to our new right page, we get an
* inconsistency if we don't update the left key in the
* parent page.
* reflected in the parent page but didn't matter so far. If
* there are duplicate keys and this new minimum key spills
* over to our new right page, we get an inconsistency if we
* don't update the left key in the parent page.
*
* Also, new duplicates handling code require us to update
* parent item if some smaller items left on the left page
* (which is possible in splitting leftmost page) and
* current parent item == new_item. - vadim 05/27/97
* Also, new duplicates handling code require us to update parent
* item if some smaller items left on the left page (which is
* possible in splitting leftmost page) and current parent
* item == new_item. - vadim 05/27/97
*/
if (_bt_itemcmp(rel, keysz, stack->bts_btitem, new_item,
BTGreaterStrategyNumber) ||
@ -619,9 +630,10 @@ _bt_insertonpg(Relation rel,
new_item, BTLessStrategyNumber)))
{
do_update = true;
/*
* figure out which key is leftmost (if the parent page
* is rightmost, too, it must be the root)
* figure out which key is leftmost (if the parent page is
* rightmost, too, it must be the root)
*/
if (P_RIGHTMOST(ppageop))
upditem_offset = P_HIKEY;
@ -637,6 +649,7 @@ _bt_insertonpg(Relation rel,
{
if (shifted)
elog(FATAL, "btree: attempt to update parent for shifted page");
/*
* Try to update in place. If out parent page is chained
* then we must forse insertion.
@ -656,12 +669,12 @@ _bt_insertonpg(Relation rel,
PageIndexTupleDelete(ppage, upditem_offset);
/*
* don't write anything out yet--we still have the write
* lock, and now we call another _bt_insertonpg to
* insert the correct key.
* First, make a new item, using the tuple data from
* lowLeftItem. Point it to the left child.
* Update it on the stack at the same time.
* don't write anything out yet--we still have the
* write lock, and now we call another _bt_insertonpg
* to insert the correct key. First, make a new item,
* using the tuple data from lowLeftItem. Point it to
* the left child. Update it on the stack at the same
* time.
*/
pfree(stack->bts_btitem);
stack->bts_btitem = _bt_formitem(&(lowLeftItem->bti_itup));
@ -677,10 +690,10 @@ _bt_insertonpg(Relation rel,
_bt_relbuf(rel, rbuf, BT_WRITE);
/*
* A regular _bt_binsrch should find the right place to
* put the new entry, since it should be lower than any
* other key on the page.
* Therefore set afteritem to NULL.
* A regular _bt_binsrch should find the right place
* to put the new entry, since it should be lower than
* any other key on the page. Therefore set afteritem
* to NULL.
*/
newskey = _bt_mkscankey(rel, &(stack->bts_btitem->bti_itup));
newres = _bt_insertonpg(rel, pbuf, stack->bts_parent,
@ -753,7 +766,9 @@ _bt_insertonpg(Relation rel,
pfree(newskey);
pfree(new_item);
}
} else {
}
else
{
itup_off = _bt_pgaddtup(rel, buf, keysz, scankey,
itemsz, btitem, afteritem);
itup_blkno = BufferGetBlockNumber(buf);
@ -780,15 +795,19 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright)
{
Buffer rbuf;
Page origpage;
Page leftpage, rightpage;
BTPageOpaque ropaque, lopaque, oopaque;
Page leftpage,
rightpage;
BTPageOpaque ropaque,
lopaque,
oopaque;
Buffer sbuf;
Page spage;
BTPageOpaque sopaque;
Size itemsz;
ItemId itemid;
BTItem item;
OffsetNumber leftoff, rightoff;
OffsetNumber leftoff,
rightoff;
OffsetNumber start;
OffsetNumber maxoff;
OffsetNumber i;
@ -816,20 +835,21 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright)
ropaque->btpo_next = oopaque->btpo_next;
/*
* If the page we're splitting is not the rightmost page at its
* level in the tree, then the first (0) entry on the page is the
* high key for the page. We need to copy that to the right
* half. Otherwise (meaning the rightmost page case), we should
* treat the line pointers beginning at zero as user data.
* If the page we're splitting is not the rightmost page at its level
* in the tree, then the first (0) entry on the page is the high key
* for the page. We need to copy that to the right half. Otherwise
* (meaning the rightmost page case), we should treat the line
* pointers beginning at zero as user data.
*
* We leave a blank space at the start of the line table for the
* left page. We'll come back later and fill it in with the high
* key item we get from the right key.
* We leave a blank space at the start of the line table for the left
* page. We'll come back later and fill it in with the high key item
* we get from the right key.
*/
leftoff = P_FIRSTKEY;
ropaque->btpo_next = oopaque->btpo_next;
if (! P_RIGHTMOST(oopaque)) {
if (!P_RIGHTMOST(oopaque))
{
/* splitting a non-rightmost page, start at the first data item */
start = P_FIRSTKEY;
@ -839,7 +859,9 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright)
if (PageAddItem(rightpage, (Item) item, itemsz, P_HIKEY, LP_USED) == InvalidOffsetNumber)
elog(FATAL, "btree: failed to add hikey to the right sibling");
rightoff = P_FIRSTKEY;
} else {
}
else
{
/* splitting a rightmost page, "high key" is the first data item */
start = P_HIKEY;
@ -850,21 +872,26 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright)
if (firstright == InvalidOffsetNumber)
{
Size llimit = PageGetFreeSpace(leftpage) / 2;
firstright = _bt_findsplitloc(rel, origpage, start, maxoff, llimit);
}
for (i = start; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = start; i <= maxoff; i = OffsetNumberNext(i))
{
itemid = PageGetItemId(origpage, i);
itemsz = ItemIdGetLength(itemid);
item = (BTItem) PageGetItem(origpage, itemid);
/* decide which page to put it on */
if (i < firstright) {
if (i < firstright)
{
if (PageAddItem(leftpage, (Item) item, itemsz, leftoff,
LP_USED) == InvalidOffsetNumber)
elog(FATAL, "btree: failed to add item to the left sibling");
leftoff = OffsetNumberNext(leftoff);
} else {
}
else
{
if (PageAddItem(rightpage, (Item) item, itemsz, rightoff,
LP_USED) == InvalidOffsetNumber)
elog(FATAL, "btree: failed to add item to the right sibling");
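The distribution loop above, reduced to arrays of integer keys: items before the chosen split point stay on the left page, the rest move to the right page. Capacities, high keys and item headers are dropped, and the names are made up.

void
toy_distribute(const int *items, int nitems, int firstright,
               int *left, int *nleft, int *right, int *nright)
{
    int         i;

    *nleft = *nright = 0;
    for (i = 0; i < nitems; i++)
    {
        if (i < firstright)
            left[(*nleft)++] = items[i];    /* stays on the original page */
        else
            right[(*nright)++] = items[i];  /* moves to the new right page */
    }
}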
@ -878,9 +905,12 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright)
* page.
*/
if (P_RIGHTMOST(ropaque)) {
if (P_RIGHTMOST(ropaque))
{
itemid = PageGetItemId(rightpage, P_HIKEY);
} else {
}
else
{
itemid = PageGetItemId(rightpage, P_FIRSTKEY);
}
itemsz = ItemIdGetLength(itemid);
@ -923,7 +953,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright)
* before trying to fetch its neighbors.
*/
if (! P_RIGHTMOST(ropaque)) {
if (!P_RIGHTMOST(ropaque))
{
sbuf = _bt_getbuf(rel, ropaque->btpo_next, BT_WRITE);
spage = BufferGetPage(sbuf);
sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
@ -965,8 +996,10 @@ _bt_findsplitloc(Relation rel,
{
OffsetNumber i;
OffsetNumber saferight;
ItemId nxtitemid, safeitemid;
BTItem safeitem, nxtitem;
ItemId nxtitemid,
safeitemid;
BTItem safeitem,
nxtitem;
Size nbytes;
int natts;
@ -989,9 +1022,9 @@ _bt_findsplitloc(Relation rel,
nxtitem = (BTItem) PageGetItem(page, nxtitemid);
/*
* Test against last known safe item:
* if the tuple we're looking at isn't equal to the last safe
* one we saw, then it's our new safe tuple.
* Test against last known safe item: if the tuple we're looking
* at isn't equal to the last safe one we saw, then it's our new
* safe tuple.
*/
if (!_bt_itemcmp(rel, natts,
safeitem, nxtitem, BTEqualStrategyNumber))
@ -1006,8 +1039,8 @@ _bt_findsplitloc(Relation rel,
}
/*
* If the chain of dups starts at the beginning of the page and extends
* past the halfway mark, we can split it in the middle.
* If the chain of dups starts at the beginning of the page and
* extends past the halfway mark, we can split it in the middle.
*/
if (saferight == start)
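A toy version of the split-point search described above, over a sorted array of int keys: prefer a boundary where the key changes, so a duplicate run is not cut, and fall back to the middle when the run covering the first half starts at the beginning. The real code works from free-space accounting, not an index count; toy_findsplitloc is invented and assumes nkeys >= 2.

int
toy_findsplitloc(const int *keys, int nkeys)
{
    int         middle = nkeys / 2;
    int         saferight = 0;  /* best "safe" boundary seen so far */
    int         i;

    for (i = 1; i <= middle; i++)
        if (keys[i] != keys[i - 1])
            saferight = i;      /* splitting here cuts no duplicate run */

    if (saferight == 0)
        return middle;          /* one duplicate chain covers the first half */
    return saferight;
}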
@ -1040,8 +1073,11 @@ static void
_bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
{
Buffer rootbuf;
Page lpage, rpage, rootpage;
BlockNumber lbkno, rbkno;
Page lpage,
rpage,
rootpage;
BlockNumber lbkno,
rbkno;
BlockNumber rootbknum;
BTPageOpaque rootopaque;
ItemId itemid;
@ -1069,8 +1105,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
rpage = BufferGetPage(rbuf);
/*
* step over the high key on the left page while building the
* left page pointer.
* step over the high key on the left page while building the left
* page pointer.
*/
itemid = PageGetItemId(lpage, P_FIRSTKEY);
itemsz = ItemIdGetLength(itemid);
@ -1079,17 +1115,17 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
ItemPointerSet(&(new_item->bti_itup.t_tid), lbkno, P_HIKEY);
/*
* insert the left page pointer into the new root page. the root
* page is the rightmost page on its level so the "high key" item
* is the first data item.
* insert the left page pointer into the new root page. the root page
* is the rightmost page on its level so the "high key" item is the
* first data item.
*/
if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, LP_USED) == InvalidOffsetNumber)
elog(FATAL, "btree: failed to add leftkey to new root page");
pfree(new_item);
/*
* the right page is the rightmost page on the second level, so
* the "high key" item is the first data item on that page as well.
* the right page is the rightmost page on the second level, so the
* "high key" item is the first data item on that page as well.
*/
itemid = PageGetItemId(rpage, P_HIKEY);
itemsz = ItemIdGetLength(itemid);
@ -1141,12 +1177,16 @@ _bt_pgaddtup(Relation rel,
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
first = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
if (afteritem == (BTItem) NULL) {
if (afteritem == (BTItem) NULL)
{
itup_off = _bt_binsrch(rel, buf, keysz, itup_scankey, BT_INSERTION);
} else {
}
else
{
itup_off = first;
do {
do
{
chkitem =
(BTItem) PageGetItem(page, PageGetItemId(page, itup_off));
itup_off = OffsetNumberNext(itup_off);
@ -1184,7 +1224,8 @@ _bt_goesonpg(Relation rel,
ItemId hikey;
BTPageOpaque opaque;
BTItem chkitem;
OffsetNumber offnum, maxoff;
OffsetNumber offnum,
maxoff;
bool found;
page = BufferGetPage(buf);
@ -1197,8 +1238,8 @@ _bt_goesonpg(Relation rel,
/*
* this is a non-rightmost page, so it must have a high key item.
*
* If the scan key is < the high key (the min key on the next page),
* then it for sure belongs here.
* If the scan key is < the high key (the min key on the next page), then
* it for sure belongs here.
*/
hikey = PageGetItemId(page, P_HIKEY);
if (_bt_skeycmp(rel, keysz, scankey, page, hikey, BTLessStrategyNumber))
@ -1237,20 +1278,22 @@ _bt_goesonpg(Relation rel,
maxoff = PageGetMaxOffsetNumber(page);
/*
* Search the entire page for the afteroid. We need to do this, rather
* than doing a binary search and starting from there, because if the
* key we're searching for is the leftmost key in the tree at this
* level, then a binary search will do the wrong thing. Splits are
* pretty infrequent, so the cost isn't as bad as it could be.
* Search the entire page for the afteroid. We need to do this,
* rather than doing a binary search and starting from there, because
* if the key we're searching for is the leftmost key in the tree at
* this level, then a binary search will do the wrong thing. Splits
* are pretty infrequent, so the cost isn't as bad as it could be.
*/
found = false;
for (offnum = P_FIRSTKEY;
offnum <= maxoff;
offnum = OffsetNumberNext(offnum)) {
offnum = OffsetNumberNext(offnum))
{
chkitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum));
if ( BTItemSame (chkitem, afteritem) ) {
if (BTItemSame(chkitem, afteritem))
{
found = true;
break;
}
@ -1272,10 +1315,13 @@ _bt_itemcmp(Relation rel,
StrategyNumber strat)
{
TupleDesc tupDes;
IndexTuple indexTuple1, indexTuple2;
Datum attrDatum1, attrDatum2;
IndexTuple indexTuple1,
indexTuple2;
Datum attrDatum1,
attrDatum2;
int i;
bool isFirstNull, isSecondNull;
bool isFirstNull,
isSecondNull;
bool compare;
bool useEqual = false;
@ -1294,7 +1340,8 @@ _bt_itemcmp(Relation rel,
indexTuple1 = &(item1->bti_itup);
indexTuple2 = &(item2->bti_itup);
for (i = 1; i <= keysz; i++) {
for (i = 1; i <= keysz; i++)
{
attrDatum1 = index_getattr(indexTuple1, i, tupDes, &isFirstNull);
attrDatum2 = index_getattr(indexTuple2, i, tupDes, &isSecondNull);
@ -1320,14 +1367,15 @@ _bt_itemcmp(Relation rel,
if (strat != BTEqualStrategyNumber)
return (true);
}
else /* false for one of ">, <, =" */
else
/* false for one of ">, <, =" */
{
if (strat == BTEqualStrategyNumber)
return (false);
/*
* if original strat was "<=, >=" OR
* "<, >" but some attribute(s) left
* - need to test for Equality
* if original strat was "<=, >=" OR "<, >" but some
* attribute(s) left - need to test for Equality
*/
if (useEqual || i < keysz)
{
@ -1363,7 +1411,8 @@ _bt_updateitem(Relation rel,
OffsetNumber i;
ItemPointerData itemPtrData;
BTItem item;
IndexTuple oldIndexTuple, newIndexTuple;
IndexTuple oldIndexTuple,
newIndexTuple;
int first;
page = BufferGetPage(buf);
@ -1373,25 +1422,27 @@ _bt_updateitem(Relation rel,
first = P_RIGHTMOST((BTPageOpaque) PageGetSpecialPointer(page))
? P_HIKEY : P_FIRSTKEY;
i = first;
do {
do
{
item = (BTItem) PageGetItem(page, PageGetItemId(page, i));
i = OffsetNumberNext(i);
} while (i <= maxoff && !BTItemSame(item, oldItem));
/* this should never happen (in theory) */
if ( ! BTItemSame (item, oldItem) ) {
if (!BTItemSame(item, oldItem))
{
elog(FATAL, "_bt_getstackbuf was lying!!");
}
/*
* It's defined by caller (_bt_insertonpg)
*/
/*
if(IndexTupleDSize(newItem->bti_itup) >
IndexTupleDSize(item->bti_itup)) {
elog(NOTICE, "trying to overwrite a smaller value with a bigger one in _bt_updateitem");
elog(WARN, "this is not good.");
}
* if(IndexTupleDSize(newItem->bti_itup) >
* IndexTupleDSize(item->bti_itup)) { elog(NOTICE, "trying to
* overwrite a smaller value with a bigger one in _bt_updateitem");
* elog(WARN, "this is not good."); }
*/
oldIndexTuple = &(item->bti_itup);
@ -1558,11 +1609,12 @@ _bt_shift (Relation rel, Buffer buf, BTStack stack, int keysz,
_bt_wrtnorelbuf(rel, pbuf);
/*
* Now we want insert into the parent pointer to our old page. It has to
* be inserted before the pointer to new page. You may get problems here
* (in the _bt_goesonpg and/or _bt_pgaddtup), but may be not - I don't
* know. It works if old page is leftmost (nitem is NULL) and
* btitem < hikey and it's all what we need currently. - vadim 05/30/97
* Now we want insert into the parent pointer to our old page. It has
* to be inserted before the pointer to new page. You may get problems
* here (in the _bt_goesonpg and/or _bt_pgaddtup), but may be not - I
* don't know. It works if old page is leftmost (nitem is NULL) and
* btitem < hikey and it's all what we need currently. - vadim
* 05/30/97
*/
nitem = NULL;
afteroff = P_FIRSTKEY;
@ -1582,4 +1634,5 @@ _bt_shift (Relation rel, Buffer buf, BTStack stack, int keysz,
return (res);
}
#endif


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.9 1997/08/19 21:29:36 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.10 1997/09/07 04:38:52 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@ -48,7 +48,8 @@ static void _bt_unsetpagelock(Relation rel, BlockNumber blkno, int access);
#define BTREE_VERSION 0
#endif
typedef struct BTMetaPageData {
typedef struct BTMetaPageData
{
uint32 btm_magic;
uint32 btm_version;
BlockNumber btm_root;
@ -94,7 +95,8 @@ _bt_metapinit(Relation rel)
if (USELOCKING)
RelationSetLockForWrite(rel);
if ((nblocks = RelationGetNumberOfBlocks(rel)) != 0) {
if ((nblocks = RelationGetNumberOfBlocks(rel)) != 0)
{
elog(WARN, "Cannot initialize non-empty btree %s",
RelationGetRelationName(rel));
}
@ -142,18 +144,21 @@ _bt_checkmeta(Relation rel)
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
metap = BufferGetPage(metabuf);
op = (BTPageOpaque) PageGetSpecialPointer(metap);
if (!(op->btpo_flags & BTP_META)) {
if (!(op->btpo_flags & BTP_META))
{
elog(WARN, "Invalid metapage for index %s",
RelationGetRelationName(rel));
}
metad = BTPageGetMeta(metap);
if (metad->btm_magic != BTREE_MAGIC) {
if (metad->btm_magic != BTREE_MAGIC)
{
elog(WARN, "Index %s is not a btree",
RelationGetRelationName(rel));
}
if (metad->btm_version != BTREE_VERSION) {
if (metad->btm_version != BTREE_VERSION)
{
elog(WARN, "Version mismatch on %s: version %d file, version %d code",
RelationGetRelationName(rel),
metad->btm_version, BTREE_VERSION);
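The magic/version check above, restated over a stand-alone struct. The constants, struct and function name below are placeholders for illustration, not the on-disk metapage layout or its error reporting.

#include <stdio.h>

#define TOY_BTREE_MAGIC     0x053162
#define TOY_BTREE_VERSION   0

typedef struct
{
    unsigned int btm_magic;
    unsigned int btm_version;
    unsigned int btm_root;
} ToyMetaPageData;

/* Returns 1 if the metapage looks sane, 0 otherwise. */
int
toy_checkmeta(const ToyMetaPageData *metad)
{
    if (metad->btm_magic != TOY_BTREE_MAGIC)
    {
        fprintf(stderr, "not a btree\n");
        return 0;
    }
    if (metad->btm_version != TOY_BTREE_VERSION)
    {
        fprintf(stderr, "version mismatch: file %u, code %u\n",
                metad->btm_version, TOY_BTREE_VERSION);
        return 0;
    }
    return 1;
}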
@ -161,6 +166,7 @@ _bt_checkmeta(Relation rel)
_bt_relbuf(rel, metabuf, BT_READ);
}
#endif
/*
@ -196,19 +202,22 @@ _bt_getroot(Relation rel, int access)
Assert(metaopaque->btpo_flags & BTP_META);
metad = BTPageGetMeta(metapg);
if (metad->btm_magic != BTREE_MAGIC) {
if (metad->btm_magic != BTREE_MAGIC)
{
elog(WARN, "Index %s is not a btree",
RelationGetRelationName(rel));
}
if (metad->btm_version != BTREE_VERSION) {
if (metad->btm_version != BTREE_VERSION)
{
elog(WARN, "Version mismatch on %s: version %d file, version %d code",
RelationGetRelationName(rel),
metad->btm_version, BTREE_VERSION);
}
/* if no root page initialized yet, do it */
if (metad->btm_root == P_NONE) {
if (metad->btm_root == P_NONE)
{
/* turn our read lock in for a write lock */
_bt_relbuf(rel, metabuf, BT_READ);
@ -219,12 +228,13 @@ _bt_getroot(Relation rel, int access)
metad = BTPageGetMeta(metapg);
/*
* Race condition: if someone else initialized the metadata between
* the time we released the read lock and acquired the write lock,
* above, we want to avoid doing it again.
* Race condition: if someone else initialized the metadata
* between the time we released the read lock and acquired the
* write lock, above, we want to avoid doing it again.
*/
if (metad->btm_root == P_NONE) {
if (metad->btm_root == P_NONE)
{
/*
* Get, initialize, write, and leave a lock of the appropriate
@ -245,25 +255,30 @@ _bt_getroot(Relation rel, int access)
_bt_wrtnorelbuf(rel, rootbuf);
/* swap write lock for read lock, if appropriate */
if (access != BT_WRITE) {
if (access != BT_WRITE)
{
_bt_setpagelock(rel, rootblkno, BT_READ);
_bt_unsetpagelock(rel, rootblkno, BT_WRITE);
}
/* okay, metadata is correct */
_bt_wrtbuf(rel, metabuf);
} else {
}
else
{
/*
* Metadata initialized by someone else. In order to guarantee
* no deadlocks, we have to release the metadata page and start
* all over again.
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
* page and start all over again.
*/
_bt_relbuf(rel, metabuf, BT_WRITE);
return (_bt_getroot(rel, access));
}
} else {
}
else
{
rootbuf = _bt_getbuf(rel, metad->btm_root, access);
/* done with the meta page */
@ -278,7 +293,8 @@ _bt_getroot(Relation rel, int access)
rootpg = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpg);
if (!(rootopaque->btpo_flags & BTP_ROOT)) {
if (!(rootopaque->btpo_flags & BTP_ROOT))
{
/* it happened, try again */
_bt_relbuf(rel, rootbuf, access);
@ -311,14 +327,17 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* until we've instantiated the buffer.
*/
if (blkno != P_NEW) {
if (blkno != P_NEW)
{
if (access == BT_WRITE)
_bt_setpagelock(rel, blkno, BT_WRITE);
else
_bt_setpagelock(rel, blkno, BT_READ);
buf = ReadBuffer(rel, blkno);
} else {
}
else
{
buf = ReadBuffer(rel, blkno);
blkno = BufferGetBlockNumber(buf);
page = BufferGetPage(buf);
@ -392,6 +411,7 @@ _bt_wrtnorelbuf(Relation rel, Buffer buf)
void
_bt_pageinit(Page page, Size size)
{
/*
* Cargo-cult programming -- don't really need this to be zero, but
* creating new pages is an infrequent occurrence and it makes me feel
@ -455,7 +475,9 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
{
Buffer buf;
BlockNumber blkno;
OffsetNumber start, offnum, maxoff;
OffsetNumber start,
offnum,
maxoff;
OffsetNumber i;
Page page;
ItemId itemid;
@ -470,7 +492,8 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
maxoff = PageGetMaxOffsetNumber(page);
if (maxoff >= stack->bts_offset) {
if (maxoff >= stack->bts_offset)
{
itemid = PageGetItemId(page, stack->bts_offset);
item = (BTItem) PageGetItem(page, itemid);
@ -488,7 +511,8 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/* if the item has just moved right on this page, we're done */
for (i = OffsetNumberNext(stack->bts_offset);
i <= maxoff;
i = OffsetNumberNext(i)) {
i = OffsetNumberNext(i))
{
itemid = PageGetItemId(page, i);
item = (BTItem) PageGetItem(page, itemid);
@ -507,7 +531,8 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
}
/* by here, the item we're looking for moved right at least one page */
for (;;) {
for (;;)
{
blkno = opaque->btpo_next;
if (P_RIGHTMOST(opaque))
elog(FATAL, "my bits moved right off the end of the world!");
@ -524,7 +549,8 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/* see if it's on this page */
for (offnum = start;
offnum <= maxoff;
offnum = OffsetNumberNext(offnum)) {
offnum = OffsetNumberNext(offnum))
{
itemid = PageGetItemId(page, offnum);
item = (BTItem) PageGetItem(page, itemid);
if (BTItemSame(item, stack->bts_btitem))
@ -547,7 +573,8 @@ _bt_setpagelock(Relation rel, BlockNumber blkno, int access)
{
ItemPointerData iptr;
if (USELOCKING) {
if (USELOCKING)
{
ItemPointerSet(&iptr, blkno, P_HIKEY);
if (access == BT_WRITE)
@ -562,7 +589,8 @@ _bt_unsetpagelock(Relation rel, BlockNumber blkno, int access)
{
ItemPointerData iptr;
if (USELOCKING) {
if (USELOCKING)
{
ItemPointerSet(&iptr, blkno, P_HIKEY);
if (access == BT_WRITE)


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.19 1997/05/05 03:41:17 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.20 1997/09/07 04:38:54 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -36,11 +36,13 @@
#ifdef BTREE_BUILD_STATS
#include <tcop/tcopprot.h>
extern int ShowExecutorStats;
#endif
bool BuildingBtree = false; /* see comment in btbuild() */
bool FastBuild = true; /* use sort/build instead of insertion build */
bool FastBuild = true; /* use sort/build instead of
* insertion build */
/*
* btbuild() -- build a new btree index.
@ -65,20 +67,26 @@ btbuild(Relation heap,
Buffer buffer;
HeapTuple htup;
IndexTuple itup;
TupleDesc htupdesc, itupdesc;
TupleDesc htupdesc,
itupdesc;
Datum *attdata;
bool *nulls;
InsertIndexResult res = 0;
int nhtups, nitups;
int nhtups,
nitups;
int i;
BTItem btitem;
#ifndef OMIT_PARTIAL_INDEX
ExprContext *econtext = (ExprContext *) NULL;
TupleTable tupleTable = (TupleTable) NULL;
TupleTableSlot *slot = (TupleTableSlot *) NULL;
#endif
Oid hrelid, irelid;
Node *pred, *oldPred;
Oid hrelid,
irelid;
Node *pred,
*oldPred;
void *spool = (void *) NULL;
bool isunique;
bool usefast;
@ -91,9 +99,9 @@ btbuild(Relation heap,
/*
* bootstrap processing does something strange, so don't use
* sort/build for initial catalog indices. at some point i need
* to look harder at this. (there is some kind of incremental
* processing going on there.) -- pma 08/29/95
* sort/build for initial catalog indices. at some point i need to
* look harder at this. (there is some kind of incremental processing
* going on there.) -- pma 08/29/95
*/
usefast = (FastBuild && IsNormalProcessingMode());
@ -118,25 +126,27 @@ btbuild(Relation heap,
nulls = (bool *) palloc(natts * sizeof(bool));
/*
* If this is a predicate (partial) index, we will need to evaluate the
* predicate using ExecQual, which requires the current tuple to be in a
* slot of a TupleTable. In addition, ExecQual must have an ExprContext
* referring to that slot. Here, we initialize dummy TupleTable and
* ExprContext objects for this purpose. --Nels, Feb '92
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
* be in a slot of a TupleTable. In addition, ExecQual must have an
* ExprContext referring to that slot. Here, we initialize dummy
* TupleTable and ExprContext objects for this purpose. --Nels, Feb
* '92
*/
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL) {
if (pred != NULL || oldPred != NULL)
{
tupleTable = ExecCreateTupleTable(1);
slot = ExecAllocTableSlot(tupleTable);
econtext = makeNode(ExprContext);
FillDummyExprContext(econtext, slot, htupdesc, InvalidBuffer);
/*
* we never want to use sort/build if we are extending an
* existing partial index -- it works by inserting the
* newly-qualifying tuples into the existing index.
* (sort/build would overwrite the existing index with one
* consisting of the newly-qualifying tuples.)
* we never want to use sort/build if we are extending an existing
* partial index -- it works by inserting the newly-qualifying
* tuples into the existing index. (sort/build would overwrite the
* existing index with one consisting of the newly-qualifying
* tuples.)
*/
usefast = false;
}
@ -149,12 +159,14 @@ btbuild(Relation heap,
/* build the index */
nhtups = nitups = 0;
if (usefast) {
if (usefast)
{
spool = _bt_spoolinit(index, 7, isunique);
res = (InsertIndexResult) NULL;
}
for (; HeapTupleIsValid(htup); htup = heap_getnext(hscan, 0, &buffer)) {
for (; HeapTupleIsValid(htup); htup = heap_getnext(hscan, 0, &buffer))
{
nhtups++;
@ -162,20 +174,26 @@ btbuild(Relation heap,
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
*/
if (oldPred != NULL) {
if (oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
if (ExecQual((List*)oldPred, econtext) == true) {
if (ExecQual((List *) oldPred, econtext) == true)
{
nitups++;
continue;
}
#endif /* OMIT_PARTIAL_INDEX */
}
/* Skip this tuple if it doesn't satisfy the partial-index predicate */
if (pred != NULL) {
/*
* Skip this tuple if it doesn't satisfy the partial-index
* predicate
*/
if (pred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
@ -187,18 +205,19 @@ btbuild(Relation heap,
nitups++;
/*
* For the current heap tuple, extract all the attributes
* we use in this index, and note which are null.
* For the current heap tuple, extract all the attributes we use
* in this index, and note which are null.
*/
for (i = 1; i <= natts; i++) {
for (i = 1; i <= natts; i++)
{
int attoff;
bool attnull;
/*
* Offsets are from the start of the tuple, and are
* zero-based; indices are one-based. The next call
* returns i - 1. That's data hiding for you.
* zero-based; indices are one-based. The next call returns i
* - 1. That's data hiding for you.
*/
attoff = AttrNumberGetAttrOffset(i);
@ -216,49 +235,50 @@ btbuild(Relation heap,
itup = index_formtuple(itupdesc, attdata, nulls);
/*
* If the single index key is null, we don't insert it into
* the index. Btrees support scans on <, <=, =, >=, and >.
* Relational algebra says that A op B (where op is one of the
* operators above) returns null if either A or B is null. This
* means that no qualification used in an index scan could ever
* return true on a null attribute. It also means that indices
* can't be used by ISNULL or NOTNULL scans, but that's an
* artifact of the strategy map architecture chosen in 1986, not
* of the way nulls are handled here.
* If the single index key is null, we don't insert it into the
* index. Btrees support scans on <, <=, =, >=, and >. Relational
* algebra says that A op B (where op is one of the operators
* above) returns null if either A or B is null. This means that
* no qualification used in an index scan could ever return true
* on a null attribute. It also means that indices can't be used
* by ISNULL or NOTNULL scans, but that's an artifact of the
* strategy map architecture chosen in 1986, not of the way nulls
* are handled here.
*/
/*
* New comments: NULLs handling.
* While we can't do NULL comparison, we can follow simple
* rule for ordering items on btree pages - NULLs greater
* NOT_NULLs and NULL = NULL is TRUE. Sure, it's just rule
* for placing/finding items and no more - keytest'll return
* FALSE for a = 5 for items having 'a' isNULL.
* Look at _bt_skeycmp, _bt_compare and _bt_itemcmp for
* how it works. - vadim 03/23/97
if (itup->t_info & INDEX_NULL_MASK) {
pfree(itup);
continue;
}
/*
* New comments: NULLs handling. While we can't do NULL
* comparison, we can follow simple rule for ordering items on
* btree pages - NULLs greater NOT_NULLs and NULL = NULL is TRUE.
* Sure, it's just rule for placing/finding items and no more -
* keytest'll return FALSE for a = 5 for items having 'a' isNULL.
* Look at _bt_skeycmp, _bt_compare and _bt_itemcmp for how it
* works. - vadim 03/23/97
*
* if (itup->t_info & INDEX_NULL_MASK) { pfree(itup); continue; }
*/
itup->t_tid = htup->t_ctid;
btitem = _bt_formitem(itup);
/*
* if we are doing bottom-up btree build, we insert the index
* into a spool page for subsequent processing. otherwise, we
* insert into the btree.
* if we are doing bottom-up btree build, we insert the index into
* a spool page for subsequent processing. otherwise, we insert
* into the btree.
*/
if (usefast) {
if (usefast)
{
_bt_spool(index, btitem, spool);
} else {
}
else
{
res = _bt_doinsert(index, btitem, isunique, heap);
}
pfree(btitem);
pfree(itup);
if (res) {
if (res)
{
pfree(res);
}
}
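The placement rule in the NULLs comment above, NULLs sort after non-NULLs and NULL = NULL holds for ordering purposes only, can be phrased as a comparator over a toy key type; ToyKey and toy_key_cmp are invented, not backend structures.

typedef struct
{
    int         value;
    int         isnull;
} ToyKey;

int
toy_key_cmp(ToyKey a, ToyKey b)
{
    if (a.isnull || b.isnull)
    {
        if (a.isnull && b.isnull)
            return 0;           /* NULL = NULL, for placement only */
        return a.isnull ? 1 : -1;   /* NULLs sort after non-NULLs */
    }
    return (a.value > b.value) - (a.value < b.value);
}

A keytest on such values would still reject NULLs, as the comment notes; the comparator only decides where items are placed and found on pages.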
@ -266,7 +286,8 @@ btbuild(Relation heap,
/* okay, all heap tuples are indexed */
heap_endscan(hscan);
if (pred != NULL || oldPred != NULL) {
if (pred != NULL || oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
ExecDestroyTupleTable(tupleTable, true);
pfree(econtext);
@ -275,11 +296,12 @@ btbuild(Relation heap,
/*
* if we are doing bottom-up btree build, we now have a bunch of
* sorted runs in the spool pages. finish the build by (1)
* merging the runs, (2) inserting the sorted tuples into btree
* pages and (3) building the upper levels.
* sorted runs in the spool pages. finish the build by (1) merging
* the runs, (2) inserting the sorted tuples into btree pages and (3)
* building the upper levels.
*/
if (usefast) {
if (usefast)
{
_bt_spool(index, (BTItem) NULL, spool); /* flush the spool */
_bt_leafbuild(index, spool);
_bt_spooldestroy(spool);
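In the spirit of steps (1) through (3) above, here is a much-reduced bulk build over integer keys: sort once, then pack fixed-size leaf "pages" left to right. Run merging and the upper tree levels are left out, and the names and capacity are invented.

#include <stdlib.h>

#define TOY_LEAF_CAPACITY 4

static int
toy_int_cmp(const void *a, const void *b)
{
    int         x = *(const int *) a;
    int         y = *(const int *) b;

    return (x > y) - (x < y);
}

/* Fills 'pages' row by row; returns the number of leaf pages used. */
int
toy_leafbuild(int *keys, int nkeys, int pages[][TOY_LEAF_CAPACITY])
{
    int         i,
                npages = 0;

    qsort(keys, nkeys, sizeof(int), toy_int_cmp);   /* the "spool" sort */
    for (i = 0; i < nkeys; i++)
    {
        if (i % TOY_LEAF_CAPACITY == 0)
            npages++;           /* start a new leaf "page" */
        pages[npages - 1][i % TOY_LEAF_CAPACITY] = keys[i];
    }
    return npages;
}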
@ -295,10 +317,10 @@ btbuild(Relation heap,
#endif
/*
* Since we just counted the tuples in the heap, we update its
* stats in pg_class to guarantee that the planner takes advantage
* of the index we just created. Finally, only update statistics
* during normal index definitions, not for indices on system catalogs
* Since we just counted the tuples in the heap, we update its stats
* in pg_class to guarantee that the planner takes advantage of the
* index we just created. Finally, only update statistics during
* normal index definitions, not for indices on system catalogs
* created during bootstrap processing. We must close the relations
* before updatings statistics to guarantee that the relcache entries
* are flushed when we increment the command counter in UpdateStats().
@ -311,8 +333,10 @@ btbuild(Relation heap,
index_close(index);
UpdateStats(hrelid, nhtups, true);
UpdateStats(irelid, nitups, false);
if (oldPred != NULL) {
if (nitups == nhtups) pred = NULL;
if (oldPred != NULL)
{
if (nitups == nhtups)
pred = NULL;
UpdateIndexPredicate(irelid, oldPred, pred);
}
}
@ -344,9 +368,8 @@ btinsert(Relation rel, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation
/*
* See comments in btbuild.
if (itup->t_info & INDEX_NULL_MASK)
return ((InsertIndexResult) NULL);
*
* if (itup->t_info & INDEX_NULL_MASK) return ((InsertIndexResult) NULL);
*/
btitem = _bt_formitem(itup);
@ -372,9 +395,9 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
RetrieveIndexResult res;
/*
* If we've already initialized this scan, we can just advance it
* in the appropriate direction. If we haven't done so yet, we
* call a routine to get the first item in the scan.
* If we've already initialized this scan, we can just advance it in
* the appropriate direction. If we haven't done so yet, we call a
* routine to get the first item in the scan.
*/
if (ItemPointerIsValid(&(scan->currentItemData)))
@ -414,14 +437,16 @@ btrescan(IndexScanDesc scan, bool fromEnd, ScanKey scankey)
so = (BTScanOpaque) scan->opaque;
/* we hold a read lock on the current page in the scan */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
_bt_relbuf(scan->relation, so->btso_curbuf, BT_READ);
so->btso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* and we hold a read lock on the last marked item in the scan */
if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) {
if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
{
_bt_relbuf(scan->relation, so->btso_mrkbuf, BT_READ);
so->btso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
@ -439,11 +464,12 @@ btrescan(IndexScanDesc scan, bool fromEnd, ScanKey scankey)
}
/*
* Reset the scan keys. Note that keys ordering stuff
* moved to _bt_first. - vadim 05/05/97
* Reset the scan keys. Note that keys ordering stuff moved to
* _bt_first. - vadim 05/05/97
*/
so->numberOfKeys = scan->numberOfKeys;
if (scan->numberOfKeys > 0) {
if (scan->numberOfKeys > 0)
{
memmove(scan->keyData,
scankey,
scan->numberOfKeys * sizeof(ScanKeyData));
@ -463,7 +489,8 @@ btmovescan(IndexScanDesc scan, Datum v)
so = (BTScanOpaque) scan->opaque;
/* release any locks we still hold */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
_bt_relbuf(scan->relation, so->btso_curbuf, BT_READ);
so->btso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
@ -485,14 +512,16 @@ btendscan(IndexScanDesc scan)
so = (BTScanOpaque) scan->opaque;
/* release any locks we still hold */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
if (BufferIsValid(so->btso_curbuf))
_bt_relbuf(scan->relation, so->btso_curbuf, BT_READ);
so->btso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) {
if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
{
if (BufferIsValid(so->btso_mrkbuf))
_bt_relbuf(scan->relation, so->btso_mrkbuf, BT_READ);
so->btso_mrkbuf = InvalidBuffer;
@ -518,14 +547,16 @@ btmarkpos(IndexScanDesc scan)
so = (BTScanOpaque) scan->opaque;
/* release lock on old marked data, if any */
if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) {
if (ItemPointerIsValid(iptr = &(scan->currentMarkData)))
{
_bt_relbuf(scan->relation, so->btso_mrkbuf, BT_READ);
so->btso_mrkbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* bump lock on currentItemData and copy to currentMarkData */
if (ItemPointerIsValid(&(scan->currentItemData))) {
if (ItemPointerIsValid(&(scan->currentItemData)))
{
so->btso_mrkbuf = _bt_getbuf(scan->relation,
BufferGetBlockNumber(so->btso_curbuf),
BT_READ);
@ -545,14 +576,16 @@ btrestrpos(IndexScanDesc scan)
so = (BTScanOpaque) scan->opaque;
/* release lock on current data, if any */
if (ItemPointerIsValid(iptr = &(scan->currentItemData))) {
if (ItemPointerIsValid(iptr = &(scan->currentItemData)))
{
_bt_relbuf(scan->relation, so->btso_curbuf, BT_READ);
so->btso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(iptr);
}
/* bump lock on currentMarkData and copy to currentItemData */
if (ItemPointerIsValid(&(scan->currentMarkData))) {
if (ItemPointerIsValid(&(scan->currentMarkData)))
{
so->btso_curbuf = _bt_getbuf(scan->relation,
BufferGetBlockNumber(so->btso_mrkbuf),
BT_READ);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.7 1997/02/18 17:13:45 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.8 1997/09/07 04:38:57 momjian Exp $
*
*
* NOTES
@ -32,7 +32,8 @@
#include <storage/bufpage.h>
#include <access/nbtree.h>
typedef struct BTScanListData {
typedef struct BTScanListData
{
IndexScanDesc btsl_scan;
struct BTScanListData *btsl_next;
} BTScanListData;
@ -64,12 +65,14 @@ _bt_regscan(IndexScanDesc scan)
void
_bt_dropscan(IndexScanDesc scan)
{
BTScanList chk, last;
BTScanList chk,
last;
last = (BTScanList) NULL;
for (chk = BTScans;
chk != (BTScanList) NULL && chk->btsl_scan != scan;
chk = chk->btsl_next) {
chk = chk->btsl_next)
{
last = chk;
}
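The walk in _bt_dropscan above is a plain singly-linked-list unlink: remember the predecessor while searching, then splice the node out. The same pattern in isolation, with an invented node type and memory ownership left to the caller:

#include <stddef.h>

typedef struct ToyNode
{
    void       *payload;
    struct ToyNode *next;
} ToyNode;

ToyNode *
toy_list_remove(ToyNode *head, void *payload)
{
    ToyNode    *chk,
               *last = NULL;

    for (chk = head; chk != NULL && chk->payload != payload; chk = chk->next)
        last = chk;             /* remember the predecessor */

    if (chk == NULL)
        return head;            /* not found; list unchanged */
    if (last == NULL)
        head = chk->next;       /* removing the head */
    else
        last->next = chk->next; /* unlink from the middle or end */
    return head;
}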
@ -95,7 +98,8 @@ _bt_adjscans(Relation rel, ItemPointer tid, int op)
Oid relid;
relid = rel->rd_id;
for (l = BTScans; l != (BTScanList) NULL; l = l->btsl_next) {
for (l = BTScans; l != (BTScanList) NULL; l = l->btsl_next)
{
if (relid == l->btsl_scan->relation->rd_id)
_bt_scandel(l->btsl_scan, op,
ItemPointerGetBlockNumber(tid),
@ -139,8 +143,10 @@ _bt_scandel(IndexScanDesc scan, int op, BlockNumber blkno, OffsetNumber offno)
current = &(scan->currentItemData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno) {
switch (op) {
&& ItemPointerGetOffsetNumber(current) >= offno)
{
switch (op)
{
case BT_INSERT:
_bt_step(scan, &buf, ForwardScanDirection);
break;
@ -157,12 +163,15 @@ _bt_scandel(IndexScanDesc scan, int op, BlockNumber blkno, OffsetNumber offno)
current = &(scan->currentMarkData);
if (ItemPointerIsValid(current)
&& ItemPointerGetBlockNumber(current) == blkno
&& ItemPointerGetOffsetNumber(current) >= offno) {
&& ItemPointerGetOffsetNumber(current) >= offno)
{
ItemPointerData tmp;
tmp = *current;
*current = scan->currentItemData;
scan->currentItemData = tmp;
switch (op) {
switch (op)
{
case BT_INSERT:
_bt_step(scan, &buf, ForwardScanDirection);
break;


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.23 1997/08/19 21:29:42 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.24 1997/09/07 04:38:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -184,21 +184,21 @@ _bt_moveright(Relation rel,
do
{
OffsetNumber offmax = PageGetMaxOffsetNumber(page);
/*
* If this page consists of all duplicate keys (hikey and first
* key on the page have the same value), then we don't need to
* step right.
* If this page consists of all duplicate keys (hikey and
* first key on the page have the same value), then we don't
* need to step right.
*
* NOTE for multi-column indices: we may do scan using
* keys not for all attrs. But we handle duplicates
* using all attrs in _bt_insert/_bt_spool code.
* And so we've to compare scankey with _last_ item
* on this page to do not lose "good" tuples if number
* of attrs > keysize. Example: (2,0) - last items on
* this page, (2,1) - first item on next page (hikey),
* our scankey is x = 2. Scankey == (2,1) because of
* we compare first attrs only, but we shouldn't to move
* right of here. - vadim 04/15/97
* NOTE for multi-column indices: we may do scan using keys not
* for all attrs. But we handle duplicates using all attrs in
* _bt_insert/_bt_spool code. And so we've to compare scankey
* with _last_ item on this page to do not lose "good" tuples
* if number of attrs > keysize. Example: (2,0) - last items
* on this page, (2,1) - first item on next page (hikey), our
* scankey is x = 2. Scankey == (2,1) because of we compare
* first attrs only, but we shouldn't to move right of here.
* - vadim 04/15/97
*/
if (_bt_skeycmp(rel, keysz, scankey, page, hikey,
@ -297,7 +297,8 @@ _bt_skeycmp(Relation rel,
tupDes = RelationGetTupleDescriptor(rel);
/* see if the comparison is true for all of the key attributes */
for (i=1; i <= keysz; i++) {
for (i = 1; i <= keysz; i++)
{
entry = &scankey[i - 1];
Assert(entry->sk_attno == i);
@ -333,14 +334,15 @@ _bt_skeycmp(Relation rel,
if (strat != BTEqualStrategyNumber)
return (true);
}
else /* false for one of ">, <, =" */
else
/* false for one of ">, <, =" */
{
if (strat == BTEqualStrategyNumber)
return (false);
/*
* if original strat was "<=, >=" OR
* "<, >" but some attribute(s) left
* - need to test for Equality
* if original strat was "<=, >=" OR "<, >" but some
* attribute(s) left - need to test for Equality
*/
if (useEqual || i < keysz)
{
@ -383,7 +385,9 @@ _bt_binsrch(Relation rel,
TupleDesc itupdesc;
Page page;
BTPageOpaque opaque;
OffsetNumber low, mid, high;
OffsetNumber low,
mid,
high;
int natts = rel->rd_rel->relnatts;
int result;
@ -399,10 +403,10 @@ _bt_binsrch(Relation rel,
/*
* Since for non-rightmost pages, the first item on the page is the
* high key, there are two notions of emptiness. One is if nothing
* appears on the page. The other is if nothing but the high key does.
* The reason we test high <= low, rather than high == low, is that
* after vacuuming there may be nothing *but* the high key on a page.
* In that case, given the scheme above, low = 2 and high = 1.
* appears on the page. The other is if nothing but the high key
* does. The reason we test high <= low, rather than high == low, is
* that after vacuuming there may be nothing *but* the high key on a
* page. In that case, given the scheme above, low = 2 and high = 1.
*/
if (PageIsEmpty(page))
@ -419,7 +423,8 @@ _bt_binsrch(Relation rel,
return (low);
}
while ((high - low) > 1) {
while ((high - low) > 1)
{
mid = low + ((high - low) / 2);
result = _bt_compare(rel, itupdesc, page, keysz, scankey, mid);
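For the scan and insertion cases discussed below, the loop above converges on the first key greater than or equal to the scan key. The same postcondition in its textbook form over a sorted int array; toy_binsrch is invented, and page offsets and the high-key slot are ignored.

/* Returns the index of the first element >= scankey, or nkeys if none. */
int
toy_binsrch(const int *keys, int nkeys, int scankey)
{
    int         low = 0;
    int         high = nkeys;   /* one past the last element */

    while (high > low)
    {
        int         mid = low + (high - low) / 2;

        if (keys[mid] < scankey)
            low = mid + 1;
        else
            high = mid;
    }
    return low;                 /* first key >= scankey */
}

Landing on the first such key is what lets an insertion keep equal keys contiguous and lets a forward scan start at the leftmost match.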
@ -430,16 +435,17 @@ _bt_binsrch(Relation rel,
else
{
mid = _bt_firsteq(rel, itupdesc, page, keysz, scankey, mid);
/*
* NOTE for multi-column indices: we may do scan using
* keys not for all attrs. But we handle duplicates using
* all attrs in _bt_insert/_bt_spool code. And so while
* searching on internal pages having number of attrs > keysize
* we want to point at the last item < the scankey, not at the
* first item = the scankey (!!!), and let _bt_moveright
* decide later whether to move right or not (see comments and
* example there). Note also that INSERTions are not affected
* by this code (natts == keysz). - vadim 04/15/97
* NOTE for multi-column indices: we may do scan using keys
* not for all attrs. But we handle duplicates using all attrs
* in _bt_insert/_bt_spool code. And so while searching on
* internal pages having number of attrs > keysize we want to
* point at the last item < the scankey, not at the first item
* = the scankey (!!!), and let _bt_moveright decide later
* whether to move right or not (see comments and example
* there). Note also that INSERTions are not affected by this
* code (natts == keysz). - vadim 04/15/97
*/
if (natts == keysz || opaque->btpo_flags & BTP_LEAF)
return (mid);
@ -454,28 +460,28 @@ _bt_binsrch(Relation rel,
* We terminated because the endpoints got too close together. There
* are two cases to take care of.
*
* For non-insertion searches on internal pages, we want to point at
* the last key <, or first key =, the scankey on the page. This
* guarantees that we'll descend the tree correctly.
* (NOTE comments above for multi-column indices).
* For non-insertion searches on internal pages, we want to point at the
* last key <, or first key =, the scankey on the page. This
* guarantees that we'll descend the tree correctly. (NOTE comments
* above for multi-column indices).
*
* For all other cases, we want to point at the first key >=
* the scankey on the page. This guarantees that scans and
* insertions will happen correctly.
* For all other cases, we want to point at the first key >= the scankey
* on the page. This guarantees that scans and insertions will happen
* correctly.
*/
if (!(opaque->btpo_flags & BTP_LEAF) && srchtype == BT_DESCENT)
{ /*
* We want the last key <, or first key ==, the scan key.
*/
{ /* We want the last key <, or first key
* ==, the scan key. */
result = _bt_compare(rel, itupdesc, page, keysz, scankey, high);
if (result == 0)
{
mid = _bt_firsteq(rel, itupdesc, page, keysz, scankey, high);
/*
* If natts > keysz we want last item < the scan key.
* See comments above for multi-column indices.
* If natts > keysz we want last item < the scan key. See
* comments above for multi-column indices.
*/
if (natts == keysz)
return (mid);
@ -489,7 +495,8 @@ _bt_binsrch(Relation rel,
else
return (low);
}
else /* we want the first key >= the scan key */
else
/* we want the first key >= the scan key */
{
result = _bt_compare(rel, itupdesc, page, keysz, scankey, low);
if (result <= 0)
@ -527,7 +534,8 @@ _bt_firsteq(Relation rel,
/* walk backwards looking for the first key in the chain of duplicates */
while (offnum > limit
&& _bt_compare(rel, itupdesc, page,
keysz, scankey, OffsetNumberPrev(offnum)) == 0) {
keysz, scankey, OffsetNumberPrev(offnum)) == 0)
{
offnum = OffsetNumberPrev(offnum);
}
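The backward walk above, restated over a sorted int array: starting from any index whose key equals the scan key, step left until the previous element no longer matches, which lands on the first entry of the duplicate run. toy_firsteq is a made-up name.

int
toy_firsteq(const int *keys, int idx, int scankey)
{
    while (idx > 0 && keys[idx - 1] == scankey)
        idx--;                  /* still inside the chain of duplicates */
    return idx;
}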
@ -576,9 +584,9 @@ _bt_compare(Relation rel,
bool null;
/*
* If this is a leftmost internal page, and if our comparison is
* with the first key on the page, then the item at that position is
* by definition less than the scan key.
* If this is a leftmost internal page, and if our comparison is with
* the first key on the page, then the item at that position is by
* definition less than the scan key.
*
* - see new comments above...
*/
@ -586,39 +594,41 @@ _bt_compare(Relation rel,
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (!(opaque->btpo_flags & BTP_LEAF)
&& P_LEFTMOST(opaque)
&& offnum == P_HIKEY) {
&& offnum == P_HIKEY)
{
itemid = PageGetItemId(page, offnum);
/*
* we just have to believe that this will only be called with
* offnum == P_HIKEY when P_HIKEY is the OffsetNumber of the
* first actual data key (i.e., this is also a rightmost
* page). there doesn't seem to be any code that implies
* that the leftmost page is normally missing a high key as
* well as the rightmost page. but that implies that this
* code path only applies to the root -- which seems
* unlikely..
* offnum == P_HIKEY when P_HIKEY is the OffsetNumber of the first
* actual data key (i.e., this is also a rightmost page). there
* doesn't seem to be any code that implies that the leftmost page
* is normally missing a high key as well as the rightmost page.
* but that implies that this code path only applies to the root
* -- which seems unlikely..
*
* - see new comments above...
*/
if (! P_RIGHTMOST(opaque)) {
if (!P_RIGHTMOST(opaque))
{
elog(WARN, "_bt_compare: invalid comparison to high key");
}
#if 0
/*
* We just have to believe that the right answer will not
* break anything. I've checked the code and all seems to be ok.
* See new comments above...
* We just have to believe that the right answer will not break
* anything. I've checked the code and all seems to be ok. See new
* comments above...
*
* -- Old comments
* If the item on the page is equal to the scankey, that's
* okay to admit. We just can't claim that the first key on
* the page is greater than anything.
* -- Old comments If the item on the page is equal to the scankey,
* that's okay to admit. We just can't claim that the first key
* on the page is greater than anything.
*/
if (_bt_skeycmp(rel, keysz, scankey, page, itemid,
BTEqualStrategyNumber)) {
BTEqualStrategyNumber))
{
return (0);
}
return (1);
@ -629,16 +639,17 @@ _bt_compare(Relation rel,
itup = &(btitem->bti_itup);
/*
* The scan key is set up with the attribute number associated with each
* term in the key. It is important that, if the index is multi-key,
* the scan contain the first k key attributes, and that they be in
* order. If you think about how multi-key ordering works, you'll
* understand why this is.
* The scan key is set up with the attribute number associated with
* each term in the key. It is important that, if the index is
* multi-key, the scan contain the first k key attributes, and that
* they be in order. If you think about how multi-key ordering works,
* you'll understand why this is.
*
* We don't test for violation of this condition here.
*/
for (i = 1; i <= keysz; i++) {
for (i = 1; i <= keysz; i++)
{
long tmpres;
entry = &scankey[i - 1];
@ -703,8 +714,8 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
/*
* XXX 10 may 91: somewhere there's a bug in our management of the
* cached buffer for this scan. wei discovered it. the following
* is a workaround so he can work until i figure out what's going on.
* cached buffer for this scan. wei discovered it. the following is
* a workaround so he can work until i figure out what's going on.
*/
if (!BufferIsValid(so->btso_curbuf))
@ -764,7 +775,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
Page page;
BTPageOpaque pop;
BTStack stack;
OffsetNumber offnum, maxoff;
OffsetNumber offnum,
maxoff;
bool offGmax = false;
BTItem btitem;
IndexTuple itup;
@ -782,8 +794,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
so = (BTScanOpaque) scan->opaque;
/*
* Order the keys in the qualification and be sure
* that the scan exploits the tree order.
* Order the keys in the qualification and be sure that the scan
* exploits the tree order.
*/
so->numberOfFirstKeys = 0; /* may be changed by _bt_orderkeys */
so->qual_ok = 1; /* may be changed by _bt_orderkeys */
@ -813,8 +825,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
current = &(scan->currentItemData);
/*
* Okay, we want something more complicated. What we'll do is use
* the first item in the scan key passed in (which has been correctly
* Okay, we want something more complicated. What we'll do is use the
* first item in the scan key passed in (which has been correctly
* ordered to take advantage of index ordering) to position ourselves
* at the right place in the scan.
*/
@ -835,13 +847,14 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
page = BufferGetPage(buf);
/*
* This will happen if the tree we're searching is entirely empty,
* or if we're doing a search for a key that would appear on an
* entirely empty internal page. In either case, there are no
* matching tuples in the index.
* This will happen if the tree we're searching is entirely empty, or
* if we're doing a search for a key that would appear on an entirely
* empty internal page. In either case, there are no matching tuples
* in the index.
*/
if (PageIsEmpty(page)) {
if (PageIsEmpty(page))
{
ItemPointerSetInvalid(current);
so->btso_curbuf = InvalidBuffer;
_bt_relbuf(rel, buf, BT_READ);
@ -851,10 +864,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
pop = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* Now _bt_moveright doesn't move from non-rightmost leaf page
* if scankey == hikey and there is only hikey there. It's
* good for insertion, but we need to do work for scan here.
* - vadim 05/27/97
* Now _bt_moveright doesn't move from non-rightmost leaf page if
* scankey == hikey and there is only hikey there. It's good for
* insertion, but we need to do work for scan here. - vadim 05/27/97
*/
while (maxoff == P_HIKEY && !P_RIGHTMOST(pop) &&
@ -867,7 +879,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
_bt_relbuf(rel, buf, BT_READ);
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
if (PageIsEmpty(page)) {
if (PageIsEmpty(page))
{
ItemPointerSetInvalid(current);
so->btso_curbuf = InvalidBuffer;
_bt_relbuf(rel, buf, BT_READ);
@ -890,9 +903,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ItemPointerSet(current, blkno, offnum);
/*
* Now find the right place to start the scan. Result is the
* value we're looking for minus the value we're looking at
* in the index.
* Now find the right place to start the scan. Result is the value
* we're looking for minus the value we're looking at in the index.
*/
result = _bt_compare(rel, itupdesc, page, 1, &skdata, offnum);
@ -901,10 +913,13 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
strat = _bt_getstrat(rel, 1, so->keyData[0].sk_procedure);
switch (strat) {
switch (strat)
{
case BTLessStrategyNumber:
if (result <= 0) {
do {
if (result <= 0)
{
do
{
if (!_bt_twostep(scan, &buf, BackwardScanDirection))
break;
@ -920,8 +935,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTLessEqualStrategyNumber:
if (result >= 0) {
do {
if (result >= 0)
{
do
{
if (!_bt_twostep(scan, &buf, ForwardScanDirection))
break;
@ -936,7 +953,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTEqualStrategyNumber:
if (result != 0) {
if (result != 0)
{
_bt_relbuf(scan->relation, buf, BT_READ);
so->btso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(&(scan->currentItemData));
@ -959,19 +977,21 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
}
else if (result > 0)
{ /*
* Just remember: _bt_binsrch() returns the OffsetNumber of
* the first matching key on the page, or the OffsetNumber at
* which the matching key WOULD APPEAR IF IT WERE on this page.
* No key on this page, but offnum from _bt_binsrch() greater than
* maxoff - have to move right. - vadim 12/06/96
*/
{ /* Just remember: _bt_binsrch() returns
* the OffsetNumber of the first matching
* key on the page, or the OffsetNumber at
* which the matching key WOULD APPEAR IF
* IT WERE on this page. No key on this
* page, but offnum from _bt_binsrch()
* greater than maxoff - have to move right. -
* vadim 12/06/96 */
_bt_twostep(scan, &buf, ForwardScanDirection);
}
}
else if (result < 0)
{
do {
do
{
if (!_bt_twostep(scan, &buf, BackwardScanDirection))
break;
@ -987,8 +1007,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTGreaterStrategyNumber:
/* offGmax helps as above */
if (result >= 0 || offGmax) {
do {
if (result >= 0 || offGmax)
{
do
{
if (!_bt_twostep(scan, &buf, ForwardScanDirection))
break;
@ -1042,7 +1064,8 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
{
Page page;
BTPageOpaque opaque;
OffsetNumber offnum, maxoff;
OffsetNumber offnum,
maxoff;
OffsetNumber start;
BlockNumber blkno;
BlockNumber obknum;
@ -1059,35 +1082,47 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
maxoff = PageGetMaxOffsetNumber(page);
/* get the next tuple */
if (ScanDirectionIsForward(dir)) {
if (!PageIsEmpty(page) && offnum < maxoff) {
if (ScanDirectionIsForward(dir))
{
if (!PageIsEmpty(page) && offnum < maxoff)
{
offnum = OffsetNumberNext(offnum);
} else {
}
else
{
/* if we're at end of scan, release the buffer and return */
blkno = opaque->btpo_next;
if (P_RIGHTMOST(opaque)) {
if (P_RIGHTMOST(opaque))
{
_bt_relbuf(rel, *bufP, BT_READ);
ItemPointerSetInvalid(current);
*bufP = so->btso_curbuf = InvalidBuffer;
return (false);
} else {
}
else
{
/* walk right to the next page with data */
_bt_relbuf(rel, *bufP, BT_READ);
for (;;) {
for (;;)
{
*bufP = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
maxoff = PageGetMaxOffsetNumber(page);
start = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
if (!PageIsEmpty(page) && start <= maxoff) {
if (!PageIsEmpty(page) && start <= maxoff)
{
break;
} else {
}
else
{
blkno = opaque->btpo_next;
_bt_relbuf(rel, *bufP, BT_READ);
if (blkno == P_NONE) {
if (blkno == P_NONE)
{
*bufP = so->btso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(current);
return (false);
@ -1097,42 +1132,52 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
offnum = start;
}
}
} else if (ScanDirectionIsBackward(dir)) {
}
else if (ScanDirectionIsBackward(dir))
{
/* remember that high key is item zero on non-rightmost pages */
start = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
if (offnum > start) {
if (offnum > start)
{
offnum = OffsetNumberPrev(offnum);
} else {
}
else
{
/* if we're at end of scan, release the buffer and return */
blkno = opaque->btpo_prev;
if (P_LEFTMOST(opaque)) {
if (P_LEFTMOST(opaque))
{
_bt_relbuf(rel, *bufP, BT_READ);
*bufP = so->btso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(current);
return (false);
} else {
}
else
{
obknum = BufferGetBlockNumber(*bufP);
/* walk left to the next page with data */
_bt_relbuf(rel, *bufP, BT_READ);
for (;;) {
for (;;)
{
*bufP = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
maxoff = PageGetMaxOffsetNumber(page);
/*
* If the adjacent page just split, then we may have the
* wrong block. Handle this case. Because pages only
* split right, we don't have to worry about this failing
* to terminate.
* If the adjacent page just split, then we may have
* the wrong block. Handle this case. Because pages
* only split right, we don't have to worry about this
* failing to terminate.
*/
while (opaque->btpo_next != obknum) {
while (opaque->btpo_next != obknum)
{
blkno = opaque->btpo_next;
_bt_relbuf(rel, *bufP, BT_READ);
*bufP = _bt_getbuf(rel, blkno, BT_READ);
@ -1145,13 +1190,17 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
start = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
/* anything to look at here? */
if (!PageIsEmpty(page) && maxoff >= start) {
if (!PageIsEmpty(page) && maxoff >= start)
{
break;
} else {
}
else
{
blkno = opaque->btpo_prev;
obknum = BufferGetBlockNumber(*bufP);
_bt_relbuf(rel, *bufP, BT_READ);
if (blkno == P_NONE) {
if (blkno == P_NONE)
{
*bufP = so->btso_curbuf = InvalidBuffer;
ItemPointerSetInvalid(current);
return (false);
@ -1191,7 +1240,8 @@ _bt_twostep(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
{
Page page;
BTPageOpaque opaque;
OffsetNumber offnum, maxoff;
OffsetNumber offnum,
maxoff;
OffsetNumber start;
ItemPointer current;
ItemId itemid;
@ -1210,27 +1260,33 @@ _bt_twostep(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
start = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
/* if we're safe, just do it */
if (ScanDirectionIsForward(dir) && offnum < maxoff) { /* XXX PageIsEmpty? */
if (ScanDirectionIsForward(dir) && offnum < maxoff)
{ /* XXX PageIsEmpty? */
ItemPointerSet(current, blkno, OffsetNumberNext(offnum));
return (true);
} else if (ScanDirectionIsBackward(dir) && offnum > start) {
}
else if (ScanDirectionIsBackward(dir) && offnum > start)
{
ItemPointerSet(current, blkno, OffsetNumberPrev(offnum));
return (true);
}
/* if we've hit end of scan we don't have to do any work */
if (ScanDirectionIsForward(dir) && P_RIGHTMOST(opaque)) {
if (ScanDirectionIsForward(dir) && P_RIGHTMOST(opaque))
{
return (false);
} else if (ScanDirectionIsBackward(dir) && P_LEFTMOST(opaque)) {
}
else if (ScanDirectionIsBackward(dir) && P_LEFTMOST(opaque))
{
return (false);
}
/*
* Okay, it's off the page; let _bt_step() do the hard work, and we'll
* try to remember where we were. This is not guaranteed to work; this
* is the only place in the code where concurrency can screw us up,
* and it's because we want to be able to move in two directions in
* the scan.
* try to remember where we were. This is not guaranteed to work;
* this is the only place in the code where concurrency can screw us
* up, and it's because we want to be able to move in two directions
* in the scan.
*/
itemid = PageGetItemId(page, offnum);
@ -1239,7 +1295,8 @@ _bt_twostep(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
svitem = (BTItem) palloc(itemsz);
memmove((char *) svitem, (char *) btitem, itemsz);
if (_bt_step(scan, bufP, dir)) {
if (_bt_step(scan, bufP, dir))
{
pfree(svitem);
return (true);
}
@ -1249,10 +1306,12 @@ _bt_twostep(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
page = BufferGetPage(*bufP);
maxoff = PageGetMaxOffsetNumber(page);
while (offnum <= maxoff) {
while (offnum <= maxoff)
{
itemid = PageGetItemId(page, offnum);
btitem = (BTItem) PageGetItem(page, itemid);
if ( BTItemSame (btitem, svitem) ) {
if (BTItemSame(btitem, svitem))
{
pfree(svitem);
ItemPointerSet(current, blkno, offnum);
return (false);
@ -1282,7 +1341,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Page page;
BTPageOpaque opaque;
ItemPointer current;
OffsetNumber offnum, maxoff;
OffsetNumber offnum,
maxoff;
OffsetNumber start = 0;
BlockNumber blkno;
BTItem btitem;
@ -1300,13 +1360,17 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
for (;;) {
for (;;)
{
if (opaque->btpo_flags & BTP_LEAF)
break;
if (ScanDirectionIsForward(dir)) {
if (ScanDirectionIsForward(dir))
{
offnum = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
} else {
}
else
{
offnum = PageGetMaxOffsetNumber(page);
}
@ -1321,14 +1385,16 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* Race condition: If the child page we just stepped onto is
* in the process of being split, we need to make sure we're
* all the way at the right edge of the tree. See the paper
* by Lehman and Yao.
* Race condition: If the child page we just stepped onto is in
* the process of being split, we need to make sure we're all the
* way at the right edge of the tree. See the paper by Lehman and
* Yao.
*/
if (ScanDirectionIsBackward(dir) && ! P_RIGHTMOST(opaque)) {
do {
if (ScanDirectionIsBackward(dir) && !P_RIGHTMOST(opaque))
{
do
{
blkno = opaque->btpo_next;
_bt_relbuf(rel, buf, BT_READ);
buf = _bt_getbuf(rel, blkno, BT_READ);
@ -1341,19 +1407,22 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
/* okay, we've got the {left,right}-most page in the tree */
maxoff = PageGetMaxOffsetNumber(page);
if (ScanDirectionIsForward(dir)) {
if (ScanDirectionIsForward(dir))
{
if (!P_LEFTMOST(opaque))/* non-leftmost page ? */
elog(WARN, "_bt_endpoint: leftmost page (%u) has not leftmost flag", blkno);
start = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
/*
* I don't understand this stuff! It doesn't work for non-rightmost
* pages with only one element (P_HIKEY) which we have after
* deletion of itups by vacuum (it's the case of start > maxoff).
* Scanning in BackwardScanDirection is not understandable at all.
* Well - new stuff. - vadim 12/06/96
* I don't understand this stuff! It doesn't work for
* non-rightmost pages with only one element (P_HIKEY) which we
* have after deletion of itups by vacuum (it's the case of start >
* maxoff). Scanning in BackwardScanDirection is not
* understandable at all. Well - new stuff. - vadim 12/06/96
*/
#if 0
if (PageIsEmpty(page) || start > maxoff) {
if (PageIsEmpty(page) || start > maxoff)
{
ItemPointerSet(current, blkno, maxoff);
if (!_bt_step(scan, &buf, BackwardScanDirection))
return ((RetrieveIndexResult) NULL);
@ -1366,7 +1435,11 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
{
if (start != P_HIKEY) /* non-rightmost page */
elog(WARN, "_bt_endpoint: non-rightmost page (%u) is empty", blkno);
/* It's left- & right- most page - root page, - and it's empty... */
/*
* It's left- & right- most page - root page, - and it's
* empty...
*/
_bt_relbuf(rel, buf, BT_READ);
ItemPointerSetInvalid(current);
so->btso_curbuf = InvalidBuffer;
@ -1382,17 +1455,22 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
page = BufferGetPage(buf);
}
/* new stuff ends here */
else {
else
{
ItemPointerSet(current, blkno, start);
}
} else if (ScanDirectionIsBackward(dir)) {
}
else if (ScanDirectionIsBackward(dir))
{
/*
* I don't understand this stuff either! If the RIGHT-most leaf page is
* empty why do we scan in ForwardScanDirection ???
* Well - new stuff. - vadim 12/06/96
* empty why do we scan in ForwardScanDirection ??? Well - new
* stuff. - vadim 12/06/96
*/
#if 0
if (PageIsEmpty(page)) {
if (PageIsEmpty(page))
{
ItemPointerSet(current, blkno, FirstOffsetNumber);
if (!_bt_step(scan, &buf, ForwardScanDirection))
return ((RetrieveIndexResult) NULL);
@ -1420,11 +1498,14 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
page = BufferGetPage(buf);
}
/* new stuff ends here */
else {
else
{
start = PageGetMaxOffsetNumber(page);
ItemPointerSet(current, blkno, start);
}
} else {
}
else
{
elog(WARN, "Illegal scan direction %d", dir);
}

View File

@ -5,7 +5,7 @@
*
*
* IDENTIFICATION
* $Id: nbtsort.c,v 1.19 1997/08/19 21:29:46 momjian Exp $
* $Id: nbtsort.c,v 1.20 1997/09/07 04:39:02 momjian Exp $
*
* NOTES
*
@ -66,6 +66,7 @@
#ifdef BTREE_BUILD_STATS
#include <tcop/tcopprot.h>
extern int ShowExecutorStats;
#endif
static BTItem _bt_buildadd(Relation index, void *pstate, BTItem bti, int flags);
@ -104,7 +105,8 @@ extern char *mktemp(char *template);
* the only thing like that so i'm not going to worry about wasting a
* few bytes.
*/
typedef struct {
typedef struct
{
int bttb_magic; /* magic number */
int bttb_fd; /* file descriptor */
int bttb_top; /* top of free space within bttb_data */
@ -120,7 +122,8 @@ typedef struct {
* right now. though if psort was in a condition that i could hack it
* to do this, you bet i would.)
*/
typedef struct {
typedef struct
{
int bts_ntapes;
int bts_tape;
BTTapeBlock **bts_itape; /* input tape blocks */
@ -146,7 +149,8 @@ typedef struct {
* what the heck.
* *-------------------------------------------------------------------------
*/
typedef struct {
typedef struct
{
Datum *btsk_datum;
char *btsk_nulls;
BTItem btsk_item;
@ -254,13 +258,15 @@ _bt_setsortkey(Relation index, BTItem bti, BTSortKey *sk)
* XXX these probably ought to be generic library functions.
*-------------------------------------------------------------------------
*/
typedef struct {
typedef struct
{
int btpqe_tape; /* tape identifier */
BTSortKey btpqe_item; /* pointer to BTItem in tape buffer */
} BTPriQueueElem;
#define MAXELEM MAXTAPES
typedef struct {
typedef struct
{
int btpq_nelem;
BTPriQueueElem btpq_queue[MAXELEM];
Relation btpq_rel;
@ -278,18 +284,24 @@ _bt_pqsift(BTPriQueue *q, int parent)
for (child = parent * 2 + 1;
child < q->btpq_nelem;
child = parent * 2 + 1) {
if (child < q->btpq_nelem - 1) {
if (GREATER(&(q->btpq_queue[child]), &(q->btpq_queue[child+1]))) {
child = parent * 2 + 1)
{
if (child < q->btpq_nelem - 1)
{
if (GREATER(&(q->btpq_queue[child]), &(q->btpq_queue[child + 1])))
{
++child;
}
}
if (GREATER(&(q->btpq_queue[parent]), &(q->btpq_queue[child]))) {
if (GREATER(&(q->btpq_queue[parent]), &(q->btpq_queue[child])))
{
e = q->btpq_queue[child]; /* struct = */
q->btpq_queue[child] = q->btpq_queue[parent]; /* struct = */
q->btpq_queue[parent] = e; /* struct = */
parent = child;
} else {
}
else
{
parent = child + 1;
}
}
@ -298,12 +310,14 @@ _bt_pqsift(BTPriQueue *q, int parent)
static int
_bt_pqnext(BTPriQueue * q, BTPriQueueElem * e)
{
if (q->btpq_nelem < 1) { /* already empty */
if (q->btpq_nelem < 1)
{ /* already empty */
return (-1);
}
*e = q->btpq_queue[0]; /* struct = */
if (--q->btpq_nelem < 1) { /* now empty, don't sift */
if (--q->btpq_nelem < 1)
{ /* now empty, don't sift */
return (0);
}
q->btpq_queue[0] = q->btpq_queue[q->btpq_nelem]; /* struct = */
@ -314,18 +328,24 @@ _bt_pqnext(BTPriQueue *q, BTPriQueueElem *e)
static void
_bt_pqadd(BTPriQueue * q, BTPriQueueElem * e)
{
int child, parent;
int child,
parent;
if (q->btpq_nelem >= MAXELEM) {
if (q->btpq_nelem >= MAXELEM)
{
elog(WARN, "_bt_pqadd: queue overflow");
}
child = q->btpq_nelem++;
while (child > 0) {
while (child > 0)
{
parent = child / 2;
if (GREATER(e, &(q->btpq_queue[parent]))) {
if (GREATER(e, &(q->btpq_queue[parent])))
{
break;
} else {
}
else
{
q->btpq_queue[child] = q->btpq_queue[parent]; /* struct = */
child = parent;
}
@ -404,7 +424,8 @@ _bt_tapecreate(char *fname)
{
BTTapeBlock *tape = (BTTapeBlock *) palloc(sizeof(BTTapeBlock));
if (tape == (BTTapeBlock *) NULL) {
if (tape == (BTTapeBlock *) NULL)
{
elog(WARN, "_bt_tapecreate: out of memory");
}
@ -456,19 +477,21 @@ _bt_taperead(BTTapeBlock *tape)
int fd;
int nread;
if (tape->bttb_eor) {
if (tape->bttb_eor)
{
return (0); /* we are already at End-Of-Run */
}
/*
* we're clobbering the old tape block, but we do need to save the
* VFD (the one in the block we're reading is bogus).
* we're clobbering the old tape block, but we do need to save the VFD
* (the one in the block we're reading is bogus).
*/
fd = tape->bttb_fd;
nread = FileRead(fd, (char *) tape, TAPEBLCKSZ);
tape->bttb_fd = fd;
if (nread != TAPEBLCKSZ) {
if (nread != TAPEBLCKSZ)
{
Assert(nread == 0); /* we are at EOF */
return (0);
}
@ -493,7 +516,8 @@ _bt_tapenext(BTTapeBlock *tape, char **pos)
Size itemsz;
BTItem bti;
if (*pos >= tape->bttb_data + tape->bttb_top) {
if (*pos >= tape->bttb_data + tape->bttb_top)
{
return ((BTItem) NULL);
}
bti = (BTItem) * pos;
@ -537,7 +561,8 @@ _bt_spoolinit(Relation index, int ntapes, bool isunique)
int i;
char *fname = (char *) palloc(sizeof(TAPETEMP) + 1);
if (btspool == (BTSpool *) NULL || fname == (char *) NULL) {
if (btspool == (BTSpool *) NULL || fname == (char *) NULL)
{
elog(WARN, "_bt_spoolinit: out of memory");
}
memset((char *) btspool, 0, sizeof(BTSpool));
@ -550,11 +575,13 @@ _bt_spoolinit(Relation index, int ntapes, bool isunique)
btspool->bts_otape =
(BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
if (btspool->bts_itape == (BTTapeBlock **) NULL ||
btspool->bts_otape == (BTTapeBlock **) NULL) {
btspool->bts_otape == (BTTapeBlock **) NULL)
{
elog(WARN, "_bt_spoolinit: out of memory");
}
for (i = 0; i < ntapes; ++i) {
for (i = 0; i < ntapes; ++i)
{
btspool->bts_itape[i] =
_bt_tapecreate(mktemp(strcpy(fname, TAPETEMP)));
btspool->bts_otape[i] =
@ -576,7 +603,8 @@ _bt_spooldestroy(void *spool)
BTSpool *btspool = (BTSpool *) spool;
int i;
for (i = 0; i < btspool->bts_ntapes; ++i) {
for (i = 0; i < btspool->bts_ntapes; ++i)
{
_bt_tapedestroy(btspool->bts_otape[i]);
_bt_tapedestroy(btspool->bts_itape[i]);
}
@ -591,8 +619,10 @@ _bt_spoolflush(BTSpool *btspool)
{
int i;
for (i = 0; i < btspool->bts_ntapes; ++i) {
if (!EMPTYTAPE(btspool->bts_otape[i])) {
for (i = 0; i < btspool->bts_ntapes; ++i)
{
if (!EMPTYTAPE(btspool->bts_otape[i]))
{
_bt_tapewrite(btspool->bts_otape[i], 1);
}
}
@ -612,7 +642,8 @@ _bt_spoolswap(BTSpool *btspool)
BTTapeBlock *otape;
int i;
for (i = 0; i < btspool->bts_ntapes; ++i) {
for (i = 0; i < btspool->bts_ntapes; ++i)
{
itape = btspool->bts_itape[i];
otape = btspool->bts_otape[i];
@ -662,12 +693,12 @@ _bt_spool(Relation index, BTItem btitem, void *spool)
itemsz = DOUBLEALIGN(itemsz);
/*
* if this buffer is too full for this BTItemData, or if we have
* run out of BTItems, we need to sort the buffer and write it
* out. in this case, the BTItemData will go into the next tape's
* buffer.
* if this buffer is too full for this BTItemData, or if we have run
* out of BTItems, we need to sort the buffer and write it out. in
* this case, the BTItemData will go into the next tape's buffer.
*/
if (btitem == (BTItem) NULL || SPCLEFT(itape) < itemsz) {
if (btitem == (BTItem) NULL || SPCLEFT(itape) < itemsz)
{
BTSortKey *parray = (BTSortKey *) NULL;
BTTapeBlock *otape;
BTItem bti;
@ -680,11 +711,13 @@ _bt_spool(Relation index, BTItem btitem, void *spool)
* build an array of pointers to the BTItemDatas on the input
* block.
*/
if (it_ntup > 0) {
if (it_ntup > 0)
{
parray =
(BTSortKey *) palloc(it_ntup * sizeof(BTSortKey));
pos = itape->bttb_data;
for (i = 0; i < it_ntup; ++i) {
for (i = 0; i < it_ntup; ++i)
{
_bt_setsortkey(index, _bt_tapenext(itape, &pos), &(parray[i]));
}
@ -700,12 +733,12 @@ _bt_spool(Relation index, BTItem btitem, void *spool)
* BTItemDatas in the order dictated by the sorted array of
* BTItems, not the original order.
*
* (since everything was DOUBLEALIGN'd and is all on a single
* tape block, everything had *better* still fit on one tape
* block..)
* (since everything was DOUBLEALIGN'd and is all on a single tape
* block, everything had *better* still fit on one tape block..)
*/
otape = btspool->bts_otape[btspool->bts_tape];
for (i = 0; i < it_ntup; ++i) {
for (i = 0; i < it_ntup; ++i)
{
bti = parray[i].btsk_item;
btisz = BTITEMSZ(bti);
btisz = DOUBLEALIGN(btisz);
@ -715,6 +748,7 @@ _bt_spool(Relation index, BTItem btitem, void *spool)
bool isnull;
Datum d = index_getattr(&(bti->bti_itup), 1, index->rd_att,
&isnull);
printf("_bt_spool: inserted <%x> into output tape %d\n",
d, btspool->bts_tape);
}
@ -729,12 +763,11 @@ _bt_spool(Relation index, BTItem btitem, void *spool)
/*
* reset the input buffer for the next run. we don't have to
* write it out or anything -- we only use it to hold the
* unsorted BTItemDatas, the output tape contains all the
* sorted stuff.
* write it out or anything -- we only use it to hold the unsorted
* BTItemDatas, the output tape contains all the sorted stuff.
*
* changing bts_tape changes the output tape and input tape;
* we change itape for the code below.
* changing bts_tape changes the output tape and input tape; we
* change itape for the code below.
*/
_bt_tapereset(itape);
btspool->bts_tape = (btspool->bts_tape + 1) % btspool->bts_ntapes;
@ -757,7 +790,8 @@ _bt_spool(Relation index, BTItem btitem, void *spool)
}
/* insert this item into the current buffer */
if (btitem != (BTItem) NULL) {
if (btitem != (BTItem) NULL)
{
_bt_tapeadd(itape, btitem, itemsz);
}
}
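/*
 * _bt_spool above forms the initial runs: BTItems accumulate in the current
 * input block, and when the block fills the items are sorted and copied to
 * the current output tape as one run.  Stripped of the tape machinery, the
 * run-forming step is just "sort the buffer, emit it" (hypothetical sketch
 * over ints, not the nbtsort.c API):
 */
#include <stdlib.h>

static int
int_cmp(const void *a, const void *b)
{
	int			ia = *(const int *) a;
	int			ib = *(const int *) b;

	return (ia > ib) - (ia < ib);
}

/* sort the buffered items and hand them to the caller as one finished run */
static void
flush_run(int *buf, size_t nbuf, void (*emit_run) (const int *, size_t))
{
	qsort(buf, nbuf, sizeof(int), int_cmp);
	(*emit_run) (buf, nbuf);
}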
@ -795,10 +829,12 @@ _bt_slideleft(Relation index, Buffer buf, Page page)
ItemId previi;
ItemId thisii;
if (!PageIsEmpty(page)) {
if (!PageIsEmpty(page))
{
maxoff = PageGetMaxOffsetNumber(page);
previi = PageGetItemId(page, P_HIKEY);
for (off = P_FIRSTKEY; off <= maxoff; off = OffsetNumberNext(off)) {
for (off = P_FIRSTKEY; off <= maxoff; off = OffsetNumberNext(off))
{
thisii = PageGetItemId(page, off);
*previi = *thisii;
previi = thisii;
@ -910,38 +946,42 @@ _bt_buildadd(Relation index, void *pstate, BTItem bti, int flags)
pgspc = PageGetFreeSpace(npage);
btisz = BTITEMSZ(bti);
btisz = DOUBLEALIGN(btisz);
if (pgspc < btisz) {
if (pgspc < btisz)
{
Buffer obuf = nbuf;
Page opage = npage;
OffsetNumber o, n;
OffsetNumber o,
n;
ItemId ii;
ItemId hii;
_bt_blnewpage(index, &nbuf, &npage, flags);
/*
* if 'last' is part of a chain of duplicates that does not
* start at the beginning of the old page, the entire chain is
* copied to the new page; we delete all of the duplicates
* from the old page except the first, which becomes the high
* key item of the old page.
* if 'last' is part of a chain of duplicates that does not start
* at the beginning of the old page, the entire chain is copied to
* the new page; we delete all of the duplicates from the old page
* except the first, which becomes the high key item of the old
* page.
*
* if the chain starts at the beginning of the page or there
* is no chain ('first' == 'last'), we need only copy 'last'
* to the new page. again, 'first' (== 'last') becomes the
* high key of the old page.
* if the chain starts at the beginning of the page or there is no
* chain ('first' == 'last'), we need only copy 'last' to the new
* page. again, 'first' (== 'last') becomes the high key of the
* old page.
*
* note that in either case, we copy at least one item to the
* new page, so 'last_bti' will always be valid. 'bti' will
* never be the first data item on the new page.
* note that in either case, we copy at least one item to the new
* page, so 'last_bti' will always be valid. 'bti' will never be
* the first data item on the new page.
*/
if (first_off == P_FIRSTKEY) {
if (first_off == P_FIRSTKEY)
{
Assert(last_off != P_FIRSTKEY);
first_off = last_off;
}
for (o = first_off, n = P_FIRSTKEY;
o <= last_off;
o = OffsetNumberNext(o), n = OffsetNumberNext(n)) {
o = OffsetNumberNext(o), n = OffsetNumberNext(n))
{
ii = PageGetItemId(opage, o);
if (PageAddItem(npage, PageGetItem(opage, ii),
ii->lp_len, n, LP_USED) == InvalidOffsetNumber)
@ -954,19 +994,22 @@ _bt_buildadd(Relation index, void *pstate, BTItem bti, int flags)
(BTItem) PageGetItem(npage, PageGetItemId(npage, n));
Datum d = index_getattr(&(tmpbti->bti_itup), 1,
index->rd_att, &isnull);
printf("_bt_buildadd: moved <%x> to offset %d at level %d\n",
d, n, state->btps_level);
}
#endif /* FASTBUILD_DEBUG && FASTBUILD_MERGE */
#endif
}
/*
* this loop is backward because PageIndexTupleDelete shuffles
* the tuples to fill holes in the page -- by starting at the
* end and working back, we won't create holes (and thereby
* avoid shuffling).
* this loop is backward because PageIndexTupleDelete shuffles the
* tuples to fill holes in the page -- by starting at the end and
* working back, we won't create holes (and thereby avoid
* shuffling).
*/
for (o = last_off; o > first_off; o = OffsetNumberPrev(o)) {
for (o = last_off; o > first_off; o = OffsetNumberPrev(o))
{
PageIndexTupleDelete(opage, o);
}
hii = PageGetItemId(opage, P_HIKEY);
@ -998,14 +1041,16 @@ _bt_buildadd(Relation index, void *pstate, BTItem bti, int flags)
}
/*
* copy the old buffer's minimum key to its parent. if we
* don't have a parent, we have to create one; this adds a new
* btree level.
* copy the old buffer's minimum key to its parent. if we don't
* have a parent, we have to create one; this adds a new btree
* level.
*/
if (state->btps_doupper) {
if (state->btps_doupper)
{
BTItem nbti;
if (state->btps_next == (BTPageState *) NULL) {
if (state->btps_next == (BTPageState *) NULL)
{
state->btps_next =
_bt_pagestate(index, 0, state->btps_level + 1, true);
}
@ -1015,16 +1060,16 @@ _bt_buildadd(Relation index, void *pstate, BTItem bti, int flags)
}
/*
* write out the old stuff. we never want to see it again, so
* we can give up our lock (if we had one; BuildingBtree is
* set, so we aren't locking).
* write out the old stuff. we never want to see it again, so we
* can give up our lock (if we had one; BuildingBtree is set, so
* we aren't locking).
*/
_bt_wrtbuf(index, obuf);
}
/*
* if this item is different from the last item added, we start a
* new chain of duplicates.
* if this item is different from the last item added, we start a new
* chain of duplicates.
*/
off = OffsetNumberNext(last_off);
if (PageAddItem(npage, (Item) bti, btisz, off, LP_USED) == InvalidOffsetNumber)
@ -1034,6 +1079,7 @@ _bt_buildadd(Relation index, void *pstate, BTItem bti, int flags)
{
bool isnull;
Datum d = index_getattr(&(bti->bti_itup), 1, index->rd_att, &isnull);
printf("_bt_buildadd: inserted <%x> at offset %d at level %d\n",
d, off, state->btps_level);
}
@ -1068,23 +1114,27 @@ _bt_uppershutdown(Relation index, BTPageState *state)
BTPageOpaque opaque;
BTItem bti;
for (s = state; s != (BTPageState *) NULL; s = s->btps_next) {
for (s = state; s != (BTPageState *) NULL; s = s->btps_next)
{
blkno = BufferGetBlockNumber(s->btps_buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(s->btps_page);
/*
* if this is the root, attach it to the metapage. otherwise,
* stick the minimum key of the last page on this level (which
* has not been split, or else it wouldn't be the last page)
* into its parent. this may cause the last page of upper
* levels to split, but that's not a problem -- we haven't
* gotten to them yet.
* stick the minimum key of the last page on this level (which has
* not been split, or else it wouldn't be the last page) into its
* parent. this may cause the last page of upper levels to split,
* but that's not a problem -- we haven't gotten to them yet.
*/
if (s->btps_doupper) {
if (s->btps_next == (BTPageState *) NULL) {
if (s->btps_doupper)
{
if (s->btps_next == (BTPageState *) NULL)
{
opaque->btpo_flags |= BTP_ROOT;
_bt_metaproot(index, blkno, s->btps_level + 1);
} else {
}
else
{
bti = _bt_minitem(s->btps_page, blkno, 0);
_bt_buildadd(index, s->btps_next, bti, 0);
pfree((void *) bti);
@ -1132,12 +1182,14 @@ _bt_merge(Relation index, BTSpool *btspool)
state = (BTPageState *) _bt_pagestate(index, BTP_LEAF, 0, true);
npass = 0;
do { /* pass */
do
{ /* pass */
/*
* each pass starts by flushing the previous outputs and
* swapping inputs and outputs. flushing sets End-of-Run for
* any dirty output tapes. swapping clears the new output
* tapes and rewinds the new input tapes.
* each pass starts by flushing the previous outputs and swapping
* inputs and outputs. flushing sets End-of-Run for any dirty
* output tapes. swapping clears the new output tapes and rewinds
* the new input tapes.
*/
btspool->bts_tape = btspool->bts_ntapes - 1;
_bt_spoolflush(btspool);
@ -1146,93 +1198,110 @@ _bt_merge(Relation index, BTSpool *btspool)
++npass;
nruns = 0;
for (;;) { /* run */
for (;;)
{ /* run */
/*
* each run starts by selecting a new output tape. the
* merged results of a given run are always sent to this
* one tape.
* each run starts by selecting a new output tape. the merged
* results of a given run are always sent to this one tape.
*/
btspool->bts_tape = (btspool->bts_tape + 1) % btspool->bts_ntapes;
otape = btspool->bts_otape[btspool->bts_tape];
/*
* initialize the priority queue by loading it with the
* first element of the given run in each tape. since we
* are starting a new run, we reset the tape (clearing the
* initialize the priority queue by loading it with the first
* element of the given run in each tape. since we are
* starting a new run, we reset the tape (clearing the
* End-Of-Run marker) before reading it. this means that
* _bt_taperead will return 0 only if the tape is actually
* at EOF.
* _bt_taperead will return 0 only if the tape is actually at
* EOF.
*/
memset((char *) &q, 0, sizeof(BTPriQueue));
goodtapes = 0;
for (t = 0; t < btspool->bts_ntapes; ++t) {
for (t = 0; t < btspool->bts_ntapes; ++t)
{
itape = btspool->bts_itape[t];
tapepos[t] = itape->bttb_data;
tapedone[t] = 0;
_bt_tapereset(itape);
do {
if (_bt_taperead(itape) == 0) {
do
{
if (_bt_taperead(itape) == 0)
{
tapedone[t] = 1;
}
} while (!tapedone[t] && EMPTYTAPE(itape));
if (!tapedone[t]) {
if (!tapedone[t])
{
++goodtapes;
e.btpqe_tape = t;
_bt_setsortkey(index, _bt_tapenext(itape, &tapepos[t]),
&(e.btpqe_item));
if (e.btpqe_item.btsk_item != (BTItem) NULL) {
if (e.btpqe_item.btsk_item != (BTItem) NULL)
{
_bt_pqadd(&q, &e);
}
}
}
/*
* if we don't have any tapes with any input (i.e., they
* are all at EOF), there is no work to do in this run --
* we must be done with this pass.
* if we don't have any tapes with any input (i.e., they are
* all at EOF), there is no work to do in this run -- we must
* be done with this pass.
*/
if (goodtapes == 0) {
if (goodtapes == 0)
{
break; /* for */
}
++nruns;
/*
* output the smallest element from the queue until there
* are no more.
* output the smallest element from the queue until there are
* no more.
*/
while (_bt_pqnext(&q, &e) >= 0) { /* item */
while (_bt_pqnext(&q, &e) >= 0)
{ /* item */
/*
* replace the element taken from priority queue,
* fetching a new block if needed. a tape can run out
* if it hits either End-Of-Run or EOF.
* replace the element taken from priority queue, fetching
* a new block if needed. a tape can run out if it hits
* either End-Of-Run or EOF.
*/
t = e.btpqe_tape;
btsk = e.btpqe_item;
bti = btsk.btsk_item;
if (bti != (BTItem) NULL) {
if (bti != (BTItem) NULL)
{
btisz = BTITEMSZ(bti);
btisz = DOUBLEALIGN(btisz);
if (doleaf) {
if (doleaf)
{
_bt_buildadd(index, state, bti, BTP_LEAF);
#if defined(FASTBUILD_DEBUG) && defined(FASTBUILD_MERGE)
{
bool isnull;
Datum d = index_getattr(&(bti->bti_itup), 1,
index->rd_att, &isnull);
printf("_bt_merge: [pass %d run %d] inserted <%x> from tape %d into block %d\n",
npass, nruns, d, t,
BufferGetBlockNumber(state->btps_buf));
}
#endif /* FASTBUILD_DEBUG && FASTBUILD_MERGE */
} else {
if (SPCLEFT(otape) < btisz) {
}
else
{
if (SPCLEFT(otape) < btisz)
{
/*
* if it's full, write it out and add the
* item to the next block. (since we will
* be adding another tuple immediately
* after this, we can be sure that there
* will be at least one more block in this
* run and so we know we do *not* want to
* set End-Of-Run here.)
* if it's full, write it out and add the item
* to the next block. (since we will be
* adding another tuple immediately after
* this, we can be sure that there will be at
* least one more block in this run and so we
* know we do *not* want to set End-Of-Run
* here.)
*/
_bt_tapewrite(otape, 0);
}
@ -1242,6 +1311,7 @@ _bt_merge(Relation index, BTSpool *btspool)
bool isnull;
Datum d = index_getattr(&(bti->bti_itup), 1,
index->rd_att, &isnull);
printf("_bt_merge: [pass %d run %d] inserted <%x> from tape %d into output tape %d\n",
npass, nruns, d, t,
btspool->bts_tape);
@ -1256,21 +1326,27 @@ _bt_merge(Relation index, BTSpool *btspool)
}
itape = btspool->bts_itape[t];
if (!tapedone[t]) {
if (!tapedone[t])
{
BTItem newbti = _bt_tapenext(itape, &tapepos[t]);
if (newbti == (BTItem) NULL) {
do {
if (_bt_taperead(itape) == 0) {
if (newbti == (BTItem) NULL)
{
do
{
if (_bt_taperead(itape) == 0)
{
tapedone[t] = 1;
}
} while (!tapedone[t] && EMPTYTAPE(itape));
if (!tapedone[t]) {
if (!tapedone[t])
{
tapepos[t] = itape->bttb_data;
newbti = _bt_tapenext(itape, &tapepos[t]);
}
}
if (newbti != (BTItem) NULL) {
if (newbti != (BTItem) NULL)
{
BTPriQueueElem nexte;
nexte.btpqe_tape = t;
@ -1291,12 +1367,13 @@ _bt_merge(Relation index, BTSpool *btspool)
* we are here because we ran out of input on all of the input
* tapes.
*
* if this pass did not generate more actual output runs than
* we have tapes, we know we have at most one run in each
* tape. this means that we are ready to merge into the final
* btree leaf pages instead of merging into a tape file.
* if this pass did not generate more actual output runs than we have
* tapes, we know we have at most one run in each tape. this
* means that we are ready to merge into the final btree leaf
* pages instead of merging into a tape file.
*/
if (nruns <= btspool->bts_ntapes) {
if (nruns <= btspool->bts_ntapes)
{
doleaf = true;
}
} while (nruns > 0); /* pass */
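/*
 * Each pass above is a k-way merge: take the smallest item across the
 * current run of every input tape, append it to the output run, and repeat
 * until all inputs are exhausted.  A minimal in-memory sketch over sorted
 * int arrays (hypothetical names; a linear scan for the minimum stands in
 * for the priority queue, and 'pos' is a caller-supplied scratch array of
 * ntapes cursors):
 */
#include <stddef.h>

static size_t
kway_merge(const int *const *runs, const size_t *runlen,
		   size_t *pos, size_t ntapes, int *out)
{
	size_t		nout = 0;
	size_t		t;

	for (t = 0; t < ntapes; t++)
		pos[t] = 0;				/* rewind every input run */

	for (;;)
	{
		int			best = -1;	/* run currently holding the smallest item */

		for (t = 0; t < ntapes; t++)
		{
			if (pos[t] >= runlen[t])
				continue;		/* this run is exhausted */
			if (best < 0 || runs[t][pos[t]] < runs[best][pos[best]])
				best = (int) t;
		}
		if (best < 0)
			break;				/* every input consumed: the run is done */
		out[nout++] = runs[best][pos[best]++];
	}
	return nout;
}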
@ -1328,9 +1405,8 @@ _bt_upperbuild(Relation index)
BTItem nbti;
/*
* find the first leaf block. while we're at it, clear the
* BTP_ROOT flag that we set while building it (so we could find
* it later).
* find the first leaf block. while we're at it, clear the BTP_ROOT
* flag that we set while building it (so we could find it later).
*/
rbuf = _bt_getroot(index, BT_WRITE);
blk = BufferGetBlockNumber(rbuf);
@ -1342,7 +1418,8 @@ _bt_upperbuild(Relation index)
state = (BTPageState *) _bt_pagestate(index, 0, 0, true);
/* for each page... */
do {
do
{
#if 0
printf("\t\tblk=%d\n", blk);
#endif
@ -1351,11 +1428,12 @@ _bt_upperbuild(Relation index)
ropaque = (BTPageOpaque) PageGetSpecialPointer(rpage);
/* for each item... */
if (!PageIsEmpty(rpage)) {
if (!PageIsEmpty(rpage))
{
/*
* form a new index tuple corresponding to the minimum key
* of the lower page and insert it into a page at this
* level.
* form a new index tuple corresponding to the minimum key of
* the lower page and insert it into a page at this level.
*/
nbti = _bt_minitem(rpage, blk, P_RIGHTMOST(ropaque));
#if defined(FASTBUILD_DEBUG) && defined(FASTBUILD_MERGE)
@ -1363,6 +1441,7 @@ _bt_upperbuild(Relation index)
bool isnull;
Datum d = index_getattr(&(nbti->bti_itup), 1, index->rd_att,
&isnull);
printf("_bt_upperbuild: inserting <%x> at %d\n",
d, state->btps_level);
}
@ -1376,6 +1455,7 @@ _bt_upperbuild(Relation index)
_bt_uppershutdown(index, state);
}
#endif
/*

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtstrat.c,v 1.4 1996/11/05 10:35:37 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtstrat.c,v 1.5 1997/09/07 04:39:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.11 1997/08/19 21:29:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.12 1997/09/07 04:39:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -49,7 +49,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
skey = (ScanKey) palloc(natts * sizeof(ScanKeyData));
for (i = 0; i < natts; i++) {
for (i = 0; i < natts; i++)
{
arg = index_getattr(itup, i + 1, itupdesc, &null);
if (null)
{
@ -79,7 +80,8 @@ _bt_freestack(BTStack stack)
{
BTStack ostack;
while (stack != (BTStack) NULL) {
while (stack != (BTStack) NULL)
{
ostack = stack;
stack = stack->bts_parent;
pfree(ostack->bts_btitem);
@ -102,7 +104,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
StrategyMap map;
int nbytes;
long test;
int i, j;
int i,
j;
int init[BTMaxStrategyNumber + 1];
ScanKey key;
uint16 numberOfKeys = so->numberOfKeys;
@ -120,10 +123,11 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (numberOfKeys == 1)
{
/*
* We don't use indices for 'A is null' and 'A is not null'
* currently and 'A < = > <> NULL' is nonsense - so
* qual is not Ok. - vadim 03/21/97
* currently and 'A < = > <> NULL' is nonsense - so qual is not
* Ok. - vadim 03/21/97
*/
if (cur->sk_flags & SK_ISNULL)
so->qual_ok = 0;
@ -157,14 +161,16 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
{
elog(WARN, "_bt_orderkeys: key(s) for attribute %d missed", attno + 1);
}
/*
* If = has been specified, no other key will be used.
* In case of key < 2 && key == 1 and so on
* we have to set qual_ok to 0
* If = has been specified, no other key will be used. In case
* of key < 2 && key == 1 and so on we have to set qual_ok to
* 0
*/
if (init[BTEqualStrategyNumber - 1])
{
ScanKeyData *eq, *chk;
ScanKeyData *eq,
*chk;
eq = &xform[BTEqualStrategyNumber - 1];
for (j = BTMaxStrategyNumber; --j >= 0;)
@ -186,16 +192,18 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (init[BTLessStrategyNumber - 1]
&& init[BTLessEqualStrategyNumber - 1])
{
ScanKeyData *lt, *le;
ScanKeyData *lt,
*le;
lt = &xform[BTLessStrategyNumber - 1];
le = &xform[BTLessEqualStrategyNumber - 1];
/*
* DO NOT use the cached function stuff here -- this is key
* ordering, happens only when the user expresses a hokey
* qualification, and gets executed only once, anyway. The
* transform maps are hard-coded, and can't be initialized
* in the correct way.
* DO NOT use the cached function stuff here -- this is
* key ordering, happens only when the user expresses a
* hokey qualification, and gets executed only once,
* anyway. The transform maps are hard-coded, and can't
* be initialized in the correct way.
*/
test = (long) fmgr(le->sk_procedure, lt->sk_argument, le->sk_argument);
if (test)
@ -208,7 +216,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (init[BTGreaterStrategyNumber - 1]
&& init[BTGreaterEqualStrategyNumber - 1])
{
ScanKeyData *gt, *ge;
ScanKeyData *gt,
*ge;
gt = &xform[BTGreaterStrategyNumber - 1];
ge = &xform[BTGreaterEqualStrategyNumber - 1];
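/*
 * The reductions above keep at most one useful key per strategy on each
 * attribute: an = key dominates everything else, and of a < / <= (or
 * > / >=) pair only the tighter bound survives.  For integer bounds the
 * same rule is simply (hypothetical sketch, not the ScanKey machinery):
 *
 *		a = 4 AND a < 2		->	contradiction, so qual_ok = 0
 *		a > 5 AND a >= 3	->	keep only a > 5
 */
typedef struct
{
	int			value;
	int			strict;			/* 1 for ">", 0 for ">=" */
} LowerBound;

static LowerBound
tighter_lower(LowerBound a, LowerBound b)
{
	if (a.value > b.value)
		return a;
	if (b.value > a.value)
		return b;
	return a.strict ? a : b;	/* equal values: the strict bound is tighter */
}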
@ -260,7 +269,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
xform[j].sk_argument = cur->sk_argument;
else if (j == (BTEqualStrategyNumber - 1))
so->qual_ok = 0;/* key == a && key == b, but a != b */
} else
}
else
{
/* nope, use this value */
memmove(&xform[j], cur, sizeof(*cur));
@ -283,10 +293,11 @@ _bt_formitem(IndexTuple itup)
Size tuplen;
extern Oid newoid();
/* see comments in btbuild
if (itup->t_info & INDEX_NULL_MASK)
elog(WARN, "btree indices cannot include null keys");
/*
* see comments in btbuild
*
* if (itup->t_info & INDEX_NULL_MASK) elog(WARN, "btree indices cannot
* include null keys");
*/
/* make a copy of the index tuple with room for the sequence number */
@ -316,6 +327,7 @@ _bt_checkqual(IndexScanDesc scan, IndexTuple itup)
else
return (true);
}
#endif
#ifdef NOT_USED
@ -331,6 +343,7 @@ _bt_checkforkeys(IndexScanDesc scan, IndexTuple itup, Size keysz)
else
return (true);
}
#endif
bool
@ -366,17 +379,21 @@ _bt_checkkeys (IndexScanDesc scan, IndexTuple tuple, Size *keysok)
return (false);
}
if (key[0].sk_flags & SK_COMMUTE) {
if (key[0].sk_flags & SK_COMMUTE)
{
test = (int) (*(key[0].sk_func))
(DatumGetPointer(key[0].sk_argument),
datum);
} else {
}
else
{
test = (int) (*(key[0].sk_func))
(datum,
DatumGetPointer(key[0].sk_argument));
}
if (!test == !(key[0].sk_flags & SK_NEGATE)) {
if (!test == !(key[0].sk_flags & SK_NEGATE))
{
return (false);
}
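/*
 * The test above: SK_COMMUTE swaps the operands before calling the key's
 * comparison proc, and SK_NEGATE inverts its result; the tuple survives
 * only if the (possibly negated) proc returns true.  As a boolean helper
 * over ints (hypothetical sketch, not the ScanKey API):
 */
static int
apply_key(int (*proc) (int, int), int datum, int arg, int commute, int negate)
{
	int			test = commute ? (*proc) (arg, datum) : (*proc) (datum, arg);

	return negate ? !test : (test != 0);
}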

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.7 1996/11/21 06:13:43 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.8 1997/09/07 04:39:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -27,7 +27,8 @@
#endif
static OffsetNumber findnext(IndexScanDesc s, Page p, OffsetNumber n,
static OffsetNumber
findnext(IndexScanDesc s, Page p, OffsetNumber n,
ScanDirection dir);
static RetrieveIndexResult rtscancache(IndexScanDesc s, ScanDirection dir);
static RetrieveIndexResult rtfirst(IndexScanDesc s, ScanDirection dir);
@ -45,9 +46,12 @@ rtgettuple(IndexScanDesc s, ScanDirection dir)
return (res);
/* not cached, so we'll have to do some work */
if (ItemPointerIsValid(&(s->currentItemData))) {
if (ItemPointerIsValid(&(s->currentItemData)))
{
res = rtnext(s, dir);
} else {
}
else
{
res = rtfirst(s, dir);
}
return (res);
@ -72,14 +76,16 @@ rtfirst(IndexScanDesc s, ScanDirection dir)
po = (RTreePageOpaque) PageGetSpecialPointer(p);
so = (RTreeScanOpaque) s->opaque;
for (;;) {
for (;;)
{
maxoff = PageGetMaxOffsetNumber(p);
if (ScanDirectionIsBackward(dir))
n = findnext(s, p, maxoff, dir);
else
n = findnext(s, p, FirstOffsetNumber, dir);
while (n < FirstOffsetNumber || n > maxoff) {
while (n < FirstOffsetNumber || n > maxoff)
{
ReleaseBuffer(b);
if (so->s_stack == (RTSTACK *) NULL)
@ -91,9 +97,12 @@ rtfirst(IndexScanDesc s, ScanDirection dir)
po = (RTreePageOpaque) PageGetSpecialPointer(p);
maxoff = PageGetMaxOffsetNumber(p);
if (ScanDirectionIsBackward(dir)) {
if (ScanDirectionIsBackward(dir))
{
n = OffsetNumberPrev(stk->rts_child);
} else {
}
else
{
n = OffsetNumberNext(stk->rts_child);
}
so->s_stack = stk->rts_parent;
@ -101,7 +110,8 @@ rtfirst(IndexScanDesc s, ScanDirection dir)
n = findnext(s, p, n, dir);
}
if (po->flags & F_LEAF) {
if (po->flags & F_LEAF)
{
ItemPointerSet(&(s->currentItemData), BufferGetBlockNumber(b), n);
it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n));
@ -110,7 +120,9 @@ rtfirst(IndexScanDesc s, ScanDirection dir)
ReleaseBuffer(b);
return (res);
} else {
}
else
{
stk = (RTSTACK *) palloc(sizeof(RTSTACK));
stk->rts_child = n;
stk->rts_blk = BufferGetBlockNumber(b);
@ -145,9 +157,12 @@ rtnext(IndexScanDesc s, ScanDirection dir)
blk = ItemPointerGetBlockNumber(&(s->currentItemData));
n = ItemPointerGetOffsetNumber(&(s->currentItemData));
if (ScanDirectionIsForward(dir)) {
if (ScanDirectionIsForward(dir))
{
n = OffsetNumberNext(n);
} else {
}
else
{
n = OffsetNumberPrev(n);
}
@ -156,11 +171,13 @@ rtnext(IndexScanDesc s, ScanDirection dir)
po = (RTreePageOpaque) PageGetSpecialPointer(p);
so = (RTreeScanOpaque) s->opaque;
for (;;) {
for (;;)
{
maxoff = PageGetMaxOffsetNumber(p);
n = findnext(s, p, n, dir);
while (n < FirstOffsetNumber || n > maxoff) {
while (n < FirstOffsetNumber || n > maxoff)
{
ReleaseBuffer(b);
if (so->s_stack == (RTSTACK *) NULL)
@ -172,9 +189,12 @@ rtnext(IndexScanDesc s, ScanDirection dir)
maxoff = PageGetMaxOffsetNumber(p);
po = (RTreePageOpaque) PageGetSpecialPointer(p);
if (ScanDirectionIsBackward(dir)) {
if (ScanDirectionIsBackward(dir))
{
n = OffsetNumberPrev(stk->rts_child);
} else {
}
else
{
n = OffsetNumberNext(stk->rts_child);
}
so->s_stack = stk->rts_parent;
@ -182,7 +202,8 @@ rtnext(IndexScanDesc s, ScanDirection dir)
n = findnext(s, p, n, dir);
}
if (po->flags & F_LEAF) {
if (po->flags & F_LEAF)
{
ItemPointerSet(&(s->currentItemData), BufferGetBlockNumber(b), n);
it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n));
@ -191,7 +212,9 @@ rtnext(IndexScanDesc s, ScanDirection dir)
ReleaseBuffer(b);
return (res);
} else {
}
else
{
stk = (RTSTACK *) palloc(sizeof(RTSTACK));
stk->rts_child = n;
stk->rts_blk = BufferGetBlockNumber(b);
@ -206,9 +229,12 @@ rtnext(IndexScanDesc s, ScanDirection dir)
p = BufferGetPage(b);
po = (RTreePageOpaque) PageGetSpecialPointer(p);
if (ScanDirectionIsBackward(dir)) {
if (ScanDirectionIsBackward(dir))
{
n = PageGetMaxOffsetNumber(p);
} else {
}
else
{
n = FirstOffsetNumber;
}
}
@ -232,28 +258,36 @@ findnext(IndexScanDesc s, Page p, OffsetNumber n, ScanDirection dir)
* a ghost tuple, before the scan. If this is the case, back up one.
*/
if (so->s_flags & RTS_CURBEFORE) {
if (so->s_flags & RTS_CURBEFORE)
{
so->s_flags &= ~RTS_CURBEFORE;
n = OffsetNumberPrev(n);
}
while (n >= FirstOffsetNumber && n <= maxoff) {
while (n >= FirstOffsetNumber && n <= maxoff)
{
it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n));
if (po->flags & F_LEAF) {
if (po->flags & F_LEAF)
{
if (index_keytest(it,
RelationGetTupleDescriptor(s->relation),
s->numberOfKeys, s->keyData))
break;
} else {
}
else
{
if (index_keytest(it,
RelationGetTupleDescriptor(s->relation),
so->s_internalNKey, so->s_internalKey))
break;
}
if (ScanDirectionIsBackward(dir)) {
if (ScanDirectionIsBackward(dir))
{
n = OffsetNumberPrev(n);
} else {
}
else
{
n = OffsetNumberNext(n);
}
}
@ -268,7 +302,8 @@ rtscancache(IndexScanDesc s, ScanDirection dir)
ItemPointer ip;
if (!(ScanDirectionIsNoMovement(dir)
&& ItemPointerIsValid(&(s->currentItemData)))) {
&& ItemPointerIsValid(&(s->currentItemData))))
{
return ((RetrieveIndexResult) NULL);
}
@ -299,7 +334,8 @@ rtheapptr(Relation r, ItemPointer itemp)
OffsetNumber n;
ip = (ItemPointer) palloc(sizeof(ItemPointerData));
if (ItemPointerIsValid(itemp)) {
if (ItemPointerIsValid(itemp))
{
b = ReadBuffer(r, ItemPointerGetBlockNumber(itemp));
p = BufferGetPage(b);
n = ItemPointerGetOffsetNumber(itemp);
@ -307,7 +343,9 @@ rtheapptr(Relation r, ItemPointer itemp)
memmove((char *) ip, (char *) &(it->t_tid),
sizeof(ItemPointerData));
ReleaseBuffer(b);
} else {
}
else
{
ItemPointerSetInvalid(ip);
}

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.7 1997/04/22 17:31:23 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.8 1997/09/07 04:39:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -51,7 +51,8 @@ rt_box_inter(BOX *a, BOX *b)
n->low.x = Max(a->low.x, b->low.x);
n->low.y = Max(a->low.y, b->low.y);
if (n->high.x < n->low.x || n->high.y < n->low.y) {
if (n->high.x < n->low.x || n->high.y < n->low.y)
{
pfree(n);
return ((BOX *) NULL);
}
@ -107,14 +108,16 @@ rt_poly_union(POLYGON *a, POLYGON *b)
void
rt_poly_size(POLYGON * a, float *size)
{
double xdim, ydim;
double xdim,
ydim;
size = (float *) palloc(sizeof(float));
if (a == (POLYGON *) NULL ||
a->boundbox.high.x <= a->boundbox.low.x ||
a->boundbox.high.y <= a->boundbox.low.y)
*size = 0.0;
else {
else
{
xdim = (a->boundbox.high.x - a->boundbox.low.x);
ydim = (a->boundbox.high.y - a->boundbox.low.y);

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.13 1997/08/12 22:51:54 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.14 1997/09/07 04:39:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -32,7 +32,8 @@
#include <string.h>
#endif
typedef struct SPLITVEC {
typedef struct SPLITVEC
{
OffsetNumber *spl_left;
int spl_nleft;
char *spl_ldatum;
@ -41,26 +42,33 @@ typedef struct SPLITVEC {
char *spl_rdatum;
} SPLITVEC;
typedef struct RTSTATE {
typedef struct RTSTATE
{
func_ptr unionFn; /* union function */
func_ptr sizeFn; /* size function */
func_ptr interFn; /* intersection function */
} RTSTATE;
/* non-export function prototypes */
static InsertIndexResult rtdoinsert(Relation r, IndexTuple itup,
static InsertIndexResult
rtdoinsert(Relation r, IndexTuple itup,
RTSTATE * rtstate);
static void rttighten(Relation r, RTSTACK *stk, char *datum, int att_size,
static void
rttighten(Relation r, RTSTACK * stk, char *datum, int att_size,
RTSTATE * rtstate);
static InsertIndexResult dosplit(Relation r, Buffer buffer, RTSTACK *stack,
static InsertIndexResult
dosplit(Relation r, Buffer buffer, RTSTACK * stack,
IndexTuple itup, RTSTATE * rtstate);
static void rtintinsert(Relation r, RTSTACK *stk, IndexTuple ltup,
static void
rtintinsert(Relation r, RTSTACK * stk, IndexTuple ltup,
IndexTuple rtup, RTSTATE * rtstate);
static void rtnewroot(Relation r, IndexTuple lt, IndexTuple rt);
static void picksplit(Relation r, Page page, SPLITVEC *v, IndexTuple itup,
static void
picksplit(Relation r, Page page, SPLITVEC * v, IndexTuple itup,
RTSTATE * rtstate);
static void RTInitBuffer(Buffer b, uint32 f);
static OffsetNumber choose(Relation r, Page p, IndexTuple it,
static OffsetNumber
choose(Relation r, Page p, IndexTuple it,
RTSTATE * rtstate);
static int nospace(Page p, IndexTuple it);
static void initRtstate(RTSTATE * rtstate, Relation index);
@ -82,18 +90,25 @@ rtbuild(Relation heap,
AttrNumber i;
HeapTuple htup;
IndexTuple itup;
TupleDesc hd, id;
TupleDesc hd,
id;
InsertIndexResult res;
Datum *d;
bool *nulls;
int nb, nh, ni;
int nb,
nh,
ni;
#ifndef OMIT_PARTIAL_INDEX
ExprContext *econtext;
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
Oid hrelid, irelid;
Node *pred, *oldPred;
Oid hrelid,
irelid;
Node *pred,
*oldPred;
RTSTATE rtState;
initRtstate(&rtState, index);
@ -105,15 +120,16 @@ rtbuild(Relation heap,
oldPred = predInfo->oldPred;
/*
* We expect to be called exactly once for any index relation.
* If that's not the case, big trouble's what we have.
* We expect to be called exactly once for any index relation. If
* that's not the case, big trouble's what we have.
*/
if (oldPred == NULL && (nb = RelationGetNumberOfBlocks(index)) != 0)
elog(WARN, "%s already contains data", index->rd_rel->relname.data);
/* initialize the root page (if this is a new index) */
if (oldPred == NULL) {
if (oldPred == NULL)
{
buffer = ReadBuffer(index, P_NEW);
RTInitBuffer(buffer, F_LEAF);
WriteBuffer(buffer);
@ -126,14 +142,16 @@ rtbuild(Relation heap,
nulls = (bool *) palloc(natts * sizeof(*nulls));
/*
* If this is a predicate (partial) index, we will need to evaluate the
* predicate using ExecQual, which requires the current tuple to be in a
* slot of a TupleTable. In addition, ExecQual must have an ExprContext
* referring to that slot. Here, we initialize dummy TupleTable and
* ExprContext objects for this purpose. --Nels, Feb '92
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
* be in a slot of a TupleTable. In addition, ExecQual must have an
* ExprContext referring to that slot. Here, we initialize dummy
* TupleTable and ExprContext objects for this purpose. --Nels, Feb
* '92
*/
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL) {
if (pred != NULL || oldPred != NULL)
{
tupleTable = ExecCreateTupleTable(1);
slot = ExecAllocTableSlot(tupleTable);
econtext = makeNode(ExprContext);
@ -152,7 +170,8 @@ rtbuild(Relation heap,
/* count the tuples as we insert them */
nh = ni = 0;
for (; HeapTupleIsValid(htup); htup = heap_getnext(scan, 0, &buffer)) {
for (; HeapTupleIsValid(htup); htup = heap_getnext(scan, 0, &buffer))
{
nh++;
@ -160,19 +179,25 @@ rtbuild(Relation heap,
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
*/
if (oldPred != NULL) {
if (oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
if (ExecQual((List*)oldPred, econtext) == true) {
if (ExecQual((List *) oldPred, econtext) == true)
{
ni++;
continue;
}
#endif /* OMIT_PARTIAL_INDEX */
}
/* Skip this tuple if it doesn't satisfy the partial-index predicate */
if (pred != NULL) {
/*
* Skip this tuple if it doesn't satisfy the partial-index
* predicate
*/
if (pred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
@ -184,23 +209,25 @@ rtbuild(Relation heap,
ni++;
/*
* For the current heap tuple, extract all the attributes
* we use in this index, and note which are null.
* For the current heap tuple, extract all the attributes we use
* in this index, and note which are null.
*/
for (i = 1; i <= natts; i++) {
for (i = 1; i <= natts; i++)
{
int attoff;
bool attnull;
/*
* Offsets are from the start of the tuple, and are
* zero-based; indices are one-based. The next call
* returns i - 1. That's data hiding for you.
* zero-based; indices are one-based. The next call returns i
* - 1. That's data hiding for you.
*/
attoff = AttrNumberGetAttrOffset(i);
/*
d[attoff] = HeapTupleGetAttributeValue(htup, buffer,
* d[attoff] = HeapTupleGetAttributeValue(htup, buffer,
*/
d[attoff] = GetIndexValue(htup,
hd,
@ -217,12 +244,11 @@ rtbuild(Relation heap,
itup->t_tid = htup->t_ctid;
/*
* Since we already have the index relation locked, we
* call rtdoinsert directly. Normal access method calls
* dispatch through rtinsert, which locks the relation
* for write. This is the right thing to do if you're
* inserting single tups, but not when you're initializing
* the whole index at once.
* Since we already have the index relation locked, we call
* rtdoinsert directly. Normal access method calls dispatch
* through rtinsert, which locks the relation for write. This is
* the right thing to do if you're inserting single tups, but not
* when you're initializing the whole index at once.
*/
res = rtdoinsert(index, itup, &rtState);
@ -234,7 +260,8 @@ rtbuild(Relation heap,
heap_endscan(scan);
RelationUnsetLockForWrite(index);
if (pred != NULL || oldPred != NULL) {
if (pred != NULL || oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
ExecDestroyTupleTable(tupleTable, true);
pfree(econtext);
@ -242,14 +269,13 @@ rtbuild(Relation heap,
}
/*
* Since we just counted the tuples in the heap, we update its
* stats in pg_relation to guarantee that the planner takes
* advantage of the index we just created. UpdateStats() does a
* CommandCounterIncrement(), which flushes changed entries from
* the system relcache. The act of constructing an index changes
* these heap and index tuples in the system catalogs, so they
* need to be flushed. We close them to guarantee that they
* will be.
* Since we just counted the tuples in the heap, we update its stats
* in pg_relation to guarantee that the planner takes advantage of the
* index we just created. UpdateStats() does a
* CommandCounterIncrement(), which flushes changed entries from the
* system relcache. The act of constructing an index changes these
* heap and index tuples in the system catalogs, so they need to be
* flushed. We close them to guarantee that they will be.
*/
hrelid = heap->rd_id;
@ -260,8 +286,10 @@ rtbuild(Relation heap,
UpdateStats(hrelid, nh, true);
UpdateStats(irelid, ni, false);
if (oldPred != NULL) {
if (ni == nh) pred = NULL;
if (oldPred != NULL)
{
if (ni == nh)
pred = NULL;
UpdateIndexPredicate(irelid, oldPred, pred);
}
@ -312,7 +340,8 @@ rtdoinsert(Relation r, IndexTuple itup, RTSTATE *rtstate)
buffer = InvalidBuffer;
stack = (RTSTACK *) NULL;
do {
do
{
/* let go of current buffer before getting next */
if (buffer != InvalidBuffer)
ReleaseBuffer(buffer);
@ -322,7 +351,8 @@ rtdoinsert(Relation r, IndexTuple itup, RTSTATE *rtstate)
page = (Page) BufferGetPage(buffer);
opaque = (RTreePageOpaque) PageGetSpecialPointer(page);
if (!(opaque->flags & F_LEAF)) {
if (!(opaque->flags & F_LEAF))
{
RTSTACK *n;
ItemId iid;
@ -338,7 +368,8 @@ rtdoinsert(Relation r, IndexTuple itup, RTSTATE *rtstate)
}
} while (!(opaque->flags & F_LEAF));
if (nospace(page, itup)) {
if (nospace(page, itup))
{
/* need to do a split */
res = dosplit(r, buffer, stack, itup, rtstate);
freestack(stack);
@ -347,11 +378,14 @@ rtdoinsert(Relation r, IndexTuple itup, RTSTATE *rtstate)
}
/* add the item and write the buffer */
if (PageIsEmpty(page)) {
if (PageIsEmpty(page))
{
l = PageAddItem(page, (Item) itup, IndexTupleSize(itup),
FirstOffsetNumber,
LP_USED);
} else {
}
else
{
l = PageAddItem(page, (Item) itup, IndexTupleSize(itup),
OffsetNumberNext(PageGetMaxOffsetNumber(page)),
LP_USED);
@ -383,7 +417,8 @@ rttighten(Relation r,
char *oldud;
char *tdatum;
Page p;
float old_size, newd_size;
float old_size,
newd_size;
Buffer b;
if (stk == (RTSTACK *) NULL)
@ -400,17 +435,22 @@ rttighten(Relation r,
(*rtstate->sizeFn) (datum, &newd_size);
if (newd_size != old_size) {
if (newd_size != old_size)
{
TupleDesc td = RelationGetTupleDescriptor(r);
if (td->attrs[0]->attlen < 0) {
if (td->attrs[0]->attlen < 0)
{
/*
* This is an internal page, so 'oldud' had better be a
* union (constant-length) key, too. (See comment below.)
* This is an internal page, so 'oldud' had better be a union
* (constant-length) key, too. (See comment below.)
*/
Assert(VARSIZE(datum) == VARSIZE(oldud));
memmove(oldud, datum, VARSIZE(datum));
} else {
}
else
{
memmove(oldud, datum, att_size);
}
WriteBuffer(b);
@ -418,14 +458,16 @@ rttighten(Relation r,
/*
* The user may be defining an index on variable-sized data (like
* polygons). If so, we need to get a constant-sized datum for
* insertion on the internal page. We do this by calling the union
* proc, which is guaranteed to return a rectangle.
* insertion on the internal page. We do this by calling the
* union proc, which is guaranteed to return a rectangle.
*/
tdatum = (char *) (*rtstate->unionFn) (datum, datum);
rttighten(r, stk->rts_parent, tdatum, att_size, rtstate);
pfree(tdatum);
} else {
}
else
{
ReleaseBuffer(b);
}
pfree(datum);
@ -446,15 +488,20 @@ dosplit(Relation r,
RTSTATE * rtstate)
{
Page p;
Buffer leftbuf, rightbuf;
Page left, right;
Buffer leftbuf,
rightbuf;
Page left,
right;
ItemId itemid;
IndexTuple item;
IndexTuple ltup, rtup;
IndexTuple ltup,
rtup;
OffsetNumber maxoff;
OffsetNumber i;
OffsetNumber leftoff, rightoff;
BlockNumber lbknum, rbknum;
OffsetNumber leftoff,
rightoff;
BlockNumber lbknum,
rbknum;
BlockNumber bufblock;
RTreePageOpaque opaque;
int blank;
@ -470,17 +517,20 @@ dosplit(Relation r,
opaque = (RTreePageOpaque) PageGetSpecialPointer(p);
/*
* The root of the tree is the first block in the relation. If
* we're about to split the root, we need to do some hocus-pocus
* to enforce this guarantee.
* The root of the tree is the first block in the relation. If we're
* about to split the root, we need to do some hocus-pocus to enforce
* this guarantee.
*/
if (BufferGetBlockNumber(buffer) == P_ROOT) {
if (BufferGetBlockNumber(buffer) == P_ROOT)
{
leftbuf = ReadBuffer(r, P_NEW);
RTInitBuffer(leftbuf, opaque->flags);
lbknum = BufferGetBlockNumber(leftbuf);
left = (Page) BufferGetPage(leftbuf);
} else {
}
else
{
leftbuf = buffer;
IncrBufferRefCount(buffer);
lbknum = BufferGetBlockNumber(buffer);
@ -496,16 +546,20 @@ dosplit(Relation r,
leftoff = rightoff = FirstOffsetNumber;
maxoff = PageGetMaxOffsetNumber(p);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
itemid = PageGetItemId(p, i);
item = (IndexTuple) PageGetItem(p, itemid);
if (i == *(v.spl_left)) {
if (i == *(v.spl_left))
{
PageAddItem(left, (Item) item, IndexTupleSize(item),
leftoff, LP_USED);
leftoff = OffsetNumberNext(leftoff);
v.spl_left++; /* advance in left split vector */
} else {
}
else
{
PageAddItem(right, (Item) item, IndexTupleSize(item),
rightoff, LP_USED);
rightoff = OffsetNumberNext(rightoff);
@ -517,19 +571,23 @@ dosplit(Relation r,
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
/* now insert the new index tuple */
if (*(v.spl_left) != FirstOffsetNumber) {
if (*(v.spl_left) != FirstOffsetNumber)
{
PageAddItem(left, (Item) itup, IndexTupleSize(itup),
leftoff, LP_USED);
leftoff = OffsetNumberNext(leftoff);
ItemPointerSet(&(res->pointerData), lbknum, leftoff);
} else {
}
else
{
PageAddItem(right, (Item) itup, IndexTupleSize(itup),
rightoff, LP_USED);
rightoff = OffsetNumberNext(rightoff);
ItemPointerSet(&(res->pointerData), rbknum, rightoff);
}
if ((bufblock = BufferGetBlockNumber(buffer)) != P_ROOT) {
if ((bufblock = BufferGetBlockNumber(buffer)) != P_ROOT)
{
PageRestoreTempPage(left, p);
}
WriteBuffer(leftbuf);
@ -538,18 +596,17 @@ dosplit(Relation r,
/*
* Okay, the page is split. We have three things left to do:
*
* 1) Adjust any active scans on this index to cope with changes
* we introduced in its structure by splitting this page.
* 1) Adjust any active scans on this index to cope with changes we
* introduced in its structure by splitting this page.
*
* 2) "Tighten" the bounding box of the pointer to the left
* page in the parent node in the tree, if any. Since we
* moved a bunch of stuff off the left page, we expect it
* to get smaller. This happens in the internal insertion
* routine.
* 2) "Tighten" the bounding box of the pointer to the left page in the
* parent node in the tree, if any. Since we moved a bunch of stuff
* off the left page, we expect it to get smaller. This happens in
* the internal insertion routine.
*
* 3) Insert a pointer to the right page in the parent. This
* may cause the parent to split. If it does, we need to
* repeat steps one and two for each split node in the tree.
* 3) Insert a pointer to the right page in the parent. This may cause
* the parent to split. If it does, we need to repeat steps one and
* two for each split node in the tree.
*/
/* adjust active scans */
@ -584,10 +641,13 @@ rtintinsert(Relation r,
IndexTuple old;
Buffer b;
Page p;
char *ldatum, *rdatum, *newdatum;
char *ldatum,
*rdatum,
*newdatum;
InsertIndexResult res;
if (stk == (RTSTACK *) NULL) {
if (stk == (RTSTACK *) NULL)
{
rtnewroot(r, ltup, rtup);
return;
}
@ -597,10 +657,10 @@ rtintinsert(Relation r,
old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));
/*
* This is a hack. Right now, we force rtree keys to be constant size.
* To fix this, need delete the old key and add both left and right
* for the two new pages. The insertion of left may force a split if
* the new left key is bigger than the old key.
* This is a hack. Right now, we force rtree keys to be constant
* size. To fix this, need delete the old key and add both left and
* right for the two new pages. The insertion of left may force a
* split if the new left key is bigger than the old key.
*/
if (IndexTupleSize(old) != IndexTupleSize(ltup))
@ -609,14 +669,18 @@ rtintinsert(Relation r,
/* install pointer to left child */
memmove(old, ltup, IndexTupleSize(ltup));
if (nospace(p, rtup)) {
if (nospace(p, rtup))
{
newdatum = (((char *) ltup) + sizeof(IndexTupleData));
rttighten(r, stk->rts_parent, newdatum,
(IndexTupleSize(ltup) - sizeof(IndexTupleData)), rtstate);
res = dosplit(r, b, stk->rts_parent, rtup, rtstate);
WriteBuffer(b); /* don't forget to release buffer! - 01/31/94 */
WriteBuffer(b); /* don't forget to release buffer! -
* 01/31/94 */
pfree(res);
} else {
}
else
{
PageAddItem(p, (Item) rtup, IndexTupleSize(rtup),
PageGetMaxOffsetNumber(p), LP_USED);
WriteBuffer(b);
@ -655,19 +719,32 @@ picksplit(Relation r,
RTSTATE * rtstate)
{
OffsetNumber maxoff;
OffsetNumber i, j;
IndexTuple item_1, item_2;
char *datum_alpha, *datum_beta;
char *datum_l, *datum_r;
char *union_d, *union_dl, *union_dr;
OffsetNumber i,
j;
IndexTuple item_1,
item_2;
char *datum_alpha,
*datum_beta;
char *datum_l,
*datum_r;
char *union_d,
*union_dl,
*union_dr;
char *inter_d;
bool firsttime;
float size_alpha, size_beta, size_union, size_inter;
float size_waste, waste;
float size_l, size_r;
float size_alpha,
size_beta,
size_union,
size_inter;
float size_waste,
waste;
float size_l,
size_r;
int nbytes;
OffsetNumber seed_1 = 0, seed_2 = 0;
OffsetNumber *left, *right;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
*right;
maxoff = PageGetMaxOffsetNumber(page);
@ -678,10 +755,12 @@ picksplit(Relation r,
firsttime = true;
waste = 0.0;
for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i))
{
item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
datum_alpha = ((char *) item_1) + sizeof(IndexTupleData);
for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j)) {
for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j))
{
item_2 = (IndexTuple) PageGetItem(page, PageGetItemId(page, j));
datum_beta = ((char *) item_2) + sizeof(IndexTupleData);
@ -698,11 +777,12 @@ picksplit(Relation r,
pfree(inter_d);
/*
* are these a more promising split that what we've
* already seen?
* are these a more promising split that what we've already
* seen?
*/
if (size_waste > waste || firsttime) {
if (size_waste > waste || firsttime)
{
waste = size_waste;
seed_1 = i;
seed_2 = j;
@ -727,40 +807,46 @@ picksplit(Relation r,
/*
* Now split up the regions between the two seeds. An important
* property of this split algorithm is that the split vector v
* has the indices of items to be split in order in its left and
* right vectors. We exploit this property by doing a merge in
* the code that actually splits the page.
* property of this split algorithm is that the split vector v has the
* indices of items to be split in order in its left and right
* vectors. We exploit this property by doing a merge in the code
* that actually splits the page.
*
* For efficiency, we also place the new index tuple in this loop.
* This is handled at the very end, when we have placed all the
* existing tuples and i == maxoff + 1.
* For efficiency, we also place the new index tuple in this loop. This
* is handled at the very end, when we have placed all the existing
* tuples and i == maxoff + 1.
*/
maxoff = OffsetNumberNext(maxoff);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
/*
* If we've already decided where to place this item, just
* put it on the right list. Otherwise, we need to figure
* out which page needs the least enlargement in order to
* store the item.
* If we've already decided where to place this item, just put it
* on the right list. Otherwise, we need to figure out which page
* needs the least enlargement in order to store the item.
*/
if (i == seed_1) {
if (i == seed_1)
{
*left++ = i;
v->spl_nleft++;
continue;
} else if (i == seed_2) {
}
else if (i == seed_2)
{
*right++ = i;
v->spl_nright++;
continue;
}
/* okay, which page needs least enlargement? */
if (i == maxoff) {
if (i == maxoff)
{
item_1 = itup;
} else {
}
else
{
item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
}
@ -771,14 +857,17 @@ picksplit(Relation r,
(*rtstate->sizeFn) (union_dr, &size_beta);
/* pick which page to add it to */
if (size_alpha - size_l < size_beta - size_r) {
if (size_alpha - size_l < size_beta - size_r)
{
pfree(datum_l);
pfree(union_dr);
datum_l = union_dl;
size_l = size_alpha;
*left++ = i;
v->spl_nleft++;
} else {
}
else
{
pfree(datum_r);
pfree(union_dl);
datum_r = union_dr;
@ -815,9 +904,11 @@ choose(Relation r, Page p, IndexTuple it, RTSTATE *rtstate)
{
OffsetNumber maxoff;
OffsetNumber i;
char *ud, *id;
char *ud,
*id;
char *datum;
float usize, dsize;
float usize,
dsize;
OffsetNumber which;
float which_grow;
@ -826,14 +917,16 @@ choose(Relation r, Page p, IndexTuple it, RTSTATE *rtstate)
which_grow = -1.0;
which = -1;
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
datum = (char *) PageGetItem(p, PageGetItemId(p, i));
datum += sizeof(IndexTupleData);
(*rtstate->sizeFn) (datum, &dsize);
ud = (char *) (*rtstate->unionFn) (datum, id);
(*rtstate->sizeFn) (ud, &usize);
pfree(ud);
if (which_grow < 0 || usize - dsize < which_grow) {
if (which_grow < 0 || usize - dsize < which_grow)
{
which = i;
which_grow = usize - dsize;
if (which_grow == 0)
@ -855,7 +948,8 @@ freestack(RTSTACK *s)
{
RTSTACK *p;
while (s != (RTSTACK *) NULL) {
while (s != (RTSTACK *) NULL)
{
p = s->rts_parent;
pfree(s);
s = p;
@ -891,9 +985,12 @@ rtdelete(Relation r, ItemPointer tid)
return ((char *) NULL);
}
static void initRtstate(RTSTATE *rtstate, Relation index)
static void
initRtstate(RTSTATE * rtstate, Relation index)
{
RegProcedure union_proc, size_proc, inter_proc;
RegProcedure union_proc,
size_proc,
inter_proc;
func_ptr user_fn;
int pronargs;
@ -916,7 +1013,8 @@ _rtdump(Relation r)
{
Buffer buf;
Page page;
OffsetNumber offnum, maxoff;
OffsetNumber offnum,
maxoff;
BlockNumber blkno;
BlockNumber nblocks;
RTreePageOpaque po;
@ -927,7 +1025,8 @@ _rtdump(Relation r)
char *itkey;
nblocks = RelationGetNumberOfBlocks(r);
for (blkno = 0; blkno < nblocks; blkno++) {
for (blkno = 0; blkno < nblocks; blkno++)
{
buf = ReadBuffer(r, blkno);
page = BufferGetPage(buf);
po = (RTreePageOpaque) PageGetSpecialPointer(page);
@ -935,14 +1034,16 @@ _rtdump(Relation r)
printf("Page %d maxoff %d <%s>\n", blkno, maxoff,
(po->flags & F_LEAF ? "LEAF" : "INTERNAL"));
if (PageIsEmpty(page)) {
if (PageIsEmpty(page))
{
ReleaseBuffer(buf);
continue;
}
for (offnum = FirstOffsetNumber;
offnum <= maxoff;
offnum = OffsetNumberNext(offnum)) {
offnum = OffsetNumberNext(offnum))
{
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
itblkno = ItemPointerGetBlockNumber(&(itup->t_tid));
itoffno = ItemPointerGetOffsetNumber(&(itup->t_tid));
@ -957,5 +1058,5 @@ _rtdump(Relation r)
ReleaseBuffer(buf);
}
}
#endif /* defined RTDEBUG */
#endif /* defined RTDEBUG */

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.10 1997/05/20 10:29:30 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.11 1997/09/07 04:39:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -30,11 +30,14 @@
/* routines defined and used here */
static void rtregscan(IndexScanDesc s);
static void rtdropscan(IndexScanDesc s);
static void rtadjone(IndexScanDesc s, int op, BlockNumber blkno,
static void
rtadjone(IndexScanDesc s, int op, BlockNumber blkno,
OffsetNumber offnum);
static void adjuststack(RTSTACK *stk, BlockNumber blkno,
static void
adjuststack(RTSTACK * stk, BlockNumber blkno,
OffsetNumber offnum);
static void adjustiptr(IndexScanDesc s, ItemPointer iptr,
static void
adjustiptr(IndexScanDesc s, ItemPointer iptr,
int op, BlockNumber blkno, OffsetNumber offnum);
/*
@ -47,7 +50,8 @@ static void adjustiptr(IndexScanDesc s, ItemPointer iptr,
* locks on the same object, so that's why we need to handle this case.
*/
typedef struct RTScanListData {
typedef struct RTScanListData
{
IndexScanDesc rtsl_scan;
struct RTScanListData *rtsl_next;
} RTScanListData;
@ -79,7 +83,8 @@ rtrescan(IndexScanDesc s, bool fromEnd, ScanKey key)
RegProcedure internal_proc;
int i;
if (!IndexScanIsValid(s)) {
if (!IndexScanIsValid(s))
{
elog(WARN, "rtrescan: invalid scan.");
return;
}
@ -98,24 +103,31 @@ rtrescan(IndexScanDesc s, bool fromEnd, ScanKey key)
/*
* Set flags.
*/
if (RelationGetNumberOfBlocks(s->relation) == 0) {
if (RelationGetNumberOfBlocks(s->relation) == 0)
{
s->flags = ScanUnmarked;
} else if (fromEnd) {
}
else if (fromEnd)
{
s->flags = ScanUnmarked | ScanUncheckedPrevious;
} else {
}
else
{
s->flags = ScanUnmarked | ScanUncheckedNext;
}
s->scanFromEnd = fromEnd;
if (s->numberOfKeys > 0) {
if (s->numberOfKeys > 0)
{
memmove(s->keyData,
key,
s->numberOfKeys * sizeof(ScanKeyData));
}
p = (RTreeScanOpaque) s->opaque;
if (p != (RTreeScanOpaque) NULL) {
if (p != (RTreeScanOpaque) NULL)
{
freestack(p->s_stack);
freestack(p->s_markstk);
p->s_stack = p->s_markstk = (RTSTACK *) NULL;
@ -124,25 +136,29 @@ rtrescan(IndexScanDesc s, bool fromEnd, ScanKey key)
{
p->s_internalKey[i].sk_argument = s->keyData[i].sk_argument;
}
} else {
}
else
{
/* initialize opaque data */
p = (RTreeScanOpaque) palloc(sizeof(RTreeScanOpaqueData));
p->s_stack = p->s_markstk = (RTSTACK *) NULL;
p->s_internalNKey = s->numberOfKeys;
p->s_flags = 0x0;
s->opaque = p;
if (s->numberOfKeys > 0) {
if (s->numberOfKeys > 0)
{
p->s_internalKey =
(ScanKey) palloc(sizeof(ScanKeyData) * s->numberOfKeys);
/*
* Scans on internal pages use different operators than they
* do on leaf pages. For example, if the user wants all boxes
* that exactly match (x1,y1,x2,y2), then on internal pages
* we need to find all boxes that contain (x1,y1,x2,y2).
* that exactly match (x1,y1,x2,y2), then on internal pages we
* need to find all boxes that contain (x1,y1,x2,y2).
*/
for (i = 0; i < s->numberOfKeys; i++) {
for (i = 0; i < s->numberOfKeys; i++)
{
p->s_internalKey[i].sk_argument = s->keyData[i].sk_argument;
internal_proc = RTMapOperator(s->relation,
s->keyData[i].sk_attno,
@ -161,7 +177,9 @@ void
rtmarkpos(IndexScanDesc s)
{
RTreeScanOpaque p;
RTSTACK *o, *n, *tmp;
RTSTACK *o,
*n,
*tmp;
s->currentMarkData = s->currentItemData;
p = (RTreeScanOpaque) s->opaque;
@ -174,7 +192,8 @@ rtmarkpos(IndexScanDesc s)
n = p->s_stack;
/* copy the parent stack from the current item data */
while (n != (RTSTACK *) NULL) {
while (n != (RTSTACK *) NULL)
{
tmp = (RTSTACK *) palloc(sizeof(RTSTACK));
tmp->rts_child = n->rts_child;
tmp->rts_blk = n->rts_blk;
@ -191,7 +210,9 @@ void
rtrestrpos(IndexScanDesc s)
{
RTreeScanOpaque p;
RTSTACK *o, *n, *tmp;
RTSTACK *o,
*n,
*tmp;
s->currentItemData = s->currentMarkData;
p = (RTreeScanOpaque) s->opaque;
@ -204,7 +225,8 @@ rtrestrpos(IndexScanDesc s)
n = p->s_markstk;
/* copy the parent stack from the current item data */
while (n != (RTSTACK *) NULL) {
while (n != (RTSTACK *) NULL)
{
tmp = (RTSTACK *) palloc(sizeof(RTSTACK));
tmp->rts_child = n->rts_child;
tmp->rts_blk = n->rts_blk;
@ -224,7 +246,8 @@ rtendscan(IndexScanDesc s)
p = (RTreeScanOpaque) s->opaque;
if (p != (RTreeScanOpaque) NULL) {
if (p != (RTreeScanOpaque) NULL)
{
freestack(p->s_stack);
freestack(p->s_markstk);
pfree(s->opaque);
@ -255,7 +278,8 @@ rtdropscan(IndexScanDesc s)
for (l = RTScans;
l != (RTScanList) NULL && l->rtsl_scan != s;
l = l->rtsl_next) {
l = l->rtsl_next)
{
prev = l;
}
@ -277,7 +301,8 @@ rtadjscans(Relation r, int op, BlockNumber blkno, OffsetNumber offnum)
Oid relid;
relid = r->rd_id;
for (l = RTScans; l != (RTScanList) NULL; l = l->rtsl_next) {
for (l = RTScans; l != (RTScanList) NULL; l = l->rtsl_next)
{
if (l->rtsl_scan->relation->rd_id == relid)
rtadjone(l->rtsl_scan, op, blkno, offnum);
}
@ -306,7 +331,8 @@ rtadjone(IndexScanDesc s,
so = (RTreeScanOpaque) s->opaque;
if (op == RTOP_SPLIT) {
if (op == RTOP_SPLIT)
{
adjuststack(so->s_stack, blkno, offnum);
adjuststack(so->s_markstk, blkno, offnum);
}
@ -329,20 +355,27 @@ adjustiptr(IndexScanDesc s,
OffsetNumber curoff;
RTreeScanOpaque so;
if (ItemPointerIsValid(iptr)) {
if (ItemPointerGetBlockNumber(iptr) == blkno) {
if (ItemPointerIsValid(iptr))
{
if (ItemPointerGetBlockNumber(iptr) == blkno)
{
curoff = ItemPointerGetOffsetNumber(iptr);
so = (RTreeScanOpaque) s->opaque;
switch (op) {
switch (op)
{
case RTOP_DEL:
/* back up one if we need to */
if (curoff >= offnum) {
if (curoff >= offnum)
{
if (curoff > FirstOffsetNumber) {
if (curoff > FirstOffsetNumber)
{
/* just adjust the item pointer */
ItemPointerSet(iptr, blkno, OffsetNumberPrev(curoff));
} else {
}
else
{
/* remember that we're before the current tuple */
ItemPointerSet(iptr, blkno, FirstOffsetNumber);
if (iptr == &(s->currentItemData))
@ -388,7 +421,8 @@ adjuststack(RTSTACK *stk,
BlockNumber blkno,
OffsetNumber offnum)
{
while (stk != (RTSTACK *) NULL) {
while (stk != (RTSTACK *) NULL)
{
if (stk->rts_blk == blkno)
stk->rts_child = FirstOffsetNumber;

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtstrat.c,v 1.6 1997/08/19 21:29:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtstrat.c,v 1.7 1997/09/07 04:39:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -18,7 +18,8 @@
#include <access/rtree.h>
#include <access/istrat.h>
static StrategyNumber RelationGetRTStrategy(Relation r,
static StrategyNumber
RelationGetRTStrategy(Relation r,
AttrNumber attnum, RegProcedure proc);
/*
@ -222,6 +223,7 @@ RelationInvokeRTStrategy(Relation r,
return (RelationInvokeStrategy(r, &RTEvaluationData, attnum, s,
left, right));
}
#endif
RegProcedure

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.9 1997/08/19 21:29:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.10 1997/09/07 04:39:29 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@ -29,7 +29,8 @@
static int RecoveryCheckingEnabled(void);
static void TransRecover(Relation logRelation);
static bool TransactionLogTest(TransactionId transactionId, XidStatus status);
static void TransactionLogUpdate(TransactionId transactionId,
static void
TransactionLogUpdate(TransactionId transactionId,
XidStatus status);
/* ----------------
@ -113,6 +114,7 @@ SetRecoveryCheckingEnabled(bool state)
{
RecoveryCheckingEnableState = (state == true);
}
#endif
/* ----------------------------------------------------------------
@ -132,7 +134,8 @@ SetRecoveryCheckingEnabled(bool state)
* --------------------------------
*/
static bool /* true/false: does transaction id have specified status? */
static bool /* true/false: does transaction id have
* specified status? */
TransactionLogTest(TransactionId transactionId, /* transaction id to test */
XidStatus status) /* transaction status */
{
@ -171,7 +174,8 @@ TransactionLogTest(TransactionId transactionId, /* transaction id to test */
transactionId,
&fail);
if (! fail) {
if (!fail)
{
TransactionIdStore(transactionId, &cachedTestXid);
cachedTestXidStatus = xidstatus;
return (bool)
@ -238,7 +242,8 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */
* (we only record commit times)
* ----------------
*/
if (RelationIsValid(TimeRelation) && status == XID_COMMIT) {
if (RelationIsValid(TimeRelation) && status == XID_COMMIT)
{
TransComputeBlockNumber(TimeRelation, transactionId, &blockNumber);
TransBlockNumberSetCommitTime(TimeRelation,
blockNumber,
@ -268,7 +273,8 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */
*/
AbsoluteTime /* commit time of transaction id */
TransactionIdGetCommitTime(TransactionId transactionId) /* transaction id to test */
TransactionIdGetCommitTime(TransactionId transactionId) /* transaction id to
* test */
{
BlockNumber blockNumber;
AbsoluteTime commitTime; /* commit time */
@ -305,11 +311,13 @@ TransactionIdGetCommitTime(TransactionId transactionId) /* transaction id to tes
* update our cache and return the transaction commit time
* ----------------
*/
if (! fail) {
if (!fail)
{
TransactionIdStore(transactionId, &cachedGetCommitTimeXid);
cachedGetCommitTime = commitTime;
return commitTime;
} else
}
else
return INVALID_ABSTIME;
}
@ -516,7 +524,8 @@ InitializeTransactionLog(void)
* ----------------
*/
SpinAcquire(OidGenLockId);
if (!TransactionIdDidCommit(AmiTransactionId)) {
if (!TransactionIdDidCommit(AmiTransactionId))
{
/* ----------------
* SOMEDAY initialize the information stored in
@ -526,7 +535,9 @@ InitializeTransactionLog(void)
TransactionLogUpdate(AmiTransactionId, XID_COMMIT);
VariableRelationPutNextXid(FirstTransactionId);
} else if (RecoveryCheckingEnabled()) {
}
else if (RecoveryCheckingEnabled())
{
/* ----------------
* if we have a pre-initialized database and if the
* perform recovery checking flag was passed then we
@ -641,10 +652,10 @@ TransactionIdCommit(TransactionId transactionId)
return;
/*
* Within TransactionLogUpdate we call UpdateLastCommited()
* which assumes we have exclusive access to pg_variable.
* Therefore we need to get exclusive access before calling
* TransactionLogUpdate. -mer 18 Aug 1992
* Within TransactionLogUpdate we call UpdateLastCommited() which
* assumes we have exclusive access to pg_variable. Therefore we need
* to get exclusive access before calling TransactionLogUpdate. -mer
* 18 Aug 1992
*/
SpinAcquire(OidGenLockId);
TransactionLogUpdate(transactionId, XID_COMMIT);
@ -681,4 +692,5 @@ TransactionIdSetInProgress(TransactionId transactionId)
TransactionLogUpdate(transactionId, XID_INPROGRESS);
}
#endif

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.9 1997/08/19 21:30:12 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.10 1997/09/07 04:39:32 momjian Exp $
*
* NOTES
* This file contains support functions for the high
@ -23,13 +23,17 @@
#include <access/xact.h>
#include <storage/lmgr.h>
static AbsoluteTime TransBlockGetCommitTime(Block tblock,
static AbsoluteTime
TransBlockGetCommitTime(Block tblock,
TransactionId transactionId);
static XidStatus TransBlockGetXidStatus(Block tblock,
static XidStatus
TransBlockGetXidStatus(Block tblock,
TransactionId transactionId);
static void TransBlockSetCommitTime(Block tblock,
static void
TransBlockSetCommitTime(Block tblock,
TransactionId transactionId, AbsoluteTime commitTime);
static void TransBlockSetXidStatus(Block tblock,
static void
TransBlockSetXidStatus(Block tblock,
TransactionId transactionId, XidStatus xstatus);
/* ----------------------------------------------------------------
@ -55,7 +59,8 @@ AmiTransactionOverride(bool flag)
*/
void
TransComputeBlockNumber(Relation relation, /* relation to test */
TransactionId transactionId, /* transaction id to test */
TransactionId transactionId, /* transaction id to
* test */
BlockNumber * blockNumberOutP)
{
long itemsPerBlock = 0;
@ -129,7 +134,8 @@ TransBlockGetLastTransactionIdStatus(Block tblock,
* ----------------
*/
maxIndex = TP_NumXidStatusPerBlock;
for (index = maxIndex; index > 0; index--) {
for (index = maxIndex; index > 0; index--)
{
offset = BitIndexOf(index - 1);
bit1 = ((bits8) BitArrayBitIsSet((BitArray) tblock, offset++)) << 1;
bit2 = (bits8) BitArrayBitIsSet((BitArray) tblock, offset);
@ -142,8 +148,10 @@ TransBlockGetLastTransactionIdStatus(Block tblock,
* we save the transaction id in the place specified by the caller.
* ----------------
*/
if (xstatus != XID_INPROGRESS) {
if (returnXidP != NULL) {
if (xstatus != XID_INPROGRESS)
{
if (returnXidP != NULL)
{
TransactionIdStore(baseXid, returnXidP);
TransactionIdAdd(returnXidP, index - 1);
}
@ -158,7 +166,8 @@ TransBlockGetLastTransactionIdStatus(Block tblock,
* status is "in progress" to know this condition has arisen.
* ----------------
*/
if (index == 0) {
if (index == 0)
{
if (returnXidP != NULL)
TransactionIdStore(baseXid, returnXidP);
}
@ -169,6 +178,7 @@ TransBlockGetLastTransactionIdStatus(Block tblock,
*/
return xstatus;
}
#endif
/* --------------------------------
@ -191,7 +201,8 @@ TransBlockGetXidStatus(Block tblock,
* sanity check
* ----------------
*/
if (tblock == NULL) {
if (tblock == NULL)
{
return XID_INVALID;
}
@ -262,7 +273,8 @@ TransBlockSetXidStatus(Block tblock,
* store the transaction value at the specified offset
* ----------------
*/
switch(xstatus) {
switch (xstatus)
{
case XID_COMMIT: /* set 10 */
BitArraySetBit((BitArray) tblock, offset);
BitArrayClearBit((BitArray) tblock, offset + 1);
@ -604,7 +616,8 @@ TransBlockNumberSetCommitTime(Relation relation,
#ifdef NOT_USED
void
TransGetLastRecordedTransaction(Relation relation,
TransactionId xid, /* return: transaction id */
TransactionId xid, /* return: transaction
* id */
bool * failP)
{
BlockNumber blockNumber;/* block number */
@ -633,7 +646,8 @@ TransGetLastRecordedTransaction(Relation relation,
* ----------------
*/
n = RelationGetNumberOfBlocks(relation);
if (n == 0) {
if (n == 0)
{
(*failP) = true;
return;
}
@ -663,4 +677,5 @@ TransGetLastRecordedTransaction(Relation relation,
*/
RelationUnsetLockForRead(relation);
}
#endif

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.9 1997/08/19 21:30:16 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.10 1997/09/07 04:39:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -235,7 +235,8 @@ VariableRelationGetNextOid(Oid *oid_return)
* should be called instead..
* ----------------
*/
if (! RelationIsValid(VariableRelation)) {
if (!RelationIsValid(VariableRelation))
{
if (PointerIsValid(oid_return))
(*oid_return) = InvalidOid;
return;
@ -256,7 +257,8 @@ VariableRelationGetNextOid(Oid *oid_return)
var = (VariableRelationContents) BufferGetBlock(buf);
if (PointerIsValid(oid_return)) {
if (PointerIsValid(oid_return))
{
/* ----------------
* nothing up my sleeve... what's going on here is that this code
@ -394,7 +396,8 @@ GetNewTransactionId(TransactionId *xid)
* bootstrap transaction id.
* ----------------
*/
if (AMI_OVERRIDE) {
if (AMI_OVERRIDE)
{
TransactionIdStore(AmiTransactionId, xid);
return;
}
@ -405,7 +408,8 @@ GetNewTransactionId(TransactionId *xid)
* ----------------
*/
if (prefetched_xid_count == 0) {
if (prefetched_xid_count == 0)
{
/* ----------------
* obtain exclusive access to the variable relation page
*
@ -456,8 +460,9 @@ UpdateLastCommittedXid(TransactionId xid)
TransactionId lastid;
/* we assume that spinlock OidGenLockId has been acquired
* prior to entering this function
/*
* we assume that spinlock OidGenLockId has been acquired prior to
* entering this function
*/
/* ----------------
@ -492,7 +497,8 @@ UpdateLastCommittedXid(TransactionId xid)
* ----------------
*/
static void
GetNewObjectIdBlock(Oid *oid_return, /* place to return the new object id */
GetNewObjectIdBlock(Oid * oid_return, /* place to return the new object
* id */
int oid_block_size) /* number of oids desired */
{
Oid nextoid;
@ -560,7 +566,8 @@ GetNewObjectId(Oid *oid_return) /* place to return the new object id */
* ----------------
*/
if (prefetched_oid_count == 0) {
if (prefetched_oid_count == 0)
{
int oid_block_size = VAR_OID_PREFETCH;
/* ----------------
@ -605,7 +612,8 @@ CheckMaxObjectId(Oid assigned_oid)
Oid pass_oid;
if (prefetched_oid_count == 0) /* make sure next/max is set, or reload */
if (prefetched_oid_count == 0) /* make sure next/max is set, or
* reload */
GetNewObjectId(&pass_oid);
/* ----------------
@ -656,4 +664,3 @@ Oid pass_oid;
GetNewObjectId(&pass_oid); /* throw away returned oid */
}

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.13 1997/08/29 09:02:11 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.14 1997/09/07 04:39:38 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@ -253,6 +253,7 @@ SetTransactionFlushEnabled(bool state)
{
TransactionFlushState = (state == true);
}
#endif
/* --------------------------------
@ -267,14 +268,22 @@ IsTransactionState(void)
{
TransactionState s = CurrentTransactionState;
switch (s->state) {
case TRANS_DEFAULT: return false;
case TRANS_START: return true;
case TRANS_INPROGRESS: return true;
case TRANS_COMMIT: return true;
case TRANS_ABORT: return true;
case TRANS_DISABLED: return false;
switch (s->state)
{
case TRANS_DEFAULT:
return false;
case TRANS_START:
return true;
case TRANS_INPROGRESS:
return true;
case TRANS_COMMIT:
return true;
case TRANS_ABORT:
return true;
case TRANS_DISABLED:
return false;
}
/*
* Shouldn't get here, but lint is not happy with this...
*/
@ -315,13 +324,16 @@ OverrideTransactionSystem(bool flag)
{
TransactionState s = CurrentTransactionState;
if (flag == true) {
if (flag == true)
{
if (s->state == TRANS_DISABLED)
return;
SavedTransactionState = s->state;
s->state = TRANS_DISABLED;
} else {
}
else
{
if (s->state != TRANS_DISABLED)
return;
@ -471,6 +483,7 @@ ClearCommandIdCounterOverflowFlag()
{
CommandIdCounterOverflowFlag = false;
}
#endif
/* --------------------------------
@ -481,7 +494,8 @@ void
CommandCounterIncrement()
{
CurrentTransactionStateData.commandId += 1;
if (CurrentTransactionStateData.commandId == FirstCommandId) {
if (CurrentTransactionStateData.commandId == FirstCommandId)
{
CommandIdCounterOverflowFlag = true;
elog(WARN, "You may only have 65535 commands per transaction");
}
@ -534,11 +548,12 @@ AtStart_Cache()
static void
AtStart_Locks()
{
/*
* at present, it is unknown to me what belongs here -cim 3/18/90
*
* There isn't anything to do at the start of a xact for locks.
* -mer 5/24/92
* There isn't anything to do at the start of a xact for locks. -mer
* 5/24/92
*/
}
@ -603,7 +618,8 @@ RecordTransactionCommit()
*/
leak = BufferPoolCheckLeak();
FlushBufferPool(!TransactionFlushEnabled());
if (leak) ResetBufferPool();
if (leak)
ResetBufferPool();
/* ----------------
* have the transaction access methods record the status
@ -618,7 +634,8 @@ RecordTransactionCommit()
*/
leak = BufferPoolCheckLeak();
FlushBufferPool(!TransactionFlushEnabled());
if (leak) ResetBufferPool();
if (leak)
ResetBufferPool();
}
@ -823,8 +840,8 @@ StartTransaction()
s->state = TRANS_INPROGRESS;
/*
* Let others to know about current transaction is in progress
* - vadim 11/26/96
* Let others to know about current transaction is in progress - vadim
* 11/26/96
*/
if (MyProc != (PROC *) NULL)
MyProc->xid = s->transactionIdData;
@ -893,8 +910,8 @@ CommitTransaction()
}
/*
* Let others to know about no transaction in progress
* - vadim 11/26/96
* Let others to know about no transaction in progress - vadim
* 11/26/96
*/
if (MyProc != (PROC *) NULL)
MyProc->xid = InvalidTransactionId;
@ -911,8 +928,8 @@ AbortTransaction()
TransactionState s = CurrentTransactionState;
/*
* Let others to know about no transaction in progress
* - vadim 11/26/96
* Let others to know about no transaction in progress - vadim
* 11/26/96
*/
if (MyProc != (PROC *) NULL)
MyProc->xid = InvalidTransactionId;
@ -954,10 +971,12 @@ AbortTransaction()
*/
s->state = TRANS_DEFAULT;
{
/* We need to do this in case another process notified us while
we are in the middle of an aborted transaction. We need to
notify our frontend after we finish the current transaction.
-- jw, 1/3/94
/*
* We need to do this in case another process notified us while we
* are in the middle of an aborted transaction. We need to notify
* our frontend after we finish the current transaction. -- jw,
* 1/3/94
*/
if (IsNormalProcessingMode())
Async_NotifyAtAbort();
@ -973,7 +992,8 @@ StartTransactionCommand()
{
TransactionState s = CurrentTransactionState;
switch(s->blockState) {
switch (s->blockState)
{
/* ----------------
* if we aren't in a transaction block, we
* just do our usual start transaction.
@ -1043,6 +1063,7 @@ StartTransactionCommand()
break;
}
}
/* --------------------------------
* CommitTransactionCommand
* --------------------------------
@ -1052,7 +1073,8 @@ CommitTransactionCommand()
{
TransactionState s = CurrentTransactionState;
switch(s->blockState) {
switch (s->blockState)
{
/* ----------------
* if we aren't in a transaction block, we
* just do our usual transaction commit
@ -1134,7 +1156,8 @@ AbortCurrentTransaction()
{
TransactionState s = CurrentTransactionState;
switch(s->blockState) {
switch (s->blockState)
{
/* ----------------
* if we aren't in a transaction block, we
* just do our usual abort transaction.
@ -1261,7 +1284,8 @@ EndTransactionBlock(void)
if (s->state == TRANS_DISABLED)
return;
if (s->blockState == TBLOCK_INPROGRESS) {
if (s->blockState == TBLOCK_INPROGRESS)
{
/* ----------------
* here we are in a transaction block which should commit
* when we get to the upcoming CommitTransactionCommand()
@ -1274,7 +1298,8 @@ EndTransactionBlock(void)
return;
}
if (s->blockState == TBLOCK_ABORT) {
if (s->blockState == TBLOCK_ABORT)
{
/* ----------------
* here, we are in a transaction block which aborted
* and since the AbortTransaction() was already done,
@ -1316,7 +1341,8 @@ AbortTransactionBlock(void)
if (s->state == TRANS_DISABLED)
return;
if (s->blockState == TBLOCK_INPROGRESS) {
if (s->blockState == TBLOCK_INPROGRESS)
{
/* ----------------
* here we were inside a transaction block something
* screwed up inside the system so we enter the abort state,
@ -1347,6 +1373,7 @@ AbortTransactionBlock(void)
AbortTransaction();
s->blockState = TBLOCK_ENDABORT;
}
#endif
/* --------------------------------
@ -1366,16 +1393,18 @@ UserAbortTransactionBlock()
return;
/*
* if the transaction has already been automatically aborted with an error,
* and the user subsequently types 'abort', allow it. (the behavior is
* the same as if they had typed 'end'.)
* if the transaction has already been automatically aborted with an
* error, and the user subsequently types 'abort', allow it. (the
* behavior is the same as if they had typed 'end'.)
*/
if (s->blockState == TBLOCK_ABORT) {
if (s->blockState == TBLOCK_ABORT)
{
s->blockState = TBLOCK_ENDABORT;
return;
}
if (s->blockState == TBLOCK_INPROGRESS) {
if (s->blockState == TBLOCK_INPROGRESS)
{
/* ----------------
* here we were inside a transaction block and we
* got an abort command from the user, so we move to
@ -1420,7 +1449,8 @@ IsTransactionBlock()
TransactionState s = CurrentTransactionState;
if (s->blockState == TBLOCK_INPROGRESS
|| s->blockState == TBLOCK_ENDABORT) {
|| s->blockState == TBLOCK_ENDABORT)
{
return (true);
}

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/xid.c,v 1.7 1997/08/19 21:30:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/xid.c,v 1.8 1997/09/07 04:39:40 momjian Exp $
*
* OLD COMMENTS
* XXX WARNING
@ -142,6 +142,7 @@ TransactionIdIncrement(TransactionId *transactionId)
elog(FATAL, "TransactionIdIncrement: exhausted XID's");
return;
}
#endif
/* ----------------------------------------------------------------
@ -154,4 +155,3 @@ TransactionIdAdd(TransactionId *xid, int value)
*xid += value;
return;
}

@ -7,7 +7,7 @@
* Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.21 1997/08/19 21:30:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.22 1997/09/07 04:39:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -137,7 +137,8 @@ static void cleanup(void);
char *strtable[STRTABLESIZE];
hashnode *hashtable[HASHTABLESIZE];
static int strtable_end = -1; /* Tells us last occupied string space */
static int strtable_end = -1; /* Tells us last occupied string
* space */
/*-
* Basic information associated with each type. This is used before
@ -147,7 +148,8 @@ static int strtable_end = -1; /* Tells us last occupied string space */
* (e.g., F_REGPROCIN scans pg_proc). this obviously creates some
* order dependencies in the catalog creation process.
*/
struct typinfo {
struct typinfo
{
char name[NAMEDATALEN];
Oid oid;
Oid elem;
@ -180,7 +182,8 @@ static struct typinfo Procid[] = {
static int n_types = sizeof(Procid) / sizeof(struct typinfo);
struct typmap { /* a hack */
struct typmap
{ /* a hack */
Oid am_oid;
TypeTupleFormData am_typ;
};
@ -200,14 +203,17 @@ extern int fsyncOff; /* do not fsync the database */
#ifndef HAVE_SIGSETJMP
static jmp_buf Warn_restart;
#define sigsetjmp(x,y) setjmp(x)
#define siglongjmp longjmp
#else
static sigjmp_buf Warn_restart;
#endif
int DebugMode;
static GlobalMemory nogc = (GlobalMemory) NULL; /* special no-gc mem context */
static GlobalMemory nogc = (GlobalMemory) NULL; /* special no-gc mem
* context */
extern int optind;
extern char *optarg;
@ -218,7 +224,8 @@ extern char *optarg;
* to allow us to build the indices after they've been declared.
*/
typedef struct _IndexList {
typedef struct _IndexList
{
char *il_heap;
char *il_ind;
int il_natts;
@ -235,7 +242,8 @@ static IndexList *ILHead = (IndexList *) NULL;
typedef void (*sig_func) ();
/* ----------------------------------------------------------------
* misc functions
* ----------------------------------------------------------------
@ -289,7 +297,8 @@ BootstrapMain(int argc, char *argv[])
int portFd = -1;
char *dbName;
int flag;
int override = 1; /* use BootstrapProcessing or InitProcessing mode */
int override = 1; /* use BootstrapProcessing or
* InitProcessing mode */
extern int optind;
extern char *optarg;
@ -322,8 +331,10 @@ BootstrapMain(int argc, char *argv[])
dbName = NULL;
DataDir = getenv("PGDATA"); /* Null if no PGDATA variable */
while ((flag = getopt(argc, argv, "D:dCOQP:F")) != EOF) {
switch (flag) {
while ((flag = getopt(argc, argv, "D:dCOQP:F")) != EOF)
{
switch (flag)
{
case 'D':
DataDir = optarg;
break;
@ -351,14 +362,17 @@ BootstrapMain(int argc, char *argv[])
}
} /* while */
if (argc - optind > 1) {
if (argc - optind > 1)
{
usage();
} else
if (argc - optind == 1) {
}
else if (argc - optind == 1)
{
dbName = argv[optind];
}
if (!DataDir) {
if (!DataDir)
{
fprintf(stderr, "%s does not know where to find the database system "
"data. You must specify the directory that contains the "
"database system either by specifying the -D invocation "
@ -367,9 +381,11 @@ BootstrapMain(int argc, char *argv[])
exitpg(1);
}
if (dbName == NULL) {
if (dbName == NULL)
{
dbName = getenv("USER");
if (dbName == NULL) {
if (dbName == NULL)
{
fputs("bootstrap backend: failed, no db name specified\n", stderr);
fputs(" and no USER enviroment variable\n", stderr);
exitpg(1);
@ -380,7 +396,8 @@ BootstrapMain(int argc, char *argv[])
* initialize input fd
* ----------------
*/
if (IsUnderPostmaster == true && portFd < 0) {
if (IsUnderPostmaster == true && portFd < 0)
{
fputs("backend: failed, no -P option with -postmaster opt.\n", stderr);
exitpg(1);
}
@ -399,7 +416,8 @@ BootstrapMain(int argc, char *argv[])
InitPostgres(dbName);
LockDisable(true);
for (i = 0 ; i < MAXATTR; i++) {
for (i = 0; i < MAXATTR; i++)
{
attrtypes[i] = (AttributeTupleForm) NULL;
Blanks[i] = ' ';
}
@ -415,9 +433,11 @@ BootstrapMain(int argc, char *argv[])
#ifndef win32
pqsignal(SIGHUP, handle_warn);
if (sigsetjmp(Warn_restart, 1) != 0) {
if (sigsetjmp(Warn_restart, 1) != 0)
{
#else
if (setjmp(Warn_restart) != 0) {
if (setjmp(Warn_restart) != 0)
{
#endif /* win32 */
Warnings++;
AbortCurrentTransaction();
@ -428,9 +448,10 @@ BootstrapMain(int argc, char *argv[])
* ----------------
*/
/* the sed script boot.sed renamed yyparse to Int_yyparse
for the bootstrap parser to avoid conflicts with the normal SQL
parser */
/*
* the sed script boot.sed renamed yyparse to Int_yyparse for the
* bootstrap parser to avoid conflicts with the normal SQL parser
*/
Int_yyparse();
/* clean up processing */
@ -463,7 +484,8 @@ boot_openrel(char *relname)
if (strlen(relname) > 15)
relname[15] = '\000';
if (Typ == (struct typmap **)NULL) {
if (Typ == (struct typmap **) NULL)
{
StartPortalAllocMode(DefaultAllocMode, 0);
rdesc = heap_openr(TypeRelationName);
sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 0, (ScanKey) NULL);
@ -475,7 +497,8 @@ boot_openrel(char *relname)
*app = (struct typmap *) NULL;
sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 0, (ScanKey) NULL);
app = Typ;
while (PointerIsValid(tup = heap_getnext(sdesc, 0, (Buffer *)NULL))) {
while (PointerIsValid(tup = heap_getnext(sdesc, 0, (Buffer *) NULL)))
{
(*app)->am_oid = tup->t_oid;
memmove((char *) &(*app++)->am_typ,
(char *) GETSTRUCT(tup),
@ -486,7 +509,8 @@ boot_openrel(char *relname)
EndPortalAllocMode();
}
if (reldesc != NULL) {
if (reldesc != NULL)
{
closerel(NULL);
}
@ -497,8 +521,10 @@ boot_openrel(char *relname)
reldesc = heap_openr(relname);
Assert(reldesc);
numattr = reldesc->rd_rel->relnatts;
for (i = 0; i < numattr; i++) {
if (attrtypes[i] == NULL) {
for (i = 0; i < numattr; i++)
{
if (attrtypes[i] == NULL)
{
attrtypes[i] = AllocateAttribute();
}
memmove((char *) attrtypes[i],
@ -506,8 +532,10 @@ boot_openrel(char *relname)
ATTRIBUTE_TUPLE_SIZE);
/* Some old pg_attribute tuples might not have attisset. */
/* If the attname is attisset, don't look for it - it may
not be defined yet.
/*
* If the attname is attisset, don't look for it - it may not be
* defined yet.
*/
if (namestrcmp(&attrtypes[i]->attname, "attisset") == 0)
attrtypes[i]->attisset = get_attisset(reldesc->rd_id,
@ -515,8 +543,10 @@ boot_openrel(char *relname)
else
attrtypes[i]->attisset = false;
if (DebugMode) {
if (DebugMode)
{
AttributeTupleForm at = attrtypes[i];
printf("create attribute %d name %s len %d num %d type %d\n",
i, at->attname.data, at->attlen, at->attnum,
at->atttypid
@ -533,27 +563,35 @@ boot_openrel(char *relname)
void
closerel(char *name)
{
if (name) {
if (reldesc) {
if (name)
{
if (reldesc)
{
if (namestrcmp(RelationGetRelationName(reldesc), name) != 0)
elog(WARN, "closerel: close of '%s' when '%s' was expected",
name, relname ? relname : "(null)");
} else
}
else
elog(WARN, "closerel: close of '%s' before any relation was opened",
name);
}
if (reldesc == NULL) {
if (reldesc == NULL)
{
elog(WARN, "Warning: no opened relation to close.\n");
} else {
if (!Quiet) printf("Amclose: relation %s.\n", relname ? relname : "(null)");
}
else
{
if (!Quiet)
printf("Amclose: relation %s.\n", relname ? relname : "(null)");
heap_close(reldesc);
reldesc = (Relation) NULL;
}
}
/* ----------------
* DEFINEATTR()
*
@ -568,7 +606,8 @@ DefineAttr(char *name, char *type, int attnum)
int attlen;
int t;
if (reldesc != NULL) {
if (reldesc != NULL)
{
fputs("Warning: no open relations allowed with 't' command.\n", stderr);
closerel(relname);
}
@ -576,17 +615,22 @@ DefineAttr(char *name, char *type, int attnum)
t = gettype(type);
if (attrtypes[attnum] == (AttributeTupleForm) NULL)
attrtypes[attnum] = AllocateAttribute();
if (Typ != (struct typmap **)NULL) {
if (Typ != (struct typmap **) NULL)
{
attrtypes[attnum]->atttypid = Ap->am_oid;
namestrcpy(&attrtypes[attnum]->attname, name);
if (!Quiet) printf("<%s %s> ", attrtypes[attnum]->attname.data, type);
if (!Quiet)
printf("<%s %s> ", attrtypes[attnum]->attname.data, type);
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
attlen = attrtypes[attnum]->attlen = Ap->am_typ.typlen;
attrtypes[attnum]->attbyval = Ap->am_typ.typbyval;
} else {
}
else
{
attrtypes[attnum]->atttypid = Procid[t].oid;
namestrcpy(&attrtypes[attnum]->attname, name);
if (!Quiet) printf("<%s %s> ", attrtypes[attnum]->attname.data, type);
if (!Quiet)
printf("<%s %s> ", attrtypes[attnum]->attname.data, type);
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
attlen = attrtypes[attnum]->attlen = Procid[t].len;
attrtypes[attnum]->attbyval = (attlen == 1) || (attlen == 2) || (attlen == 4);
@ -607,7 +651,8 @@ InsertOneTuple(Oid objectid)
int i;
if (DebugMode) {
if (DebugMode)
{
printf("InsertOneTuple oid %d, %d attrs\n", objectid, numattr);
fflush(stdout);
}
@ -616,15 +661,18 @@ InsertOneTuple(Oid objectid)
tuple = heap_formtuple(tupDesc, (Datum *) values, Blanks);
pfree(tupDesc); /* just free's tupDesc, not the attrtypes */
if(objectid !=(Oid)0) {
if (objectid != (Oid) 0)
{
tuple->t_oid = objectid;
}
heap_insert(reldesc, tuple);
pfree(tuple);
if (DebugMode) {
if (DebugMode)
{
printf("End InsertOneTuple, objectid=%d\n", objectid);
fflush(stdout);
}
/*
* Reset blanks for next tuple
*/
@ -645,20 +693,24 @@ InsertOneValue(Oid objectid, char *value, int i)
if (DebugMode)
printf("Inserting value: '%s'\n", value);
if (i < 0 || i >= MAXATTR) {
if (i < 0 || i >= MAXATTR)
{
printf("i out of range: %d\n", i);
Assert(0);
}
if (Typ != (struct typmap **)NULL) {
if (Typ != (struct typmap **) NULL)
{
struct typmap *ap;
if (DebugMode)
puts("Typ != NULL");
app = Typ;
while (*app && (*app)->am_oid != reldesc->rd_att->attrs[i]->atttypid)
++app;
ap = *app;
if (ap == NULL) {
if (ap == NULL)
{
printf("Unable to find atttypid in Typ list! %d\n",
reldesc->rd_att->attrs[i]->atttypid
);
@ -667,13 +719,17 @@ InsertOneValue(Oid objectid, char *value, int i)
values[i] = fmgr(ap->am_typ.typinput,
value,
ap->am_typ.typelem,
-1); /* shouldn't have char() or varchar() types
during boostrapping but just to be safe */
-1); /* shouldn't have char() or varchar()
* types during boostrapping but just to
* be safe */
prt = fmgr(ap->am_typ.typoutput, values[i],
ap->am_typ.typelem);
if (!Quiet) printf("%s ", prt);
if (!Quiet)
printf("%s ", prt);
pfree(prt);
} else {
}
else
{
typeindex = attrtypes[i]->atttypid - FIRST_TYPE_OID;
if (DebugMode)
printf("Typ == NULL, typeindex = %d idx = %d\n", typeindex, i);
@ -681,10 +737,12 @@ InsertOneValue(Oid objectid, char *value, int i)
Procid[typeindex].elem, -1);
prt = fmgr(Procid[typeindex].outproc, values[i],
Procid[typeindex].elem);
if (!Quiet) printf("%s ", prt);
if (!Quiet)
printf("%s ", prt);
pfree(prt);
}
if (DebugMode) {
if (DebugMode)
{
puts("End InsertValue");
fflush(stdout);
}
@ -699,7 +757,8 @@ InsertOneNull(int i)
{
if (DebugMode)
printf("Inserting null\n");
if (i < 0 || i >= MAXATTR) {
if (i < 0 || i >= MAXATTR)
{
elog(FATAL, "i out of range (too many attrs): %d\n", i);
}
values[i] = (char *) NULL;
@ -718,13 +777,16 @@ BootstrapAlreadySeen(Oid id)
seenthis = false;
for (i=0; i < nseen; i++) {
if (seenArray[i] == id) {
for (i = 0; i < nseen; i++)
{
if (seenArray[i] == id)
{
seenthis = true;
break;
}
}
if (!seenthis) {
if (!seenthis)
{
seenArray[nseen] = id;
nseen++;
}
@ -742,11 +804,13 @@ cleanup()
if (!beenhere)
beenhere = 1;
else {
else
{
elog(FATAL, "Memory manager fault: cleanup called twice.\n", stderr);
exitpg(1);
}
if (reldesc != (Relation)NULL) {
if (reldesc != (Relation) NULL)
{
heap_close(reldesc);
}
CommitTransactionCommand();
@ -766,16 +830,23 @@ gettype(char *type)
HeapTuple tup;
struct typmap **app;
if (Typ != (struct typmap **)NULL) {
for (app = Typ; *app != (struct typmap *)NULL; app++) {
if (strncmp((*app)->am_typ.typname.data, type, NAMEDATALEN) == 0) {
if (Typ != (struct typmap **) NULL)
{
for (app = Typ; *app != (struct typmap *) NULL; app++)
{
if (strncmp((*app)->am_typ.typname.data, type, NAMEDATALEN) == 0)
{
Ap = *app;
return ((*app)->am_oid);
}
}
} else {
for (i = 0; i <= n_types; i++) {
if (strncmp(type, Procid[i].name, NAMEDATALEN) == 0) {
}
else
{
for (i = 0; i <= n_types; i++)
{
if (strncmp(type, Procid[i].name, NAMEDATALEN) == 0)
{
return (i);
}
}
@ -793,7 +864,8 @@ gettype(char *type)
*app = (struct typmap *) NULL;
sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 0, (ScanKey) NULL);
app = Typ;
while (PointerIsValid(tup = heap_getnext(sdesc, 0, (Buffer *)NULL))) {
while (PointerIsValid(tup = heap_getnext(sdesc, 0, (Buffer *) NULL)))
{
(*app)->am_oid = tup->t_oid;
memmove((char *) &(*app++)->am_typ,
(char *) GETSTRUCT(tup),
@ -819,7 +891,8 @@ AllocateAttribute()
AttributeTupleForm attribute =
(AttributeTupleForm) malloc(ATTRIBUTE_TUPLE_SIZE);
if (!PointerIsValid(attribute)) {
if (!PointerIsValid(attribute))
{
elog(FATAL, "AllocateAttribute: malloc failed");
}
memset(attribute, 0, ATTRIBUTE_TUPLE_SIZE);
@ -844,8 +917,10 @@ AllocateAttribute()
char *
MapArrayTypeName(char *s)
{
int i, j;
static char newStr[NAMEDATALEN]; /* array type names < NAMEDATALEN long */
int i,
j;
static char newStr[NAMEDATALEN]; /* array type names <
* NAMEDATALEN long */
if (s == NULL || s[0] == '\0')
return s;
@ -875,9 +950,12 @@ EnterString (char *str)
len = strlen(str);
node = FindStr(str, len, 0);
if (node) {
if (node)
{
return (node->strnum);
} else {
}
else
{
node = AddStr(str, len, 0);
return (node->strnum);
}
@ -928,16 +1006,21 @@ static hashnode *
FindStr(char *str, int length, hashnode * mderef)
{
hashnode *node;
node = hashtable[CompHash(str, length)];
while (node != NULL) {
while (node != NULL)
{
/*
* We must differentiate between string constants that
* might have the same value as a identifier
* and the identifier itself.
* We must differentiate between string constants that might have
* the same value as a identifier and the identifier itself.
*/
if (!strcmp(str, strtable[node->strnum])) {
if (!strcmp(str, strtable[node->strnum]))
{
return (node); /* no need to check */
} else {
}
else
{
node = node->next;
}
}
@ -957,11 +1040,14 @@ FindStr(char *str, int length, hashnode *mderef)
static hashnode *
AddStr(char *str, int strlength, int mderef)
{
hashnode *temp, *trail, *newnode;
hashnode *temp,
*trail,
*newnode;
int hashresult;
int len;
if (++strtable_end == STRTABLESIZE) {
if (++strtable_end == STRTABLESIZE)
{
/* Error, string table overflow, so we Punt */
elog(FATAL,
"There are too many string constants and identifiers for the compiler to handle.");
@ -970,11 +1056,11 @@ AddStr(char *str, int strlength, int mderef)
}
/*
* Some of the utilites (eg, define type, create relation) assume
* that the string they're passed is a NAMEDATALEN. We get array bound
* read violations from purify if we don't allocate at least NAMEDATALEN
* bytes for strings of this sort. Because we're lazy, we allocate
* at least NAMEDATALEN bytes all the time.
* Some of the utilites (eg, define type, create relation) assume that
* the string they're passed is a NAMEDATALEN. We get array bound
* read violations from purify if we don't allocate at least
* NAMEDATALEN bytes for strings of this sort. Because we're lazy, we
* allocate at least NAMEDATALEN bytes all the time.
*/
if ((len = strlength + 1) < NAMEDATALEN)
@ -992,12 +1078,16 @@ AddStr(char *str, int strlength, int mderef)
/* Find out where it goes */
hashresult = CompHash(str, strlength);
if (hashtable [hashresult] == NULL) {
if (hashtable[hashresult] == NULL)
{
hashtable[hashresult] = newnode;
} else { /* There is something in the list */
}
else
{ /* There is something in the list */
trail = hashtable[hashresult];
temp = trail->next;
while (temp != NULL) {
while (temp != NULL)
{
trail = temp;
temp = temp->next;
}
@ -1034,9 +1124,9 @@ index_register(char *heap,
MemoryContext oldcxt;
/*
* XXX mao 10/31/92 -- don't gc index reldescs, associated info
* at bootstrap time. we'll declare the indices now, but want to
* create them later.
* XXX mao 10/31/92 -- don't gc index reldescs, associated info at
* bootstrap time. we'll declare the indices now, but want to create
* them later.
*/
if (nogc == (GlobalMemory) NULL)
@ -1057,29 +1147,39 @@ index_register(char *heap,
newind->il_attnos = (AttrNumber *) palloc(len);
memmove(newind->il_attnos, attnos, len);
if ((newind->il_nparams = nparams) > 0) {
if ((newind->il_nparams = nparams) > 0)
{
v = newind->il_params = (Datum *) palloc(2 * nparams * sizeof(Datum));
nparams *= 2;
while (nparams-- > 0) {
while (nparams-- > 0)
{
*v = (Datum) palloc(strlen((char *) (*params)) + 1);
strcpy((char *) *v++, (char *) *params++);
}
} else {
}
else
{
newind->il_params = (Datum *) NULL;
}
if (finfo != (FuncIndexInfo *) NULL) {
if (finfo != (FuncIndexInfo *) NULL)
{
newind->il_finfo = (FuncIndexInfo *) palloc(sizeof(FuncIndexInfo));
memmove(newind->il_finfo, finfo, sizeof(FuncIndexInfo));
} else {
}
else
{
newind->il_finfo = (FuncIndexInfo *) NULL;
}
if (predInfo != NULL) {
if (predInfo != NULL)
{
newind->il_predInfo = (PredInfo *) palloc(sizeof(PredInfo));
newind->il_predInfo->pred = predInfo->pred;
newind->il_predInfo->oldPred = predInfo->oldPred;
} else {
}
else
{
newind->il_predInfo = NULL;
}
@ -1096,7 +1196,8 @@ build_indices()
Relation heap;
Relation ind;
for ( ; ILHead != (IndexList *) NULL; ILHead = ILHead->il_next) {
for (; ILHead != (IndexList *) NULL; ILHead = ILHead->il_next)
{
heap = heap_openr(ILHead->il_heap);
ind = index_openr(ILHead->il_ind);
index_build(heap, ind, ILHead->il_natts, ILHead->il_attnos,
@ -1104,17 +1205,18 @@ build_indices()
ILHead->il_predInfo);
/*
* All of the rest of this routine is needed only because in bootstrap
* processing we don't increment xact id's. The normal DefineIndex
* code replaces a pg_class tuple with updated info including the
* relhasindex flag (which we need to have updated). Unfortunately,
* there are always two indices defined on each catalog causing us to
* update the same pg_class tuple twice for each catalog getting an
* index during bootstrap resulting in the ghost tuple problem (see
* heap_replace). To get around this we change the relhasindex
* field ourselves in this routine keeping track of what catalogs we
* already changed so that we don't modify those tuples twice. The
* normal mechanism for updating pg_class is disabled during bootstrap.
* All of the rest of this routine is needed only because in
* bootstrap processing we don't increment xact id's. The normal
* DefineIndex code replaces a pg_class tuple with updated info
* including the relhasindex flag (which we need to have updated).
* Unfortunately, there are always two indices defined on each
* catalog causing us to update the same pg_class tuple twice for
* each catalog getting an index during bootstrap resulting in the
* ghost tuple problem (see heap_replace). To get around this we
* change the relhasindex field ourselves in this routine keeping
* track of what catalogs we already changed so that we don't
* modify those tuples twice. The normal mechanism for updating
* pg_class is disabled during bootstrap.
*
* -mer
*/
@ -1124,4 +1226,3 @@ build_indices()
UpdateStats(heap->rd_id, 0, true);
}
}
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.7 1997/08/18 20:51:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.8 1997/09/07 04:40:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -31,7 +31,8 @@ relpath(char relname[])
{
char *path;
if (IsSharedSystemRelationName(relname)) {
if (IsSharedSystemRelationName(relname))
{
path = (char *) palloc(strlen(DataDir) + sizeof(NameData) + 2);
sprintf(path, "%s/%s", DataDir, relname);
return (path);
@ -59,6 +60,7 @@ issystem(char relname[])
else
return FALSE;
}
#endif
/*
@ -99,7 +101,8 @@ IsSharedSystemRelationName(char *relname)
return FALSE;
i = 0;
while ( SharedSystemRelationNames[i] != NULL) {
while (SharedSystemRelationNames[i] != NULL)
{
if (strcmp(SharedSystemRelationNames[i], relname) == 0)
return TRUE;
i++;
@ -122,7 +125,8 @@ IsSharedSystemRelationName(char *relname)
* for a block of OID's to be declared as invalid ones to allow
* user programs to use them for temporary object identifiers.
*/
Oid newoid()
Oid
newoid()
{
Oid lastoid;
@ -159,33 +163,42 @@ fillatt(TupleDesc tupleDesc)
if (natts < 0 || natts > MaxHeapAttributeNumber)
elog(WARN, "fillatt: %d attributes is too large", natts);
if (natts == 0) {
if (natts == 0)
{
elog(DEBUG, "fillatt: called with natts == 0");
return;
}
attributeP = &att[0];
for (i = 0; i < natts;) {
for (i = 0; i < natts;)
{
tuple = SearchSysCacheTuple(TYPOID,
Int32GetDatum((*attributeP)->atttypid),
0, 0, 0);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "fillatt: unknown atttypid %ld",
(*attributeP)->atttypid);
} else {
}
else
{
(*attributeP)->attnum = (int16)++ i;
/* Check if the attr is a set before messing with the length
and byval, since those were already set in
TupleDescInitEntry. In fact, this seems redundant
here, but who knows what I'll break if I take it out...
same for char() and varchar() stuff. I share the same
sentiments. This function is poorly written anyway. -ay 6/95
/*
* Check if the attr is a set before messing with the length
* and byval, since those were already set in
* TupleDescInitEntry. In fact, this seems redundant here,
* but who knows what I'll break if I take it out...
*
* same for char() and varchar() stuff. I share the same
* sentiments. This function is poorly written anyway. -ay
* 6/95
*/
if (!(*attributeP)->attisset &&
(*attributeP)->atttypid != BPCHAROID &&
(*attributeP)->atttypid!=VARCHAROID) {
(*attributeP)->atttypid != VARCHAROID)
{
typp = (TypeTupleForm) GETSTRUCT(tuple); /* XXX */
(*attributeP)->attlen = typp->typlen;
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.24 1997/09/05 18:13:45 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.25 1997/09/07 04:40:10 momjian Exp $
*
* INTERFACE ROUTINES
* heap_creatr() - Create an uncataloged heap relation
@ -58,7 +58,8 @@
#include <string.h>
#endif
static void AddPgRelationTuple(Relation pg_class_desc,
static void
AddPgRelationTuple(Relation pg_class_desc,
Relation new_rel_desc, Oid new_rel_oid, int arch, unsigned natts);
static void AddToTempRelList(Relation r);
static void DeletePgAttributeTuples(Relation rdesc);
@ -157,10 +158,12 @@ static AttributeTupleForm HeapAtt[] =
the list of temporary uncatalogued relations that are created.
these relations should be destroyed at the end of transactions
*/
typedef struct tempRelList {
typedef struct tempRelList
{
Relation *rels; /* array of relation descriptors */
int num; /* number of temporary relations */
int size; /* size of space allocated for the rels array */
int size; /* size of space allocated for the rels
* array */
} TempRelList;
#define TEMP_REL_LIST_SIZE 32
@ -199,6 +202,7 @@ heap_creatr(char *name,
char tempname[40];
int isTemp = 0;
int natts = tupDesc->natts;
/* AttributeTupleForm *att = tupDesc->attrs; */
extern GlobalMemory CacheCxt;
@ -308,13 +312,15 @@ heap_creatr(char *name,
if (tupDesc->constr)
rdesc->rd_rel->relchecks = tupDesc->constr->num_check;
for (i = 0; i < natts; i++) {
for (i = 0; i < natts; i++)
{
rdesc->rd_att->attrs[i]->attrelid = relid;
}
rdesc->rd_id = relid;
if (nailme) {
if (nailme)
{
/* for system relations, set the reltype field here */
rdesc->rd_rel->reltype = relid;
}
@ -339,8 +345,9 @@ heap_creatr(char *name,
MemoryContextSwitchTo(oldcxt);
/* add all temporary relations to the tempRels list
so they can be properly disposed of at the end of transaction
/*
* add all temporary relations to the tempRels list so they can be
* properly disposed of at the end of transaction
*/
if (isTemp)
AddToTempRelList(rdesc);
@ -430,10 +437,13 @@ CheckAttributeNames(TupleDesc tupdesc)
* an unknown typid (usually as a result of a 'retrieve into'
* - jolly
*/
for (i = 0; i < natts; i += 1) {
for (j = 0; j < sizeof HeapAtt / sizeof HeapAtt[0]; j += 1) {
for (i = 0; i < natts; i += 1)
{
for (j = 0; j < sizeof HeapAtt / sizeof HeapAtt[0]; j += 1)
{
if (nameeq(&(HeapAtt[j]->attname),
&(tupdesc->attrs[i]->attname))) {
&(tupdesc->attrs[i]->attname)))
{
elog(WARN,
"create: system attribute named \"%s\"",
HeapAtt[j]->attname.data);
@ -451,10 +461,13 @@ CheckAttributeNames(TupleDesc tupdesc)
* next check for repeated attribute names
* ----------------
*/
for (i = 1; i < natts; i += 1) {
for (j = 0; j < i; j += 1) {
for (i = 1; i < natts; i += 1)
{
for (j = 0; j < i; j += 1)
{
if (nameeq(&(tupdesc->attrs[j]->attname),
&(tupdesc->attrs[i]->attname))) {
&(tupdesc->attrs[i]->attname)))
{
elog(WARN,
"create: repeated attribute \"%s\"",
tupdesc->attrs[j]->attname.data);
@ -479,16 +492,19 @@ RelationAlreadyExists(Relation pg_class_desc, char relname[])
HeapTuple tup;
/*
* If this is not bootstrap (initdb) time, use the catalog index
* on pg_class.
* If this is not bootstrap (initdb) time, use the catalog index on
* pg_class.
*/
if (!IsBootstrapProcessingMode()) {
if (!IsBootstrapProcessingMode())
{
tup = ClassNameIndexScan(pg_class_desc, relname);
if (HeapTupleIsValid(tup)) {
if (HeapTupleIsValid(tup))
{
pfree(tup);
return ((int) true);
} else
}
else
return ((int) false);
}
@ -580,7 +596,8 @@ AddNewAttributeTuples(Oid new_rel_oid,
* ----------------
*/
dpp = tupdesc->attrs;
for (i = 0; i < natts; i++) {
for (i = 0; i < natts; i++)
{
(*dpp)->attrelid = new_rel_oid;
(*dpp)->attdisbursion = 0;
@ -601,7 +618,8 @@ AddNewAttributeTuples(Oid new_rel_oid,
* ----------------
*/
dpp = HeapAtt;
for (i = 0; i < -1 - FirstLowInvalidHeapAttributeNumber; i++) {
for (i = 0; i < -1 - FirstLowInvalidHeapAttributeNumber; i++)
{
(*dpp)->attrelid = new_rel_oid;
/* (*dpp)->attdisbursion = 0; unneeded */
@ -645,8 +663,9 @@ AddPgRelationTuple(Relation pg_class_desc,
HeapTuple tup;
Relation idescs[Num_pg_class_indices];
bool isBootstrap;
extern bool ItsSequenceCreation; /* It's hack, I know...
* - vadim 03/28/97 */
extern bool ItsSequenceCreation; /* It's hack, I know... -
* vadim 03/28/97 */
/* ----------------
* first we munge some of the information in our
* uncataloged relation's relation descriptor.
@ -689,10 +708,12 @@ AddPgRelationTuple(Relation pg_class_desc,
heap_insert(pg_class_desc, tup);
if (! isBootstrap) {
if (!isBootstrap)
{
/*
* First, open the catalog indices and insert index tuples for
* the new relation.
* First, open the catalog indices and insert index tuples for the
* new relation.
*/
CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, idescs);
@ -718,15 +739,14 @@ addNewRelationType(char *typeName, Oid new_rel_oid)
{
Oid new_type_oid;
/* The sizes are set to oid size because it makes implementing sets MUCH
* easier, and no one (we hope) uses these fields to figure out
* how much space to allocate for the type.
* An oid is the type used for a set definition. When a user
* requests a set, what they actually get is the oid of a tuple in
* the pg_proc catalog, so the size of the "set" is the size
* of an oid.
* Similarly, byval being true makes sets much easier, and
* it isn't used by anything else.
/*
* The sizes are set to oid size because it makes implementing sets
* MUCH easier, and no one (we hope) uses these fields to figure out
* how much space to allocate for the type. An oid is the type used
* for a set definition. When a user requests a set, what they
* actually get is the oid of a tuple in the pg_proc catalog, so the
* size of the "set" is the size of an oid. Similarly, byval being
* true makes sets much easier, and it isn't used by anything else.
* Note the assumption that OIDs are the same size as int4s.
*/
new_type_oid = TypeCreate(typeName, /* type name */
@ -761,6 +781,7 @@ heap_create(char relname[],
Relation pg_class_desc;
Relation new_rel_desc;
Oid new_rel_oid;
/* NameData typeNameData; */
int natts = tupdesc->natts;
@ -782,7 +803,8 @@ heap_create(char relname[],
*/
pg_class_desc = heap_openr(RelationRelationName);
if (RelationAlreadyExists(pg_class_desc, relname)) {
if (RelationAlreadyExists(pg_class_desc, relname))
{
heap_close(pg_class_desc);
elog(WARN, "amcreate: %s relation already exists", relname);
}
@ -915,7 +937,8 @@ RelationRemoveInheritance(Relation relation)
* ----------------
*/
tuple = heap_getnext(scan, 0, (Buffer *) NULL);
if (HeapTupleIsValid(tuple)) {
if (HeapTupleIsValid(tuple))
{
heap_endscan(scan);
heap_close(catalogRelation);
@ -937,9 +960,11 @@ RelationRemoveInheritance(Relation relation)
1,
&entry);
for (;;) {
for (;;)
{
tuple = heap_getnext(scan, 0, (Buffer *) NULL);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
break;
}
heap_delete(catalogRelation, &tuple->t_ctid);
@ -963,9 +988,11 @@ RelationRemoveInheritance(Relation relation)
1,
&entry);
for (;;) {
for (;;)
{
tuple = heap_getnext(scan, 0, (Buffer *) NULL);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
break;
}
heap_delete(catalogRelation, &tuple->t_ctid);
@ -1000,9 +1027,11 @@ RelationRemoveIndexes(Relation relation)
1,
&entry);
for (;;) {
for (;;)
{
tuple = heap_getnext(scan, 0, (Buffer *) NULL);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
break;
}
@ -1053,7 +1082,8 @@ DeletePgRelationTuple(Relation rdesc)
*/
tup = heap_getnext(pg_class_scan, 0, (Buffer *) NULL);
if (! PointerIsValid(tup)) {
if (!PointerIsValid(tup))
{
heap_endscan(pg_class_scan);
heap_close(pg_class_desc);
elog(WARN, "DeletePgRelationTuple: %s relation nonexistent",
@ -1115,7 +1145,8 @@ DeletePgAttributeTuples(Relation rdesc)
* ----------------
*/
while (tup = heap_getnext(pg_attribute_scan, 0, (Buffer *) NULL),
PointerIsValid(tup)) {
PointerIsValid(tup))
{
heap_delete(pg_attribute_desc, &tup->t_ctid);
}
@ -1184,7 +1215,8 @@ DeletePgTypeTuple(Relation rdesc)
*/
tup = heap_getnext(pg_type_scan, 0, (Buffer *) NULL);
if (! PointerIsValid(tup)) {
if (!PointerIsValid(tup))
{
heap_endscan(pg_type_scan);
heap_close(pg_type_desc);
elog(WARN, "DeletePgTypeTuple: %s type nonexistent",
@ -1220,7 +1252,8 @@ DeletePgTypeTuple(Relation rdesc)
*/
atttup = heap_getnext(pg_attribute_scan, 0, (Buffer *) NULL);
if (PointerIsValid(atttup)) {
if (PointerIsValid(atttup))
{
Oid relid = ((AttributeTupleForm) GETSTRUCT(atttup))->attrelid;
heap_endscan(pg_type_scan);
@ -1287,7 +1320,8 @@ heap_destroy(char *relname)
* remove indexes if necessary
* ----------------
*/
if (rdesc->rd_rel->relhasindex) {
if (rdesc->rd_rel->relhasindex)
{
RelationRemoveIndexes(rdesc);
}
@ -1295,7 +1329,8 @@ heap_destroy(char *relname)
* remove rules if necessary
* ----------------
*/
if (rdesc->rd_rules != NULL) {
if (rdesc->rd_rules != NULL)
{
RelationRemoveRules(rid);
}
@ -1400,7 +1435,8 @@ heap_destroyr(Relation rdesc)
void
InitTempRelList(void)
{
if (tempRels) {
if (tempRels)
{
free(tempRels->rels);
free(tempRels);
}
@ -1427,8 +1463,10 @@ RemoveFromTempRelList(Relation r)
if (!tempRels)
return;
for (i=0; i<tempRels->num; i++) {
if (tempRels->rels[i] == r) {
for (i = 0; i < tempRels->num; i++)
{
if (tempRels->rels[i] == r)
{
tempRels->rels[i] = NULL;
break;
}
@ -1446,7 +1484,8 @@ AddToTempRelList(Relation r)
if (!tempRels)
return;
if (tempRels->num == tempRels->size) {
if (tempRels->num == tempRels->size)
{
tempRels->size += TEMP_REL_LIST_SIZE;
tempRels->rels = realloc(tempRels->rels,
sizeof(Relation) * tempRels->size);
@ -1467,7 +1506,8 @@ DestroyTempRels(void)
if (!tempRels)
return;
for (i=0;i<tempRels->num;i++) {
for (i = 0; i < tempRels->num; i++)
{
rdesc = tempRels->rels[i];
/* rdesc may be NULL if it has been removed from the list already */
if (rdesc)
@ -1479,7 +1519,8 @@ DestroyTempRels(void)
}
extern List *flatten_tlist(List * tlist);
extern List *pg_plan(char *query_string, Oid *typev, int nargs,
extern List *
pg_plan(char *query_string, Oid * typev, int nargs,
QueryTreeList ** queryListP, CommandDest dest);
static void
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.19 1997/08/22 14:10:26 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.20 1997/09/07 04:40:19 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -63,26 +63,31 @@
#define NTUPLES_PER_PAGE(natts) (BLCKSZ/((natts)*AVG_TUPLE_SIZE))
/* non-export function prototypes */
static Oid RelationNameGetObjectId(char *relationName, Relation pg_class,
static Oid
RelationNameGetObjectId(char *relationName, Relation pg_class,
bool setHasIndexAttribute);
static Oid GetHeapRelationOid(char *heapRelationName, char *indexRelationName);
static TupleDesc BuildFuncTupleDesc(FuncIndexInfo * funcInfo);
static TupleDesc ConstructTupleDescriptor(Oid heapoid, Relation heapRelation,
static TupleDesc
ConstructTupleDescriptor(Oid heapoid, Relation heapRelation,
List * attributeList,
int numatts, AttrNumber attNums[]);
static void ConstructIndexReldesc(Relation indexRelation, Oid amoid);
static Oid UpdateRelationRelation(Relation indexRelation);
static void InitializeAttributeOids(Relation indexRelation,
static void
InitializeAttributeOids(Relation indexRelation,
int numatts,
Oid indexoid);
static void
AppendAttributeTuples(Relation indexRelation, int numatts);
static void UpdateIndexRelation(Oid indexoid, Oid heapoid,
static void
UpdateIndexRelation(Oid indexoid, Oid heapoid,
FuncIndexInfo * funcInfo, int natts,
AttrNumber attNums[], Oid classOids[], Node * predicate,
List * attributeList, bool islossy, bool unique);
static void DefaultBuild(Relation heapRelation, Relation indexRelation,
static void
DefaultBuild(Relation heapRelation, Relation indexRelation,
int numberOfAttributes, AttrNumber attributeNumber[],
IndexStrategy indexStrategy, uint16 parameterCount,
Datum parameter[], FuncIndexInfoPtr funcInfo, PredInfo * predInfo);
@ -155,12 +160,15 @@ RelationNameGetObjectId(char *relationName,
* speed this up.
*/
if (!IsBootstrapProcessingMode()) {
if (!IsBootstrapProcessingMode())
{
pg_class_tuple = ClassNameIndexScan(pg_class, relationName);
if (HeapTupleIsValid(pg_class_tuple)) {
if (HeapTupleIsValid(pg_class_tuple))
{
relationObjectId = pg_class_tuple->t_oid;
pfree(pg_class_tuple);
} else
}
else
relationObjectId = InvalidOid;
return (relationObjectId);
@ -184,9 +192,12 @@ RelationNameGetObjectId(char *relationName,
*/
pg_class_tuple = heap_getnext(pg_class_scan, 0, &buffer);
if (! HeapTupleIsValid(pg_class_tuple)) {
if (!HeapTupleIsValid(pg_class_tuple))
{
relationObjectId = InvalidOid;
} else {
}
else
{
relationObjectId = pg_class_tuple->t_oid;
ReleaseBuffer(buffer);
}
@ -357,7 +368,8 @@ ConstructTupleDescriptor(Oid heapoid,
* tuple forms or the relation tuple descriptor
* ----------------
*/
for (i = 0; i < numatts; i += 1) {
for (i = 0; i < numatts; i += 1)
{
/* ----------------
* get the attribute number and make sure it's valid
@ -367,11 +379,14 @@ ConstructTupleDescriptor(Oid heapoid,
if (atnum > natts)
elog(WARN, "Cannot create index: attribute %d does not exist",
atnum);
if (attributeList) {
if (attributeList)
{
IndexKey = (IndexElem *) lfirst(attributeList);
attributeList = lnext(attributeList);
IndexKeyType = IndexKey->tname;
} else {
}
else
{
IndexKeyType = NULL;
}
@ -381,7 +396,8 @@ ConstructTupleDescriptor(Oid heapoid,
* determine which tuple descriptor to copy
* ----------------
*/
if (!AttrNumberIsForUserDefinedAttr(atnum)) {
if (!AttrNumberIsForUserDefinedAttr(atnum))
{
/* ----------------
* here we are indexing on a system attribute (-1...-12)
@ -396,7 +412,9 @@ ConstructTupleDescriptor(Oid heapoid,
from = (char *) (&sysatts[atind]);
} else {
}
else
{
/* ----------------
* here we are indexing on a normal attribute (1...n)
* ----------------
@ -423,9 +441,12 @@ ConstructTupleDescriptor(Oid heapoid,
((AttributeTupleForm) to)->attnotnull = false;
((AttributeTupleForm) to)->atthasdef = false;
/* if the keytype is defined, we need to change the tuple form's
atttypid & attlen field to match that of the key's type */
if (IndexKeyType != NULL) {
/*
* if the keytype is defined, we need to change the tuple form's
* atttypid & attlen field to match that of the key's type
*/
if (IndexKeyType != NULL)
{
HeapTuple tup;
tup = SearchSysCacheTuple(TYPNAME,
@ -439,7 +460,8 @@ ConstructTupleDescriptor(Oid heapoid,
((TypeTupleForm) ((char *) tup + tup->t_hoff))->typbyval;
if (IndexKeyType->typlen > 0)
((AttributeTupleForm) to)->attlen = IndexKeyType->typlen;
else ((AttributeTupleForm) to)->attlen =
else
((AttributeTupleForm) to)->attlen =
((TypeTupleForm) ((char *) tup + tup->t_hoff))->typlen;
}
@ -496,7 +518,8 @@ AccessMethodObjectIdGetAccessMethodTupleForm(Oid accessMethodObjectId)
* return NULL if not found
* ----------------
*/
if (! HeapTupleIsValid(pg_am_tuple)) {
if (!HeapTupleIsValid(pg_am_tuple))
{
heap_endscan(pg_am_scan);
heap_close(pg_am_desc);
return (NULL);
@ -591,7 +614,8 @@ UpdateRelationRelation(Relation indexRelation)
* just before exiting.
*/
if (!IsBootstrapProcessingMode()) {
if (!IsBootstrapProcessingMode())
{
CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, idescs);
CatalogIndexInsert(idescs, Num_pg_class_indices, pg_class, tuple);
CatalogCloseIndices(Num_pg_class_indices, idescs);
@ -674,7 +698,8 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
(char *) (indexRelation->rd_att->attrs[0]));
hasind = false;
if (!IsBootstrapProcessingMode() && pg_attribute->rd_rel->relhasindex) {
if (!IsBootstrapProcessingMode() && pg_attribute->rd_rel->relhasindex)
{
hasind = true;
CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
}
@ -701,7 +726,8 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
*/
indexTupDesc = RelationGetTupleDescriptor(indexRelation);
for (i = 1; i < numatts; i += 1) {
for (i = 1; i < numatts; i += 1)
{
/* ----------------
* process the remaining attributes...
* ----------------
@ -764,7 +790,8 @@ UpdateIndexRelation(Oid indexoid,
IndexElem *IndexKey;
char *predString;
text *predText;
int predLen, itupLen;
int predLen,
itupLen;
Relation pg_index;
HeapTuple tuple;
int i;
@ -774,11 +801,14 @@ UpdateIndexRelation(Oid indexoid,
* index-predicate (if any) in string form
* ----------------
*/
if (predicate != NULL) {
if (predicate != NULL)
{
predString = nodeToString(predicate);
predText = (text *) fmgr(F_TEXTIN, predString);
pfree(predString);
} else {
}
else
{
predText = (text *) fmgr(F_TEXTIN, "");
}
predLen = VARSIZE(predText);
@ -817,10 +847,12 @@ UpdateIndexRelation(Oid indexoid,
* copy index key and op class information
* ----------------
*/
for (i = 0; i < natts; i += 1) {
for (i = 0; i < natts; i += 1)
{
indexForm->indkey[i] = attNums[i];
indexForm->indclass[i] = classOids[i];
}
/*
* If we have a functional index, add all attribute arguments
*/
@ -897,7 +929,8 @@ UpdateIndexPredicate(Oid indexoid, Node *oldPred, Node *predicate)
* changing "a>2 OR TRUE" to "TRUE". --Nels, Jan '93
*/
newPred = NULL;
if (predicate != NULL) {
if (predicate != NULL)
{
newPred =
(Node *) make_orclause(lcons(make_andclause((List *) predicate),
lcons(make_andclause((List *) oldPred),
@ -906,11 +939,14 @@ UpdateIndexPredicate(Oid indexoid, Node *oldPred, Node *predicate)
}
/* translate the index-predicate to string form */
if (newPred != NULL) {
if (newPred != NULL)
{
predString = nodeToString(newPred);
predText = (text *) fmgr(F_TEXTIN, predString);
pfree(predString);
} else {
}
else
{
predText = (text *) fmgr(F_TEXTIN, "");
}
@ -925,7 +961,8 @@ UpdateIndexPredicate(Oid indexoid, Node *oldPred, Node *predicate)
tuple = heap_getnext(scan, 0, &buffer);
heap_endscan(scan);
for (i = 0; i < Natts_pg_index; i++) {
for (i = 0; i < Natts_pg_index; i++)
{
nulls[i] = heap_attisnull(tuple, i + 1) ? 'n' : ' ';
replace[i] = ' ';
values[i] = (Datum) NULL;
@ -987,11 +1024,14 @@ InitIndexStrategy(int numatts,
strategy = (IndexStrategy)
MemoryContextAlloc((MemoryContext) CacheCxt, strsize);
if (amsupport > 0) {
if (amsupport > 0)
{
strsize = numatts * (amsupport * sizeof(RegProcedure));
support = (RegProcedure *) MemoryContextAlloc((MemoryContext) CacheCxt,
strsize);
} else {
}
else
{
support = (RegProcedure *) NULL;
}
@ -1118,7 +1158,8 @@ index_create(char *heapRelationName,
PointerGetDatum(FIgetArglist(funcInfo)),
0);
if (!HeapTupleIsValid(proc_tup)) {
if (!HeapTupleIsValid(proc_tup))
{
func_error("index_create", FIgetname(funcInfo),
FIgetnArgs(funcInfo),
FIgetArglist(funcInfo));
@ -1162,16 +1203,19 @@ index_create(char *heapRelationName,
InitIndexStrategy(numatts, indexRelation, accessMethodObjectId);
/*
* If this is bootstrap (initdb) time, then we don't actually
* fill in the index yet. We'll be creating more indices and classes
* later, so we delay filling them in until just before we're done
* with bootstrapping. Otherwise, we call the routine that constructs
* the index. The heap and index relations are closed by index_build().
* If this is bootstrap (initdb) time, then we don't actually fill in
* the index yet. We'll be creating more indices and classes later,
* so we delay filling them in until just before we're done with
* bootstrapping. Otherwise, we call the routine that constructs the
* index. The heap and index relations are closed by index_build().
*/
if (IsBootstrapProcessingMode()) {
if (IsBootstrapProcessingMode())
{
index_register(heapRelationName, indexRelationName, numatts, attNums,
parameterCount, parameter, funcInfo, predInfo);
} else {
}
else
{
heapRelation = heap_openr(heapRelationName);
index_build(heapRelation, indexRelation, numatts, attNums,
parameterCount, parameter, funcInfo, predInfo);
@ -1227,7 +1271,8 @@ index_destroy(Oid indexId)
scan = heap_beginscan(catalogRelation, 0, NowTimeQual, 1, &entry);
while (tuple = heap_getnext(scan, 0, (Buffer *) NULL),
HeapTupleIsValid(tuple)) {
HeapTupleIsValid(tuple))
{
heap_delete(catalogRelation, &tuple->t_ctid);
}
@ -1244,7 +1289,8 @@ index_destroy(Oid indexId)
scan = heap_beginscan(catalogRelation, 0, NowTimeQual, 1, &entry);
tuple = heap_getnext(scan, 0, (Buffer *) NULL);
if (! HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(NOTICE, "IndexRelationDestroy: %s's INDEX tuple missing",
RelationGetRelationName(indexRelation));
}
@ -1290,7 +1336,8 @@ FormIndexDatum(int numberOfAttributes,
* ----------------
*/
for (i = 1; i <= numberOfAttributes; i += 1) {
for (i = 1; i <= numberOfAttributes; i += 1)
{
offset = AttrNumberGetAttrOffset(i);
datum[offset] =
@ -1363,7 +1410,8 @@ UpdateStats(Oid relid, long reltuples, bool hasindex)
* ----------------
*/
pg_class = heap_openr(RelationRelationName);
if (! RelationIsValid(pg_class)) {
if (!RelationIsValid(pg_class))
{
elog(WARN, "UpdateStats: could not open RELATION relation");
}
key[0].sk_argument = ObjectIdGetDatum(relid);
@ -1371,7 +1419,8 @@ UpdateStats(Oid relid, long reltuples, bool hasindex)
pg_class_scan =
heap_beginscan(pg_class, 0, NowTimeQual, 1, key);
if (! HeapScanIsValid(pg_class_scan)) {
if (!HeapScanIsValid(pg_class_scan))
{
heap_close(pg_class);
elog(WARN, "UpdateStats: cannot scan RELATION relation");
}
@ -1387,16 +1436,17 @@ UpdateStats(Oid relid, long reltuples, bool hasindex)
relpages = RelationGetNumberOfBlocks(whichRel);
/*
* We shouldn't have to do this, but we do... Modify the reldesc
* in place with the new values so that the cache contains the
* latest copy.
* We shouldn't have to do this, but we do... Modify the reldesc in
* place with the new values so that the cache contains the latest
* copy.
*/
whichRel->rd_rel->relhasindex = hasindex;
whichRel->rd_rel->relpages = relpages;
whichRel->rd_rel->reltuples = reltuples;
for (i = 0; i < Natts_pg_class; i++) {
for (i = 0; i < Natts_pg_class; i++)
{
nulls[i] = heap_attisnull(htup, i + 1) ? 'n' : ' ';
replace[i] = ' ';
values[i] = (Datum) NULL;
@ -1408,18 +1458,21 @@ UpdateStats(Oid relid, long reltuples, bool hasindex)
if (reltuples == 0)
reltuples = relpages * NTUPLES_PER_PAGE(whichRel->rd_rel->relnatts);
if (IsBootstrapProcessingMode()) {
if (IsBootstrapProcessingMode())
{
/*
* At bootstrap time, we don't need to worry about concurrency
* or visibility of changes, so we cheat.
* At bootstrap time, we don't need to worry about concurrency or
* visibility of changes, so we cheat.
*/
rd_rel = (Form_pg_class) GETSTRUCT(htup);
rd_rel->relpages = relpages;
rd_rel->reltuples = reltuples;
rd_rel->relhasindex = hasindex;
} else {
}
else
{
/* during normal processing, work harder */
replace[Anum_pg_class_relpages - 1] = 'r';
values[Anum_pg_class_relpages - 1] = (Datum) relpages;
@ -1490,11 +1543,14 @@ DefaultBuild(Relation heapRelation,
TupleDesc indexDescriptor;
Datum *datum;
char *nullv;
long reltuples, indtuples;
long reltuples,
indtuples;
#ifndef OMIT_PARTIAL_INDEX
ExprContext *econtext;
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
Node *predicate;
Node *oldPred;
@ -1524,18 +1580,20 @@ DefaultBuild(Relation heapRelation,
nullv = (char *) palloc(numberOfAttributes * sizeof *nullv);
/*
* If this is a predicate (partial) index, we will need to evaluate the
* predicate using ExecQual, which requires the current tuple to be in a
* slot of a TupleTable. In addition, ExecQual must have an ExprContext
* referring to that slot. Here, we initialize dummy TupleTable and
* ExprContext objects for this purpose. --Nels, Feb '92
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
* be in a slot of a TupleTable. In addition, ExecQual must have an
* ExprContext referring to that slot. Here, we initialize dummy
* TupleTable and ExprContext objects for this purpose. --Nels, Feb
* '92
*/
predicate = predInfo->pred;
oldPred = predInfo->oldPred;
#ifndef OMIT_PARTIAL_INDEX
if (predicate != NULL || oldPred != NULL) {
if (predicate != NULL || oldPred != NULL)
{
tupleTable = ExecCreateTupleTable(1);
slot = ExecAllocTableSlot(tupleTable);
econtext = makeNode(ExprContext);
@ -1569,7 +1627,8 @@ DefaultBuild(Relation heapRelation,
* ----------------
*/
while (heapTuple = heap_getnext(scan, 0, &buffer),
HeapTupleIsValid(heapTuple)) {
HeapTupleIsValid(heapTuple))
{
reltuples++;
@ -1577,19 +1636,25 @@ DefaultBuild(Relation heapRelation,
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
*/
if (oldPred != NULL) {
if (oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, heapTuple); */
slot->val = heapTuple;
if (ExecQual((List*)oldPred, econtext) == true) {
if (ExecQual((List *) oldPred, econtext) == true)
{
indtuples++;
continue;
}
#endif /* OMIT_PARTIAL_INDEX */
}
/* Skip this tuple if it doesn't satisfy the partial-index predicate */
if (predicate != NULL) {
/*
* Skip this tuple if it doesn't satisfy the partial-index
* predicate
*/
if (predicate != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, heapTuple); */
slot->val = heapTuple;
@ -1623,13 +1688,15 @@ DefaultBuild(Relation heapRelation,
insertResult = index_insert(indexRelation, datum, nullv,
&(heapTuple->t_ctid), heapRelation);
if (insertResult) pfree(insertResult);
if (insertResult)
pfree(insertResult);
pfree(indexTuple);
}
heap_endscan(scan);
if (predicate != NULL || oldPred != NULL) {
if (predicate != NULL || oldPred != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
ExecDestroyTupleTable(tupleTable, false);
#endif /* OMIT_PARTIAL_INDEX */
@ -1639,16 +1706,18 @@ DefaultBuild(Relation heapRelation,
pfree(datum);
/*
* Okay, now update the reltuples and relpages statistics for both
* the heap relation and the index. These statistics are used by
* the planner to choose a scan type. They are maintained generally
* by the vacuum daemon, but we update them here to make the index
* useful as soon as possible.
* Okay, now update the reltuples and relpages statistics for both the
* heap relation and the index. These statistics are used by the
* planner to choose a scan type. They are maintained generally by
* the vacuum daemon, but we update them here to make the index useful
* as soon as possible.
*/
UpdateStats(heapRelation->rd_id, reltuples, true);
UpdateStats(indexRelation->rd_id, indtuples, false);
if (oldPred != NULL) {
if (indtuples == reltuples) predicate = NULL;
if (oldPred != NULL)
{
if (indtuples == reltuples)
predicate = NULL;
UpdateIndexPredicate(indexRelation->rd_id, oldPred, predicate);
}
}
@ -1718,7 +1787,8 @@ IndexIsUnique(Oid indexId)
tuple = SearchSysCacheTuple(INDEXRELID,
ObjectIdGetDatum(indexId),
0, 0, 0);
if(!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "IndexIsUnique: can't find index id %d",
indexId);
}
@ -1760,7 +1830,8 @@ IndexIsUniqueNoCache(Oid indexId)
scandesc = heap_beginscan(pg_index, 0, SelfTimeQual, 1, skey);
tuple = heap_getnext(scandesc, 0, NULL);
if(!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "IndexIsUniqueNoCache: can't find index id %d",
indexId);
}
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.11 1997/08/31 09:56:18 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.12 1997/09/07 04:40:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -61,7 +61,8 @@ char *Name_pg_relcheck_indices[Num_pg_relcheck_indices]= { RelCheckIndex };
char *Name_pg_trigger_indices[Num_pg_trigger_indices] = {TriggerRelidIndex};
static HeapTuple CatalogIndexFetchTuple(Relation heapRelation,
static HeapTuple
CatalogIndexFetchTuple(Relation heapRelation,
Relation idesc,
ScanKey skey);
@ -113,7 +114,8 @@ CatalogIndexInsert(Relation *idescs,
Datum datum;
int natts;
AttrNumber *attnumP;
FuncIndexInfo finfo, *finfoP;
FuncIndexInfo finfo,
*finfoP;
char nulls[INDEX_MAX_KEYS];
int i;
@ -132,9 +134,8 @@ CatalogIndexInsert(Relation *idescs,
pgIndexP = (IndexTupleForm) GETSTRUCT(pgIndexTup);
/*
* Compute the number of attributes we are indexing upon.
* very important - can't assume one if this is a functional
* index.
* Compute the number of attributes we are indexing upon. very
* important - can't assume one if this is a functional index.
*/
for (attnumP = (&pgIndexP->indkey[0]), natts = 0;
*attnumP != InvalidAttrNumber;
@ -163,7 +164,8 @@ CatalogIndexInsert(Relation *idescs,
indexRes = index_insert(idescs[i], &datum, nulls,
&(heapTuple->t_ctid), heapRelation);
if (indexRes) pfree(indexRes);
if (indexRes)
pfree(indexRes);
}
}
@ -187,8 +189,10 @@ CatalogHasIndex(char *catName, Oid catId)
if (IsBootstrapProcessingMode())
return false;
if (IsInitProcessingMode()) {
for (i = 0; IndexedCatalogNames[i] != NULL; i++) {
if (IsInitProcessingMode())
{
for (i = 0; IndexedCatalogNames[i] != NULL; i++)
{
if (strcmp(IndexedCatalogNames[i], catName) == 0)
return (true);
}
@ -199,7 +203,8 @@ CatalogHasIndex(char *catName, Oid catId)
htup = ClassOidIndexScan(pg_class, catId);
heap_close(pg_class);
if (! HeapTupleIsValid(htup)) {
if (!HeapTupleIsValid(htup))
{
elog(NOTICE, "CatalogHasIndex: no relation with oid %d", catId);
return false;
}
@ -229,19 +234,23 @@ CatalogIndexFetchTuple(Relation heapRelation,
sd = index_beginscan(idesc, false, 1, skey);
tuple = (HeapTuple) NULL;
do {
do
{
indexRes = index_getnext(sd, ForwardScanDirection);
if (indexRes) {
if (indexRes)
{
ItemPointer iptr;
iptr = &indexRes->heap_iptr;
tuple = heap_fetch(heapRelation, NowTimeQual, iptr, &buffer);
pfree(indexRes);
} else
}
else
break;
} while (!HeapTupleIsValid(tuple));
if (HeapTupleIsValid(tuple)) {
if (HeapTupleIsValid(tuple))
{
tuple = heap_copytuple(tuple);
ReleaseBuffer(buffer);
}
@ -341,18 +350,23 @@ ProcedureNameIndexScan(Relation heapRelation,
Relation idesc;
ScanKeyData skey;
HeapTuple tuple; /* tuple being tested */
HeapTuple return_tuple; /* The tuple pointer we eventually return */
HeapTuple return_tuple; /* The tuple pointer we eventually
* return */
IndexScanDesc sd;
RetrieveIndexResult indexRes;
Buffer buffer;
Form_pg_proc pgProcP;
bool ScanComplete;
/* The index scan is complete, i.e. we've scanned everything there
is to scan.
/*
* The index scan is complete, i.e. we've scanned everything there is
* to scan.
*/
bool FoundMatch;
/* In scanning pg_proc, we have found a row that meets our search
criteria.
/*
* In scanning pg_proc, we have found a row that meets our search
* criteria.
*/
ScanKeyEntryInitialize(&skey,
@ -366,41 +380,52 @@ ProcedureNameIndexScan(Relation heapRelation,
sd = index_beginscan(idesc, false, 1, &skey);
/*
* for now, we do the work usually done by CatalogIndexFetchTuple
* by hand, so that we can check that the other keys match. when
* for now, we do the work usually done by CatalogIndexFetchTuple by
* hand, so that we can check that the other keys match. when
* multi-key indices are added, they will be used here.
*/
tuple = (HeapTuple) NULL; /* initial value */
ScanComplete = false; /* Scan hasn't begun yet */
FoundMatch = false; /* No match yet; haven't even looked. */
while (!FoundMatch && !ScanComplete) {
while (!FoundMatch && !ScanComplete)
{
indexRes = index_getnext(sd, ForwardScanDirection);
if (indexRes) {
if (indexRes)
{
ItemPointer iptr;
iptr = &indexRes->heap_iptr;
tuple = heap_fetch(heapRelation, NowTimeQual, iptr, &buffer);
pfree(indexRes);
if (HeapTupleIsValid(tuple)) {
/* Here's a row for a procedure that has the sought procedure
name. To be a match, though, we need it to have the
right number and type of arguments too, so we check that
now.
if (HeapTupleIsValid(tuple))
{
/*
* Here's a row for a procedure that has the sought
* procedure name. To be a match, though, we need it to
* have the right number and type of arguments too, so we
* check that now.
*/
pgProcP = (Form_pg_proc) GETSTRUCT(tuple);
if (pgProcP->pronargs == nargs &&
oid8eq(&(pgProcP->proargtypes[0]), argTypes))
FoundMatch = true;
else ReleaseBuffer(buffer);
else
ReleaseBuffer(buffer);
}
} else ScanComplete = true;
}
else
ScanComplete = true;
}
if (FoundMatch) {
if (FoundMatch)
{
Assert(HeapTupleIsValid(tuple));
return_tuple = heap_copytuple(tuple);
ReleaseBuffer(buffer);
} else return_tuple = (HeapTuple)NULL;
}
else
return_tuple = (HeapTuple) NULL;
index_endscan(sd);
index_close(idesc);
@ -430,16 +455,19 @@ ProcedureSrcIndexScan(Relation heapRelation, text *procSrc)
sd = index_beginscan(idesc, false, 1, &skey);
indexRes = index_getnext(sd, ForwardScanDirection);
if (indexRes) {
if (indexRes)
{
ItemPointer iptr;
iptr = &indexRes->heap_iptr;
tuple = heap_fetch(heapRelation, NowTimeQual, iptr, &buffer);
pfree(indexRes);
} else
}
else
tuple = (HeapTuple) NULL;
if (HeapTupleIsValid(tuple)) {
if (HeapTupleIsValid(tuple))
{
tuple = heap_copytuple(tuple);
ReleaseBuffer(buffer);
}
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.5 1997/07/24 20:11:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.6 1997/09/07 04:40:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -96,7 +96,8 @@ AggregateCreate(char *aggName,
elog(WARN, "AggregateCreate: Type '%s' undefined", aggbasetypeName);
xbase = tup->t_oid;
if (aggtransfn1Name) {
if (aggtransfn1Name)
{
tup = SearchSysCacheTuple(TYPNAME,
PointerGetDatum(aggtransfn1typeName),
0, 0, 0);
@ -125,7 +126,8 @@ AggregateCreate(char *aggName,
elog(WARN, "AggregateCreate: bogus function '%s'", aggfinalfnName);
}
if (aggtransfn2Name) {
if (aggtransfn2Name)
{
tup = SearchSysCacheTuple(TYPNAME,
PointerGetDatum(aggtransfn2typeName),
0, 0, 0);
@ -167,7 +169,8 @@ AggregateCreate(char *aggName,
if ((!aggtransfn1Name || !aggtransfn2Name) && aggfinalfnName)
elog(WARN, "AggregateCreate: Aggregate cannot have final function without both transition functions");
if (aggfinalfnName) {
if (aggfinalfnName)
{
fnArgs[0] = xret1;
fnArgs[1] = xret2;
tup = SearchSysCacheTuple(PRONAME,
@ -194,7 +197,8 @@ AggregateCreate(char *aggName,
elog(WARN, "AggregateCreate: transition function 2 MUST have an initial value");
/* initialize nulls and values */
for(i=0; i < Natts_pg_aggregate; i++) {
for (i = 0; i < Natts_pg_aggregate; i++)
{
nulls[i] = ' ';
values[i] = (Datum) NULL;
}
@ -210,7 +214,8 @@ AggregateCreate(char *aggName,
values[Anum_pg_aggregate_aggbasetype - 1] =
ObjectIdGetDatum(xbase);
if (!OidIsValid(xfn1)) {
if (!OidIsValid(xfn1))
{
values[Anum_pg_aggregate_aggtranstype1 - 1] =
ObjectIdGetDatum(InvalidOid);
values[Anum_pg_aggregate_aggtranstype2 - 1] =
@ -218,7 +223,8 @@ AggregateCreate(char *aggName,
values[Anum_pg_aggregate_aggfinaltype - 1] =
ObjectIdGetDatum(xret2);
}
else if (!OidIsValid(xfn2)) {
else if (!OidIsValid(xfn2))
{
values[Anum_pg_aggregate_aggtranstype1 - 1] =
ObjectIdGetDatum(xret1);
values[Anum_pg_aggregate_aggtranstype2 - 1] =
@ -226,7 +232,8 @@ AggregateCreate(char *aggName,
values[Anum_pg_aggregate_aggfinaltype - 1] =
ObjectIdGetDatum(xret1);
}
else {
else
{
values[Anum_pg_aggregate_aggtranstype1 - 1] =
ObjectIdGetDatum(xret1);
values[Anum_pg_aggregate_aggtranstype2 - 1] =
@ -268,7 +275,8 @@ AggNameGetInitVal(char *aggName, Oid basetype, int xfuncno, bool *isNull)
int initValAttno;
Oid transtype;
text *textInitVal;
char *strInitVal, *initVal;
char *strInitVal,
*initVal;
Assert(PointerIsValid(aggName));
Assert(PointerIsValid(isNull));
@ -281,11 +289,14 @@ AggNameGetInitVal(char *aggName, Oid basetype, int xfuncno, bool *isNull)
if (!HeapTupleIsValid(tup))
elog(WARN, "AggNameGetInitVal: cache lookup failed for aggregate '%s'",
aggName);
if (xfuncno == 1) {
if (xfuncno == 1)
{
transtype = ((Form_pg_aggregate) GETSTRUCT(tup))->aggtranstype1;
initValAttno = Anum_pg_aggregate_agginitval1;
}
else /* can only be 1 or 2 */ {
else
/* can only be 1 or 2 */
{
transtype = ((Form_pg_aggregate) GETSTRUCT(tup))->aggtranstype2;
initValAttno = Anum_pg_aggregate_agginitval2;
}
@ -294,15 +305,18 @@ AggNameGetInitVal(char *aggName, Oid basetype, int xfuncno, bool *isNull)
if (!RelationIsValid(aggRel))
elog(WARN, "AggNameGetInitVal: could not open \"%-.*s\"",
AggregateRelationName);
/*
* must use fastgetattr in case one or other of the init values is NULL
* must use fastgetattr in case one or other of the init values is
* NULL
*/
textInitVal = (text *) fastgetattr(tup, initValAttno,
RelationGetTupleDescriptor(aggRel),
isNull);
if (!PointerIsValid(textInitVal))
*isNull = true;
if (*isNull) {
if (*isNull)
{
heap_close(aggRel);
return ((char *) NULL);
}
@ -311,7 +325,8 @@ AggNameGetInitVal(char *aggName, Oid basetype, int xfuncno, bool *isNull)
tup = SearchSysCacheTuple(TYPOID, ObjectIdGetDatum(transtype),
0, 0, 0);
if (!HeapTupleIsValid(tup)) {
if (!HeapTupleIsValid(tup))
{
pfree(strInitVal);
elog(WARN, "AggNameGetInitVal: cache lookup failed on aggregate transition function return type");
}
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.11 1997/08/18 20:52:04 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.12 1997/09/07 04:40:27 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@ -31,23 +31,28 @@
#include <string.h>
#endif
static Oid OperatorGetWithOpenRelation(Relation pg_operator_desc,
static Oid
OperatorGetWithOpenRelation(Relation pg_operator_desc,
const char *operatorName,
Oid leftObjectId,
Oid rightObjectId);
static Oid OperatorGet(char *operatorName,
static Oid
OperatorGet(char *operatorName,
char *leftTypeName,
char *rightTypeName);
static Oid OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
static Oid
OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
char *operatorName,
Oid leftObjectId,
Oid rightObjectId);
static Oid OperatorShellMake(char *operatorName,
static Oid
OperatorShellMake(char *operatorName,
char *leftTypeName,
char *rightTypeName);
static void OperatorDef(char *operatorName,
static void
OperatorDef(char *operatorName,
int definedOK,
char *leftTypeName,
char *rightTypeName,
@ -159,14 +164,16 @@ OperatorGet(char *operatorName,
* Note: types must be defined before operators
* ----------------
*/
if (leftTypeName) {
if (leftTypeName)
{
leftObjectId = TypeGet(leftTypeName, &leftDefined);
if (!OidIsValid(leftObjectId) || !leftDefined)
elog(WARN, "OperatorGet: left type '%s' nonexistent", leftTypeName);
}
if (rightTypeName) {
if (rightTypeName)
{
rightObjectId = TypeGet(rightTypeName, &rightDefined);
if (!OidIsValid(rightObjectId) || !rightDefined)
@ -226,7 +233,8 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
* initialize our nulls[] and values[] arrays
* ----------------
*/
for (i = 0; i < Natts_pg_operator; ++i) {
for (i = 0; i < Natts_pg_operator; ++i)
{
nulls[i] = ' ';
values[i] = (Datum) NULL; /* redundant, but safe */
}
@ -442,7 +450,8 @@ OperatorDef(char *operatorName,
char *leftSortName,
char *rightSortName)
{
register i, j;
register i,
j;
Relation pg_operator_desc;
HeapScanDesc pg_operator_scan;
@ -496,7 +505,8 @@ OperatorDef(char *operatorName,
(OidIsValid(rightTypeId && rightDefined))))
elog(WARN, "OperatorGet: no argument types??");
for (i = 0; i < Natts_pg_operator; ++i) {
for (i = 0; i < Natts_pg_operator; ++i)
{
values[i] = (Datum) NULL;
replaces[i] = 'r';
nulls[i] = ' ';
@ -510,15 +520,18 @@ OperatorDef(char *operatorName,
* ----------------
*/
memset(typeId, 0, 8 * sizeof(Oid));
if (!leftTypeName) {
if (!leftTypeName)
{
typeId[0] = rightTypeId;
nargs = 1;
}
else if (!rightTypeName) {
else if (!rightTypeName)
{
typeId[0] = leftTypeId;
nargs = 1;
}
else {
else
{
typeId[0] = leftTypeId;
typeId[1] = rightTypeId;
nargs = 2;
@ -541,7 +554,8 @@ OperatorDef(char *operatorName,
* find restriction
* ----------------
*/
if (restrictionName) { /* optional */
if (restrictionName)
{ /* optional */
memset(typeId, 0, 8 * sizeof(Oid));
typeId[0] = OIDOID; /* operator OID */
typeId[1] = OIDOID; /* relation OID */
@ -557,14 +571,16 @@ OperatorDef(char *operatorName,
func_error("OperatorDef", restrictionName, 5, typeId);
values[Anum_pg_operator_oprrest - 1] = ObjectIdGetDatum(tup->t_oid);
} else
}
else
values[Anum_pg_operator_oprrest - 1] = ObjectIdGetDatum(InvalidOid);
/* ----------------
* find join - only valid for binary operators
* ----------------
*/
if (joinName) { /* optional */
if (joinName)
{ /* optional */
memset(typeId, 0, 8 * sizeof(Oid));
typeId[0] = OIDOID; /* operator OID */
typeId[1] = OIDOID; /* relation OID 1 */
@ -581,7 +597,8 @@ OperatorDef(char *operatorName,
func_error("OperatorDef", joinName, 5, typeId);
values[Anum_pg_operator_oprjoin - 1] = ObjectIdGetDatum(tup->t_oid);
} else
}
else
values[Anum_pg_operator_oprjoin - 1] = ObjectIdGetDatum(InvalidOid);
/* ----------------
@ -601,23 +618,28 @@ OperatorDef(char *operatorName,
++i; /* Skip "prorettype", this was done above */
/*
* Set up the other operators. If they do not currently exist,
* set up shells in order to get ObjectId's and call OperatorDef
* again later to fill in the shells.
* Set up the other operators. If they do not currently exist, set up
* shells in order to get ObjectId's and call OperatorDef again later
* to fill in the shells.
*/
name[0] = commutatorName;
name[1] = negatorName;
name[2] = leftSortName;
name[3] = rightSortName;
for (j = 0; j < 4; ++j) {
if (name[j]) {
for (j = 0; j < 4; ++j)
{
if (name[j])
{
/* for the commutator, switch order of arguments */
if (j == 0) {
if (j == 0)
{
other_oid = OperatorGet(name[j], rightTypeName, leftTypeName);
commutatorId = other_oid;
} else {
}
else
{
other_oid = OperatorGet(name[j], leftTypeName, rightTypeName);
if (j == 1)
negatorId = other_oid;
@ -625,15 +647,19 @@ OperatorDef(char *operatorName,
if (OidIsValid(other_oid)) /* already in catalogs */
values[i++] = ObjectIdGetDatum(other_oid);
else if (strcmp(operatorName, name[j]) != 0) {
else if (strcmp(operatorName, name[j]) != 0)
{
/* not in catalogs, different from operator */
/* for the commutator, switch order of arguments */
if (j == 0) {
if (j == 0)
{
other_oid = OperatorShellMake(name[j],
rightTypeName,
leftTypeName);
} else {
}
else
{
other_oid = OperatorShellMake(name[j],
leftTypeName,
rightTypeName);
@ -645,22 +671,26 @@ OperatorDef(char *operatorName,
name[j]);
values[i++] = ObjectIdGetDatum(other_oid);
} else /* not in catalogs, same as operator ??? */
}
else
/* not in catalogs, same as operator ??? */
values[i++] = ObjectIdGetDatum(InvalidOid);
} else /* new operator is optional */
}
else
/* new operator is optional */
values[i++] = ObjectIdGetDatum(InvalidOid);
}
/* last three fields were filled in first */
/*
* If we are adding to an operator shell, get its t_ctid and a
* buffer.
* If we are adding to an operator shell, get its t_ctid and a buffer.
*/
pg_operator_desc = heap_openr(OperatorRelationName);
if (operatorObjectId) {
if (operatorObjectId)
{
opKey[0].sk_argument = PointerGetDatum(operatorName);
opKey[1].sk_argument = ObjectIdGetDatum(leftTypeId);
opKey[2].sk_argument = ObjectIdGetDatum(rightTypeId);
@ -672,7 +702,8 @@ OperatorDef(char *operatorName,
opKey);
tup = heap_getnext(pg_operator_scan, 0, &buffer);
if (HeapTupleIsValid(tup)) {
if (HeapTupleIsValid(tup))
{
tup = heap_modifytuple(tup,
buffer,
pg_operator_desc,
@ -684,12 +715,15 @@ OperatorDef(char *operatorName,
setheapoverride(true);
heap_replace(pg_operator_desc, &itemPointerData, tup);
setheapoverride(false);
} else
}
else
elog(WARN, "OperatorDef: no operator %d", other_oid);
heap_endscan(pg_operator_scan);
} else {
}
else
{
tupDesc = pg_operator_desc->rd_att;
tup = heap_formtuple(tupDesc, values, nulls);
@ -700,17 +734,17 @@ OperatorDef(char *operatorName,
heap_close(pg_operator_desc);
/*
* It's possible that we're creating a skeleton operator here for
* the commute or negate attributes of a real operator. If we are,
* then we're done. If not, we may need to update the negator and
* It's possible that we're creating a skeleton operator here for the
* commute or negate attributes of a real operator. If we are, then
* we're done. If not, we may need to update the negator and
* commutator for this attribute. The reason for this is that the
* user may want to create two operators (say < and >=). When he
* defines <, if he uses >= as the negator or commutator, he won't
* be able to insert it later, since (for some reason) define operator
* defines <, if he uses >= as the negator or commutator, he won't be
* able to insert it later, since (for some reason) define operator
* defines it for him. So what he does is to define > without a
* negator or commutator. Then he defines >= with < as the negator
* and commutator. As a side effect, this will update the > tuple
* if it has no commutator or negator defined.
* and commutator. As a side effect, this will update the > tuple if
* it has no commutator or negator defined.
*
* Alstublieft, Tom Vijlbrief.
*/
@ -748,7 +782,8 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
fmgr_info(ObjectIdEqualRegProcedure,
&opKey[0].sk_func, &opKey[0].sk_nargs);
for (i = 0; i < Natts_pg_operator; ++i) {
for (i = 0; i < Natts_pg_operator; ++i)
{
values[i] = (Datum) NULL;
replaces[i] = ' ';
nulls[i] = ' ';
@ -768,21 +803,26 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
tup = heap_getnext(pg_operator_scan, 0, &buffer);
/* if the commutator and negator are the same operator, do one update */
if (commId == negId) {
if (HeapTupleIsValid(tup)) {
if (commId == negId)
{
if (HeapTupleIsValid(tup))
{
OperatorTupleForm t;
t = (OperatorTupleForm) GETSTRUCT(tup);
if (!OidIsValid(t->oprcom)
|| !OidIsValid(t->oprnegate)) {
|| !OidIsValid(t->oprnegate))
{
if (!OidIsValid(t->oprnegate)) {
if (!OidIsValid(t->oprnegate))
{
values[Anum_pg_operator_oprnegate - 1] =
ObjectIdGetDatum(baseId);
replaces[Anum_pg_operator_oprnegate - 1] = 'r';
}
if (!OidIsValid(t->oprcom)) {
if (!OidIsValid(t->oprcom))
{
values[Anum_pg_operator_oprcom - 1] =
ObjectIdGetDatum(baseId);
replaces[Anum_pg_operator_oprcom - 1] = 'r';
@ -816,7 +856,8 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
/* if commutator and negator are different, do two updates */
if (HeapTupleIsValid(tup) &&
!(OidIsValid(((OperatorTupleForm) GETSTRUCT(tup))->oprcom))) {
!(OidIsValid(((OperatorTupleForm) GETSTRUCT(tup))->oprcom)))
{
values[Anum_pg_operator_oprcom - 1] = ObjectIdGetDatum(baseId);
replaces[Anum_pg_operator_oprcom - 1] = 'r';
tup = heap_modifytuple(tup,
@ -851,7 +892,8 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
tup = heap_getnext(pg_operator_scan, 0, &buffer);
if (HeapTupleIsValid(tup) &&
!(OidIsValid(((OperatorTupleForm) GETSTRUCT(tup))->oprnegate))) {
!(OidIsValid(((OperatorTupleForm) GETSTRUCT(tup))->oprnegate)))
{
values[Anum_pg_operator_oprnegate - 1] = ObjectIdGetDatum(baseId);
replaces[Anum_pg_operator_oprnegate - 1] = 'r';
tup = heap_modifytuple(tup,
@ -943,8 +985,10 @@ OperatorCreate(char *operatorName,
char *leftSortName,
char *rightSortName)
{
Oid commObjectId, negObjectId;
Oid leftSortObjectId, rightSortObjectId;
Oid commObjectId,
negObjectId;
Oid leftSortObjectId,
rightSortObjectId;
int definedOK;
if (!leftTypeName && !rightTypeName)
@ -958,25 +1002,29 @@ OperatorCreate(char *operatorName,
commObjectId = OperatorGet(commutatorName, /* commute type order */
rightTypeName,
leftTypeName);
else commObjectId = 0;
else
commObjectId = 0;
if (negatorName)
negObjectId = OperatorGet(negatorName,
leftTypeName,
rightTypeName);
else negObjectId = 0;
else
negObjectId = 0;
if (leftSortName)
leftSortObjectId = OperatorGet(leftSortName,
leftTypeName,
rightTypeName);
else leftSortObjectId = 0;
else
leftSortObjectId = 0;
if (rightSortName)
rightSortObjectId = OperatorGet(rightSortName,
rightTypeName,
leftTypeName);
else rightSortObjectId = 0;
else
rightSortObjectId = 0;
/* ----------------
* Use OperatorDef() to define the specified operator and
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.5 1996/11/08 00:44:34 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.6 1997/09/07 04:40:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -82,26 +82,33 @@ ProcedureCreate(char *procedureName,
parameterCount = 0;
memset(typev, 0, 8 * sizeof(Oid));
foreach (x, argList) {
foreach(x, argList)
{
Value *t = lfirst(x);
if (parameterCount == 8)
elog(WARN, "Procedures cannot take more than 8 arguments");
if (strcmp(strVal(t), "opaque") == 0) {
if (strcmp(languageName, "sql") == 0) {
if (strcmp(strVal(t), "opaque") == 0)
{
if (strcmp(languageName, "sql") == 0)
{
elog(WARN, "ProcedureDefine: sql functions cannot take type \"opaque\"");
}
toid = 0;
} else {
}
else
{
toid = TypeGet(strVal(t), &defined);
if (!OidIsValid(toid)) {
if (!OidIsValid(toid))
{
elog(WARN, "ProcedureCreate: arg type '%s' is not defined",
strVal(t));
}
if (!defined) {
if (!defined)
{
elog(NOTICE, "ProcedureCreate: arg type '%s' is only a shell",
strVal(t));
}
@ -120,13 +127,17 @@ ProcedureCreate(char *procedureName,
elog(WARN, "ProcedureCreate: procedure %s already exists with same arguments",
procedureName);
if (!strcmp(languageName, "sql")) {
/* If this call is defining a set, check if the set is already
if (!strcmp(languageName, "sql"))
{
/*
* If this call is defining a set, check if the set is already
* defined by looking to see whether this call's function text
* matches a function already in pg_proc. If so just return the
* OID of the existing set.
*/
if (!strcmp(procedureName, GENERICSETNAME)) {
if (!strcmp(procedureName, GENERICSETNAME))
{
prosrctext = textin(prosrc);
tup = SearchSysCacheTuple(PROSRC,
PointerGetDatum(prosrctext),
@ -146,17 +157,21 @@ ProcedureCreate(char *procedureName,
languageObjectId = tup->t_oid;
if (strcmp(returnTypeName, "opaque") == 0) {
if (strcmp(languageName, "sql") == 0) {
if (strcmp(returnTypeName, "opaque") == 0)
{
if (strcmp(languageName, "sql") == 0)
{
elog(WARN, "ProcedureCreate: sql functions cannot return type \"opaque\"");
}
typeObjectId = 0;
}
else {
else
{
typeObjectId = TypeGet(returnTypeName, &defined);
if (!OidIsValid(typeObjectId)) {
if (!OidIsValid(typeObjectId))
{
elog(NOTICE, "ProcedureCreate: type '%s' is not yet defined",
returnTypeName);
#if 0
@ -164,20 +179,24 @@ ProcedureCreate(char *procedureName,
returnTypeName);
#endif
typeObjectId = TypeShellMake(returnTypeName);
if (!OidIsValid(typeObjectId)) {
if (!OidIsValid(typeObjectId))
{
elog(WARN, "ProcedureCreate: could not create type '%s'",
returnTypeName);
}
}
else if (!defined) {
else if (!defined)
{
elog(NOTICE, "ProcedureCreate: return type '%s' is only a shell",
returnTypeName);
}
}
/* don't allow functions of complex types that have the same name as
existing attributes of the type */
/*
* don't allow functions of complex types that have the same name as
* existing attributes of the type
*/
if (parameterCount == 1 &&
(toid = TypeGet(strVal(lfirst(argList)), &defined)) &&
defined &&
@ -188,13 +207,14 @@ ProcedureCreate(char *procedureName,
/*
* If this is a postquel procedure, we parse it here in order to
* be sure that it contains no syntax errors. We should store
* the plan in an Inversion file for use later, but for now, we
* just store the procedure's text in the prosrc attribute.
* If this is a postquel procedure, we parse it here in order to be
* sure that it contains no syntax errors. We should store the plan
* in an Inversion file for use later, but for now, we just store the
* procedure's text in the prosrc attribute.
*/
if (strcmp(languageName, "sql") == 0) {
if (strcmp(languageName, "sql") == 0)
{
plan_list = pg_plan(prosrc, typev, parameterCount,
&querytree_list, dest);
@ -202,7 +222,8 @@ ProcedureCreate(char *procedureName,
pg_checkretval(typeObjectId, querytree_list);
}
for (i = 0; i < Natts_pg_proc; ++i) {
for (i = 0; i < Natts_pg_proc; ++i)
{
nulls[i] = ' ';
values[i] = (Datum) NULL;
}
@ -225,6 +246,7 @@ ProcedureCreate(char *procedureName,
values[i++] = ObjectIdGetDatum(typeObjectId);
values[i++] = (Datum) typev;
/*
* The following assignments of constants are made. The real values
* will have to be extracted from the arglist someday soon.
@ -257,4 +279,3 @@ ProcedureCreate(char *procedureName,
heap_close(rdesc);
return tup->t_oid;
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.7 1997/08/19 21:30:38 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.8 1997/09/07 04:40:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -30,7 +30,8 @@
#include <string.h>
#endif
static Oid TypeShellMakeWithOpenRelation(Relation pg_type_desc,
static Oid
TypeShellMakeWithOpenRelation(Relation pg_type_desc,
char *typeName);
/* ----------------------------------------------------------------
@ -80,7 +81,8 @@ TypeGetWithOpenRelation(Relation pg_type_desc,
* end the scan and return appropriate information.
* ----------------
*/
if (! HeapTupleIsValid(tup)) {
if (!HeapTupleIsValid(tup))
{
heap_endscan(scan);
*defined = false;
return InvalidOid;
@ -162,7 +164,8 @@ TypeShellMakeWithOpenRelation(Relation pg_type_desc, char *typeName)
* initialize our nulls[] and values[] arrays
* ----------------
*/
for (i = 0; i < Natts_pg_type; ++i) {
for (i = 0; i < Natts_pg_type; ++i)
{
nulls[i] = ' ';
values[i] = (Datum) NULL; /* redundant, but safe */
}
@ -292,7 +295,8 @@ TypeCreate(char *typeName,
bool passedByValue,
char alignment)
{
register i, j;
register i,
j;
Relation pg_type_desc;
HeapScanDesc pg_type_scan;
@ -326,7 +330,8 @@ TypeCreate(char *typeName,
* ----------------
*/
typeObjectId = TypeGet(typeName, &defined);
if (OidIsValid(typeObjectId) && defined) {
if (OidIsValid(typeObjectId) && defined)
{
elog(WARN, "TypeCreate: type %s already defined", typeName);
}
@ -335,9 +340,11 @@ TypeCreate(char *typeName,
* it is defined.
* ----------------
*/
if (elementTypeName) {
if (elementTypeName)
{
elementObjectId = TypeGet(elementTypeName, &defined);
if (!defined) {
if (!defined)
{
elog(WARN, "TypeCreate: type %s is not defined", elementTypeName);
}
}
@ -346,7 +353,8 @@ TypeCreate(char *typeName,
* XXX comment me
* ----------------
*/
if (externalSize == 0) {
if (externalSize == 0)
{
externalSize = -1; /* variable length */
}
@ -354,7 +362,8 @@ TypeCreate(char *typeName,
* initialize arrays needed by FormHeapTuple
* ----------------
*/
for (i = 0; i < Natts_pg_type; ++i) {
for (i = 0; i < Natts_pg_type; ++i)
{
nulls[i] = ' ';
replaces[i] = 'r';
values[i] = (Datum) NULL; /* redundant, but nice */
@ -397,7 +406,8 @@ TypeCreate(char *typeName,
procs[2] = (receiveProcedure) ? receiveProcedure : inputProcedure;
procs[3] = (sendProcedure) ? sendProcedure : outputProcedure;
for (j = 0; j < 4; ++j) {
for (j = 0; j < 4; ++j)
{
procname = procs[j];
tup = SearchSysCacheTuple(PRONAME,
@ -406,20 +416,24 @@ TypeCreate(char *typeName,
PointerGetDatum(argList),
0);
if (!HeapTupleIsValid(tup)) {
if (!HeapTupleIsValid(tup))
{
/*
* it is possible for the input/output procedure
* to take two arguments, where the second argument
* is the element type (eg array_in/array_out)
* it is possible for the input/output procedure to take two
* arguments, where the second argument is the element type
* (eg array_in/array_out)
*/
if (OidIsValid(elementObjectId)) {
if (OidIsValid(elementObjectId))
{
tup = SearchSysCacheTuple(PRONAME,
PointerGetDatum(procname),
Int32GetDatum(2),
PointerGetDatum(argList),
0);
}
if (!HeapTupleIsValid(tup)) {
if (!HeapTupleIsValid(tup))
{
func_error("TypeCreate", procname, 1, argList);
}
}
@ -439,7 +453,8 @@ TypeCreate(char *typeName,
*/
values[i] = (Datum) fmgr(TextInRegProcedure, /* 16 */
PointerIsValid(defaultTypeValue)
? defaultTypeValue : "-"); /* XXX default typdefault */
? defaultTypeValue : "-"); /* XXX default
* typdefault */
/* ----------------
* open pg_type and begin a scan for the type name.
@ -468,7 +483,8 @@ TypeCreate(char *typeName,
* ----------------
*/
tup = heap_getnext(pg_type_scan, 0, &buffer);
if (HeapTupleIsValid(tup)) {
if (HeapTupleIsValid(tup))
{
tup = heap_modifytuple(tup,
buffer,
pg_type_desc,
@ -484,7 +500,9 @@ TypeCreate(char *typeName,
setheapoverride(false);
typeObjectId = tup->t_oid;
} else {
}
else
{
tupDesc = pg_type_desc->rd_att;
tup = heap_formtuple(tupDesc,
@ -536,7 +554,8 @@ TypeRename(char *oldTypeName, char *newTypeName)
/* check that that the new type is not already defined */
type_oid = TypeGet(newTypeName, &defined);
if (OidIsValid(type_oid) && defined) {
if (OidIsValid(type_oid) && defined)
{
elog(WARN, "TypeRename: type %s already defined", newTypeName);
}
@ -548,7 +567,8 @@ TypeRename(char *oldTypeName, char *newTypeName)
* change the name of the type
* ----------------
*/
if (HeapTupleIsValid(tup)) {
if (HeapTupleIsValid(tup))
{
namestrcpy(&(((TypeTupleForm) GETSTRUCT(tup))->typname), newTypeName);
@ -566,7 +586,9 @@ TypeRename(char *oldTypeName, char *newTypeName)
/* all done */
pfree(tup);
} else {
}
else
{
elog(WARN, "TypeRename: type %s not defined", oldTypeName);
}
@ -586,7 +608,8 @@ makeArrayTypeName(char* typeName)
{
char *arr;
if (!typeName) return NULL;
if (!typeName)
return NULL;
arr = palloc(strlen(typeName) + 2);
arr[0] = '_';
strcpy(arr + 1, typeName);


@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/version.c,v 1.5 1997/08/19 21:30:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/version.c,v 1.6 1997/09/07 04:41:04 momjian Exp $
*
* NOTES
* At the point the version is defined, 2 physical relations are created
@ -35,8 +35,10 @@
#define MAX_QUERY_LEN 1024
char rule_buf[MAX_QUERY_LEN];
#ifdef NOT_USED
static char attr_list[MAX_QUERY_LEN];
#endif
/*
@ -86,13 +88,15 @@ static char attr_list[MAX_QUERY_LEN];
static void
eval_as_new_xact(char *query)
{
/* WARNING! do not uncomment the following lines WARNING!
* CommitTransactionCommand();
* StartTransactionCommand();
/*
* WARNING! do not uncomment the following lines WARNING!
* CommitTransactionCommand(); StartTransactionCommand();
*/
CommandCounterIncrement();
pg_eval(query, (char **) NULL, (Oid *) NULL, 0);
}
#endif
/*
* Define a version.
@ -105,12 +109,15 @@ DefineVersion(char *name, char *fromRelname, char *date)
static char saved_basename[512];
static char saved_snapshot[512];
if (date == NULL) {
if (date == NULL)
{
/* no time ranges */
bname = fromRelname;
strcpy(saved_basename, (char *) bname);
*saved_snapshot = (char) NULL;
} else {
}
else
{
/* version is a snapshot */
bname = fromRelname;
strcpy(saved_basename, (char *) bname);
@ -119,10 +126,9 @@ DefineVersion(char *name, char *fromRelname, char *date)
/*
* Calls the routine ``GetAttrList'' get the list of attributes
* from the base relation.
* Code is put here so that we only need to look up the attribute once for
* both appends and replaces.
* Calls the routine ``GetAttrList'' get the list of attributes from
* the base relation. Code is put here so that we only need to look up
* the attribute once for both appends and replaces.
*/
setAttrList(bname);
@ -132,6 +138,7 @@ DefineVersion(char *name, char *fromRelname, char *date)
VersionReplace(name, saved_basename, saved_snapshot);
VersionRetrieve(name, saved_basename, saved_snapshot);
}
#endif
/*
@ -164,6 +171,7 @@ VersionCreate(char *vname, char *bname)
sprintf(query_buf, "CREATE TABLE %s_del (DOID oid)", vname);
eval_as_new_xact(query_buf);
}
#endif
@ -184,7 +192,8 @@ setAttrList(char *bname)
int notfirst = 0;
rdesc = heap_openr(bname);
if (rdesc == NULL ) {
if (rdesc == NULL)
{
elog(WARN, "Unable to expand all -- amopenr failed ");
return;
}
@ -192,12 +201,16 @@ setAttrList(char *bname)
attr_list[0] = '\0';
for ( i = maxattrs-1 ; i > -1 ; --i ) {
for (i = maxattrs - 1; i > -1; --i)
{
attrname = (rdesc->rd_att->attrs[i]->attname).data;
if (notfirst == 1) {
if (notfirst == 1)
{
sprintf(temp_buf, ", %s = new.%s", attrname, attrname);
} else {
}
else
{
sprintf(temp_buf, "%s = new.%s", attrname, attrname);
notfirst = 1;
}
@ -208,6 +221,7 @@ setAttrList(char *bname)
return;
}
#endif
/*
@ -225,6 +239,7 @@ VersionAppend(char *vname, char *bname)
eval_as_new_xact(rule_buf);
}
#endif
/*
@ -253,6 +268,7 @@ where _%s.oid !!= '%s_del.DOID'",
/* printf("%s\n",rule_buf); */
}
#endif
/*
@ -290,6 +306,7 @@ bname,bname,snapshot,bname);
eval_as_new_xact(rule_buf);
#endif /* OLD_REWRITE */
}
#endif
/*


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.17 1997/08/19 21:30:42 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.18 1997/09/07 04:40:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -122,14 +122,17 @@ Async_NotifyHandler(SIGNAL_ARGS)
extern TransactionState CurrentTransactionState;
if ((CurrentTransactionState->state == TRANS_DEFAULT) &&
(CurrentTransactionState->blockState == TRANS_DEFAULT)) {
(CurrentTransactionState->blockState == TRANS_DEFAULT))
{
#ifdef ASYNC_DEBUG
elog(DEBUG, "Waking up sleeping backend process");
#endif
Async_NotifyFrontEnd();
}else {
}
else
{
#ifdef ASYNC_DEBUG
elog(DEBUG, "Process is in the middle of another transaction, state = %d, block state = %d",
CurrentTransactionState->state,
@ -166,15 +169,18 @@ void
Async_Notify(char *relname)
{
HeapTuple lTuple, rTuple;
HeapTuple lTuple,
rTuple;
Relation lRel;
HeapScanDesc sRel;
TupleDesc tdesc;
ScanKeyData key;
Buffer b;
Datum d, value[3];
Datum d,
value[3];
bool isnull;
char repl[3], nulls[3];
char repl[3],
nulls[3];
char *notifyName;
@ -208,10 +214,12 @@ Async_Notify(char *relname)
value[0] = value[1] = value[2] = (Datum) 0;
value[Anum_pg_listener_notify - 1] = Int32GetDatum(1);
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0, &b))) {
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0, &b)))
{
d = (Datum) heap_getattr(lTuple, b, Anum_pg_listener_notify,
tdesc, &isnull);
if (!DatumGetInt32(d)) {
if (!DatumGetInt32(d))
{
rTuple = heap_modifytuple(lTuple, b, lRel, value, nulls, repl);
heap_replace(lRel, &lTuple->t_ctid, rTuple);
}
@ -263,9 +271,11 @@ Async_NotifyAtCommit()
pendingNotifies = DLNewList();
if ((CurrentTransactionState->state == TRANS_DEFAULT) &&
(CurrentTransactionState->blockState == TRANS_DEFAULT)) {
(CurrentTransactionState->blockState == TRANS_DEFAULT))
{
if (notifyIssued) { /* 'notify <relname>' issued by us */
if (notifyIssued)
{ /* 'notify <relname>' issued by us */
notifyIssued = 0;
StartTransactionCommand();
#ifdef ASYNC_DEBUG
@ -281,26 +291,33 @@ Async_NotifyAtCommit()
tdesc = RelationGetTupleDescriptor(lRel);
ourpid = getpid();
while (HeapTupleIsValid(lTuple = heap_getnext(sRel,0, &b))) {
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0, &b)))
{
d = (Datum) heap_getattr(lTuple, b, Anum_pg_listener_relname,
tdesc, &isnull);
if (AsyncExistsPendingNotify((char *) DatumGetPointer(d))) {
if (AsyncExistsPendingNotify((char *) DatumGetPointer(d)))
{
d = (Datum) heap_getattr(lTuple, b, Anum_pg_listener_pid,
tdesc, &isnull);
if (ourpid == DatumGetInt32(d)) {
if (ourpid == DatumGetInt32(d))
{
#ifdef ASYNC_DEBUG
elog(DEBUG, "Notifying self, setting notifyFronEndPending to 1");
#endif
notifyFrontEndPending = 1;
} else {
}
else
{
#ifdef ASYNC_DEBUG
elog(DEBUG, "Notifying others");
#endif
#ifdef HAVE_KILL
if (kill(DatumGetInt32(d), SIGUSR2) < 0) {
if (errno == ESRCH) {
if (kill(DatumGetInt32(d), SIGUSR2) < 0)
{
if (errno == ESRCH)
{
heap_delete(lRel, &lTuple->t_ctid);
}
}
@ -317,8 +334,9 @@ Async_NotifyAtCommit()
ClearPendingNotify();
}
if (notifyFrontEndPending) { /* we need to notify the frontend of
all pending notifies. */
if (notifyFrontEndPending)
{ /* we need to notify the frontend of all
* pending notifies. */
notifyFrontEndPending = 1;
Async_NotifyFrontEnd();
}
@ -346,7 +364,8 @@ Async_NotifyAtAbort()
{
extern TransactionState CurrentTransactionState;
if (notifyIssued) {
if (notifyIssued)
{
ClearPendingNotify();
}
notifyIssued = 0;
@ -355,8 +374,10 @@ Async_NotifyAtAbort()
pendingNotifies = DLNewList();
if ((CurrentTransactionState->state == TRANS_DEFAULT) &&
(CurrentTransactionState->blockState == TRANS_DEFAULT)) {
if (notifyFrontEndPending) { /* don't forget to notify front end */
(CurrentTransactionState->blockState == TRANS_DEFAULT))
{
if (notifyFrontEndPending)
{ /* don't forget to notify front end */
Async_NotifyFrontEnd();
}
}
@ -389,7 +410,8 @@ Async_Listen(char *relname, int pid)
char nulls[Natts_pg_listener];
TupleDesc tdesc;
HeapScanDesc s;
HeapTuple htup,tup;
HeapTuple htup,
tup;
Relation lDesc;
Buffer b;
Datum d;
@ -403,7 +425,8 @@ Async_Listen(char *relname, int pid)
#ifdef ASYNC_DEBUG
elog(DEBUG, "Async_Listen: %s", relname);
#endif
for (i = 0 ; i < Natts_pg_listener; i++) {
for (i = 0; i < Natts_pg_listener; i++)
{
nulls[i] = ' ';
values[i] = PointerGetDatum(NULL);
}
@ -419,14 +442,17 @@ Async_Listen(char *relname, int pid)
/* is someone already listening. One listener per relation */
tdesc = RelationGetTupleDescriptor(lDesc);
s = heap_beginscan(lDesc, 0, NowTimeQual, 0, (ScanKey) NULL);
while (HeapTupleIsValid(htup = heap_getnext(s,0,&b))) {
while (HeapTupleIsValid(htup = heap_getnext(s, 0, &b)))
{
d = (Datum) heap_getattr(htup, b, Anum_pg_listener_relname, tdesc,
&isnull);
relnamei = DatumGetPointer(d);
if (!strncmp(relnamei,relname, NAMEDATALEN)) {
if (!strncmp(relnamei, relname, NAMEDATALEN))
{
d = (Datum) heap_getattr(htup, b, Anum_pg_listener_pid, tdesc, &isnull);
pid = DatumGetInt32(d);
if (pid == ourPid) {
if (pid == ourPid)
{
alreadyListener = 1;
}
}
@ -434,7 +460,8 @@ Async_Listen(char *relname, int pid)
}
heap_endscan(s);
if (alreadyListener) {
if (alreadyListener)
{
elog(NOTICE, "Async_Listen: We are already listening on %s",
relname);
return;
@ -447,16 +474,18 @@ Async_Listen(char *relname, int pid)
heap_insert(lDesc, tup);
pfree(tup);
/* if (alreadyListener) {
elog(NOTICE,"Async_Listen: already one listener on %s (possibly dead)",relname);
}*/
/*
* if (alreadyListener) { elog(NOTICE,"Async_Listen: already one
* listener on %s (possibly dead)",relname); }
*/
RelationUnsetLockForWrite(lDesc);
heap_close(lDesc);
/*
* now that we are listening, we should make a note to ourselves
* to unlisten prior to dying.
* now that we are listening, we should make a note to ourselves to
* unlisten prior to dying.
*/
relnamei = malloc(NAMEDATALEN); /* persists to process exit */
strNcpy(relnamei, relname, NAMEDATALEN - 1);
@ -493,7 +522,8 @@ Async_Unlisten(char *relname, int pid)
lDesc = heap_openr(ListenerRelationName);
RelationSetLockForWrite(lDesc);
if (lTuple != NULL) {
if (lTuple != NULL)
{
heap_delete(lDesc, &lTuple->t_ctid);
}
@ -536,13 +566,16 @@ static void
Async_NotifyFrontEnd()
{
extern CommandDest whereToSendOutput;
HeapTuple lTuple, rTuple;
HeapTuple lTuple,
rTuple;
Relation lRel;
HeapScanDesc sRel;
TupleDesc tdesc;
ScanKeyData key[2];
Datum d, value[3];
char repl[3], nulls[3];
Datum d,
value[3];
char repl[3],
nulls[3];
Buffer b;
int ourpid;
bool isnull;
@ -574,7 +607,8 @@ Async_NotifyFrontEnd()
value[0] = value[1] = value[2] = (Datum) 0;
value[Anum_pg_listener_notify - 1] = Int32GetDatum(0);
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0,&b))) {
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0, &b)))
{
d = (Datum) heap_getattr(lTuple, b, Anum_pg_listener_relname,
tdesc, &isnull);
rTuple = heap_modifytuple(lTuple, b, lRel, value, nulls, repl);
@ -582,12 +616,15 @@ Async_NotifyFrontEnd()
/* notifying the front end */
if (whereToSendOutput == Remote) {
if (whereToSendOutput == Remote)
{
pq_putnchar("A", 1);
pq_putint(ourpid, 4);
pq_putstr(DatumGetName(d)->data);
pq_flush();
} else {
}
else
{
elog(NOTICE, "Async_NotifyFrontEnd: no asynchronous notification to frontend on interactive sessions");
}
ReleaseBuffer(b);
@ -599,9 +636,11 @@ static int
AsyncExistsPendingNotify(char *relname)
{
Dlelem *p;
for (p = DLGetHead(pendingNotifies);
p != NULL;
p = DLGetSucc(p)) {
p = DLGetSucc(p))
{
/* Use NAMEDATALEN for relname comparison. DZ - 26-08-1996 */
if (!strncmp(DLE_VAL(p), relname, NAMEDATALEN))
return 1;
@ -614,7 +653,7 @@ static void
ClearPendingNotify()
{
Dlelem *p;
while ((p = DLRemHead(pendingNotifies)) != NULL)
free(DLE_VAL(p));
}


@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.13 1997/08/19 21:30:45 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.14 1997/09/07 04:40:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -82,9 +82,12 @@ static void rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
void
cluster(char oldrelname[], char oldindexname[])
{
Oid OIDOldHeap, OIDOldIndex, OIDNewHeap;
Oid OIDOldHeap,
OIDOldIndex,
OIDNewHeap;
Relation OldHeap, OldIndex;
Relation OldHeap,
OldIndex;
Relation NewHeap;
char NewIndexName[NAMEDATALEN];
@ -93,40 +96,43 @@ cluster(char oldrelname[], char oldindexname[])
char saveoldindexname[NAMEDATALEN];
/* Save the old names because they will get lost when the old relations
* are destroyed.
/*
* Save the old names because they will get lost when the old
* relations are destroyed.
*/
strcpy(saveoldrelname, oldrelname);
strcpy(saveoldindexname, oldindexname);
/*
*
* I'm going to force all checking back into the commands.c function.
*
* Get the list if indicies for this relation. If the index we want
* is among them, do not add it to the 'kill' list, as it will be
* handled by the 'clean up' code which commits this transaction.
* Get the list if indicies for this relation. If the index we want is
* among them, do not add it to the 'kill' list, as it will be handled
* by the 'clean up' code which commits this transaction.
*
* I'm not using the SysCache, because this will happen but
* once, and the slow way is the sure way in this case.
* I'm not using the SysCache, because this will happen but once, and the
* slow way is the sure way in this case.
*
*/
/*
* Like vacuum, cluster spans transactions, so I'm going to handle it in
* the same way.
* Like vacuum, cluster spans transactions, so I'm going to handle it
* in the same way.
*/
/* matches the StartTransaction in PostgresMain() */
OldHeap = heap_openr(oldrelname);
if (!RelationIsValid(OldHeap)) {
if (!RelationIsValid(OldHeap))
{
elog(WARN, "cluster: unknown relation: \"%s\"",
oldrelname);
}
OIDOldHeap = OldHeap->rd_id;/* Get OID for the index scan */
OldIndex = index_openr(oldindexname); /* Open old index relation */
if (!RelationIsValid(OldIndex)) {
if (!RelationIsValid(OldIndex))
{
elog(WARN, "cluster: unknown index: \"%s\"",
oldindexname);
}
@ -137,10 +143,11 @@ cluster(char oldrelname[], char oldindexname[])
/*
* I need to build the copies of the heap and the index. The Commit()
* between here is *very* bogus. If someone is appending stuff, they will
* get the lock after being blocked and add rows which won't be present in
* the new table. Bleagh! I'd be best to try and ensure that no-one's
* in the tables for the entire duration of this process with a pg_vlock.
* between here is *very* bogus. If someone is appending stuff, they
* will get the lock after being blocked and add rows which won't be
* present in the new table. Bleagh! I'd be best to try and ensure
* that no-one's in the tables for the entire duration of this process
* with a pg_vlock.
*/
NewHeap = copy_heap(OIDOldHeap);
OIDNewHeap = NewHeap->rd_id;
@ -160,8 +167,8 @@ cluster(char oldrelname[], char oldindexname[])
sprintf(NewIndexName, "temp_%x", OIDOldIndex);
/*
* make this really happen. Flush all the buffers.
* (Believe me, it is necessary ... ended up in a mess without it.)
* make this really happen. Flush all the buffers. (Believe me, it is
* necessary ... ended up in a mess without it.)
*/
CommitTransactionCommand();
StartTransactionCommand();
@ -186,9 +193,11 @@ static Relation
copy_heap(Oid OIDOldHeap)
{
char NewName[NAMEDATALEN];
TupleDesc OldHeapDesc, tupdesc;
TupleDesc OldHeapDesc,
tupdesc;
Oid OIDNewHeap;
Relation NewHeap, OldHeap;
Relation NewHeap,
OldHeap;
/*
* Create a new heap relation with a temporary name, which has the
@ -226,8 +235,11 @@ copy_heap(Oid OIDOldHeap)
static void
copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
{
Relation OldIndex, NewHeap;
HeapTuple Old_pg_index_Tuple, Old_pg_index_relation_Tuple, pg_proc_Tuple;
Relation OldIndex,
NewHeap;
HeapTuple Old_pg_index_Tuple,
Old_pg_index_relation_Tuple,
pg_proc_Tuple;
IndexTupleForm Old_pg_index_Form;
Form_pg_class Old_pg_index_relation_Form;
Form_pg_proc pg_proc_Form;
@ -240,10 +252,9 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
OldIndex = index_open(OIDOldIndex);
/*
* OK. Create a new (temporary) index for the one that's already
* here. To do this I get the info from pg_index, re-build the
* FunctInfo if I have to, and add a new index with a temporary
* name.
* OK. Create a new (temporary) index for the one that's already here.
* To do this I get the info from pg_index, re-build the FunctInfo if
* I have to, and add a new index with a temporary name.
*/
Old_pg_index_Tuple =
SearchSysCacheTuple(INDEXRELID,
@ -267,8 +278,8 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
/*
* Ugly as it is, the only way I have of working out the number of
* attribues is to count them. Mostly there'll be just one but
* I've got to be sure.
* attribues is to count them. Mostly there'll be just one but I've
* got to be sure.
*/
for (attnumP = &(Old_pg_index_Form->indkey[0]), natts = 0;
*attnumP != InvalidAttrNumber;
@ -278,7 +289,8 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
* If this is a functional index, I need to rebuild the functional
* component to pass it to the defining procedure.
*/
if (Old_pg_index_Form->indproc != InvalidOid) {
if (Old_pg_index_Form->indproc != InvalidOid)
{
finfo = (FuncIndexInfo *) palloc(sizeof(FuncIndexInfo));
FIgetnArgs(finfo) = natts;
FIgetProcOid(finfo) = Old_pg_index_Form->indproc;
@ -291,7 +303,9 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
Assert(pg_proc_Tuple);
pg_proc_Form = (Form_pg_proc) GETSTRUCT(pg_proc_Tuple);
namecpy(&(finfo->funcName), &(pg_proc_Form->proname));
} else {
}
else
{
finfo = (FuncIndexInfo *) NULL;
natts = 1;
}
@ -316,7 +330,9 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
static void
rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
{
Relation LocalNewHeap, LocalOldHeap, LocalOldIndex;
Relation LocalNewHeap,
LocalOldHeap,
LocalOldIndex;
IndexScanDesc ScanDesc;
RetrieveIndexResult ScanResult;
ItemPointer HeapTid;
@ -325,8 +341,8 @@ rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
Oid OIDNewHeapInsert;
/*
* Open the relations I need. Scan through the OldHeap on the OldIndex and
* insert each tuple into the NewHeap.
* Open the relations I need. Scan through the OldHeap on the OldIndex
* and insert each tuple into the NewHeap.
*/
LocalNewHeap = (Relation) heap_open(OIDNewHeap);
LocalOldHeap = (Relation) heap_open(OIDOldHeap);
@ -335,7 +351,8 @@ rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
ScanDesc = index_beginscan(LocalOldIndex, false, 0, (ScanKey) NULL);
while ((ScanResult =
index_getnext(ScanDesc, ForwardScanDirection)) != NULL) {
index_getnext(ScanDesc, ForwardScanDirection)) != NULL)
{
HeapTid = &ScanResult->heap_iptr;
LocalHeapTuple = heap_fetch(LocalOldHeap, 0, HeapTid, &LocalBuffer);
@ -350,4 +367,3 @@ rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
heap_close(LocalOldHeap);
heap_close(LocalNewHeap);
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.13 1997/08/22 14:22:07 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.14 1997/09/07 04:40:38 momjian Exp $
*
* NOTES
* The PortalExecutorHeapMemory crap needs to be eliminated
@ -106,7 +106,8 @@ PerformPortalFetch(char *name,
* sanity checks
* ----------------
*/
if (name == NULL) {
if (name == NULL)
{
elog(NOTICE, "PerformPortalFetch: blank portal unsupported");
return;
}
@ -116,7 +117,8 @@ PerformPortalFetch(char *name,
* ----------------
*/
portal = GetPortalByName(name);
if (! PortalIsValid(portal)) {
if (!PortalIsValid(portal))
{
elog(NOTICE, "PerformPortalFetch: portal \"%s\" not found",
name);
return;
@ -148,9 +150,12 @@ PerformPortalFetch(char *name,
queryDesc = PortalGetQueryDesc(portal);
BeginCommand(name,
queryDesc->operation,
portal->attinfo,/* QueryDescGetTypeInfo(queryDesc), */
false, /* portal fetches don't end up in relations */
false, /* this is a portal fetch, not a "retrieve portal" */
portal->attinfo, /* QueryDescGetTypeInfo(queryDesc),
* */
false, /* portal fetches don't end up in
* relations */
false, /* this is a portal fetch, not a "retrieve
* portal" */
tag,
dest);
@ -191,7 +196,8 @@ PerformPortalClose(char *name, CommandDest dest)
* sanity checks
* ----------------
*/
if (name == NULL) {
if (name == NULL)
{
elog(NOTICE, "PerformPortalClose: blank portal unsupported");
return;
}
@ -201,7 +207,8 @@ PerformPortalClose(char *name, CommandDest dest)
* ----------------
*/
portal = GetPortalByName(name);
if (! PortalIsValid(portal)) {
if (!PortalIsValid(portal))
{
elog(NOTICE, "PerformPortalClose: portal \"%s\" not found",
name);
return;
@ -250,14 +257,16 @@ PerformAddAttribute(char *relationName,
bool inherits,
ColumnDef * colDef)
{
Relation relrdesc, attrdesc;
Relation relrdesc,
attrdesc;
HeapScanDesc attsdesc;
HeapTuple reltup;
HeapTuple attributeTuple;
AttributeTupleForm attribute;
FormData_pg_attribute attributeD;
int i;
int minattnum, maxatts;
int minattnum,
maxatts;
HeapTuple tup;
ScanKeyData key[2];
ItemPointerData oldTID;
@ -279,6 +288,7 @@ PerformAddAttribute(char *relationName,
elog(WARN, "PerformAddAttribute: you do not own class \"%s\"",
relationName);
#endif
/*
* we can't add a not null attribute
*/
@ -286,22 +296,28 @@ PerformAddAttribute(char *relationName,
elog(WARN, "Can't add a not null attribute to a existent relation");
if (colDef->defval)
elog(WARN, "ADD ATTRIBUTE: DEFAULT is not implemented, yet");
/*
* if the first element in the 'schema' list is a "*" then we are
* supposed to add this attribute to all classes that inherit from
* 'relationName' (as well as to 'relationName').
*
* any permissions or problems with duplicate attributes will cause
* the whole transaction to abort, which is what we want -- all or
* any permissions or problems with duplicate attributes will cause the
* whole transaction to abort, which is what we want -- all or
* nothing.
*/
if (colDef != NULL) {
if (inherits) {
Oid myrelid, childrelid;
List *child, *children;
if (colDef != NULL)
{
if (inherits)
{
Oid myrelid,
childrelid;
List *child,
*children;
relrdesc = heap_openr(relationName);
if (!RelationIsValid(relrdesc)) {
if (!RelationIsValid(relrdesc))
{
elog(WARN, "PerformAddAttribute: unknown relation: \"%s\"",
relationName);
}
@ -313,15 +329,17 @@ PerformAddAttribute(char *relationName,
/*
* find_all_inheritors does the recursive search of the
* inheritance hierarchy, so all we have to do is process
* all of the relids in the list that it returns.
* inheritance hierarchy, so all we have to do is process all
* of the relids in the list that it returns.
*/
foreach (child, children) {
foreach(child, children)
{
childrelid = lfirsti(child);
if (childrelid == myrelid)
continue;
relrdesc = heap_open(childrelid);
if (!RelationIsValid(relrdesc)) {
if (!RelationIsValid(relrdesc))
{
elog(WARN, "PerformAddAttribute: can't find catalog entry for inheriting class with oid %d",
childrelid);
}
@ -335,15 +353,18 @@ PerformAddAttribute(char *relationName,
relrdesc = heap_openr(RelationRelationName);
reltup = ClassNameIndexScan(relrdesc, relationName);
if (!PointerIsValid(reltup)) {
if (!PointerIsValid(reltup))
{
heap_close(relrdesc);
elog(WARN, "PerformAddAttribute: relation \"%s\" not found",
relationName);
}
/*
* XXX is the following check sufficient?
*/
if (((Form_pg_class) GETSTRUCT(reltup))->relkind == RELKIND_INDEX) {
if (((Form_pg_class) GETSTRUCT(reltup))->relkind == RELKIND_INDEX)
{
elog(WARN, "PerformAddAttribute: index relation \"%s\" not changed",
relationName);
return;
@ -351,7 +372,8 @@ PerformAddAttribute(char *relationName,
minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
maxatts = minattnum + 1;
if (maxatts > MaxHeapAttributeNumber) {
if (maxatts > MaxHeapAttributeNumber)
{
pfree(reltup); /* XXX temp */
heap_close(relrdesc); /* XXX temp */
elog(WARN, "PerformAddAttribute: relations limited to %d attributes",
@ -409,7 +431,8 @@ PerformAddAttribute(char *relationName,
tup = heap_getnext(attsdesc, 0, (Buffer *) NULL);
if (HeapTupleIsValid(tup)) {
if (HeapTupleIsValid(tup))
{
pfree(reltup); /* XXX temp */
heap_endscan(attsdesc); /* XXX temp */
heap_close(attrdesc); /* XXX temp */
@ -440,14 +463,16 @@ PerformAddAttribute(char *relationName,
0, 0, 0);
form = (TypeTupleForm) GETSTRUCT(typeTuple);
if (!HeapTupleIsValid(typeTuple)) {
if (!HeapTupleIsValid(typeTuple))
{
elog(WARN, "Add: type \"%s\" nonexistent", p);
}
namestrcpy(&(attribute->attname), (char *) key[1].sk_argument);
attribute->atttypid = typeTuple->t_oid;
if (colDef->typename->typlen > 0)
attribute->attlen = colDef->typename->typlen;
else /* bpchar, varchar, text */
else
/* bpchar, varchar, text */
attribute->attlen = form->typlen;
attribute->attnum = i;
attribute->attbyval = form->typbyval;


@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.29 1997/09/04 13:18:59 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.30 1997/09/07 04:40:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -48,22 +48,28 @@ static Oid GetOutputFunction(Oid type);
static Oid GetTypeElement(Oid type);
static Oid GetInputFunction(Oid type);
static Oid IsTypeByVal(Oid type);
static void GetIndexRelations(Oid main_relation_oid,
static void
GetIndexRelations(Oid main_relation_oid,
int *n_indices,
Relation ** index_rels);
#ifdef COPY_PATCH
static void CopyReadNewline(FILE * fp, int *newline);
static char *CopyReadAttribute(FILE * fp, bool * isnull, char *delim, int *newline);
#else
static char *CopyReadAttribute(FILE * fp, bool * isnull, char *delim);
#endif
static void CopyAttributeOut(FILE * fp, char *string, char *delim);
static int CountTuples(Relation relation);
extern FILE *Pfout, *Pfin;
extern FILE *Pfout,
*Pfin;
#ifdef COPY_DEBUG
static int lineno;
#endif
/*
@ -72,7 +78,8 @@ static int lineno;
void
DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
char *filename, char *delim) {
char *filename, char *delim)
{
/*----------------------------------------------------------------------------
Either unload or reload contents of class <relname>, depending on <from>.
@ -107,7 +114,8 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
int result;
rel = heap_openr(relname);
if (rel == NULL) elog(WARN, "COPY command failed. Class %s "
if (rel == NULL)
elog(WARN, "COPY command failed. Class %s "
"does not exist.", relname);
result = pg_aclcheck(relname, UserName, required_access);
@ -119,16 +127,24 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
"directly to or from a file. Anyone can COPY to stdout or "
"from stdin. Psql's \\copy command also works for anyone.");
/* Above should not return. */
else {
if (from) { /* copy from file to database */
else
{
if (from)
{ /* copy from file to database */
if (rel->rd_rel->relkind == RELKIND_SEQUENCE)
elog(WARN, "You can't change sequence relation %s", relname);
if (pipe) {
if (IsUnderPostmaster) {
if (pipe)
{
if (IsUnderPostmaster)
{
ReceiveCopyBegin();
fp = Pfin;
} else fp = stdin;
} else {
}
else
fp = stdin;
}
else
{
fp = AllocateFile(filename, "r");
if (fp == NULL)
elog(WARN, "COPY command, running in backend with "
@ -138,14 +154,23 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
/* Above should not return */
}
CopyFrom(rel, binary, oids, fp, delim);
} else { /* copy from database to file */
if (pipe) {
if (IsUnderPostmaster) {
}
else
{ /* copy from database to file */
if (pipe)
{
if (IsUnderPostmaster)
{
SendCopyBegin();
fp = Pfout;
} else fp = stdout;
} else {
}
else
fp = stdout;
}
else
{
mode_t oumask; /* Pre-existing umask value */
oumask = umask((mode_t) 0);
fp = AllocateFile(filename, "w");
umask(oumask);
@ -160,9 +185,11 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
}
if (!pipe)
FreeFile(fp);
else if (!from && !binary) {
else if (!from && !binary)
{
fputs("\\.\n", fp);
if (IsUnderPostmaster) fflush(Pfout);
if (IsUnderPostmaster)
fflush(Pfout);
}
}
}
@ -175,7 +202,8 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
HeapTuple tuple;
HeapScanDesc scandesc;
int32 attr_count, i;
int32 attr_count,
i;
AttributeTupleForm *attr;
func_ptr *out_functions;
int dummy;
@ -184,11 +212,13 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
Datum value;
bool isnull; /* The attribute we are copying is null */
char *nulls;
/* <nulls> is a (dynamically allocated) array with one character
per attribute in the instance being copied. nulls[I-1] is
'n' if Attribute Number I is null, and ' ' otherwise.
<nulls> is meaningful only if we are doing a binary copy.
/*
* <nulls> is a (dynamically allocated) array with one character per
* attribute in the instance being copied. nulls[I-1] is 'n' if
* Attribute Number I is null, and ' ' otherwise.
*
* <nulls> is meaningful only if we are doing a binary copy.
*/
char *string;
int32 ntuples;
@ -200,20 +230,26 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
attr = rel->rd_att->attrs;
tupDesc = rel->rd_att;
if (!binary) {
if (!binary)
{
out_functions = (func_ptr *) palloc(attr_count * sizeof(func_ptr));
elements = (Oid *) palloc(attr_count * sizeof(Oid));
for (i = 0; i < attr_count; i++) {
for (i = 0; i < attr_count; i++)
{
out_func_oid = (Oid) GetOutputFunction(attr[i]->atttypid);
fmgr_info(out_func_oid, &out_functions[i], &dummy);
elements[i] = GetTypeElement(attr[i]->atttypid);
}
nulls = NULL; /* meaningless, but compiler doesn't know that */
}else {
nulls = NULL; /* meaningless, but compiler doesn't know
* that */
}
else
{
elements = NULL;
out_functions = NULL;
nulls = (char *) palloc(attr_count);
for (i = 0; i < attr_count; i++) nulls[i] = ' ';
for (i = 0; i < attr_count; i++)
nulls[i] = ' ';
/* XXX expensive */
@ -223,18 +259,23 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
for (tuple = heap_getnext(scandesc, 0, NULL);
tuple != NULL;
tuple = heap_getnext(scandesc, 0, NULL)) {
tuple = heap_getnext(scandesc, 0, NULL))
{
if (oids && !binary) {
if (oids && !binary)
{
fputs(oidout(tuple->t_oid), fp);
fputc(delim[0], fp);
}
for (i = 0; i < attr_count; i++) {
for (i = 0; i < attr_count; i++)
{
value = (Datum)
heap_getattr(tuple, InvalidBuffer, i + 1, tupDesc, &isnull);
if (!binary) {
if (!isnull) {
if (!binary)
{
if (!isnull)
{
string = (char *) (out_functions[i]) (value, elements[i]);
CopyAttributeOut(fp, string, delim);
pfree(string);
@ -242,27 +283,41 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
else
fputs("\\N", fp); /* null indicator */
if (i == attr_count - 1) {
if (i == attr_count - 1)
{
fputc('\n', fp);
}else {
/* when copying out, only use the first char of the delim
string */
}
else
{
/*
* when copying out, only use the first char of the
* delim string
*/
fputc(delim[0], fp);
}
}else {
}
else
{
/*
* only interesting thing heap_getattr tells us in this case
* is if we have a null attribute or not.
* only interesting thing heap_getattr tells us in this
* case is if we have a null attribute or not.
*/
if (isnull) nulls[i] = 'n';
if (isnull)
nulls[i] = 'n';
}
}
if (binary) {
int32 null_ct = 0, length;
if (binary)
{
int32 null_ct = 0,
length;
for (i = 0; i < attr_count; i++) {
if (nulls[i] == 'n') null_ct++;
for (i = 0; i < attr_count; i++)
{
if (nulls[i] == 'n')
null_ct++;
}
length = tuple->t_len - tuple->t_hoff;
@ -271,9 +326,12 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
fwrite((char *) &tuple->t_oid, sizeof(int32), 1, fp);
fwrite(&null_ct, sizeof(int32), 1, fp);
if (null_ct > 0) {
for (i = 0; i < attr_count; i++) {
if (nulls[i] == 'n') {
if (null_ct > 0)
{
for (i = 0; i < attr_count; i++)
{
if (nulls[i] == 'n')
{
fwrite(&i, sizeof(int32), 1, fp);
nulls[i] = ' ';
}
@ -284,9 +342,12 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
}
heap_endscan(scandesc);
if (binary) {
if (binary)
{
pfree(nulls);
}else {
}
else
{
pfree(out_functions);
pfree(elements);
}
@ -301,21 +362,28 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
AttrNumber attr_count;
AttributeTupleForm *attr;
func_ptr *in_functions;
int i, dummy;
int i,
dummy;
Oid in_func_oid;
Datum *values;
char *nulls, *index_nulls;
char *nulls,
*index_nulls;
bool *byval;
bool isnull;
bool has_index;
int done = 0;
char *string = NULL, *ptr;
char *string = NULL,
*ptr;
Relation *index_rels;
int32 len, null_ct, null_id;
int32 ntuples, tuples_read = 0;
int32 len,
null_ct,
null_id;
int32 ntuples,
tuples_read = 0;
bool reading_to_eof = true;
Oid *elements;
FuncIndexInfo *finfo, **finfoP = NULL;
FuncIndexInfo *finfo,
**finfoP = NULL;
TupleDesc *itupdescArr;
HeapTuple pgIndexTup;
IndexTupleForm *pgIndexP = NULL;
@ -324,9 +392,11 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
Node **indexPred = NULL;
TupleDesc rtupdesc;
ExprContext *econtext = NULL;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot = NULL;
#endif
int natts;
AttrNumber *attnumP;
@ -345,13 +415,14 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
/*
* This may be a scalar or a functional index. We initialize all
* kinds of arrays here to avoid doing extra work at every tuple
* copy.
* kinds of arrays here to avoid doing extra work at every tuple copy.
*/
if (rel->rd_rel->relhasindex) {
if (rel->rd_rel->relhasindex)
{
GetIndexRelations(rel->rd_id, &n_indices, &index_rels);
if (n_indices > 0) {
if (n_indices > 0)
{
has_index = true;
itupdescArr =
(TupleDesc *) palloc(n_indices * sizeof(TupleDesc));
@ -362,7 +433,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
finfoP = (FuncIndexInfo **) palloc(n_indices * sizeof(FuncIndexInfo *));
indexPred = (Node **) palloc(n_indices * sizeof(Node *));
econtext = NULL;
for (i = 0; i < n_indices; i++) {
for (i = 0; i < n_indices; i++)
{
itupdescArr[i] = RelationGetTupleDescriptor(index_rels[i]);
pgIndexTup =
SearchSysCacheTuple(INDEXRELID,
@ -373,21 +445,25 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
for (attnumP = &(pgIndexP[i]->indkey[0]), natts = 0;
*attnumP != InvalidAttrNumber;
attnumP++, natts++);
if (pgIndexP[i]->indproc != InvalidOid) {
if (pgIndexP[i]->indproc != InvalidOid)
{
FIgetnArgs(&finfo[i]) = natts;
natts = 1;
FIgetProcOid(&finfo[i]) = pgIndexP[i]->indproc;
*(FIgetname(&finfo[i])) = '\0';
finfoP[i] = &finfo[i];
} else
}
else
finfoP[i] = (FuncIndexInfo *) NULL;
indexNatts[i] = natts;
if (VARSIZE(&pgIndexP[i]->indpred) != 0) {
if (VARSIZE(&pgIndexP[i]->indpred) != 0)
{
predString = fmgr(F_TEXTOUT, &pgIndexP[i]->indpred);
indexPred[i] = stringToNode(predString);
pfree(predString);
/* make dummy ExprContext for use by ExecQual */
if (econtext == NULL) {
if (econtext == NULL)
{
#ifndef OMIT_PARTIAL_INDEX
tupleTable = ExecCreateTupleTable(1);
slot = ExecAllocTableSlot(tupleTable);
@ -395,17 +471,19 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
econtext->ecxt_scantuple = slot;
rtupdesc = RelationGetTupleDescriptor(rel);
slot->ttc_tupleDescriptor = rtupdesc;
/*
* There's no buffer associated with heap tuples here,
* so I set the slot's buffer to NULL. Currently, it
* appears that the only way a buffer could be needed
* would be if the partial index predicate referred to
* the "lock" system attribute. If it did, then
* heap_getattr would call HeapTupleGetRuleLock, which
* uses the buffer's descriptor to get the relation id.
* There's no buffer associated with heap tuples
* here, so I set the slot's buffer to NULL.
* Currently, it appears that the only way a
* buffer could be needed would be if the partial
* index predicate referred to the "lock" system
* attribute. If it did, then heap_getattr would
* call HeapTupleGetRuleLock, which uses the
* buffer's descriptor to get the relation id.
* Rather than try to fix this, I'll just disallow
* partial indexes on "lock", which wouldn't be useful
* anyway. --Nels, Nov '92
* partial indexes on "lock", which wouldn't be
* useful anyway. --Nels, Nov '92
*/
/* SetSlotBuffer(slot, (Buffer) NULL); */
/* SetSlotShouldFree(slot, false); */
@ -413,7 +491,9 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
slot->ttc_shouldFree = false;
#endif /* OMIT_PARTIAL_INDEX */
}
} else {
}
else
{
indexPred[i] = NULL;
}
}
@ -436,7 +516,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
in_functions = NULL;
elements = NULL;
fread(&ntuples, sizeof(int32), 1, fp);
if (ntuples != 0) reading_to_eof = false;
if (ntuples != 0)
reading_to_eof = false;
}
values = (Datum *) palloc(sizeof(Datum) * attr_count);
@ -445,7 +526,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
idatum = (Datum *) palloc(sizeof(Datum) * attr_count);
byval = (bool *) palloc(attr_count * sizeof(bool));
for (i = 0; i < attr_count; i++) {
for (i = 0; i < attr_count; i++)
{
nulls[i] = ' ';
index_nulls[i] = ' ';
byval[i] = (bool) IsTypeByVal(attr[i]->atttypid);
@ -454,16 +536,20 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
#ifdef COPY_DEBUG
lineno = 0;
#endif
while (!done) {
if (!binary) {
while (!done)
{
if (!binary)
{
#ifdef COPY_PATCH
int newline = 0;
#endif
#ifdef COPY_DEBUG
lineno++;
elog(DEBUG, "line %d", lineno);
#endif
if (oids) {
if (oids)
{
#ifdef COPY_PATCH
string = CopyReadAttribute(fp, &isnull, delim, &newline);
#else
@ -471,34 +557,43 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
#endif
if (string == NULL)
done = 1;
else {
else
{
loaded_oid = oidin(string);
if (loaded_oid < BootstrapObjectIdData)
elog(WARN, "COPY TEXT: Invalid Oid");
}
}
for (i = 0; i < attr_count && !done; i++) {
for (i = 0; i < attr_count && !done; i++)
{
#ifdef COPY_PATCH
string = CopyReadAttribute(fp, &isnull, delim, &newline);
#else
string = CopyReadAttribute(fp, &isnull, delim);
#endif
if (isnull) {
if (isnull)
{
values[i] = PointerGetDatum(NULL);
nulls[i] = 'n';
}else if (string == NULL) {
}
else if (string == NULL)
{
done = 1;
}else {
}
else
{
values[i] =
(Datum) (in_functions[i]) (string,
elements[i],
attr[i]->attlen);
/*
* Sanity check - by reference attributes cannot return
* NULL
* Sanity check - by reference attributes cannot
* return NULL
*/
if (!PointerIsValid(values[i]) &&
!(rel->rd_att->attrs[i]->attbyval)) {
!(rel->rd_att->attrs[i]->attbyval))
{
#ifdef COPY_DEBUG
elog(WARN,
"copy from: line %d - Bad file format", lineno);
@ -509,23 +604,32 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
}
}
#ifdef COPY_PATCH
if (!done) {
if (!done)
{
CopyReadNewline(fp, &newline);
}
#endif
}else { /* binary */
}
else
{ /* binary */
fread(&len, sizeof(int32), 1, fp);
if (feof(fp)) {
if (feof(fp))
{
done = 1;
}else {
if (oids) {
}
else
{
if (oids)
{
fread(&loaded_oid, sizeof(int32), 1, fp);
if (loaded_oid < BootstrapObjectIdData)
elog(WARN, "COPY BINARY: Invalid Oid");
}
fread(&null_ct, sizeof(int32), 1, fp);
if (null_ct > 0) {
for (i = 0; i < null_ct; i++) {
if (null_ct > 0)
{
for (i = 0; i < null_ct; i++)
{
fread(&null_id, sizeof(int32), 1, fp);
nulls[null_id] = 'n';
}
@ -536,10 +640,13 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
ptr = string;
for (i = 0; i < attr_count; i++) {
if (byval[i] && nulls[i] != 'n') {
for (i = 0; i < attr_count; i++)
{
if (byval[i] && nulls[i] != 'n')
{
switch(attr[i]->attlen) {
switch (attr[i]->attlen)
{
case sizeof(char):
values[i] = (Datum) * (unsigned char *) ptr;
ptr += sizeof(char);
@ -558,8 +665,11 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
elog(WARN, "COPY BINARY: impossible size!");
break;
}
}else if (nulls[i] != 'n') {
switch (attr[i]->attlen) {
}
else if (nulls[i] != 'n')
{
switch (attr[i]->attlen)
{
case -1:
if (attr[i]->attalign == 'd')
ptr = (char *) DOUBLEALIGN(ptr);
@ -594,12 +704,13 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
}
}
}
if (done) continue;
if (done)
continue;
/*
* Does it have any sence ? - vadim 12/14/96
*
tupDesc = CreateTupleDesc(attr_count, attr);
* tupDesc = CreateTupleDesc(attr_count, attr);
*/
tuple = heap_formtuple(tupDesc, values, nulls);
if (oids)
@ -652,9 +763,10 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
if (indexPred[i] != NULL)
{
#ifndef OMIT_PARTIAL_INDEX
/*
* if tuple doesn't satisfy predicate,
* don't update index
* if tuple doesn't satisfy predicate, don't
* update index
*/
slot->val = tuple;
/* SetSlotContents(slot, tuple); */
@ -672,7 +784,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
finfoP[i]);
indexRes = index_insert(index_rels[i], idatum, index_nulls,
&(tuple->t_ctid), rel);
if (indexRes) pfree(indexRes);
if (indexRes)
pfree(indexRes);
}
}
/* AFTER ROW INSERT Triggers */
@ -681,12 +794,18 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
ExecARInsertTriggers(rel, tuple);
}
if (binary) pfree(string);
if (binary)
pfree(string);
for (i = 0; i < attr_count; i++) {
if (!byval[i] && nulls[i] != 'n') {
if (!binary) pfree((void*)values[i]);
}else if (nulls[i] == 'n') {
for (i = 0; i < attr_count; i++)
{
if (!byval[i] && nulls[i] != 'n')
{
if (!binary)
pfree((void *) values[i]);
}
else if (nulls[i] == 'n')
{
nulls[i] = ' ';
}
}
@ -694,10 +813,12 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
pfree(tuple);
tuples_read++;
if (!reading_to_eof && ntuples == tuples_read) done = true;
if (!reading_to_eof && ntuples == tuples_read)
done = true;
}
pfree(values);
if (!binary) pfree(in_functions);
if (!binary)
pfree(in_functions);
pfree(nulls);
pfree(byval);
heap_close(rel);
@ -779,7 +900,8 @@ IsTypeByVal(Oid type)
* Space for the array itself is palloc'ed.
*/
typedef struct rel_list {
typedef struct rel_list
{
Oid index_rel_oid;
struct rel_list *next;
} RelationList;
@ -789,7 +911,8 @@ GetIndexRelations(Oid main_relation_oid,
int *n_indices,
Relation ** index_rels)
{
RelationList *head, *scan;
RelationList *head,
*scan;
Relation pg_index_rel;
HeapScanDesc scandesc;
Oid index_relation_oid;
@ -810,12 +933,14 @@ GetIndexRelations(Oid main_relation_oid,
for (tuple = heap_getnext(scandesc, 0, NULL);
tuple != NULL;
tuple = heap_getnext(scandesc, 0, NULL)) {
tuple = heap_getnext(scandesc, 0, NULL))
{
index_relation_oid =
(Oid) DatumGetInt32(heap_getattr(tuple, InvalidBuffer, 2,
tupDesc, &isnull));
if (index_relation_oid == main_relation_oid) {
if (index_relation_oid == main_relation_oid)
{
scan->index_rel_oid =
(Oid) DatumGetInt32(heap_getattr(tuple, InvalidBuffer,
Anum_pg_index_indexrelid,
@ -835,11 +960,13 @@ GetIndexRelations(Oid main_relation_oid,
*index_rels = (Relation *) palloc(*n_indices * sizeof(Relation));
for (i = 0, scan = head; i < *n_indices; i++, scan = scan->next) {
for (i = 0, scan = head; i < *n_indices; i++, scan = scan->next)
{
(*index_rels)[i] = index_open(scan->index_rel_oid);
}
for (i = 0, scan = head; i < *n_indices + 1; i++) {
for (i = 0, scan = head; i < *n_indices + 1; i++)
{
scan = head->next;
pfree(head);
head = scan;
@ -856,9 +983,11 @@ inString(char c, char* s)
{
int i;
if (s) {
if (s)
{
i = 0;
while (s[i] != '\0') {
while (s[i] != '\0')
{
if (s[i] == c)
return 1;
i++;
@ -875,7 +1004,8 @@ inString(char c, char* s)
void
CopyReadNewline(FILE * fp, int *newline)
{
if (!*newline) {
if (!*newline)
{
#ifdef COPY_DEBUG
elog(NOTICE, "CopyReadNewline: line %d - extra fields ignored",
lineno);
@ -886,6 +1016,7 @@ CopyReadNewline(FILE *fp, int *newline)
}
*newline = 0;
}
#endif
/*
@ -909,7 +1040,8 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim)
#ifdef COPY_PATCH
/* if last delimiter was a newline return a NULL attribute */
if (*newline) {
if (*newline)
{
*isnull = (bool) true;
return (NULL);
}
@ -919,16 +1051,19 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim)
if (feof(fp))
return (NULL);
while (!done) {
while (!done)
{
c = getc(fp);
if (feof(fp))
return (NULL);
else if (c == '\\') {
else if (c == '\\')
{
c = getc(fp);
if (feof(fp))
return (NULL);
switch (c) {
switch (c)
{
case '0':
case '1':
case '2':
@ -936,21 +1071,29 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim)
case '4':
case '5':
case '6':
case '7': {
case '7':
{
int val;
val = VALUE(c);
c = getc(fp);
if (ISOCTAL(c)) {
if (ISOCTAL(c))
{
val = (val << 3) + VALUE(c);
c = getc(fp);
if (ISOCTAL(c)) {
if (ISOCTAL(c))
{
val = (val << 3) + VALUE(c);
} else {
}
else
{
if (feof(fp))
return (NULL);
ungetc(c, fp);
}
} else {
}
else
{
if (feof(fp))
return (NULL);
ungetc(c, fp);
@ -987,15 +1130,19 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim)
return (NULL);
break;
}
}else if (inString(c,delim) || c == '\n') {
}
else if (inString(c, delim) || c == '\n')
{
#ifdef COPY_PATCH
if (c == '\n') {
if (c == '\n')
{
*newline = 1;
}
#endif
done = 1;
}
if (!done) attribute[i++] = c;
if (!done)
attribute[i++] = c;
if (i == EXT_ATTLEN - 1)
elog(WARN, "CopyReadAttribute - attribute length too long");
}
@ -1014,19 +1161,22 @@ CopyAttributeOut(FILE *fp, char *string, char *delim)
if (len && (string[0] == '{') && (string[len - 1] == '}'))
is_array = true;
for ( ; (c = *string) != '\0'; string++) {
for (; (c = *string) != '\0'; string++)
{
if (c == delim[0] || c == '\n' ||
(c == '\\' && !is_array))
fputc('\\', fp);
else
if (c == '\\' && is_array)
if (*(string+1) == '\\') {
else if (c == '\\' && is_array)
if (*(string + 1) == '\\')
{
/* translate \\ to \\\\ */
fputc('\\', fp);
fputc('\\', fp);
fputc('\\', fp);
string++;
} else if (*(string+1) == '"') {
}
else if (*(string + 1) == '"')
{
/* translate \" to \\\" */
fputc('\\', fp);
fputc('\\', fp);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.14 1997/08/22 03:03:56 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.15 1997/09/07 04:40:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -33,7 +33,8 @@
* ----------------
*/
static int checkAttrExists(char *attributeName,
static int
checkAttrExists(char *attributeName,
char *attributeType, List * schema);
static List *MergeAttributes(List * schema, List * supers, List ** supconstr);
static void StoreCatalogInheritance(Oid relationId, List * supers);
@ -55,14 +56,17 @@ DefineRelation(CreateStmt *stmt)
char *archiveName = NULL;
TupleDesc descriptor;
List *constraints;
int heaploc, archloc;
int heaploc,
archloc;
char* typename = NULL; /* the typename of this relation. not useod for now */
char *typename = NULL; /* the typename of this relation.
* not useod for now */
if (strlen(stmt->relname) >= NAMEDATALEN)
elog(WARN, "the relation name %s is >= %d characters long", stmt->relname,
NAMEDATALEN);
strNcpy(relname,stmt->relname,NAMEDATALEN-1); /* make full length for copy */
strNcpy(relname, stmt->relname, NAMEDATALEN - 1); /* make full length for
* copy */
/* ----------------
* Handle parameters
@ -78,7 +82,8 @@ DefineRelation(CreateStmt *stmt)
*/
archChar = 'n';
switch (stmt->archiveType) {
switch (stmt->archiveType)
{
case ARCH_NONE:
archChar = 'n';
break;
@ -100,13 +105,17 @@ DefineRelation(CreateStmt *stmt)
heaploc = stmt->location;
/*
* For now, any user-defined relation defaults to the magnetic
* disk storgage manager. --mao 2 july 91
* For now, any user-defined relation defaults to the magnetic disk
* storgage manager. --mao 2 july 91
*/
if (stmt->archiveLoc == -1) {
if (stmt->archiveLoc == -1)
{
archloc = 0;
} else {
if (archChar == 'n') {
}
else
{
if (archChar == 'n')
{
elog(WARN, "Set archive location, but not mode, for %s",
relname);
}
@ -121,7 +130,8 @@ DefineRelation(CreateStmt *stmt)
constraints = nconc(constraints, stmt->constraints);
numberOfAttributes = length(schema);
if (numberOfAttributes <= 0) {
if (numberOfAttributes <= 0)
{
elog(WARN, "DefineRelation: %s",
"please inherit from a relation or define an attribute");
}
@ -195,16 +205,18 @@ DefineRelation(CreateStmt *stmt)
if (archChar != 'n')
{
TupleDesc tupdesc;
/*
* Need to create an archive relation for this heap relation.
* We cobble up the command by hand, and increment the command
* Need to create an archive relation for this heap relation. We
* cobble up the command by hand, and increment the command
* counter ourselves.
*/
CommandCounterIncrement();
archiveName = MakeArchiveName(relationId);
tupdesc = CreateTupleDescCopy (descriptor); /* get rid of constraints */
tupdesc = CreateTupleDescCopy(descriptor); /* get rid of
* constraints */
(void) heap_create(archiveName,
typename,
'n', /* archive isn't archived */
@ -275,30 +287,37 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
List *constraints = NIL;
/*
* Validates that there are no duplications.
* Validity checking of types occurs later.
* Validates that there are no duplications. Validity checking of
* types occurs later.
*/
foreach (entry, schema) {
foreach(entry, schema)
{
List *rest;
ColumnDef *coldef = lfirst(entry);
foreach (rest, lnext(entry)) {
foreach(rest, lnext(entry))
{
/*
* check for duplicated relation names
*/
ColumnDef *restdef = lfirst(rest);
if (!strcmp(coldef->colname, restdef->colname)) {
if (!strcmp(coldef->colname, restdef->colname))
{
elog(WARN, "attribute \"%s\" duplicated",
coldef->colname);
}
}
}
foreach (entry, supers) {
foreach(entry, supers)
{
List *rest;
foreach (rest, lnext(entry)) {
if (!strcmp(strVal(lfirst(entry)), strVal(lfirst(rest)))) {
foreach(rest, lnext(entry))
{
if (!strcmp(strVal(lfirst(entry)), strVal(lfirst(rest))))
{
elog(WARN, "relation \"%s\" duplicated",
strVal(lfirst(entry)));
}
@ -308,7 +327,8 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
/*
* merge the inherited attributes into the schema
*/
foreach (entry, supers) {
foreach(entry, supers)
{
char *name = strVal(lfirst(entry));
Relation relation;
List *partialResult = NIL;
@ -317,7 +337,8 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
TupleConstr *constr;
relation = heap_openr(name);
if (relation==NULL) {
if (relation == NULL)
{
elog(WARN,
"MergeAttr: Can't inherit from non-existent superclass '%s'",
name);
@ -330,7 +351,8 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
tupleDesc = RelationGetTupleDescriptor(relation);
constr = tupleDesc->constr;
for (attrno = relation->rd_rel->relnatts - 1; attrno >= 0; attrno--) {
for (attrno = relation->rd_rel->relnatts - 1; attrno >= 0; attrno--)
{
AttributeTupleForm attribute = tupleDesc->attrs[attrno];
char *attributeName;
char *attributeType;
@ -349,12 +371,15 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
AssertState(HeapTupleIsValid(tuple));
attributeType =
(((TypeTupleForm) GETSTRUCT(tuple))->typname).data;
/*
* check validity
*
*/
if (checkAttrExists(attributeName, attributeType, inhSchema) ||
checkAttrExists(attributeName, attributeType, schema)) {
checkAttrExists(attributeName, attributeType, schema))
{
/*
* this entry already exists
*/
@ -461,7 +486,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
seqNumber = 1;
idList = NIL;
foreach (entry, supers) {
foreach(entry, supers)
{
Datum datum[Natts_pg_inherits];
char nullarr[Natts_pg_inherits];
@ -508,7 +534,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
* 1.
* ----------------
*/
foreach (entry, idList) {
foreach(entry, idList)
{
HeapTuple tuple;
Oid id;
int16 number;
@ -519,7 +546,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
current = entry;
next = lnext(entry);
for (number = 1; ; number += 1) {
for (number = 1;; number += 1)
{
tuple = SearchSysCacheTuple(INHRELID,
ObjectIdGetDatum(id),
Int16GetDatum(number),
@ -542,20 +570,25 @@ StoreCatalogInheritance(Oid relationId, List *supers)
* 2.
* ----------------
*/
foreach (entry, idList) {
foreach(entry, idList)
{
Oid name;
List *rest;
bool found = false;
again:
name = lfirsti(entry);
foreach (rest, lnext(entry)) {
if (name == lfirsti(rest)) {
foreach(rest, lnext(entry))
{
if (name == lfirsti(rest))
{
found = true;
break;
}
}
if (found) {
if (found)
{
/*
* entry list must be of length >= 2 or else no match
*
@ -578,7 +611,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
seqNumber = 1;
foreach (entry, idList) {
foreach(entry, idList)
{
Datum datum[Natts_pg_ipl];
char nullarr[Natts_pg_ipl];
@ -610,14 +644,18 @@ checkAttrExists(char *attributeName, char *attributeType, List *schema)
{
List *s;
foreach (s, schema) {
foreach(s, schema)
{
ColumnDef *def = lfirst(s);
if (!strcmp(attributeName, def->colname)) {
if (!strcmp(attributeName, def->colname))
{
/*
* attribute exists. Make sure the types are the same.
*/
if (strcmp(attributeType, def->typename->name) != 0) {
if (strcmp(attributeType, def->typename->name) != 0)
{
elog(WARN, "%s and %s conflict for %s",
attributeType, def->typename->name, attributeName);
}
@ -640,9 +678,9 @@ MakeArchiveName(Oid relationId)
char *arch;
/*
* Archive relations are named a,XXXXX where XXXXX == the OID
* of the relation they archive. Create a string containing
* this name and find the reldesc for the archive relation.
* Archive relations are named a,XXXXX where XXXXX == the OID of the
* relation they archive. Create a string containing this name and
* find the reldesc for the archive relation.
*/
arch = palloc(NAMEDATALEN);
sprintf(arch, "a,%d", relationId);
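Every hunk in this commit applies the same mechanical re-layout: opening braces move onto their own line, "} else {" is split across three lines, comma-separated declarations are broken out one variable per line with aligned names, and long block comments are re-wrapped. Below is a minimal before/after sketch of that layout on a throwaway function; the function, its names, and the exact column widths are illustrative assumptions, not code taken from the diff.

#include <stdio.h>

/* Old layout, as in the removed lines: cuddled braces, joined declarations. */
static int
lookup_old(int key, const int *tab, int n)
{
    int i, found = 0;

    for (i = 0; i < n; i++) {
        if (tab[i] == key) {
            found = 1;
            break;
        } else {
            continue;
        }
    }
    return found;
}

/* New layout, as in the added lines: braces on their own line,
 * one variable per line with aligned names, "else" on its own line. */
static int
lookup_new(int key, const int *tab, int n)
{
    int         i,
                found = 0;

    for (i = 0; i < n; i++)
    {
        if (tab[i] == key)
        {
            found = 1;
            break;
        }
        else
        {
            continue;
        }
    }
    return found;
}

int
main(void)
{
    int tab[] = {3, 1, 4};

    /* Both layouts compile to the same behavior: prints "1 0". */
    printf("%d %d\n", lookup_old(4, tab, 3), lookup_new(5, tab, 3));
    return 0;
}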


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/defind.c,v 1.12 1997/03/26 03:05:28 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/defind.c,v 1.13 1997/09/07 04:40:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -40,13 +40,16 @@
/* non-export function prototypes */
static void CheckPredicate(List * predList, List * rangeTable, Oid baseRelOid);
static void CheckPredExpr(Node *predicate, List *rangeTable,
static void
CheckPredExpr(Node * predicate, List * rangeTable,
Oid baseRelOid);
static void
CheckPredClause(Expr * predicate, List * rangeTable, Oid baseRelOid);
static void FuncIndexArgs(IndexElem *funcIndex, AttrNumber *attNumP,
static void
FuncIndexArgs(IndexElem * funcIndex, AttrNumber * attNumP,
Oid * argTypes, Oid * opOidP, Oid relId);
static void NormIndexAttrs(List *attList, AttrNumber *attNumP,
static void
NormIndexAttrs(List * attList, AttrNumber * attNumP,
Oid * opOidP, Oid relId);
static char *GetDefaultOpClass(Oid atttypid);
@ -90,7 +93,8 @@ DefineIndex(char *heapRelationName,
* Handle attributes
*/
numberOfAttributes = length(attributeList);
if (numberOfAttributes <= 0) {
if (numberOfAttributes <= 0)
{
elog(WARN, "DefineIndex: must specify at least one attribute");
}
@ -100,7 +104,8 @@ DefineIndex(char *heapRelationName,
tuple = SearchSysCacheTuple(RELNAME,
PointerGetDatum(heapRelationName),
0, 0, 0);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "DefineIndex: %s relation not found",
heapRelationName);
}
@ -117,7 +122,8 @@ DefineIndex(char *heapRelationName,
*/
tuple = SearchSysCacheTuple(AMNAME, PointerGetDatum(accessMethodName),
0, 0, 0);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "DefineIndex: %s access method not found",
accessMethodName);
}
@ -125,13 +131,13 @@ DefineIndex(char *heapRelationName,
/*
* Handle parameters
* [param list is now different (NOT USED, really) - ay 10/94]
* Handle parameters [param list is now different (NOT USED, really) -
* ay 10/94]
*
* WITH clause reinstated to handle lossy indices.
* -- JMH, 7/22/96
* WITH clause reinstated to handle lossy indices. -- JMH, 7/22/96
*/
foreach(pl, parameterList) {
foreach(pl, parameterList)
{
ParamString *param = (ParamString *) lfirst(pl);
if (!strcasecmp(param->name, "islossy"))
@ -141,24 +147,27 @@ DefineIndex(char *heapRelationName,
/*
* Convert the partial-index predicate from parsetree form to plan
* form, so it can be readily evaluated during index creation.
* Note: "predicate" comes in as a list containing (1) the predicate
* itself (a where_clause), and (2) a corresponding range table.
* form, so it can be readily evaluated during index creation. Note:
* "predicate" comes in as a list containing (1) the predicate itself
* (a where_clause), and (2) a corresponding range table.
*
* [(1) is 'predicate' and (2) is 'rangetable' now. - ay 10/94]
*/
if (predicate != NULL && rangetable != NIL) {
if (predicate != NULL && rangetable != NIL)
{
cnfPred = cnfify((Expr *) copyObject(predicate), true);
fix_opids(cnfPred);
CheckPredicate(cnfPred, rangetable, relationId);
}
if (IsFuncIndex(attributeList)) {
if (IsFuncIndex(attributeList))
{
IndexElem *funcIndex = lfirst(attributeList);
int nargs;
nargs = length(funcIndex->args);
if (nargs > INDEX_MAX_KEYS) {
if (nargs > INDEX_MAX_KEYS)
{
elog(WARN,
"Too many args to function, limit of %d",
INDEX_MAX_KEYS);
@ -184,7 +193,9 @@ DefineIndex(char *heapRelationName,
numberOfAttributes, attributeNumberA,
classObjectId, parameterCount, parameterA, (Node *) cnfPred,
lossy, unique);
}else {
}
else
{
attributeNumberA =
(AttrNumber *) palloc(numberOfAttributes *
sizeof attributeNumberA[0]);
@ -216,7 +227,8 @@ ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable)
{
Oid *classObjectId;
Oid accessMethodId;
Oid indexId, relationId;
Oid indexId,
relationId;
Oid indproc;
int numberOfAttributes;
AttrNumber *attributeNumberA;
@ -236,7 +248,8 @@ ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable)
*/
tuple = SearchSysCacheTuple(RELNAME, PointerGetDatum(indexRelationName),
0, 0, 0);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "ExtendIndex: %s index not found",
indexRelationName);
}
@ -249,7 +262,8 @@ ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable)
tuple = SearchSysCacheTuple(INDEXRELID,
ObjectIdGetDatum(indexId),
0, 0, 0);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "ExtendIndex: %s is not an index",
indexRelationName);
}
@ -263,10 +277,12 @@ ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable)
indproc = index->indproc;
for (i = 0; i < INDEX_MAX_KEYS; i++)
if (index->indkey[i] == 0) break;
if (index->indkey[i] == 0)
break;
numberOfAttributes = i;
if (VARSIZE(&index->indpred) != 0) {
if (VARSIZE(&index->indpred) != 0)
{
char *predString;
predString = fmgr(F_TEXTOUT, &index->indpred);
@ -278,12 +294,13 @@ ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable)
indexRelationName);
/*
* Convert the extension predicate from parsetree form to plan
* form, so it can be readily evaluated during index creation.
* Note: "predicate" comes in as a list containing (1) the predicate
* itself (a where_clause), and (2) a corresponding range table.
* Convert the extension predicate from parsetree form to plan form,
* so it can be readily evaluated during index creation. Note:
* "predicate" comes in as a list containing (1) the predicate itself
* (a where_clause), and (2) a corresponding range table.
*/
if (rangetable != NIL) {
if (rangetable != NIL)
{
cnfPred = cnfify((Expr *) copyObject(predicate), true);
fix_opids(cnfPred);
CheckPredicate(cnfPred, rangetable, relationId);
@ -301,12 +318,14 @@ ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable)
(Oid *) palloc(numberOfAttributes * sizeof classObjectId[0]);
for (i=0; i<numberOfAttributes; i++) {
for (i = 0; i < numberOfAttributes; i++)
{
attributeNumberA[i] = index->indkey[i];
classObjectId[i] = index->indclass[i];
}
if (indproc != InvalidOid) {
if (indproc != InvalidOid)
{
funcInfo = &fInfo;
/* FIgetnArgs(funcInfo) = numberOfAttributes; */
FIsetnArgs(funcInfo, numberOfAttributes);
@ -349,7 +368,8 @@ CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid)
{
List *item;
foreach (item, predList) {
foreach(item, predList)
{
CheckPredExpr(lfirst(item), rangeTable, baseRelOid);
}
}
@ -357,19 +377,23 @@ CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid)
static void
CheckPredExpr(Node * predicate, List * rangeTable, Oid baseRelOid)
{
List *clauses = NIL, *clause;
List *clauses = NIL,
*clause;
if (is_opclause(predicate)) {
if (is_opclause(predicate))
{
CheckPredClause((Expr *) predicate, rangeTable, baseRelOid);
return;
} else if (or_clause(predicate))
}
else if (or_clause(predicate))
clauses = ((Expr *) predicate)->args;
else if (and_clause(predicate))
clauses = ((Expr *) predicate)->args;
else
elog(WARN, "Unsupported partial-index predicate expression type");
foreach (clause, clauses) {
foreach(clause, clauses)
{
CheckPredExpr(lfirst(clause), rangeTable, baseRelOid);
}
}
@ -385,7 +409,8 @@ CheckPredClause(Expr *predicate, List *rangeTable, Oid baseRelOid)
if (!IsA(predicate->oper, Oper) ||
!IsA(pred_var, Var) ||
!IsA(pred_const,Const)) {
!IsA(pred_const, Const))
{
elog(WARN, "Unsupported partial-index predicate clause type");
}
@ -422,7 +447,8 @@ FuncIndexArgs(IndexElem *funcIndex,
/*
* process the function arguments
*/
for (rest=funcIndex->args; rest != NIL; rest = lnext(rest)) {
for (rest = funcIndex->args; rest != NIL; rest = lnext(rest))
{
char *arg;
arg = strVal(lfirst(rest));
@ -431,7 +457,8 @@ FuncIndexArgs(IndexElem *funcIndex,
ObjectIdGetDatum(relId),
PointerGetDatum(arg), 0, 0);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN,
"DefineIndex: attribute \"%s\" not found",
arg);
@ -455,7 +482,8 @@ NormIndexAttrs(List *attList, /* list of IndexElem's */
* process attributeList
*/
for (rest=attList; rest != NIL; rest = lnext(rest)) {
for (rest = attList; rest != NIL; rest = lnext(rest))
{
IndexElem *attribute;
AttributeTupleForm attform;
@ -468,7 +496,8 @@ NormIndexAttrs(List *attList, /* list of IndexElem's */
ObjectIdGetDatum(relId),
PointerGetDatum(attribute->name),
0, 0);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN,
"DefineIndex: attribute \"%s\" not found",
attribute->name);
@ -477,10 +506,12 @@ NormIndexAttrs(List *attList, /* list of IndexElem's */
attform = (AttributeTupleForm) GETSTRUCT(tuple);
*attNumP++ = attform->attnum;
if (attribute->class == NULL) {
if (attribute->class == NULL)
{
/* no operator class specified, so find the default */
attribute->class = GetDefaultOpClass(attform->atttypid);
if(attribute->class == NULL) {
if (attribute->class == NULL)
{
elog(WARN,
"Can't find a default operator class for type %d.",
attform->atttypid);
@ -491,7 +522,8 @@ NormIndexAttrs(List *attList, /* list of IndexElem's */
PointerGetDatum(attribute->class),
0, 0, 0);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "DefineIndex: %s class not found",
attribute->class);
}
@ -507,7 +539,8 @@ GetDefaultOpClass(Oid atttypid)
tuple = SearchSysCacheTuple(CLADEFTYPE,
ObjectIdGetDatum(atttypid),
0, 0, 0);
if(!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
return 0;
}
@ -532,11 +565,13 @@ RemoveIndex(char *name)
PointerGetDatum(name),
0, 0, 0);
if (!HeapTupleIsValid(tuple)) {
if (!HeapTupleIsValid(tuple))
{
elog(WARN, "index \"%s\" nonexistent", name);
}
if (((Form_pg_class)GETSTRUCT(tuple))->relkind != RELKIND_INDEX) {
if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_INDEX)
{
elog(WARN, "relation \"%s\" is of type \"%c\"",
name,
((Form_pg_class) GETSTRUCT(tuple))->relkind);


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.13 1997/08/12 22:52:23 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.14 1997/09/07 04:40:46 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -60,7 +60,8 @@ static int defGetTypeLength(DefElem *def);
static void
case_translate_language_name(const char *input, char *output) {
case_translate_language_name(const char *input, char *output)
{
/*-------------------------------------------------------------------------
Translate the input language name to lower case, except if it's C,
translate to upper case.
@ -72,24 +73,30 @@ case_translate_language_name(const char *input, char *output) {
output[i] = '\0';
if (strcmp(output, "c") == 0) output[0] = 'C';
if (strcmp(output, "c") == 0)
output[0] = 'C';
}
static void
compute_return_type(const Node * returnType,
char **prorettype_p, bool *returnsSet_p) {
char **prorettype_p, bool * returnsSet_p)
{
/*---------------------------------------------------------------------------
Examine the "returns" clause returnType of the CREATE FUNCTION statement
and return information about it as **prorettype_p and **returnsSet.
----------------------------------------------------------------------------*/
if (nodeTag(returnType) == T_TypeName) {
if (nodeTag(returnType) == T_TypeName)
{
/* a set of values */
TypeName *setType = (TypeName *) returnType;
*prorettype_p = setType->name;
*returnsSet_p = true;
}else {
}
else
{
/* singleton */
*prorettype_p = strVal(returnType);
*returnsSet_p = false;
@ -101,7 +108,8 @@ compute_return_type(const Node *returnType,
static void
compute_full_attributes(const List * parameters, int32 * byte_pct_p,
int32 * perbyte_cpu_p, int32 * percall_cpu_p,
int32 *outin_ratio_p, bool *canCache_p) {
int32 * outin_ratio_p, bool * canCache_p)
{
/*--------------------------------------------------------------------------
Interpret the parameters *parameters and return their contents as
*byte_pct_p, etc.
@ -116,40 +124,59 @@ compute_full_attributes(const List *parameters, int32 *byte_pct_p,
*percall_cpu_p = PERCALL_CPU;
*outin_ratio_p = OUTIN_RATIO;
foreach(pl, (List *)parameters) {
foreach(pl, (List *) parameters)
{
ParamString *param = (ParamString *) lfirst(pl);
if (strcasecmp(param->name, "iscachable") == 0) {
if (strcasecmp(param->name, "iscachable") == 0)
{
*canCache_p = true;
} else if (strcasecmp(param->name, "trusted") == 0) {
}
else if (strcasecmp(param->name, "trusted") == 0)
{
/*
* we don't have untrusted functions any more. The 4.2
* implementation is lousy anyway so I took it out.
* -ay 10/94
* implementation is lousy anyway so I took it out. -ay 10/94
*/
elog(WARN, "untrusted function has been decommissioned.");
} else if (strcasecmp(param->name, "byte_pct") == 0) {
}
else if (strcasecmp(param->name, "byte_pct") == 0)
{
/*
* * handle expensive function parameters
*/
*byte_pct_p = atoi(param->val);
} else if (strcasecmp(param->name, "perbyte_cpu") == 0) {
if (sscanf(param->val, "%d", perbyte_cpu_p) == 0) {
}
else if (strcasecmp(param->name, "perbyte_cpu") == 0)
{
if (sscanf(param->val, "%d", perbyte_cpu_p) == 0)
{
int count;
char *ptr;
for (count = 0, ptr = param->val; *ptr != '\0'; ptr++)
if (*ptr == '!') count++;
if (*ptr == '!')
count++;
*perbyte_cpu_p = (int) pow(10.0, (double) count);
}
} else if (strcasecmp(param->name, "percall_cpu") == 0) {
if (sscanf(param->val, "%d", percall_cpu_p) == 0) {
}
else if (strcasecmp(param->name, "percall_cpu") == 0)
{
if (sscanf(param->val, "%d", percall_cpu_p) == 0)
{
int count;
char *ptr;
for (count = 0, ptr = param->val; *ptr != '\0'; ptr++)
if (*ptr == '!') count++;
if (*ptr == '!')
count++;
*percall_cpu_p = (int) pow(10.0, (double) count);
}
} else if (strcasecmp(param->name, "outin_ratio") == 0) {
}
else if (strcasecmp(param->name, "outin_ratio") == 0)
{
*outin_ratio_p = atoi(param->val);
}
}
@ -159,13 +186,17 @@ compute_full_attributes(const List *parameters, int32 *byte_pct_p,
static void
interpret_AS_clause(const char languageName[], const char as[],
char **prosrc_str_p, char **probin_str_p) {
char **prosrc_str_p, char **probin_str_p)
{
if (strcmp(languageName, "C") == 0 ||
strcmp(languageName, "internal") == 0 ) {
strcmp(languageName, "internal") == 0)
{
*prosrc_str_p = "-";
*probin_str_p = (char *) as;
} else {
}
else
{
*prosrc_str_p = (char *) as;
*probin_str_p = "-";
}
@ -182,21 +213,32 @@ void
CreateFunction(ProcedureStmt * stmt, CommandDest dest)
{
char *probin_str;
/* pathname of executable file that executes this function, if any */
char *prosrc_str;
/* SQL that executes this function, if any */
char *prorettype;
/* Type of return value (or member of set of values) from function */
char languageName[NAMEDATALEN];
/* name of language of function, with case adjusted:
"C", "internal", or "SQL"
/*
* name of language of function, with case adjusted: "C", "internal",
* or "SQL"
*/
/* The following are attributes of the function, as expressed in the
CREATE FUNCTION statement, where applicable.
/*
* The following are attributes of the function, as expressed in the
* CREATE FUNCTION statement, where applicable.
*/
int32 byte_pct, perbyte_cpu, percall_cpu, outin_ratio;
int32 byte_pct,
perbyte_cpu,
percall_cpu,
outin_ratio;
bool canCache;
bool returnsSet;
/* The function returns a set of values, as opposed to a singleton. */
@ -205,16 +247,21 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
compute_return_type(stmt->returnType, &prorettype, &returnsSet);
if (strcmp(languageName, "C") == 0 ||
strcmp(languageName, "internal") == 0 ) {
strcmp(languageName, "internal") == 0)
{
compute_full_attributes(stmt->withClause,
&byte_pct, &perbyte_cpu, &percall_cpu,
&outin_ratio, &canCache);
} else if (strcmp(languageName, "sql") == 0) {
}
else if (strcmp(languageName, "sql") == 0)
{
/* query optimizer groks sql, these are meaningless */
perbyte_cpu = percall_cpu = 0;
byte_pct = outin_ratio = 100;
canCache = false;
} else {
}
else
{
elog(WARN,
"Unrecognized language specified in a CREATE FUNCTION: "
"'%s'. Recognized languages are sql, C, and internal.",
@ -230,9 +277,12 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
"in the '%s' language. Others may use the 'sql' language.",
languageName);
/* Above does not return. */
else {
/* And now that we have all the parameters, and know we're permitted
to do so, go ahead and create the function.
else
{
/*
* And now that we have all the parameters, and know we're
* permitted to do so, go ahead and create the function.
*/
ProcedureCreate(stmt->funcname,
returnsSet,
@ -269,14 +319,18 @@ DefineOperator(char *oprName,
{
uint16 precedence = 0; /* operator precedence */
bool canHash = false; /* operator hashes */
bool isLeftAssociative=true; /* operator is left associative */
bool isLeftAssociative = true; /* operator is left
* associative */
char *functionName = NULL; /* function for operator */
char *typeName1 = NULL; /* first type name */
char *typeName2 = NULL; /* second type name */
char *commutatorName=NULL; /* optional commutator operator name */
char *commutatorName = NULL; /* optional commutator
* operator name */
char *negatorName = NULL; /* optional negator operator name */
char *restrictionName=NULL; /* optional restrict. sel. procedure */
char *joinName=NULL; /* optional join sel. procedure name */
char *restrictionName = NULL; /* optional restrict. sel.
* procedure */
char *joinName = NULL; /* optional join sel. procedure
* name */
char *sortName1 = NULL; /* optional first sort operator */
char *sortName2 = NULL; /* optional second sort operator */
List *pl;
@ -284,48 +338,76 @@ DefineOperator(char *oprName,
/*
* loop over the definition list and extract the information we need.
*/
foreach (pl, parameters) {
foreach(pl, parameters)
{
DefElem *defel = (DefElem *) lfirst(pl);
if (!strcasecmp(defel->defname, "leftarg")) {
if (!strcasecmp(defel->defname, "leftarg"))
{
/* see gram.y, must be setof */
if (nodeTag(defel->arg) == T_TypeName)
elog(WARN, "setof type not implemented for leftarg");
if (nodeTag(defel->arg)==T_String) {
if (nodeTag(defel->arg) == T_String)
{
typeName1 = defGetString(defel);
}else {
}
else
{
elog(WARN, "type for leftarg is malformed.");
}
} else if (!strcasecmp(defel->defname, "rightarg")) {
}
else if (!strcasecmp(defel->defname, "rightarg"))
{
/* see gram.y, must be setof */
if (nodeTag(defel->arg) == T_TypeName)
elog(WARN, "setof type not implemented for rightarg");
if (nodeTag(defel->arg)==T_String) {
if (nodeTag(defel->arg) == T_String)
{
typeName2 = defGetString(defel);
}else {
}
else
{
elog(WARN, "type for rightarg is malformed.");
}
} else if (!strcasecmp(defel->defname, "procedure")) {
}
else if (!strcasecmp(defel->defname, "procedure"))
{
functionName = defGetString(defel);
} else if (!strcasecmp(defel->defname, "precedence")) {
}
else if (!strcasecmp(defel->defname, "precedence"))
{
/* NOT IMPLEMENTED (never worked in v4.2) */
elog(NOTICE, "CREATE OPERATOR: precedence not implemented");
} else if (!strcasecmp(defel->defname, "associativity")) {
}
else if (!strcasecmp(defel->defname, "associativity"))
{
/* NOT IMPLEMENTED (never worked in v4.2) */
elog(NOTICE, "CREATE OPERATOR: associativity not implemented");
} else if (!strcasecmp(defel->defname, "commutator")) {
}
else if (!strcasecmp(defel->defname, "commutator"))
{
commutatorName = defGetString(defel);
} else if (!strcasecmp(defel->defname, "negator")) {
}
else if (!strcasecmp(defel->defname, "negator"))
{
negatorName = defGetString(defel);
} else if (!strcasecmp(defel->defname, "restrict")) {
}
else if (!strcasecmp(defel->defname, "restrict"))
{
restrictionName = defGetString(defel);
} else if (!strcasecmp(defel->defname, "join")) {
}
else if (!strcasecmp(defel->defname, "join"))
{
joinName = defGetString(defel);
} else if (!strcasecmp(defel->defname, "hashes")) {
}
else if (!strcasecmp(defel->defname, "hashes"))
{
canHash = TRUE;
} else if (!strcasecmp(defel->defname, "sort1")) {
}
else if (!strcasecmp(defel->defname, "sort1"))
{
/* ----------------
* XXX ( ... [ , sort1 = oprname ] [ , sort2 = oprname ] ... )
* XXX is undocumented in the reference manual source as of
@ -333,9 +415,13 @@ DefineOperator(char *oprName,
* ----------------
*/
sortName1 = defGetString(defel);
} else if (!strcasecmp(defel->defname, "sort2")) {
}
else if (!strcasecmp(defel->defname, "sort2"))
{
sortName2 = defGetString(defel);
} else {
}
else
{
elog(NOTICE, "DefineOperator: attribute \"%s\" not recognized",
defel->defname);
}
@ -344,7 +430,8 @@ DefineOperator(char *oprName,
/*
* make sure we have our required definitions
*/
if (functionName==NULL) {
if (functionName == NULL)
{
elog(WARN, "Define: \"procedure\" unspecified");
}
@ -358,9 +445,11 @@ DefineOperator(char *oprName,
functionName,/* function for operator */
precedence, /* operator precedence */
isLeftAssociative, /* operator is left associative */
commutatorName, /* optional commutator operator name */
commutatorName, /* optional commutator operator
* name */
negatorName, /* optional negator operator name */
restrictionName, /* optional restrict. sel. procedure */
restrictionName, /* optional restrict. sel.
* procedure */
joinName, /* optional join sel. procedure name */
canHash, /* operator hashes */
sortName1, /* optional first sort operator */
@ -386,39 +475,59 @@ DefineAggregate(char *aggName, List *parameters)
char *init2 = NULL;
List *pl;
foreach (pl, parameters) {
foreach(pl, parameters)
{
DefElem *defel = (DefElem *) lfirst(pl);
/*
* sfunc1
*/
if (!strcasecmp(defel->defname, "sfunc1")) {
if (!strcasecmp(defel->defname, "sfunc1"))
{
stepfunc1Name = defGetString(defel);
} else if (!strcasecmp(defel->defname, "basetype")) {
}
else if (!strcasecmp(defel->defname, "basetype"))
{
baseType = defGetString(defel);
} else if (!strcasecmp(defel->defname, "stype1")) {
}
else if (!strcasecmp(defel->defname, "stype1"))
{
stepfunc1Type = defGetString(defel);
/*
* sfunc2
*/
} else if (!strcasecmp(defel->defname, "sfunc2")) {
}
else if (!strcasecmp(defel->defname, "sfunc2"))
{
stepfunc2Name = defGetString(defel);
} else if (!strcasecmp(defel->defname, "stype2")) {
}
else if (!strcasecmp(defel->defname, "stype2"))
{
stepfunc2Type = defGetString(defel);
/*
* final
*/
} else if (!strcasecmp(defel->defname, "finalfunc")) {
}
else if (!strcasecmp(defel->defname, "finalfunc"))
{
finalfuncName = defGetString(defel);
/*
* initial conditions
*/
} else if (!strcasecmp(defel->defname, "initcond1")) {
}
else if (!strcasecmp(defel->defname, "initcond1"))
{
init1 = defGetString(defel);
} else if (!strcasecmp(defel->defname, "initcond2")) {
}
else if (!strcasecmp(defel->defname, "initcond2"))
{
init2 = defGetString(defel);
} else {
}
else
{
elog(NOTICE, "DefineAggregate: attribute \"%s\" not recognized",
defel->defname);
}
@ -429,11 +538,13 @@ DefineAggregate(char *aggName, List *parameters)
*/
if (baseType == NULL)
elog(WARN, "Define: \"basetype\" unspecified");
if (stepfunc1Name!=NULL) {
if (stepfunc1Name != NULL)
{
if (stepfunc1Type == NULL)
elog(WARN, "Define: \"stype1\" unspecified");
}
if (stepfunc2Name!=NULL) {
if (stepfunc2Name != NULL)
{
if (stepfunc2Type == NULL)
elog(WARN, "Define: \"stype2\" unspecified");
}
@ -480,46 +591,78 @@ DefineType(char *typeName, List *parameters)
* Type names can only be 15 characters long, so that the shadow type
* can be created using the 16th character as necessary.
*/
if (strlen(typeName) >= (NAMEDATALEN - 1)) {
if (strlen(typeName) >= (NAMEDATALEN - 1))
{
elog(WARN, "DefineType: type names must be %d characters or less",
NAMEDATALEN - 1);
}
foreach(pl, parameters) {
foreach(pl, parameters)
{
DefElem *defel = (DefElem *) lfirst(pl);
if (!strcasecmp(defel->defname, "internallength")) {
if (!strcasecmp(defel->defname, "internallength"))
{
internalLength = defGetTypeLength(defel);
}else if (!strcasecmp(defel->defname, "externallength")) {
}
else if (!strcasecmp(defel->defname, "externallength"))
{
externalLength = defGetTypeLength(defel);
}else if (!strcasecmp(defel->defname, "input")) {
}
else if (!strcasecmp(defel->defname, "input"))
{
inputName = defGetString(defel);
}else if (!strcasecmp(defel->defname, "output")) {
}
else if (!strcasecmp(defel->defname, "output"))
{
outputName = defGetString(defel);
}else if (!strcasecmp(defel->defname, "send")) {
}
else if (!strcasecmp(defel->defname, "send"))
{
sendName = defGetString(defel);
}else if (!strcasecmp(defel->defname, "delimiter")) {
}
else if (!strcasecmp(defel->defname, "delimiter"))
{
char *p = defGetString(defel);
delimiter = p[0];
}else if (!strcasecmp(defel->defname, "receive")) {
}
else if (!strcasecmp(defel->defname, "receive"))
{
receiveName = defGetString(defel);
}else if (!strcasecmp(defel->defname, "element")) {
}
else if (!strcasecmp(defel->defname, "element"))
{
elemName = defGetString(defel);
}else if (!strcasecmp(defel->defname, "default")) {
}
else if (!strcasecmp(defel->defname, "default"))
{
defaultValue = defGetString(defel);
}else if (!strcasecmp(defel->defname, "passedbyvalue")) {
}
else if (!strcasecmp(defel->defname, "passedbyvalue"))
{
byValue = true;
}else if (!strcasecmp(defel->defname, "alignment")) {
}
else if (!strcasecmp(defel->defname, "alignment"))
{
char *a = defGetString(defel);
if (!strcasecmp(a, "double")) {
if (!strcasecmp(a, "double"))
{
alignment = 'd';
} else if (!strcasecmp(a, "int")) {
}
else if (!strcasecmp(a, "int"))
{
alignment = 'i';
} else {
}
else
{
elog(WARN, "DefineType: \"%s\" alignment not recognized",
a);
}
}else {
}
else
{
elog(NOTICE, "DefineType: attribute \"%s\" not recognized",
defel->defname);
}
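DefineOperator, DefineAggregate and DefineType above all share one parsing pattern: walk the parameter list, dispatch on the keyword name with strcasecmp, warn on anything unrecognized, and only afterwards check that the required keywords were supplied. The following self-contained sketch shows that pattern; the KeyVal struct, define_operator_sketch, and the sample keywords are stand-ins for illustration, not the backend's List/DefElem machinery.

#include <stdio.h>
#include <strings.h>            /* strcasecmp */

typedef struct KeyVal           /* stand-in for a DefElem-style node */
{
    const char *name;           /* parameter keyword, e.g. "procedure" */
    const char *val;            /* its string value, if any */
} KeyVal;

static void
define_operator_sketch(const KeyVal *params, int nparams)
{
    const char *functionName = NULL;    /* required */
    const char *commutatorName = NULL;  /* optional */
    int         canHash = 0;
    int         i;

    for (i = 0; i < nparams; i++)
    {
        if (!strcasecmp(params[i].name, "procedure"))
            functionName = params[i].val;
        else if (!strcasecmp(params[i].name, "commutator"))
            commutatorName = params[i].val;
        else if (!strcasecmp(params[i].name, "hashes"))
            canHash = 1;
        else
            printf("attribute \"%s\" not recognized\n", params[i].name);
    }

    /* required definitions are checked only after the whole list is read */
    if (functionName == NULL)
    {
        printf("Define: \"procedure\" unspecified\n");
        return;
    }
    printf("procedure=%s commutator=%s hashes=%d\n",
           functionName, commutatorName ? commutatorName : "-", canHash);
}

int
main(void)
{
    KeyVal params[] = {
        {"procedure", "int4eq"},
        {"hashes", NULL},
        {"precedence", "5"},    /* not handled in this sketch */
    };

    define_operator_sketch(params, 3);
    return 0;
}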


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.10 1997/08/18 20:52:17 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.11 1997/09/07 04:40:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -25,7 +25,8 @@
#include <optimizer/planner.h>
#include <access/xact.h>
typedef struct ExplainState {
typedef struct ExplainState
{
/* options */
bool printCost; /* print cost */
bool printNodes; /* do nodeToString() instead */
@ -43,13 +44,16 @@ static char *Explain_PlanToString(Plan *plan, ExplainState *es);
void
ExplainQuery(Query * query, bool verbose, CommandDest dest)
{
char *s = NULL, *s2;
char *s = NULL,
*s2;
Plan *plan;
ExplainState *es;
int len;
if (IsAbortedTransactionBlockState()) {
if (IsAbortedTransactionBlockState())
{
char *tag = "*ABORT STATE*";
EndCommand(tag, dest);
elog(NOTICE, "(transaction aborted): %s",
@ -78,11 +82,13 @@ ExplainQuery(Query *query, bool verbose, CommandDest dest)
if (es->printNodes)
s = nodeToString(plan);
if (es->printCost) {
if (es->printCost)
{
s2 = Explain_PlanToString(plan, es);
if (s == NULL)
s = s2;
else {
else
{
strcat(s, "\n\n");
strcat(s, s2);
}
@ -92,7 +98,8 @@ ExplainQuery(Query *query, bool verbose, CommandDest dest)
len = strlen(s);
elog(NOTICE, "QUERY PLAN:\n\n%.*s", ELOG_MAXLEN - 64, s);
len -= ELOG_MAXLEN - 64;
while (len > 0) {
while (len > 0)
{
s += ELOG_MAXLEN - 64;
elog(NOTICE, "%.*s", ELOG_MAXLEN - 64, s);
len -= ELOG_MAXLEN - 64;
@ -115,12 +122,14 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
char buf[1000];
int i;
if (plan==NULL) {
if (plan == NULL)
{
appendStringInfo(str, "\n");
return;
}
switch(nodeTag(plan)) {
switch (nodeTag(plan))
{
case T_Result:
pname = "Result";
break;
@ -172,11 +181,14 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
appendStringInfo(str, " ");
appendStringInfo(str, pname);
switch(nodeTag(plan)) {
switch (nodeTag(plan))
{
case T_SeqScan:
case T_IndexScan:
if (((Scan*)plan)->scanrelid > 0) {
if (((Scan *) plan)->scanrelid > 0)
{
RangeTblEntry *rte = nth(((Scan *) plan)->scanrelid - 1, es->rtable);
sprintf(buf, " on %s", rte->refname);
appendStringInfo(str, buf);
}
@ -184,7 +196,8 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
default:
break;
}
if (es->printCost) {
if (es->printCost)
{
sprintf(buf, " (cost=%.2f size=%d width=%d)",
plan->cost, plan->plan_size, plan->plan_width);
appendStringInfo(str, buf);
@ -192,7 +205,8 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
appendStringInfo(str, "\n");
/* lefttree */
if (outerPlan(plan)) {
if (outerPlan(plan))
{
for (i = 0; i < indent; i++)
appendStringInfo(str, " ");
appendStringInfo(str, " -> ");
@ -200,7 +214,8 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
}
/* righttree */
if (innerPlan(plan)) {
if (innerPlan(plan))
{
for (i = 0; i < indent; i++)
appendStringInfo(str, " ");
appendStringInfo(str, " -> ");


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/purge.c,v 1.6 1997/08/12 22:52:25 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/purge.c,v 1.7 1997/09/07 04:40:51 momjian Exp $
*
* Note:
* XXX There are many instances of int32 instead of ...Time. These
@ -48,7 +48,8 @@ RelationPurge(char *relationName,
{0, Anum_pg_class_relname, F_NAMEEQ}
};
Buffer buffer;
HeapTuple newTuple, oldTuple;
HeapTuple newTuple,
oldTuple;
AbsoluteTime currentTime;
char *values[Natts_pg_class];
char nulls[Natts_pg_class];
@ -56,23 +57,26 @@ RelationPurge(char *relationName,
Relation idescs[Num_pg_class_indices];
/*
* XXX for some reason getmyrelids (in inval.c) barfs when
* you heap_replace tuples from these classes. i thought
* setheapoverride would fix it but it didn't. for now,
* just disallow purge on these classes.
* XXX for some reason getmyrelids (in inval.c) barfs when you
* heap_replace tuples from these classes. i thought setheapoverride
* would fix it but it didn't. for now, just disallow purge on these
* classes.
*/
if (strcmp(RelationRelationName, relationName) == 0 ||
strcmp(AttributeRelationName, relationName) == 0 ||
strcmp(AccessMethodRelationName, relationName) == 0 ||
strcmp(AccessMethodOperatorRelationName, relationName) == 0) {
strcmp(AccessMethodOperatorRelationName, relationName) == 0)
{
elog(WARN, "%s: cannot purge catalog \"%s\"",
cmdname, relationName);
}
if (PointerIsValid(absoluteTimeString)) {
if (PointerIsValid(absoluteTimeString))
{
absoluteTime = (int32) nabstimein(absoluteTimeString);
absoluteTimeString[0] = '\0';
if (absoluteTime == INVALID_ABSTIME) {
if (absoluteTime == INVALID_ABSTIME)
{
elog(NOTICE, "%s: bad absolute time string \"%s\"",
cmdname, absoluteTimeString);
elog(WARN, "purge not executed");
@ -84,8 +88,10 @@ RelationPurge(char *relationName,
cmdname, absoluteTimeString, absoluteTime);
#endif /* defined(PURGEDEBUG) */
if (PointerIsValid(relativeTimeString)) {
if (isreltime(relativeTimeString) != 1) {
if (PointerIsValid(relativeTimeString))
{
if (isreltime(relativeTimeString) != 1)
{
elog(WARN, "%s: bad relative time string \"%s\"",
cmdname, relativeTimeString);
}
@ -106,7 +112,8 @@ RelationPurge(char *relationName,
scan = heap_beginscan(relation, 0, NowTimeQual, 1, key);
oldTuple = heap_getnext(scan, 0, &buffer);
if (!HeapTupleIsValid(oldTuple)) {
if (!HeapTupleIsValid(oldTuple))
{
heap_endscan(scan);
heap_close(relation);
elog(WARN, "%s: no such relation: %s", cmdname, relationName);
@ -117,26 +124,31 @@ RelationPurge(char *relationName,
* Dig around in the tuple.
*/
currentTime = GetCurrentTransactionStartTime();
if (!RelativeTimeIsValid(relativeTime)) {
if (!RelativeTimeIsValid(relativeTime))
{
dateTag = ABSOLUTE;
if (!AbsoluteTimeIsValid(absoluteTime))
absoluteTime = currentTime;
} else if (!AbsoluteTimeIsValid(absoluteTime))
}
else if (!AbsoluteTimeIsValid(absoluteTime))
dateTag = RELATIVE;
else
dateTag = ABSOLUTE | RELATIVE;
for (i = 0; i < Natts_pg_class; ++i) {
for (i = 0; i < Natts_pg_class; ++i)
{
nulls[i] = heap_attisnull(oldTuple, i + 1) ? 'n' : ' ';
values[i] = NULL;
replace[i] = ' ';
}
if (dateTag & ABSOLUTE) {
if (dateTag & ABSOLUTE)
{
values[Anum_pg_class_relexpires - 1] =
(char *) UInt32GetDatum(absoluteTime);
replace[Anum_pg_class_relexpires - 1] = 'r';
}
if (dateTag & RELATIVE) {
if (dateTag & RELATIVE)
{
values[Anum_pg_class_relpreserved - 1] =
(char *) UInt32GetDatum(relativeTime);
replace[Anum_pg_class_relpreserved - 1] = 'r';
@ -162,4 +174,3 @@ RelationPurge(char *relationName,
heap_close(relation);
return (1);
}

File diff suppressed because it is too large


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.10 1997/08/18 20:52:17 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.11 1997/09/07 04:40:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -59,17 +59,21 @@ RemoveOperator(char *operatorName, /* operator name */
ScanKeyData operatorKey[3];
char *userName;
if (typeName1) {
if (typeName1)
{
typeId1 = TypeGet(typeName1, &defined);
if (!OidIsValid(typeId1)) {
if (!OidIsValid(typeId1))
{
elog(WARN, "RemoveOperator: type '%s' does not exist", typeName1);
return;
}
}
if (typeName2) {
if (typeName2)
{
typeId2 = TypeGet(typeName2, &defined);
if (!OidIsValid(typeId2)) {
if (!OidIsValid(typeId2))
{
elog(WARN, "RemoveOperator: type '%s' does not exist", typeName2);
return;
}
@ -93,7 +97,8 @@ RemoveOperator(char *operatorName, /* operator name */
relation = heap_openr(OperatorRelationName);
scan = heap_beginscan(relation, 0, NowTimeQual, 3, operatorKey);
tup = heap_getnext(scan, 0, &buffer);
if (HeapTupleIsValid(tup)) {
if (HeapTupleIsValid(tup))
{
#ifndef NO_SECURITY
userName = GetPgUserName();
if (!pg_ownercheck(userName,
@ -104,17 +109,24 @@ RemoveOperator(char *operatorName, /* operator name */
#endif
ItemPointerCopy(&tup->t_ctid, &itemPointerData);
heap_delete(relation, &itemPointerData);
} else {
if (OidIsValid(typeId1) && OidIsValid(typeId2)) {
}
else
{
if (OidIsValid(typeId1) && OidIsValid(typeId2))
{
elog(WARN, "RemoveOperator: binary operator '%s' taking '%s' and '%s' does not exist",
operatorName,
typeName1,
typeName2);
} else if (OidIsValid(typeId1)) {
}
else if (OidIsValid(typeId1))
{
elog(WARN, "RemoveOperator: right unary operator '%s' taking '%s' does not exist",
operatorName,
typeName1);
} else {
}
else
{
elog(WARN, "RemoveOperator: left unary operator '%s' taking '%s' does not exist",
operatorName,
typeName2);
@ -148,10 +160,12 @@ SingleOpOperatorRemove(Oid typeOid)
ScanKeyEntryInitialize(&key[0],
0, 0, ObjectIdEqualRegProcedure, (Datum) typeOid);
rdesc = heap_openr(OperatorRelationName);
for (i = 0; i < 3; ++i) {
for (i = 0; i < 3; ++i)
{
key[0].sk_attno = attnums[i];
sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 1, key);
while (PointerIsValid(tup = heap_getnext(sdesc, 0, &buffer))) {
while (PointerIsValid(tup = heap_getnext(sdesc, 0, &buffer)))
{
ItemPointerCopy(&tup->t_ctid, &itemPointerData);
/* XXX LOCK not being passed */
heap_delete(rdesc, &itemPointerData);
@ -170,11 +184,13 @@ SingleOpOperatorRemove(Oid typeOid)
static void
AttributeAndRelationRemove(Oid typeOid)
{
struct oidlist {
struct oidlist
{
Oid reloid;
struct oidlist *next;
};
struct oidlist *oidptr, *optr;
struct oidlist *oidptr,
*optr;
Relation rdesc;
ScanKeyData key[1];
HeapScanDesc sdesc;
@ -183,11 +199,10 @@ AttributeAndRelationRemove(Oid typeOid)
Buffer buffer;
/*
* Get the oid's of the relations to be removed by scanning the
* entire attribute relation.
* We don't need to remove the attributes here,
* because amdestroy will remove all attributes of the relation.
* XXX should check for duplicate relations
* Get the oid's of the relations to be removed by scanning the entire
* attribute relation. We don't need to remove the attributes here,
* because amdestroy will remove all attributes of the relation. XXX
* should check for duplicate relations
*/
ScanKeyEntryInitialize(&key[0],
@ -198,7 +213,8 @@ AttributeAndRelationRemove(Oid typeOid)
optr = oidptr;
rdesc = heap_openr(AttributeRelationName);
sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 1, key);
while (PointerIsValid(tup = heap_getnext(sdesc, 0, &buffer))) {
while (PointerIsValid(tup = heap_getnext(sdesc, 0, &buffer)))
{
ItemPointerCopy(&tup->t_ctid, &itemPointerData);
optr->reloid = ((AttributeTupleForm) GETSTRUCT(tup))->attrelid;
optr->next = (struct oidlist *) palloc(sizeof(*oidptr));
@ -214,11 +230,13 @@ AttributeAndRelationRemove(Oid typeOid)
ObjectIdEqualRegProcedure, (Datum) 0);
optr = oidptr;
rdesc = heap_openr(RelationRelationName);
while (PointerIsValid((char *) optr->next)) {
while (PointerIsValid((char *) optr->next))
{
key[0].sk_argument = (Datum) (optr++)->reloid;
sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 1, key);
tup = heap_getnext(sdesc, 0, &buffer);
if (PointerIsValid(tup)) {
if (PointerIsValid(tup))
{
char *name;
name = (((Form_pg_class) GETSTRUCT(tup))->relname).data;
@ -228,6 +246,7 @@ AttributeAndRelationRemove(Oid typeOid)
heap_endscan(sdesc);
heap_close(rdesc);
}
#endif /* NOTYET */
/*
@ -266,7 +285,8 @@ RemoveType(char *typeName) /* type name to be removed */
scan = heap_beginscan(relation, 0, NowTimeQual, 1, typeKey);
tup = heap_getnext(scan, 0, (Buffer *) 0);
if (!HeapTupleIsValid(tup)) {
if (!HeapTupleIsValid(tup))
{
heap_endscan(scan);
heap_close(relation);
elog(WARN, "RemoveType: type '%s' does not exist",
@ -328,18 +348,21 @@ RemoveFunction(char *functionName, /* function name to be removed */
int i;
memset(argList, 0, 8 * sizeof(Oid));
for (i=0; i<nargs; i++) {
for (i = 0; i < nargs; i++)
{
/* typename = ((TypeName*)(lfirst(argNameList)))->name; */
typename = strVal(lfirst(argNameList));
argNameList = lnext(argNameList);
if (strcmp(typename, "opaque") == 0)
argList[i] = 0;
else {
else
{
tup = SearchSysCacheTuple(TYPNAME, PointerGetDatum(typename),
0, 0, 0);
if (!HeapTupleIsValid(tup)) {
if (!HeapTupleIsValid(tup))
{
elog(WARN, "RemoveFunction: type '%s' not found", typename);
}
argList[i] = tup->t_oid;
@ -354,7 +377,8 @@ RemoveFunction(char *functionName, /* function name to be removed */
#ifndef NO_SECURITY
userName = GetPgUserName();
if (!pg_func_ownercheck(userName, functionName, nargs, argList)) {
if (!pg_func_ownercheck(userName, functionName, nargs, argList))
{
elog(WARN, "RemoveFunction: function '%s': permission denied",
functionName);
}
@ -367,8 +391,10 @@ RemoveFunction(char *functionName, /* function name to be removed */
relation = heap_openr(ProcedureRelationName);
scan = heap_beginscan(relation, 0, NowTimeQual, 1, key);
do { /* hope this is ok because it's indexed */
if (bufferUsed) {
do
{ /* hope this is ok because it's indexed */
if (bufferUsed)
{
ReleaseBuffer(buffer);
bufferUsed = FALSE;
}
@ -416,21 +442,25 @@ RemoveAggregate(char *aggName, char *aggType)
/*
* if a basetype is passed in, then attempt to find an aggregate for that
* specific type.
* if a basetype is passed in, then attempt to find an aggregate for
* that specific type.
*
* else if the basetype is blank, then attempt to find an aggregate with a
* basetype of zero. This is valid. It means that the aggregate is to apply
* to all basetypes. ie, a counter of some sort.
* else if the basetype is blank, then attempt to find an aggregate with
* a basetype of zero. This is valid. It means that the aggregate is
* to apply to all basetypes. ie, a counter of some sort.
*
*/
if (aggType) {
if (aggType)
{
basetypeID = TypeGet(aggType, &defined);
if (!OidIsValid(basetypeID)) {
if (!OidIsValid(basetypeID))
{
elog(WARN, "RemoveAggregate: type '%s' does not exist", aggType);
}
} else {
}
else
{
basetypeID = 0;
}
@ -438,11 +468,15 @@ RemoveAggregate(char *aggName, char *aggType)
#ifndef NO_SECURITY
*/
userName = GetPgUserName();
if (!pg_aggr_ownercheck(userName, aggName, basetypeID)) {
if (aggType) {
if (!pg_aggr_ownercheck(userName, aggName, basetypeID))
{
if (aggType)
{
elog(WARN, "RemoveAggregate: aggregate '%s' on type '%s': permission denied",
aggName, aggType);
} else {
}
else
{
elog(WARN, "RemoveAggregate: aggregate '%s': permission denied",
aggName);
}
@ -464,13 +498,17 @@ RemoveAggregate(char *aggName, char *aggType)
relation = heap_openr(AggregateRelationName);
scan = heap_beginscan(relation, 0, NowTimeQual, 2, aggregateKey);
tup = heap_getnext(scan, 0, (Buffer *) 0);
if (!HeapTupleIsValid(tup)) {
if (!HeapTupleIsValid(tup))
{
heap_endscan(scan);
heap_close(relation);
if (aggType) {
if (aggType)
{
elog(WARN, "RemoveAggregate: aggregate '%s' for '%s' does not exist",
aggName, aggType);
} else {
}
else
{
elog(WARN, "RemoveAggregate: aggregate '%s' for all types does not exist",
aggName);
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.7 1997/08/18 20:52:18 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.8 1997/09/07 04:40:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -66,8 +66,11 @@ renameatt(char *relname,
char *userName,
int recurse)
{
Relation relrdesc, attrdesc;
HeapTuple reltup, oldatttup, newatttup;
Relation relrdesc,
attrdesc;
HeapTuple reltup,
oldatttup,
newatttup;
ItemPointerData oldTID;
Relation idescs[Num_pg_attr_indices];
@ -89,19 +92,23 @@ renameatt(char *relname,
/*
* if the 'recurse' flag is set then we are supposed to rename this
* attribute in all classes that inherit from 'relname' (as well as
* in 'relname').
* attribute in all classes that inherit from 'relname' (as well as in
* 'relname').
*
* any permissions or problems with duplicate attributes will cause
* the whole transaction to abort, which is what we want -- all or
* any permissions or problems with duplicate attributes will cause the
* whole transaction to abort, which is what we want -- all or
* nothing.
*/
if (recurse) {
Oid myrelid, childrelid;
List *child, *children;
if (recurse)
{
Oid myrelid,
childrelid;
List *child,
*children;
relrdesc = heap_openr(relname);
if (!RelationIsValid(relrdesc)) {
if (!RelationIsValid(relrdesc))
{
elog(WARN, "renameatt: unknown relation: \"%s\"",
relname);
}
@ -114,17 +121,19 @@ renameatt(char *relname,
/*
* find_all_inheritors does the recursive search of the
* inheritance hierarchy, so all we have to do is process
* all of the relids in the list that it returns.
* inheritance hierarchy, so all we have to do is process all of
* the relids in the list that it returns.
*/
foreach (child, children) {
foreach(child, children)
{
char *childname;
childrelid = lfirsti(child);
if (childrelid == myrelid)
continue;
relrdesc = heap_open(childrelid);
if (!RelationIsValid(relrdesc)) {
if (!RelationIsValid(relrdesc))
{
elog(WARN, "renameatt: can't find catalog entry for inheriting class with oid %d",
childrelid);
}
@ -137,7 +146,8 @@ renameatt(char *relname,
relrdesc = heap_openr(RelationRelationName);
reltup = ClassNameIndexScan(relrdesc, relname);
if (!PointerIsValid(reltup)) {
if (!PointerIsValid(reltup))
{
heap_close(relrdesc);
elog(WARN, "renameatt: relation \"%s\" nonexistent",
relname);
@ -147,18 +157,21 @@ renameatt(char *relname,
attrdesc = heap_openr(AttributeRelationName);
oldatttup = AttributeNameIndexScan(attrdesc, reltup->t_oid, oldattname);
if (!PointerIsValid(oldatttup)) {
if (!PointerIsValid(oldatttup))
{
heap_close(attrdesc);
elog(WARN, "renameatt: attribute \"%s\" nonexistent",
oldattname);
}
if (((AttributeTupleForm ) GETSTRUCT(oldatttup))->attnum < 0) {
if (((AttributeTupleForm) GETSTRUCT(oldatttup))->attnum < 0)
{
elog(WARN, "renameatt: system attribute \"%s\" not renamed",
oldattname);
}
newatttup = AttributeNameIndexScan(attrdesc, reltup->t_oid, newattname);
if (PointerIsValid(newatttup)) {
if (PointerIsValid(newatttup))
{
pfree(oldatttup);
heap_close(attrdesc);
elog(WARN, "renameatt: attribute \"%s\" exists",
@ -201,17 +214,21 @@ void
renamerel(char oldrelname[], char newrelname[])
{
Relation relrdesc; /* for RELATION relation */
HeapTuple oldreltup, newreltup;
HeapTuple oldreltup,
newreltup;
ItemPointerData oldTID;
char oldpath[MAXPGPATH], newpath[MAXPGPATH];
char oldpath[MAXPGPATH],
newpath[MAXPGPATH];
Relation idescs[Num_pg_class_indices];
if (IsSystemRelationName(oldrelname)) {
if (IsSystemRelationName(oldrelname))
{
elog(WARN, "renamerel: system relation \"%s\" not renamed",
oldrelname);
return;
}
if (IsSystemRelationName(newrelname)) {
if (IsSystemRelationName(newrelname))
{
elog(WARN, "renamerel: Illegal class name: \"%s\" -- pg_ is reserved for system catalogs",
newrelname);
return;
@ -220,14 +237,16 @@ renamerel(char oldrelname[], char newrelname[])
relrdesc = heap_openr(RelationRelationName);
oldreltup = ClassNameIndexScan(relrdesc, oldrelname);
if (!PointerIsValid(oldreltup)) {
if (!PointerIsValid(oldreltup))
{
heap_close(relrdesc);
elog(WARN, "renamerel: relation \"%s\" does not exist",
oldrelname);
}
newreltup = ClassNameIndexScan(relrdesc, newrelname);
if (PointerIsValid(newreltup)) {
if (PointerIsValid(newreltup))
{
pfree(oldreltup);
heap_close(relrdesc);
elog(WARN, "renamerel: relation \"%s\" exists",


@ -26,7 +26,8 @@
bool ItsSequenceCreation = false;
typedef struct FormData_pg_sequence {
typedef struct FormData_pg_sequence
{
NameData sequence_name;
int4 last_value;
int4 increment_by;
@ -39,11 +40,13 @@ typedef struct FormData_pg_sequence {
typedef FormData_pg_sequence *SequenceTupleForm;
typedef struct sequence_magic {
typedef struct sequence_magic
{
uint32 magic;
} sequence_magic;
typedef struct SeqTableData {
typedef struct SeqTableData
{
char *name;
Oid relid;
Relation rel;
@ -157,7 +160,10 @@ DefineSequence (CreateSeqStmt *seq)
DefineRelation(stmt);
/* Xact abort calls CloseSequences, which turns ItsSequenceCreation off */
/*
* Xact abort calls CloseSequences, which turns ItsSequenceCreation
* off
*/
ItsSequenceCreation = false;/* hack */
rel = heap_openr(seq->seqname);
@ -202,8 +208,13 @@ nextval (struct varlena * seqin)
Buffer buf;
SequenceTupleForm seq;
ItemPointerData iptr;
int4 incby, maxv, minv, cache;
int4 result, next, rescnt = 0;
int4 incby,
maxv,
minv,
cache;
int4 result,
next,
rescnt = 0;
/* open and WIntentLock sequence */
elm = init_sequence("nextval", seqname);
@ -215,7 +226,8 @@ nextval (struct varlena * seqin)
return (elm->last);
}
seq = read_info ("nextval", elm, &buf); /* lock page and read tuple */
seq = read_info("nextval", elm, &buf); /* lock page and read
* tuple */
next = result = seq->last_value;
incby = seq->increment_by;
@ -228,9 +240,10 @@ nextval (struct varlena * seqin)
while (rescnt < cache) /* try to fetch cache numbers */
{
/*
* Check MAXVALUE for ascending sequences
* and MINVALUE for descending sequences
* Check MAXVALUE for ascending sequences and MINVALUE for
* descending sequences
*/
if (incby > 0) /* ascending sequence */
{
@ -247,7 +260,8 @@ nextval (struct varlena * seqin)
else
next += incby;
}
else /* descending sequence */
else
/* descending sequence */
{
if ((minv < 0 && next < minv - incby) ||
(minv >= 0 && next + incby < minv))
@ -351,7 +365,8 @@ read_info (char * caller, SeqTable elm, Buffer * buf)
static SeqTable
init_sequence(char *caller, char *name)
{
SeqTable elm, priv = (SeqTable) NULL;
SeqTable elm,
priv = (SeqTable) NULL;
SeqTable temp;
for (elm = seqtab; elm != (SeqTable) NULL;)
@ -371,7 +386,8 @@ init_sequence (char * caller, char * name)
temp->cached = temp->last = temp->increment = 0;
temp->next = (SeqTable) NULL;
}
else /* found */
else
/* found */
{
if (elm->rel != (Relation) NULL) /* already opened */
return (elm);
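The nextval() loop above guards MAXVALUE/MINVALUE without ever computing a value past the bound: for a descending sequence it tests next against minv - incby, or next + incby against minv, depending on the sign of minv, so the addition that could overflow is only done when it is safe. Here is a minimal standalone sketch of the ascending side of that test; the helper name is made up for the example, and the exact ascending form is inferred by symmetry from the descending test shown in the hunk, so treat it as an assumption rather than a literal quote.

#include <stdio.h>

/*
 * Ascending-sequence bound check: report whether next + incby would pass
 * maxv, without performing an addition that could overflow.  incby > 0.
 */
static int
next_would_pass_max(int next, int incby, int maxv)
{
    return (maxv >= 0 && next > maxv - incby) ||
           (maxv < 0 && next + incby > maxv);
}

int
main(void)
{
    /* near INT_MAX the naive "next + incby > maxv" test would overflow */
    printf("%d\n", next_would_pass_max(2147483640, 10, 2147483647));    /* 1 */
    printf("%d\n", next_would_pass_max(5, 10, 2147483647));             /* 0 */
    return 0;
}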


@ -116,6 +116,7 @@ CreateTrigger (CreateTrigStmt *stmt)
while (tuple = heap_getnext(tgscan, 0, (Buffer *) NULL), PointerIsValid(tuple))
{
Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
if (namestrcmp(&(pg_trigger->tgname), stmt->trigname) == 0)
elog(WARN, "CreateTrigger: trigger %s already defined on relation %s",
stmt->trigname, stmt->relname);
@ -152,6 +153,7 @@ CreateTrigger (CreateTrigStmt *stmt)
foreach(le, stmt->args)
{
char *ar = (char *) lfirst(le);
len += strlen(ar) + 4;
}
args = (char *) palloc(len + 1);
@ -242,6 +244,7 @@ DropTrigger (DropTrigStmt *stmt)
while (tuple = heap_getnext(tgscan, 0, (Buffer *) NULL), PointerIsValid(tuple))
{
Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
if (namestrcmp(&(pg_trigger->tgname), stmt->trigname) == 0)
{
heap_delete(tgrel, &tuple->t_ctid);
@ -481,9 +484,11 @@ static void
DescribeTrigger(TriggerDesc * trigdesc, Trigger * trigger)
{
uint16 *n;
Trigger ***t, ***tp;
Trigger ***t,
***tp;
if ( TRIGGER_FOR_ROW (trigger->tgtype) ) /* Is ROW/STATEMENT trigger */
if (TRIGGER_FOR_ROW(trigger->tgtype)) /* Is ROW/STATEMENT
* trigger */
{
if (TRIGGER_FOR_BEFORE(trigger->tgtype))
{
@ -496,7 +501,8 @@ DescribeTrigger (TriggerDesc *trigdesc, Trigger *trigger)
t = trigdesc->tg_after_row;
}
}
else /* STATEMENT (NI) */
else
/* STATEMENT (NI) */
{
if (TRIGGER_FOR_BEFORE(trigger->tgtype))
{


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.42 1997/08/22 04:13:08 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.43 1997/09/07 04:41:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -115,9 +115,9 @@ vacuum(char *vacrel, bool verbose, bool analyze, List *va_spec)
/*
* Create a portal for safe memory across transctions. We need to
* palloc the name space for it because our hash function expects
* the name to be on a longword boundary. CreatePortal copies the
* name to safe storage for us.
* palloc the name space for it because our hash function expects the
* name to be on a longword boundary. CreatePortal copies the name to
* safe storage for us.
*/
pname = (char *) palloc(strlen(VACPNAME) + 1);
strcpy(pname, VACPNAME);
@ -238,7 +238,8 @@ vc_abort()
static void
vc_vacuum(NameData * VacRelP, bool analyze, List * va_cols)
{
VRelList vrl, cur;
VRelList vrl,
cur;
/* get list of relations */
vrl = vc_getrels(VacRelP);
@ -263,7 +264,8 @@ vc_getrels(NameData *VacRelP)
Buffer buf;
PortalVariableMemory portalmem;
MemoryContext old;
VRelList vrl, cur;
VRelList vrl,
cur;
Datum d;
char *rname;
char rkind;
@ -274,11 +276,14 @@ vc_getrels(NameData *VacRelP)
StartTransactionCommand();
if (VacRelP->data) {
if (VacRelP->data)
{
ScanKeyEntryInitialize(&pgckey, 0x0, Anum_pg_class_relname,
NameEqualRegProcedure,
PointerGetDatum(VacRelP->data));
} else {
}
else
{
ScanKeyEntryInitialize(&pgckey, 0x0, Anum_pg_class_relkind,
CharacterEqualRegProcedure, CharGetDatum('r'));
}
@ -291,15 +296,16 @@ vc_getrels(NameData *VacRelP)
pgcscan = heap_beginscan(pgclass, false, NowTimeQual, 1, &pgckey);
while (HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &buf))) {
while (HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &buf)))
{
found = true;
/*
* We have to be careful not to vacuum the archive (since it
* already contains vacuumed tuples), and not to vacuum
* relations on write-once storage managers like the Sony
* jukebox at Berkeley.
* already contains vacuumed tuples), and not to vacuum relations
* on write-once storage managers like the Sony jukebox at
* Berkeley.
*/
d = (Datum) heap_getattr(pgctup, buf, Anum_pg_class_relname,
@ -307,12 +313,16 @@ vc_getrels(NameData *VacRelP)
rname = (char *) d;
/* skip archive relations */
if (vc_isarchrel(rname)) {
if (vc_isarchrel(rname))
{
ReleaseBuffer(buf);
continue;
}
/* don't vacuum large objects for now - something breaks when we do */
/*
* don't vacuum large objects for now - something breaks when we
* do
*/
if ((strlen(rname) >= 5) && rname[0] == 'x' &&
rname[1] == 'i' && rname[2] == 'n' &&
(rname[3] == 'v' || rname[3] == 'x') &&
@ -329,7 +339,8 @@ vc_getrels(NameData *VacRelP)
smgrno = DatumGetInt16(d);
/* skip write-once storage managers */
if (smgriswo(smgrno)) {
if (smgriswo(smgrno))
{
ReleaseBuffer(buf);
continue;
}
@ -340,7 +351,8 @@ vc_getrels(NameData *VacRelP)
rkind = DatumGetChar(d);
/* skip system relations */
if (rkind != 'r') {
if (rkind != 'r')
{
ReleaseBuffer(buf);
elog(NOTICE, "Vacuum: can not process index and certain system tables");
continue;
@ -348,9 +360,12 @@ vc_getrels(NameData *VacRelP)
/* get a relation list entry for this guy */
old = MemoryContextSwitchTo((MemoryContext) portalmem);
if (vrl == (VRelList) NULL) {
if (vrl == (VRelList) NULL)
{
vrl = cur = (VRelList) palloc(sizeof(VRelListData));
} else {
}
else
{
cur->vrl_next = (VRelList) palloc(sizeof(VRelListData));
cur = cur->vrl_next;
}
@ -391,16 +406,20 @@ vc_vacone (Oid relid, bool analyze, List *va_cols)
{
Relation pgclass;
TupleDesc pgcdesc;
HeapTuple pgctup, pgttup;
HeapTuple pgctup,
pgttup;
Buffer pgcbuf;
HeapScanDesc pgcscan;
Relation onerel;
ScanKeyData pgckey;
VPageListData Vvpl; /* List of pages to vacuum and/or clean indices */
VPageListData Fvpl; /* List of pages with space enough for re-using */
VPageListData Vvpl; /* List of pages to vacuum and/or clean
* indices */
VPageListData Fvpl; /* List of pages with space enough for
* re-using */
VPageDescr *vpp;
Relation *Irel;
int32 nindices, i;
int32 nindices,
i;
VRelStats *vacrelstats;
StartTransactionCommand();
@ -418,7 +437,8 @@ vc_vacone (Oid relid, bool analyze, List *va_cols)
* last time we saw it, we don't need to vacuum it.
*/
if (!HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &pgcbuf))) {
if (!HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &pgcbuf)))
{
heap_endscan(pgcscan);
heap_close(pgclass);
CommitTransactionCommand();
@ -434,7 +454,8 @@ vc_vacone (Oid relid, bool analyze, List *va_cols)
vacrelstats->hasindex = false;
if (analyze && !IsSystemRelationName((RelationGetRelationName(onerel))->data))
{
int attr_cnt, *attnums = NULL;
int attr_cnt,
*attnums = NULL;
AttributeTupleForm *attr;
attr_cnt = onerel->rd_att->natts;
@ -565,7 +586,8 @@ vc_vacone (Oid relid, bool analyze, List *va_cols)
for (i = 0; i < nindices; i++)
vc_vaconeind(&Vvpl, Irel[i], vacrelstats->ntups);
}
else /* just scan indices to update statistic */
else
/* just scan indices to update statistic */
{
for (i = 0; i < nindices; i++)
vc_scanoneind(Irel[i], vacrelstats->ntups);
@ -620,22 +642,38 @@ static void
vc_scanheap(VRelStats * vacrelstats, Relation onerel,
VPageList Vvpl, VPageList Fvpl)
{
int nblocks, blkno;
int nblocks,
blkno;
ItemId itemid;
ItemPointer itemptr;
HeapTuple htup;
Buffer buf;
Page page, tempPage = NULL;
OffsetNumber offnum, maxoff;
bool pgchanged, tupgone, dobufrel, notup;
Page page,
tempPage = NULL;
OffsetNumber offnum,
maxoff;
bool pgchanged,
tupgone,
dobufrel,
notup;
char *relname;
VPageDescr vpc, vp;
uint32 nvac, ntups, nunused, ncrash, nempg, nnepg, nchpg, nemend;
Size frsize, frsusf;
VPageDescr vpc,
vp;
uint32 nvac,
ntups,
nunused,
ncrash,
nempg,
nnepg,
nchpg,
nemend;
Size frsize,
frsusf;
Size min_tlen = MAXTUPLEN;
Size max_tlen = 0;
int32 i /* , attr_cnt */ ;
struct rusage ru0, ru1;
struct rusage ru0,
ru1;
bool do_shrinking = true;
getrusage(RUSAGE_SELF, &ru0);
@ -650,13 +688,15 @@ vc_scanheap (VRelStats *vacrelstats, Relation onerel,
vpc = (VPageDescr) palloc(sizeof(VPageDescrData) + MaxOffsetNumber * sizeof(OffsetNumber));
vpc->vpd_nusd = 0;
for (blkno = 0; blkno < nblocks; blkno++) {
for (blkno = 0; blkno < nblocks; blkno++)
{
buf = ReadBuffer(onerel, blkno);
page = BufferGetPage(buf);
vpc->vpd_blkno = blkno;
vpc->vpd_noff = 0;
if (PageIsNew(page)) {
if (PageIsNew(page))
{
elog(NOTICE, "Rel %s: Uninitialized page %u - fixing",
relname, blkno);
PageInit(page, BufferGetPageSize(buf), 0);
@ -669,7 +709,8 @@ vc_scanheap (VRelStats *vacrelstats, Relation onerel,
continue;
}
if (PageIsEmpty(page)) {
if (PageIsEmpty(page))
{
vpc->vpd_free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
frsize += (vpc->vpd_free - sizeof(ItemIdData));
nempg++;
@ -684,14 +725,16 @@ vc_scanheap (VRelStats *vacrelstats, Relation onerel,
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
offnum <= maxoff;
offnum = OffsetNumberNext(offnum)) {
offnum = OffsetNumberNext(offnum))
{
itemid = PageGetItemId(page, offnum);
/*
* Collect un-used items too - it's possible to have
* indices pointing here after crash.
* Collect un-used items too - it's possible to have indices
* pointing here after crash.
*/
if (!ItemIdIsUsed(itemid)) {
if (!ItemIdIsUsed(itemid))
{
vpc->vpd_voff[vpc->vpd_noff++] = offnum;
nunused++;
continue;
@ -701,17 +744,24 @@ vc_scanheap (VRelStats *vacrelstats, Relation onerel,
tupgone = false;
if (!AbsoluteTimeIsBackwardCompatiblyValid(htup->t_tmin) &&
TransactionIdIsValid((TransactionId)htup->t_xmin)) {
TransactionIdIsValid((TransactionId) htup->t_xmin))
{
if (TransactionIdDidAbort(htup->t_xmin)) {
if (TransactionIdDidAbort(htup->t_xmin))
{
tupgone = true;
} else if (TransactionIdDidCommit(htup->t_xmin)) {
}
else if (TransactionIdDidCommit(htup->t_xmin))
{
htup->t_tmin = TransactionIdGetCommitTime(htup->t_xmin);
pgchanged = true;
} else if ( !TransactionIdIsInProgress (htup->t_xmin) ) {
}
else if (!TransactionIdIsInProgress(htup->t_xmin))
{
/*
* Not Aborted, Not Committed, Not in Progress -
* so it's from a crashed process. - vadim 11/26/96
* Not Aborted, Not Committed, Not in Progress - so it's
* from a crashed process. - vadim 11/26/96
*/
ncrash++;
tupgone = true;
@ -733,10 +783,12 @@ vc_scanheap (VRelStats *vacrelstats, Relation onerel,
}
else if (TransactionIdDidCommit(htup->t_xmax))
tupgone = true;
else if ( !TransactionIdIsInProgress (htup->t_xmax) ) {
else if (!TransactionIdIsInProgress(htup->t_xmax))
{
/*
* Not Aborted, Not Committed, Not in Progress -
* so it's from a crashed process. - vadim 06/02/97
* Not Aborted, Not Committed, Not in Progress - so it's
* from a crashed process. - vadim 06/02/97
*/
StoreInvalidTransactionId(&(htup->t_xmax));
pgchanged = true;
@ -762,8 +814,8 @@ DELETE_TRANSACTION_ID_VALID %d, TUPGONE %d.",
}
/*
* It's possible! But where does it come from?
* And should we fix it? - vadim 11/28/96
* It's possible! But where does it come from? And should we fix
* it? - vadim 11/28/96
*/
itemptr = &(htup->t_ctid);
if (!ItemPointerIsValid(itemptr) ||
@ -790,7 +842,8 @@ DELETE_TRANSACTION_ID_VALID %d, TUPGONE %d.",
relname, blkno, offnum, tupgone);
}
if (tupgone) {
if (tupgone)
{
ItemId lpp;
if (tempPage == (Page) NULL)
@ -810,7 +863,9 @@ DELETE_TRANSACTION_ID_VALID %d, TUPGONE %d.",
vpc->vpd_voff[vpc->vpd_noff++] = offnum;
nvac++;
} else {
}
else
{
ntups++;
notup = false;
if (htup->t_len < min_tlen)
@ -821,7 +876,8 @@ DELETE_TRANSACTION_ID_VALID %d, TUPGONE %d.",
}
}
if (pgchanged) {
if (pgchanged)
{
WriteBuffer(buf);
dobufrel = false;
nchpg++;
@ -866,8 +922,8 @@ DELETE_TRANSACTION_ID_VALID %d, TUPGONE %d.",
Fvpl->vpl_nemend = nemend;
/*
* Try to make Fvpl, keeping in mind that we can't use the free space
* of "empty" end-pages, or of the last page if it was reaped.
* Try to make Fvpl, keeping in mind that we can't use the free space of
* "empty" end-pages, or of the last page if it was reaped.
*/
if (do_shrinking && Vvpl->vpl_npages - nemend > 0)
{
@ -919,27 +975,47 @@ vc_rpfheap (VRelStats *vacrelstats, Relation onerel,
TransactionId myXID;
CommandId myCID;
AbsoluteTime myCTM = 0;
Buffer buf, ToBuf;
int nblocks, blkno;
Page page, ToPage = NULL;
OffsetNumber offnum = 0, maxoff = 0, newoff, moff;
ItemId itemid, newitemid;
HeapTuple htup, newtup;
Buffer buf,
ToBuf;
int nblocks,
blkno;
Page page,
ToPage = NULL;
OffsetNumber offnum = 0,
maxoff = 0,
newoff,
moff;
ItemId itemid,
newitemid;
HeapTuple htup,
newtup;
TupleDesc tupdesc = NULL;
Datum *idatum = NULL;
char *inulls = NULL;
InsertIndexResult iresult;
VPageListData Nvpl;
VPageDescr ToVpd = NULL, Fvplast, Vvplast, vpc, *vpp;
VPageDescr ToVpd = NULL,
Fvplast,
Vvplast,
vpc,
*vpp;
int ToVpI = 0;
IndDesc *Idesc, *idcur;
int Fblklast, Vblklast, i;
IndDesc *Idesc,
*idcur;
int Fblklast,
Vblklast,
i;
Size tlen;
int nmoved, Fnpages, Vnpages;
int nchkmvd, ntups;
bool isempty, dowrite;
int nmoved,
Fnpages,
Vnpages;
int nchkmvd,
ntups;
bool isempty,
dowrite;
Relation archrel;
struct rusage ru0, ru1;
struct rusage ru0,
ru1;
getrusage(RUSAGE_SELF, &ru0);
@ -1065,6 +1141,7 @@ vc_rpfheap (VRelStats *vacrelstats, Relation onerel,
{
WriteBuffer(ToBuf);
ToBuf = InvalidBuffer;
/*
* If not even one tuple can be added to this page -
* remove the page from Fvpl. - vadim 11/27/96
@ -1160,7 +1237,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
inulls,
&(newtup->t_ctid),
onerel);
if (iresult) pfree(iresult);
if (iresult)
pfree(iresult);
}
}
@ -1191,11 +1269,12 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
if (nmoved > 0)
{
/*
* We have to commit our tuple moves before we truncate the
* relation, but we shouldn't lose our locks. And so - quick hack:
* flush buffers and record status of current transaction
* as committed, and continue. - vadim 11/13/96
* flush buffers and record status of current transaction as
* committed, and continue. - vadim 11/13/96
*/
FlushBufferPool(!TransactionFlushEnabled());
TransactionIdCommit(myXID);
@ -1204,8 +1283,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
}
/*
* Clean uncleaned reaped pages from the Vvpl list
* and set commit times for inserted tuples
* Clean uncleaned reaped pages from the Vvpl list and set commit times
* for inserted tuples
*/
nchkmvd = 0;
for (i = 0, vpp = Vvpl->vpl_pgdesc; i < Vnpages; i++, vpp++)
@ -1215,11 +1294,16 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
page = BufferGetPage(buf);
if ((*vpp)->vpd_nusd == 0) /* this page was not used */
{
/* noff == 0 in empty pages only - such pages should be re-used */
/*
* noff == 0 in empty pages only - such pages should be
* re-used
*/
Assert((*vpp)->vpd_noff > 0);
vc_vacpage(page, *vpp, archrel);
}
else /* this page was used */
else
/* this page was used */
{
ntups = 0;
moff = PageGetMaxOffsetNumber(page);
@ -1258,22 +1342,26 @@ Elapsed %u/%u sec.",
/* vacuum indices again if needed */
if (Irel != (Relation *) NULL)
{
VPageDescr *vpleft, *vpright, vpsave;
VPageDescr *vpleft,
*vpright,
vpsave;
/* re-sort Nvpl.vpl_pgdesc */
for (vpleft = Nvpl.vpl_pgdesc,
vpright = Nvpl.vpl_pgdesc + Nvpl.vpl_npages - 1;
vpleft < vpright; vpleft++, vpright--)
{
vpsave = *vpleft; *vpleft = *vpright; *vpright = vpsave;
vpsave = *vpleft;
*vpleft = *vpright;
*vpright = vpsave;
}
for (i = 0; i < nindices; i++)
vc_vaconeind(&Nvpl, Irel[i], vacrelstats->ntups);
}
/*
* clean moved tuples from last page in Nvpl list
* if some tuples left there
* clean moved tuples from last page in Nvpl list if some tuples
* left there
*/
if (vpc->vpd_noff > 0 && offnum <= maxoff)
{
@ -1376,8 +1464,8 @@ vc_vacheap (VRelStats *vacrelstats, Relation onerel, VPageList Vvpl)
vacrelstats->npages, nblocks);
/*
* we have to flush "empty" end-pages (if changed, but who knows it)
* before truncation
* we have to flush "empty" end-pages (if changed, but who knows
* it) before truncation
*/
FlushBufferPool(!TransactionFlushEnabled());
@ -1428,7 +1516,8 @@ vc_scanoneind (Relation indrel, int nhtups)
IndexScanDesc iscan;
int nitups;
int nipages;
struct rusage ru0, ru1;
struct rusage ru0,
ru1;
getrusage(RUSAGE_SELF, &ru0);
@ -1484,7 +1573,8 @@ vc_vaconeind(VPageList vpl, Relation indrel, int nhtups)
int nitups;
int nipages;
VPageDescr vp;
struct rusage ru0, ru1;
struct rusage ru0,
ru1;
getrusage(RUSAGE_SELF, &ru0);
@ -1494,7 +1584,8 @@ vc_vaconeind(VPageList vpl, Relation indrel, int nhtups)
nitups = 0;
while ((res = index_getnext(iscan, ForwardScanDirection))
!= (RetrieveIndexResult) NULL) {
!= (RetrieveIndexResult) NULL)
{
heapptr = &res->heap_iptr;
if ((vp = vc_tidreapped(heapptr, vpl)) != (VPageDescr) NULL)
@ -1514,7 +1605,9 @@ vc_vaconeind(VPageList vpl, Relation indrel, int nhtups)
}
++nvac;
index_delete(indrel, &res->index_iptr);
} else {
}
else
{
nitups++;
}
@ -1551,7 +1644,8 @@ vc_tidreapped(ItemPointer itemptr, VPageList vpl)
{
OffsetNumber ioffno;
OffsetNumber *voff;
VPageDescr vp, *vpp;
VPageDescr vp,
*vpp;
VPageDescrData vpd;
vpd.vpd_blkno = ItemPointerGetBlockNumber(itemptr);
@ -1568,7 +1662,8 @@ vc_tidreapped(ItemPointer itemptr, VPageList vpl)
/* ok - we are on true page */
if ( vp->vpd_noff == 0 ) { /* this is EmptyPage !!! */
if (vp->vpd_noff == 0)
{ /* this is EmptyPage !!! */
return (vp);
}
@ -1607,13 +1702,15 @@ vc_tidreapped(ItemPointer itemptr, VPageList vpl)
static void
vc_attrstats(Relation onerel, VRelStats * vacrelstats, HeapTuple htup)
{
int i, attr_cnt = vacrelstats->va_natts;
int i,
attr_cnt = vacrelstats->va_natts;
VacAttrStats *vacattrstats = vacrelstats->vacattrstats;
TupleDesc tupDesc = onerel->rd_att;
Datum value;
bool isnull;
for (i = 0; i < attr_cnt; i++) {
for (i = 0; i < attr_cnt; i++)
{
VacAttrStats *stats = &vacattrstats[i];
bool value_hit = true;
@ -1625,27 +1722,33 @@ vc_attrstats(Relation onerel, VRelStats *vacrelstats, HeapTuple htup)
if (isnull)
stats->null_cnt++;
else {
else
{
stats->nonnull_cnt++;
if (stats->initialized == false) {
if (stats->initialized == false)
{
vc_bucketcpy(stats->attr, value, &stats->best, &stats->best_len);
/* best_cnt gets incremented later */
vc_bucketcpy(stats->attr, value, &stats->guess1, &stats->guess1_len);
stats->guess1_cnt = stats->guess1_hits = 1;
vc_bucketcpy(stats->attr, value, &stats->guess2, &stats->guess2_len);
stats->guess2_hits = 1;
if (VacAttrStatsLtGtValid(stats)) {
if (VacAttrStatsLtGtValid(stats))
{
vc_bucketcpy(stats->attr, value, &stats->max, &stats->max_len);
vc_bucketcpy(stats->attr, value, &stats->min, &stats->min_len);
}
stats->initialized = true;
}
if (VacAttrStatsLtGtValid(stats)) {
if ( (*(stats->f_cmplt)) (value,stats->min) ) {
if (VacAttrStatsLtGtValid(stats))
{
if ((*(stats->f_cmplt)) (value, stats->min))
{
vc_bucketcpy(stats->attr, value, &stats->min, &stats->min_len);
stats->min_cnt = 0;
}
if ( (*(stats->f_cmpgt)) (value,stats->max) ) {
if ((*(stats->f_cmpgt)) (value, stats->max))
{
vc_bucketcpy(stats->attr, value, &stats->max, &stats->max_len);
stats->max_cnt = 0;
}
@ -1656,28 +1759,33 @@ vc_attrstats(Relation onerel, VRelStats *vacrelstats, HeapTuple htup)
}
if ((*(stats->f_cmpeq)) (value, stats->best))
stats->best_cnt++;
else if ( (*(stats->f_cmpeq)) (value,stats->guess1) ) {
else if ((*(stats->f_cmpeq)) (value, stats->guess1))
{
stats->guess1_cnt++;
stats->guess1_hits++;
}
else if ((*(stats->f_cmpeq)) (value, stats->guess2))
stats->guess2_hits++;
else value_hit = false;
else
value_hit = false;
if (stats->guess2_hits > stats->guess1_hits) {
if (stats->guess2_hits > stats->guess1_hits)
{
swapDatum(stats->guess1, stats->guess2);
swapInt(stats->guess1_len, stats->guess2_len);
stats->guess1_cnt = stats->guess2_hits;
swapLong(stats->guess1_hits, stats->guess2_hits);
}
if (stats->guess1_cnt > stats->best_cnt) {
if (stats->guess1_cnt > stats->best_cnt)
{
swapDatum(stats->best, stats->guess1);
swapInt(stats->best_len, stats->guess1_len);
swapLong(stats->best_cnt, stats->guess1_cnt);
stats->guess1_hits = 1;
stats->guess2_hits = 1;
}
if (!value_hit) {
if (!value_hit)
{
vc_bucketcpy(stats->attr, value, &stats->guess2, &stats->guess2_len);
stats->guess1_hits = 1;
stats->guess2_hits = 1;
@ -1696,7 +1804,8 @@ vc_bucketcpy(AttributeTupleForm attr, Datum value, Datum *bucket, int16 *bucket_
{
if (attr->attbyval && attr->attlen != -1)
*bucket = value;
else {
else
{
int len = (attr->attlen != -1 ? attr->attlen : VARSIZE(value));
if (len > *bucket_len)
@ -1724,13 +1833,20 @@ vc_bucketcpy(AttributeTupleForm attr, Datum value, Datum *bucket, int16 *bucket_
static void
vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats * vacrelstats)
{
Relation rd, ad, sd;
HeapScanDesc rsdesc, asdesc;
Relation rd,
ad,
sd;
HeapScanDesc rsdesc,
asdesc;
TupleDesc sdesc;
HeapTuple rtup, atup, stup;
Buffer rbuf, abuf;
HeapTuple rtup,
atup,
stup;
Buffer rbuf,
abuf;
Form_pg_class pgcform;
ScanKeyData rskey, askey;
ScanKeyData rskey,
askey;
AttributeTupleForm attp;
/*
@ -1769,7 +1885,8 @@ vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats *vacrelst
while (HeapTupleIsValid(atup = heap_getnext(asdesc, 0, &abuf)))
{
int i;
float32data selratio; /* average ratio of rows selected for a random constant */
float32data selratio; /* average ratio of rows selected
* for a random constant */
VacAttrStats *stats;
Datum values[Natts_pg_statistic];
char nulls[Natts_pg_statistic];
@ -1789,7 +1906,8 @@ vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats *vacrelst
stats = &(vacattrstats[i]);
/* overwrite the existing statistics in the tuple */
if (VacAttrStatsEqValid(stats)) {
if (VacAttrStatsEqValid(stats))
{
vc_setpagelock(ad, BufferGetBlockNumber(abuf));
@ -1802,14 +1920,19 @@ vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats *vacrelst
max_cnt_d = stats->max_cnt,
null_cnt_d = stats->null_cnt,
nonnullcnt_d = stats->nonnull_cnt; /* prevent overflow */
selratio = (min_cnt_d * min_cnt_d + max_cnt_d * max_cnt_d + null_cnt_d * null_cnt_d) /
(nonnullcnt_d + null_cnt_d) / (nonnullcnt_d + null_cnt_d);
}
else {
else
{
double most = (double) (stats->best_cnt > stats->null_cnt ? stats->best_cnt : stats->null_cnt);
double total = ((double) stats->nonnull_cnt) + ((double) stats->null_cnt);
/* we assume the count of other values is 20%
of the best count in the table */
/*
* we assume the count of other values is 20% of the best
* count in the table
*/
selratio = (most * most + 0.20 * most * (total - most)) / total / total;
}
if (selratio > 1.0)
@ -1819,14 +1942,21 @@ vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats *vacrelst
/* DO PG_STATISTIC INSERTS */
/* doing system relations, especially pg_statistic is a problem */
/*
* doing system relations, especially pg_statistic is a
* problem
*/
if (VacAttrStatsLtGtValid(stats) && stats->initialized /* &&
!IsSystemRelationName(pgcform->relname.data)*/) {
* !IsSystemRelationName(
* pgcform->relname.data)
*/ )
{
func_ptr out_function;
char *out_string;
int dummy;
for (i = 0; i < Natts_pg_statistic; ++i) nulls[i] = ' ';
for (i = 0; i < Natts_pg_statistic; ++i)
nulls[i] = ' ';
/* ----------------
* initialize values[]
@ -1867,8 +1997,10 @@ vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats *vacrelst
/* XXX -- after write, should invalidate relcache in other backends */
WriteNoReleaseBuffer(rbuf); /* heap_endscan release scan' buffers ? */
/* invalidating system relations confuses the function cache
of pg_operator and pg_opclass */
/*
* invalidating system relations confuses the function cache of
* pg_operator and pg_opclass
*/
if (!IsSystemRelationName(pgcform->relname.data))
RelationInvalidateHeapTuple(rd, rtup);
@ -1891,7 +2023,8 @@ vc_delhilowstats(Oid relid, int attcnt, int *attnums)
pgstatistic = heap_openr(StatisticRelationName);
if (relid != InvalidOid ) {
if (relid != InvalidOid)
{
ScanKeyEntryInitialize(&pgskey, 0x0, Anum_pg_statistic_starelid,
ObjectIdEqualRegProcedure,
ObjectIdGetDatum(relid));
@ -1922,7 +2055,8 @@ vc_delhilowstats(Oid relid, int attcnt, int *attnums)
heap_close(pgstatistic);
}
static void vc_setpagelock(Relation rel, BlockNumber blkno)
static void
vc_setpagelock(Relation rel, BlockNumber blkno)
{
ItemPointerData itm;
@ -1983,7 +2117,8 @@ vc_free(VRelList vrl)
pmem = PortalGetVariableMemory(vc_portal);
old = MemoryContextSwitchTo((MemoryContext) pmem);
while (vrl != (VRelList) NULL) {
while (vrl != (VRelList) NULL)
{
/* free rel list entry */
p_vrl = vrl;
@ -2045,7 +2180,8 @@ vc_find_eq (char *bot, int nelem, int size, char *elm, int (*compar)(char *, cha
int res;
int last = nelem - 1;
int celm = nelem / 2;
bool last_move, first_move;
bool last_move,
first_move;
last_move = first_move = true;
for (;;)
@ -2095,7 +2231,8 @@ vc_find_eq (char *bot, int nelem, int size, char *elm, int (*compar)(char *, cha
static int
vc_cmp_blk(char *left, char *right)
{
BlockNumber lblk, rblk;
BlockNumber lblk,
rblk;
lblk = (*((VPageDescr *) left))->vpd_blkno;
rblk = (*((VPageDescr *) right))->vpd_blkno;
@ -2130,7 +2267,8 @@ vc_getindices (Oid relid, int *nindices, Relation **Irel)
HeapTuple pgitup;
HeapScanDesc pgiscan;
Datum d;
int i, k;
int i,
k;
bool n;
ScanKeyData pgikey;
Oid *ioid;
@ -2149,7 +2287,8 @@ vc_getindices (Oid relid, int *nindices, Relation **Irel)
pgiscan = heap_beginscan(pgindex, false, NowTimeQual, 1, &pgikey);
while (HeapTupleIsValid(pgitup = heap_getnext(pgiscan, 0, NULL))) {
while (HeapTupleIsValid(pgitup = heap_getnext(pgiscan, 0, NULL)))
{
d = (Datum) heap_getattr(pgitup, InvalidBuffer, Anum_pg_index_indexrelid,
pgidesc, &n);
i++;
@ -2161,7 +2300,8 @@ vc_getindices (Oid relid, int *nindices, Relation **Irel)
heap_endscan(pgiscan);
heap_close(pgindex);
if ( i == 0 ) { /* No indexes found */
if (i == 0)
{ /* No indexes found */
pfree(ioid);
return;
}
@ -2202,7 +2342,8 @@ vc_clsindices (int nindices, Relation *Irel)
if (Irel == (Relation *) NULL)
return;
while (nindices--) {
while (nindices--)
{
index_close(Irel[nindices]);
}
pfree(Irel);
@ -2221,7 +2362,8 @@ vc_mkindesc (Relation onerel, int nindices, Relation *Irel, IndDesc **Idesc)
*Idesc = (IndDesc *) palloc(nindices * sizeof(IndDesc));
for (i = 0, idcur = *Idesc; i < nindices; i++, idcur++) {
for (i = 0, idcur = *Idesc; i < nindices; i++, idcur++)
{
pgIndexTup =
SearchSysCacheTuple(INDEXRELID,
ObjectIdGetDatum(Irel[i]->rd_id),
@ -2231,13 +2373,15 @@ vc_mkindesc (Relation onerel, int nindices, Relation *Irel, IndDesc **Idesc)
for (attnumP = &(idcur->tform->indkey[0]), natts = 0;
*attnumP != InvalidAttrNumber && natts != INDEX_MAX_KEYS;
attnumP++, natts++);
if (idcur->tform->indproc != InvalidOid) {
if (idcur->tform->indproc != InvalidOid)
{
idcur->finfoP = &(idcur->finfo);
FIgetnArgs(idcur->finfoP) = natts;
natts = 1;
FIgetProcOid(idcur->finfoP) = idcur->tform->indproc;
*(FIgetname(idcur->finfoP)) = '\0';
} else
}
else
idcur->finfoP = (FuncIndexInfo *) NULL;
idcur->natts = natts;


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.8 1997/08/22 14:22:14 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.9 1997/09/07 04:41:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -45,21 +45,24 @@ static void
DefineVirtualRelation(char *relname, List * tlist)
{
CreateStmt createStmt;
List *attrList, *t;
List *attrList,
*t;
TargetEntry *entry;
Resdom *res;
char *resname;
char *restypename;
/*
* create a list with one entry per attribute of this relation.
* Each entry is a two element list. The first element is the
* name of the attribute (a string) and the second the name of the type
* (NOTE: a string, not a type id!).
* create a list with one entry per attribute of this relation. Each
* entry is a two element list. The first element is the name of the
* attribute (a string) and the second the name of the type (NOTE: a
* string, not a type id!).
*/
attrList = NIL;
if (tlist!=NIL) {
foreach (t, tlist ) {
if (tlist != NIL)
{
foreach(t, tlist)
{
ColumnDef *def = makeNode(ColumnDef);
TypeName *typename;
@ -83,13 +86,15 @@ DefineVirtualRelation(char *relname, List *tlist)
attrList = lappend(attrList, def);
}
} else {
}
else
{
elog(WARN, "attempted to define virtual relation with no attrs");
}
/*
* now create the parameters for keys/inheritance etc.
* All of them are nil...
* now create the parameters for keys/inheritance etc. All of them are
* nil...
*/
createStmt.relname = relname;
createStmt.tableElts = attrList;
@ -131,6 +136,7 @@ MakeRetrieveViewRuleName(char *viewName)
*/
char *buf;
buf = palloc(strlen(viewName) + 5);
sprintf(buf, "_RET%s", viewName);
return buf;
@ -144,8 +150,8 @@ FormViewRetrieveRule(char *viewName, Query *viewParse)
Attr *attr;
/*
* Create a RuleStmt that corresponds to the suitable
* rewrite rule args for DefineQueryRewrite();
* Create a RuleStmt that corresponds to the suitable rewrite rule
* args for DefineQueryRewrite();
*/
rule = makeNode(RuleStmt);
rname = MakeRetrieveViewRuleName(viewName);
@ -167,10 +173,12 @@ static void
DefineViewRules(char *viewName, Query * viewParse)
{
RuleStmt *retrieve_rule = NULL;
#ifdef NOTYET
RuleStmt *replace_rule = NULL;
RuleStmt *append_rule = NULL;
RuleStmt *delete_rule = NULL;
#endif
retrieve_rule =
@ -220,7 +228,8 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
{
List *old_rt;
List *new_rt;
RangeTblEntry *rt_entry1, *rt_entry2;
RangeTblEntry *rt_entry1,
*rt_entry2;
/*
* first offset all var nodes by 2
@ -234,9 +243,8 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
old_rt = viewParse->rtable;
/*
* create the 2 new range table entries and form the new
* range table...
* CURRENT first, then NEW....
* create the 2 new range table entries and form the new range
* table... CURRENT first, then NEW....
*/
rt_entry1 =
addRangeTableEntry(NULL, (char *) viewName, "*CURRENT*",
@ -248,9 +256,8 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
new_rt = lcons(rt_entry1, new_rt);
/*
* Now the tricky part....
* Update the range table in place... Be careful here, or
* hell breaks loooooooooooooOOOOOOOOOOOOOOOOOOSE!
* Now the tricky part.... Update the range table in place... Be
* careful here, or hell breaks loooooooooooooOOOOOOOOOOOOOOOOOOSE!
*/
viewParse->rtable = new_rt;
}
@ -275,26 +282,23 @@ DefineView(char *viewName, Query *viewParse)
viewTlist = viewParse->targetList;
/*
* Create the "view" relation
* NOTE: if it already exists, the xaxt will be aborted.
* Create the "view" relation NOTE: if it already exists, the xaxt
* will be aborted.
*/
DefineVirtualRelation(viewName, viewTlist);
/*
* The relation we have just created is not visible
* to any other commands running with the same transaction &
* command id.
* So, increment the command id counter (but do NOT pfree any
* memory!!!!)
* The relation we have just created is not visible to any other
* commands running with the same transaction & command id. So,
* increment the command id counter (but do NOT pfree any memory!!!!)
*/
CommandCounterIncrement();
/*
* The range table of 'viewParse' does not contain entries
* for the "CURRENT" and "NEW" relations.
* So... add them!
* NOTE: we make the update in place! After this call 'viewParse'
* will never be what it used to be...
* The range table of 'viewParse' does not contain entries for the
* "CURRENT" and "NEW" relations. So... add them! NOTE: we make the
* update in place! After this call 'viewParse' will never be what it
* used to be...
*/
UpdateRangeTableOfViewParse(viewName, viewParse);
DefineViewRules(viewName, viewParse);
@ -312,8 +316,7 @@ RemoveView(char *viewName)
char *rname;
/*
* first remove all the "view" rules...
* Currently we only have one!
* first remove all the "view" rules... Currently we only have one!
*/
rname = MakeRetrieveViewRuleName(viewName);
RemoveRewriteRule(rname);


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.5 1997/08/19 21:30:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.6 1997/09/07 04:41:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -43,7 +43,8 @@
#include "access/heapam.h"
#include "catalog/heap.h"
static Pointer ExecBeginScan(Relation relation, int nkeys, ScanKey skeys,
static Pointer
ExecBeginScan(Relation relation, int nkeys, ScanKey skeys,
bool isindex, ScanDirection dir, TimeQual time_range);
static Relation ExecOpenR(Oid relationOid, bool isindex);
@ -107,6 +108,7 @@ static Relation
ExecOpenR(Oid relationOid, bool isindex)
{
Relation relation;
relation = (Relation) NULL;
/* ----------------
@ -114,9 +116,11 @@ ExecOpenR(Oid relationOid, bool isindex)
* on whether this is a heap relation or an index relation.
* ----------------
*/
if (isindex) {
if (isindex)
{
relation = index_open(relationOid);
} else
}
else
relation = heap_open(relationOid);
if (relation == NULL)
@ -157,12 +161,15 @@ ExecBeginScan(Relation relation,
* if you pass it true, then the scan is backward.
* ----------------
*/
if (isindex) {
if (isindex)
{
scanDesc = (Pointer) index_beginscan(relation,
false, /* see above comment */
nkeys,
skeys);
} else {
}
else
{
scanDesc = (Pointer) heap_beginscan(relation,
ScanDirectionIsBackward(dir),
time_range,
@ -198,7 +205,8 @@ ExecCloseR(Plan *node)
* shut down the heap scan and close the heap relation
* ----------------
*/
switch (nodeTag(node)) {
switch (nodeTag(node))
{
case T_SeqScan:
state = ((SeqScan *) node)->scanstate;
@ -239,7 +247,8 @@ ExecCloseR(Plan *node)
* of the index relations as well..
* ----------------
*/
if (nodeTag(node) == T_IndexScan) {
if (nodeTag(node) == T_IndexScan)
{
IndexScan *iscan = (IndexScan *) node;
IndexScanState *indexstate;
int numIndices;
@ -252,7 +261,8 @@ ExecCloseR(Plan *node)
indexRelationDescs = indexstate->iss_RelationDescs;
indexScanDescs = indexstate->iss_ScanDescs;
for (i = 0; i<numIndices; i++) {
for (i = 0; i < numIndices; i++)
{
/* ----------------
* shut down each of the scans and
* close each of the index relations
@ -280,7 +290,8 @@ ExecCloseR(Plan *node)
void
ExecReScan(Plan * node, ExprContext * exprCtxt, Plan * parent)
{
switch(nodeTag(node)) {
switch (nodeTag(node))
{
case T_SeqScan:
ExecSeqReScan((SeqScan *) node, exprCtxt, parent);
return;
@ -290,11 +301,13 @@ ExecReScan(Plan *node, ExprContext *exprCtxt, Plan *parent)
return;
case T_Material:
/* the first call to ExecReScan should have no effect because
/*
* the first call to ExecReScan should have no effect because
* everything is initialized properly already. the following
* calls will be handled by ExecSeqReScan() because the nodes
* below the Material node have already been materialized into
* a temp relation.
* below the Material node have already been materialized into a
* temp relation.
*/
return;
@ -340,7 +353,8 @@ ExecReScanR(Relation relDesc, /* LLL relDesc unused */
void
ExecMarkPos(Plan * node)
{
switch(nodeTag(node)) {
switch (nodeTag(node))
{
case T_SeqScan:
ExecSeqMarkPos((SeqScan *) node);
break;
@ -369,7 +383,8 @@ ExecMarkPos(Plan *node)
void
ExecRestrPos(Plan * node)
{
switch(nodeTag(node)) {
switch (nodeTag(node))
{
case T_SeqScan:
ExecSeqRestrPos((SeqScan *) node);
return;
@ -415,7 +430,8 @@ ExecCreatR(TupleDesc tupType,
relDesc = NULL;
if (relationOid == _TEMP_RELATION_ID_ ) {
if (relationOid == _TEMP_RELATION_ID_)
{
/* ----------------
* create a temporary relation
* (currently the planner always puts a _TEMP_RELATION_ID
@ -428,11 +444,17 @@ ExecCreatR(TupleDesc tupType,
sprintf(tempname, "temp_%d.%d", getpid(), tmpcnt++);
EU1_printf("ExecCreatR: attempting to create %s\n", tempname);
*/
/* heap_creatr creates a name if the argument to heap_creatr is '\0 ' */
/*
* heap_creatr creates a name if the argument to heap_creatr is
* '\0 '
*/
relDesc = heap_creatr("",
DEFAULT_SMGR,
tupType);
} else {
}
else
{
/* ----------------
* use a relation from the range table
* ----------------
@ -448,4 +470,3 @@ ExecCreatR(TupleDesc tupType,
return relDesc;
}


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/Attic/execFlatten.c,v 1.2 1997/08/19 21:30:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/Attic/execFlatten.c,v 1.3 1997/09/07 04:41:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -33,8 +33,10 @@
#include "executor/execFlatten.h"
#ifdef SETS_FIXED
static bool FjoinBumpOuterNodes(TargetEntry *tlist, ExprContext *econtext,
static bool
FjoinBumpOuterNodes(TargetEntry * tlist, ExprContext * econtext,
DatumPtr results, char *nulls);
#endif
Datum
@ -48,9 +50,10 @@ ExecEvalIter(Iter *iterNode,
expression = iterNode->iterexpr;
/*
* Really Iter nodes are only needed for C functions; postquel functions
* by their nature return 1 result at a time. For now we are only worrying
* about postquel functions, c functions will come later.
* Really Iter nodes are only needed for C functions; postquel
* functions by their nature return 1 result at a time. For now we are
* only worrying about postquel functions, c functions will come
* later.
*/
return ExecEvalExpr(expression, econtext, resultIsNull, iterIsDone);
}
@ -71,13 +74,16 @@ ExecEvalFjoin(TargetEntry *tlist,
DatumPtr resVect = fjNode->fj_results;
BoolPtr alwaysDone = fjNode->fj_alwaysDone;
if (fj_isDone) *fj_isDone = false;
if (fj_isDone)
*fj_isDone = false;
/*
* For the next tuple produced by the plan, we need to re-initialize
* the Fjoin node.
*/
if (!fjNode->fj_initialized)
{
/*
* Initialize all of the Outer nodes
*/
@ -123,11 +129,12 @@ ExecEvalFjoin(TargetEntry *tlist,
}
else
{
/*
* If we're already initialized, all we need to do is get the
* next inner result and pair it up with the existing outer node
* result vector. Watch out for the degenerate case, where the
* inner node never returns results.
* If we're already initialized, all we need to do is get the next
* inner result and pair it up with the existing outer node result
* vector. Watch out for the degenerate case, where the inner
* node never returns results.
*/
/*
@ -208,9 +215,9 @@ FjoinBumpOuterNodes(TargetEntry *tlist,
}
/*
* If every function is done, then we are done flattening.
* Mark the Fjoin node uninitialized, it is time to get the
* next tuple from the plan and redo all of the flattening.
* If every function is done, then we are done flattening. Mark the
* Fjoin node uninitialized, it is time to get the next tuple from the
* plan and redo all of the flattening.
*/
if (funcIsDone)
{
@ -237,4 +244,5 @@ FjoinBumpOuterNodes(TargetEntry *tlist,
}
return false;
}
#endif


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.5 1997/08/26 23:31:37 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.6 1997/09/07 04:41:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -65,11 +65,14 @@ ExecInitJunkFilter(List *targetList)
{
JunkFilter *junkfilter;
List *cleanTargetList;
int len, cleanLength;
TupleDesc tupType, cleanTupType;
int len,
cleanLength;
TupleDesc tupType,
cleanTupType;
List *t;
TargetEntry *tle;
Resdom *resdom, *cleanResdom;
Resdom *resdom,
*cleanResdom;
int resjunk;
AttrNumber cleanResno;
AttrNumber *cleanMap;
@ -86,19 +89,25 @@ ExecInitJunkFilter(List *targetList)
cleanTargetList = NIL;
cleanResno = 1;
foreach (t, targetList) {
foreach(t, targetList)
{
TargetEntry *rtarget = lfirst(t);
if (rtarget->resdom != NULL) {
if (rtarget->resdom != NULL)
{
resdom = rtarget->resdom;
expr = rtarget->expr;
resjunk = resdom->resjunk;
if (resjunk == 0) {
if (resjunk == 0)
{
/*
* make a copy of the resdom node, changing its resno.
*/
cleanResdom = (Resdom *) copyObject(resdom);
cleanResdom->resno = cleanResno;
cleanResno++;
/*
* create a new target list entry
*/
@ -108,7 +117,8 @@ ExecInitJunkFilter(List *targetList)
cleanTargetList = lappend(cleanTargetList, tle);
}
}
else {
else
{
#ifdef SETS_FIXED
List *fjListP;
Fjoin *cleanFjoin;
@ -127,7 +137,8 @@ ExecInitJunkFilter(List *targetList)
tle = (List) MakeTLE(cleanResdom, (Expr) expr);
set_fj_innerNode(cleanFjoin, tle);
foreach(fjListP, lnext(fjList)) {
foreach(fjListP, lnext(fjList))
{
TargetEntry *tle = lfirst(fjListP);
resdom = tle->resdom;
@ -135,6 +146,7 @@ ExecInitJunkFilter(List *targetList)
cleanResdom = (Resdom *) copyObject((Node) resdom);
cleanResno++;
cleanResdom->Resno = cleanResno;
/*
* create a new target list entry
*/
@ -170,21 +182,28 @@ ExecInitJunkFilter(List *targetList)
* attribute of the "original" tuple.
* ---------------------
*/
if (cleanLength > 0) {
if (cleanLength > 0)
{
size = cleanLength * sizeof(AttrNumber);
cleanMap = (AttrNumber *) palloc(size);
cleanResno = 1;
foreach (t, targetList) {
foreach(t, targetList)
{
TargetEntry *tle = lfirst(t);
if (tle->resdom != NULL) {
if (tle->resdom != NULL)
{
resdom = tle->resdom;
expr = tle->expr;
resjunk = resdom->resjunk;
if (resjunk == 0) {
if (resjunk == 0)
{
cleanMap[cleanResno - 1] = resdom->resno;
cleanResno++;
}
} else {
}
else
{
#ifdef SETS_FIXED
List fjListP;
List fjList = lfirst(t);
@ -198,7 +217,8 @@ ExecInitJunkFilter(List *targetList)
cleanResno++;
#ifdef SETS_FIXED
foreach(fjListP, lnext(fjList)) {
foreach(fjListP, lnext(fjList))
{
TargetEntry *tle = lfirst(fjListP);
resdom = tle->resdom;
@ -208,7 +228,9 @@ ExecInitJunkFilter(List *targetList)
#endif
}
}
} else {
}
else
{
cleanMap = NULL;
}
@ -265,19 +287,23 @@ ExecGetJunkAttribute(JunkFilter *junkfilter,
resno = InvalidAttrNumber;
targetList = junkfilter->jf_targetList;
foreach (t, targetList) {
foreach(t, targetList)
{
TargetEntry *tle = lfirst(t);
resdom = tle->resdom;
resname = resdom->resname;
resjunk = resdom->resjunk;
if (resjunk != 0 && (strcmp(resname, attrName) == 0)) {
if (resjunk != 0 && (strcmp(resname, attrName) == 0))
{
/* We found it ! */
resno = resdom->resno;
break;
}
}
if (resno == InvalidAttrNumber) {
if (resno == InvalidAttrNumber)
{
/* Ooops! We couldn't find this attribute... */
return (false);
}
@ -345,13 +371,16 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
* for large tuples we just use palloc.
* ---------------------
*/
if (cleanLength > 64) {
if (cleanLength > 64)
{
size = cleanLength * sizeof(Datum);
values = (Datum *) palloc(size);
size = cleanLength * sizeof(char);
nulls = (char *) palloc(size);
} else {
}
else
{
values = values_array;
nulls = nulls_array;
}
@ -360,7 +389,8 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
* Extract one by one all the values of the "clean" tuple.
* ---------------------
*/
for (i=0; i<cleanLength; i++) {
for (i = 0; i < cleanLength; i++)
{
Datum d = (Datum)
heap_getattr(tuple, InvalidBuffer, cleanMap[i], tupType, &isNull);
@ -385,11 +415,11 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot)
* and return the new tuple.
* ---------------------
*/
if (cleanLength > 64) {
if (cleanLength > 64)
{
pfree(values);
pfree(nulls);
}
return (cleanTuple);
}


@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.22 1997/09/04 13:22:36 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.23 1997/09/07 04:41:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -56,28 +56,35 @@
/* decls for local routines only used within this module */
static void ExecCheckPerms(CmdType operation, int resultRelation, List *rangeTable,
static void
ExecCheckPerms(CmdType operation, int resultRelation, List * rangeTable,
Query * parseTree);
static TupleDesc InitPlan(CmdType operation, Query *parseTree,
static TupleDesc
InitPlan(CmdType operation, Query * parseTree,
Plan * plan, EState * estate);
static void EndPlan(Plan * plan, EState * estate);
static TupleTableSlot *ExecutePlan(EState *estate, Plan *plan,
static TupleTableSlot *
ExecutePlan(EState * estate, Plan * plan,
Query * parseTree, CmdType operation,
int numberTuples, ScanDirection direction,
void (*printfunc) ());
static void ExecRetrieve(TupleTableSlot * slot, void (*printfunc) (),
EState * estate);
static void ExecAppend(TupleTableSlot *slot,ItemPointer tupleid,
static void
ExecAppend(TupleTableSlot * slot, ItemPointer tupleid,
EState * estate);
static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
static void
ExecDelete(TupleTableSlot * slot, ItemPointer tupleid,
EState * estate);
static void ExecReplace(TupleTableSlot *slot, ItemPointer tupleid,
static void
ExecReplace(TupleTableSlot * slot, ItemPointer tupleid,
EState * estate, Query * parseTree);
/* end of local decls */
#ifdef QUERY_LIMIT
static int queryLimit = ALL_TUPLES;
#undef ALL_TUPLES
#define ALL_TUPLES queryLimit
@ -87,6 +94,7 @@ ExecutorLimit(int limit)
{
return queryLimit = limit;
}
#endif
#endif
@ -114,13 +122,13 @@ ExecutorStart(QueryDesc *queryDesc, EState *estate)
queryDesc->plantree,
estate);
/* reset buffer refcount. the current refcounts
* are saved and will be restored when ExecutorEnd is called
/*
* reset buffer refcount. the current refcounts are saved and will be
* restored when ExecutorEnd is called
*
* this makes sure that when ExecutorRun's are
* called recursively as for postquel functions,
* the buffers pinned by one ExecutorRun will not be
* unpinned by another ExecutorRun.
* this makes sure that when ExecutorRun's are called recursively as for
* postquel functions, the buffers pinned by one ExecutorRun will not
* be unpinned by another ExecutorRun.
*/
BufferRefCountReset(estate->es_refcount);
@ -176,26 +184,32 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature, int count)
estate->es_lastoid = InvalidOid;
#if 0
/*
* It doesn't work in the common case (e.g. if the function has an aggregate).
* Now we store parameter values before ExecutorStart. - vadim 01/22/97
* Now we store parameter values before ExecutorStart. - vadim
* 01/22/97
*/
#ifdef INDEXSCAN_PATCH
/*
* If the plan is an index scan and some of the scan key are
* function arguments rescan the indices after the parameter
* values have been stored in the execution state. DZ - 27-8-1996
* If the plan is an index scan and some of the scan key are function
* arguments rescan the indices after the parameter values have been
* stored in the execution state. DZ - 27-8-1996
*/
if ((nodeTag(plan) == T_IndexScan) &&
(((IndexScan *)plan)->indxstate->iss_RuntimeKeyInfo != NULL)) {
(((IndexScan *) plan)->indxstate->iss_RuntimeKeyInfo != NULL))
{
ExprContext *econtext;
econtext = ((IndexScan *) plan)->scan.scanstate->cstate.cs_ExprContext;
ExecIndexReScan((IndexScan *) plan, econtext, plan);
}
#endif
#endif
switch(feature) {
switch (feature)
{
case EXEC_RUN:
result = ExecutePlan(estate,
@ -293,8 +307,10 @@ ExecCheckPerms(CmdType operation,
Oid relid;
HeapTuple htp;
List *lp;
List *qvars, *tvars;
int32 ok = 1, aclcheck_result = -1;
List *qvars,
*tvars;
int32 ok = 1,
aclcheck_result = -1;
char *opstr;
NameData rname;
char *userName;
@ -303,7 +319,8 @@ ExecCheckPerms(CmdType operation,
userName = GetPgUserName();
foreach (lp, rangeTable) {
foreach(lp, rangeTable)
{
RangeTblEntry *rte = lfirst(lp);
relid = rte->relid;
@ -316,18 +333,21 @@ ExecCheckPerms(CmdType operation,
strNcpy(rname.data,
((Form_pg_class) GETSTRUCT(htp))->relname.data,
NAMEDATALEN - 1);
if (i == resultRelation) { /* this is the result relation */
if (i == resultRelation)
{ /* this is the result relation */
qvars = pull_varnos(parseTree->qual);
tvars = pull_varnos((Node *) parseTree->targetList);
if (intMember(resultRelation, qvars) ||
intMember(resultRelation, tvars)) {
intMember(resultRelation, tvars))
{
/* result relation is scanned */
ok = ((aclcheck_result = CHECK(ACL_RD)) == ACLCHECK_OK);
opstr = "read";
if (!ok)
break;
}
switch (operation) {
switch (operation)
{
case CMD_INSERT:
ok = ((aclcheck_result = CHECK(ACL_AP)) == ACLCHECK_OK) ||
((aclcheck_result = CHECK(ACL_WR)) == ACLCHECK_OK);
@ -343,7 +363,9 @@ ExecCheckPerms(CmdType operation,
elog(WARN, "ExecCheckPerms: bogus operation %d",
operation);
}
} else {
}
else
{
/* XXX NOTIFY?? */
ok = ((aclcheck_result = CHECK(ACL_RD)) == ACLCHECK_OK);
opstr = "read";
@ -352,7 +374,8 @@ ExecCheckPerms(CmdType operation,
break;
++i;
}
if (!ok) {
if (!ok)
{
elog(WARN, "%s: %s", rname.data, aclcheck_error_strings[aclcheck_result]);
}
}
@ -403,7 +426,8 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
* ----------------
*/
if (resultRelation != 0 && operation != CMD_SELECT) {
if (resultRelation != 0 && operation != CMD_SELECT)
{
/* ----------------
* if we have a result relation, open it and
@ -425,11 +449,13 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
elog(WARN, "You can't change sequence relation %s",
resultRelationDesc->rd_rel->relname.data);
/* Write-lock the result relation right away: if the relation
is used in a subsequent scan, we won't have to elevate the
read-lock set by heap_beginscan to a write-lock (needed by
heap_insert, heap_delete and heap_replace).
This will hopefully prevent some deadlocks. - 01/24/94 */
/*
* Write-lock the result relation right away: if the relation is
* used in a subsequent scan, we won't have to elevate the
* read-lock set by heap_beginscan to a write-lock (needed by
* heap_insert, heap_delete and heap_replace). This will hopefully
* prevent some deadlocks. - 01/24/94
*/
RelationSetLockForWrite(resultRelationDesc);
resultRelationInfo = makeNode(RelationInfo);
@ -447,7 +473,9 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
ExecOpenIndices(resultRelationOid, resultRelationInfo);
estate->es_result_relation_info = resultRelationInfo;
} else {
}
else
{
/* ----------------
* if no result relation, then set state appropriately
* ----------------
@ -499,11 +527,14 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
* ----------------
*/
if (operation == CMD_UPDATE || operation == CMD_DELETE ||
operation == CMD_INSERT) {
operation == CMD_INSERT)
{
JunkFilter *j = (JunkFilter *) ExecInitJunkFilter(targetList);
estate->es_junkFilter = j;
} else
}
else
estate->es_junkFilter = NULL;
/* ----------------
@ -512,17 +543,21 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
*/
intoRelationDesc = (Relation) NULL;
if (operation == CMD_SELECT) {
if (operation == CMD_SELECT)
{
char *intoName;
char archiveMode;
Oid intoRelationId;
TupleDesc tupdesc;
if (!parseTree->isPortal) {
if (!parseTree->isPortal)
{
/*
* a select into table
*/
if (parseTree->into != NULL) {
if (parseTree->into != NULL)
{
/* ----------------
* create the "into" relation
*
@ -613,6 +648,7 @@ EndPlan(Plan *plan, EState *estate)
*/
{
TupleTable tupleTable = (TupleTable) estate->es_tupleTable;
ExecDestroyTupleTable(tupleTable, true); /* was missing last arg */
estate->es_tupleTable = NULL;
}
@ -621,7 +657,8 @@ EndPlan(Plan *plan, EState *estate)
* close the result relations if necessary
* ----------------
*/
if (resultRelationInfo != NULL) {
if (resultRelationInfo != NULL)
{
Relation resultRelationDesc;
resultRelationDesc = resultRelationInfo->ri_RelationDesc;
@ -638,7 +675,8 @@ EndPlan(Plan *plan, EState *estate)
* close the "into" relation if necessary
* ----------------
*/
if (intoRelationDesc != NULL) {
if (intoRelationDesc != NULL)
{
heap_close(intoRelationDesc);
}
}
@ -696,8 +734,10 @@ ExecutePlan(EState *estate,
* ----------------
*/
for(;;) {
if (operation != CMD_NOTIFY) {
for (;;)
{
if (operation != CMD_NOTIFY)
{
/* ----------------
* Execute the plan and obtain a tuple
* ----------------
@ -711,7 +751,8 @@ ExecutePlan(EState *estate,
* we just return null...
* ----------------
*/
if (TupIsNull(slot)) {
if (TupIsNull(slot))
{
result = NULL;
break;
}
@ -727,8 +768,10 @@ ExecutePlan(EState *estate,
* Also, extract all the junk information we need.
* ----------------
*/
if ((junkfilter = estate->es_junkFilter) != (JunkFilter*)NULL) {
if ((junkfilter = estate->es_junkFilter) != (JunkFilter *) NULL)
{
Datum datum;
/* NameData attrName; */
HeapTuple newTuple;
bool isNull;
@ -737,7 +780,8 @@ ExecutePlan(EState *estate,
* extract the 'ctid' junk attribute.
* ---------------
*/
if (operation == CMD_UPDATE || operation == CMD_DELETE) {
if (operation == CMD_UPDATE || operation == CMD_DELETE)
{
if (!ExecGetJunkAttribute(junkfilter,
slot,
"ctid",
@ -749,7 +793,8 @@ ExecutePlan(EState *estate,
elog(WARN, "ExecutePlan: (junk) `ctid' is NULL!");
tupleid = (ItemPointer) DatumGetPointer(datum);
tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */
tuple_ctid = *tupleid; /* make sure we don't free the
* ctid!! */
tupleid = &tuple_ctid;
}
@ -762,7 +807,8 @@ ExecutePlan(EState *estate,
slot = ExecStoreTuple(newTuple, /* tuple to store */
slot, /* destination slot */
InvalidBuffer,/* this tuple has no buffer */
InvalidBuffer, /* this tuple has no
* buffer */
true); /* tuple should be pfreed */
} /* if (junkfilter... */
@ -774,7 +820,8 @@ ExecutePlan(EState *estate,
* ----------------
*/
switch(operation) {
switch (operation)
{
case CMD_SELECT:
ExecRetrieve(slot, /* slot containing tuple */
printfunc, /* print function */
@ -797,13 +844,16 @@ ExecutePlan(EState *estate,
result = NULL;
break;
/* Total hack. I'm ignoring any accessor functions for
Relation, RelationTupleForm, NameData.
Assuming that NameData.data has offset 0.
/*
* Total hack. I'm ignoring any accessor functions for
* Relation, RelationTupleForm, NameData. Assuming that
* NameData.data has offset 0.
*/
case CMD_NOTIFY: {
case CMD_NOTIFY:
{
RelationInfo *rInfo = estate->es_result_relation_info;
Relation rDesc = rInfo->ri_RelationDesc;
Async_Notify(rDesc->rd_rel->relname.data);
result = NULL;
current_tuple_count = 0;
@ -974,7 +1024,8 @@ ExecAppend(TupleTableSlot *slot,
* ----------------
*/
numIndices = resultRelationInfo->ri_NumIndices;
if (numIndices > 0) {
if (numIndices > 0)
{
ExecInsertIndexTuples(slot, &(tuple->t_ctid), estate, false);
}
(estate->es_processed)++;
@ -1076,7 +1127,8 @@ ExecReplace(TupleTableSlot *slot,
* abort the operation if not running transactions
* ----------------
*/
if (IsBootstrapProcessingMode()) {
if (IsBootstrapProcessingMode())
{
elog(DEBUG, "ExecReplace: replace can't run without transactions");
return;
}
@ -1150,7 +1202,8 @@ ExecReplace(TupleTableSlot *slot,
*/
if (heap_replace(resultRelationDesc, /* relation desc */
tupleid, /* item ptr of tuple to replace */
tuple)) { /* replacement heap tuple */
tuple))
{ /* replacement heap tuple */
return;
}
@ -1180,7 +1233,8 @@ ExecReplace(TupleTableSlot *slot,
*/
numIndices = resultRelationInfo->ri_NumIndices;
if (numIndices > 0) {
if (numIndices > 0)
{
ExecInsertIndexTuples(slot, &(tuple->t_ctid), estate, true);
}


@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.2 1996/10/31 10:11:27 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.3 1997/09/07 04:41:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -115,7 +115,8 @@ ExecInitNode(Plan *node, EState *estate, Plan *parent)
if (node == NULL)
return FALSE;
switch(nodeTag(node)) {
switch (nodeTag(node))
{
/* ----------------
* control nodes
* ----------------
@ -217,7 +218,8 @@ ExecProcNode(Plan *node, Plan *parent)
if (node == NULL)
return NULL;
switch(nodeTag(node)) {
switch (nodeTag(node))
{
/* ----------------
* control nodes
* ----------------
@ -305,7 +307,8 @@ ExecCountSlotsNode(Plan *node)
if (node == (Plan *) NULL)
return 0;
switch(nodeTag(node)) {
switch (nodeTag(node))
{
/* ----------------
* control nodes
* ----------------
@ -394,7 +397,8 @@ ExecEndNode(Plan *node, Plan *parent)
if (node == NULL)
return;
switch(nodeTag(node)) {
switch (nodeTag(node))
{
/* ----------------
* control nodes
* ----------------


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.12 1997/08/19 21:31:03 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.13 1997/09/07 04:41:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -70,19 +70,24 @@ int execConstLen;
/* static functions decls */
static Datum ExecEvalAggreg(Aggreg * agg, ExprContext * econtext, bool * isNull);
static Datum ExecEvalArrayRef(ArrayRef *arrayRef, ExprContext *econtext,
static Datum
ExecEvalArrayRef(ArrayRef * arrayRef, ExprContext * econtext,
bool * isNull, bool * isDone);
static Datum ExecEvalAnd(Expr * andExpr, ExprContext * econtext, bool * isNull);
static Datum ExecEvalFunc(Expr *funcClause, ExprContext *econtext,
static Datum
ExecEvalFunc(Expr * funcClause, ExprContext * econtext,
bool * isNull, bool * isDone);
static void ExecEvalFuncArgs(FunctionCachePtr fcache, ExprContext *econtext,
static void
ExecEvalFuncArgs(FunctionCachePtr fcache, ExprContext * econtext,
List * argList, Datum argV[], bool * argIsDone);
static Datum ExecEvalNot(Expr * notclause, ExprContext * econtext, bool * isNull);
static Datum ExecEvalOper(Expr *opClause, ExprContext *econtext,
static Datum
ExecEvalOper(Expr * opClause, ExprContext * econtext,
bool * isNull);
static Datum ExecEvalOr(Expr * orExpr, ExprContext * econtext, bool * isNull);
static Datum ExecEvalVar(Var * variable, ExprContext * econtext, bool * isNull);
static Datum ExecMakeFunctionResult(Node *node, List *arguments,
static Datum
ExecMakeFunctionResult(Node * node, List * arguments,
ExprContext * econtext, bool * isNull, bool * isDone);
static bool ExecQualClause(Node * clause, ExprContext * econtext);
@ -102,12 +107,15 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
bool * isDone)
{
bool dummy;
int i = 0, j = 0;
int i = 0,
j = 0;
ArrayType *array_scanner;
List *upperIndexpr, *lowerIndexpr;
List *upperIndexpr,
*lowerIndexpr;
Node *assgnexpr;
List *elt;
IntArray upper, lower;
IntArray upper,
lower;
int *lIndex;
char *dataPtr;
@ -116,27 +124,33 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
isDone);
if (*isNull) return (Datum)NULL;
if (*isNull)
return (Datum) NULL;
upperIndexpr = arrayRef->refupperindexpr;
foreach (elt, upperIndexpr) {
foreach(elt, upperIndexpr)
{
upper.indx[i++] = (int32) ExecEvalExpr((Node *) lfirst(elt),
econtext,
isNull,
&dummy);
if (*isNull) return (Datum)NULL;
if (*isNull)
return (Datum) NULL;
}
lowerIndexpr = arrayRef->reflowerindexpr;
lIndex = NULL;
if (lowerIndexpr != NIL) {
foreach (elt, lowerIndexpr) {
if (lowerIndexpr != NIL)
{
foreach(elt, lowerIndexpr)
{
lower.indx[j++] = (int32) ExecEvalExpr((Node *) lfirst(elt),
econtext,
isNull,
&dummy);
if (*isNull) return (Datum)NULL;
if (*isNull)
return (Datum) NULL;
}
if (i != j)
elog(WARN,
@ -145,11 +159,13 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
}
assgnexpr = arrayRef->refassgnexpr;
if (assgnexpr != NULL) {
if (assgnexpr != NULL)
{
dataPtr = (char *) ExecEvalExpr((Node *)
assgnexpr, econtext,
isNull, &dummy);
if (*isNull) return (Datum)NULL;
if (*isNull)
return (Datum) NULL;
execConstByVal = arrayRef->refelembyval;
execConstLen = arrayRef->refelemlength;
if (lIndex == NULL)
@ -230,7 +246,8 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
* get the slot we want
* ----------------
*/
switch(variable->varno) {
switch (variable->varno)
{
case INNER: /* get the tuple from the inner node */
slot = econtext->ecxt_innertuple;
break;
@ -239,7 +256,8 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
slot = econtext->ecxt_outertuple;
break;
default: /* get the tuple from the relation being scanned */
default: /* get the tuple from the relation being
* scanned */
slot = econtext->ecxt_scantuple;
break;
}
@ -261,9 +279,9 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
variable->vartype == tuple_type->attrs[attnum - 1]->atttypid))
/*
* If the attribute number is invalid, then we are supposed to
* return the entire tuple, we give back a whole slot so that
* callers know what the tuple looks like.
* If the attribute number is invalid, then we are supposed to return
* the entire tuple, we give back a whole slot so that callers know
* what the tuple looks like.
*/
if (attnum == InvalidAttrNumber)
{
@ -311,7 +329,8 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
* -cim 9/15/89
* ----------------
*/
if (attnum < 0) {
if (attnum < 0)
{
/* ----------------
* If this is a pseudo-att, we get the type and fake the length.
* There ought to be a routine to return the real lengths, so
@ -320,7 +339,9 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
*/
len = heap_sysattrlen(attnum); /* XXX see -mao above */
byval = heap_sysattrbyval(attnum); /* XXX see -mao above */
} else {
}
else
{
len = tuple_type->attrs[attnum - 1]->attlen;
byval = tuple_type->attrs[attnum - 1]->attbyval ? true : false;
}
@ -371,27 +392,34 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
paramList = econtext->ecxt_param_list_info;
*isNull = false;
/*
* search the list with the parameter info to find a matching name.
* An entry with an InvalidName denotes the last element in the array.
* search the list with the parameter info to find a matching name. An
* entry with an InvalidName denotes the last element in the array.
*/
matchFound = 0;
if (paramList != NULL) {
if (paramList != NULL)
{
/*
* search for an entry in 'paramList' that matches
* the `expression'.
* search for an entry in 'paramList' that matches the
* `expression'.
*/
while(paramList->kind != PARAM_INVALID && !matchFound) {
switch (thisParameterKind) {
while (paramList->kind != PARAM_INVALID && !matchFound)
{
switch (thisParameterKind)
{
case PARAM_NAMED:
if (thisParameterKind == paramList->kind &&
strcmp(paramList->name, thisParameterName) == 0){
strcmp(paramList->name, thisParameterName) == 0)
{
matchFound = 1;
}
break;
case PARAM_NUM:
if (thisParameterKind == paramList->kind &&
paramList->id == thisParameterId) {
paramList->id == thisParameterId)
{
matchFound = 1;
}
break;
@ -401,32 +429,38 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
paramList->id == thisParameterId)
{
matchFound = 1;
/*
* sanity check
*/
if (strcmp(paramList->name, thisParameterName) != 0){
if (strcmp(paramList->name, thisParameterName) != 0)
{
elog(WARN,
"ExecEvalParam: new/old params with same id & diff names");
}
}
break;
default:
/*
* oops! this is not supposed to happen!
*/
elog(WARN, "ExecEvalParam: invalid paramkind %d",
thisParameterKind);
}
if (! matchFound) {
if (!matchFound)
{
paramList++;
}
} /* while */
} /* if */
if (!matchFound) {
if (!matchFound)
{
/*
* ooops! we couldn't find this parameter
* in the parameter list. Signal an error
* ooops! we couldn't find this parameter in the parameter list.
* Signal an error
*/
elog(WARN, "ExecEvalParam: Unknown value for parameter %s",
thisParameterName);
@ -506,6 +540,7 @@ GetAttributeByNum(TupleTableSlot *slot,
return (char *) NULL;
return (char *) retval;
}
#endif
/* XXX char16 name for catalogs */
@ -517,6 +552,7 @@ att_by_num(TupleTableSlot *slot,
{
return (GetAttributeByNum(slot, attrno, isNull));
}
#endif
char *
@ -547,8 +583,10 @@ GetAttributeByName(TupleTableSlot *slot, char *attname, bool *isNull)
natts = tuple->t_natts;
attrno = InvalidAttrNumber;
for (i=0;i<tupdesc->natts;i++) {
if (namestrcmp(&(tupdesc->attrs[i]->attname), attname) == 0) {
for (i = 0; i < tupdesc->natts; i++)
{
if (namestrcmp(&(tupdesc->attrs[i]->attname), attname) == 0)
{
attrno = tupdesc->attrs[i]->attnum;
break;
}
@ -575,6 +613,7 @@ att_by_name(TupleTableSlot *slot, char *attname, bool *isNull)
{
return (GetAttributeByName(slot, attname, isNull));
}
#endif
static void
@ -585,13 +624,15 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
bool * argIsDone)
{
int i;
bool argIsNull, *nullVect;
bool argIsNull,
*nullVect;
List *arg;
nullVect = fcache->nullVect;
i = 0;
foreach (arg, argList) {
foreach(arg, argList)
{
/* ----------------
* evaluate the expression, in general functions cannot take
* sets as arguments but we make an exception in the case of
@ -636,8 +677,8 @@ ExecMakeFunctionResult(Node *node,
bool funcisset = false;
/*
* This is kind of ugly, Func nodes now have targetlists so that
* we know when and what to project out from postquel function results.
* This is kind of ugly, Func nodes now have targetlists so that we
* know when and what to project out from postquel function results.
* This means we have to pass the func node all the way down instead
* of using only the fcache struct as before. ExecMakeFunctionResult
* becomes a little bit more of a dual personality as a result.
@ -660,7 +701,8 @@ ExecMakeFunctionResult(Node *node,
* into a datum array (argv) and pass this array to arrayFmgr()
* ----------------
*/
if (fcache->nargs != 0) {
if (fcache->nargs != 0)
{
bool argDone;
if (fcache->nargs > MAXFMGRARGS)
@ -681,36 +723,42 @@ ExecMakeFunctionResult(Node *node,
else
ExecEvalFuncArgs(fcache, econtext, arguments, argv, &argDone);
if ((fcache->hasSetArg) && (argDone)) {
if (isDone) *isDone = true;
if ((fcache->hasSetArg) && (argDone))
{
if (isDone)
*isDone = true;
return (Datum) NULL;
}
}
/* If this function is really a set, we have to diddle with things.
* If the function has already been called at least once, then the
* setArg field of the fcache holds
* the OID of this set in pg_proc. (This is not quite legit, since
* the setArg field is really for functions which take sets of tuples
* as input - set functions take no inputs at all. But it's a nice
* place to stash this value, for now.)
/*
* If this function is really a set, we have to diddle with things. If
* the function has already been called at least once, then the setArg
* field of the fcache holds the OID of this set in pg_proc. (This is
* not quite legit, since the setArg field is really for functions
* which take sets of tuples as input - set functions take no inputs
* at all. But it's a nice place to stash this value, for now.)
*
* If this is the first call of the set's function, then
* the call to ExecEvalFuncArgs above just returned the OID of
* the pg_proc tuple which defines this set. So replace the existing
* funcid in the funcnode with the set's OID. Also, we want a new
* fcache which points to the right function, so get that, now that
* we have the right OID. Also zero out the argv, since the real
* set doesn't take any arguments.
* If this is the first call of the set's function, then the call to
* ExecEvalFuncArgs above just returned the OID of the pg_proc tuple
* which defines this set. So replace the existing funcid in the
* funcnode with the set's OID. Also, we want a new fcache which
* points to the right function, so get that, now that we have the
* right OID. Also zero out the argv, since the real set doesn't take
* any arguments.
*/
if (((Func *)node)->funcid == SetEvalRegProcedure) {
if (((Func *) node)->funcid == SetEvalRegProcedure)
{
funcisset = true;
if (fcache->setArg) {
if (fcache->setArg)
{
argv[0] = 0;
((Func *) node)->funcid = (Oid) PointerGetDatum(fcache->setArg);
} else {
}
else
{
((Func *) node)->funcid = (Oid) argv[0];
setFcache(node, argv[0], NIL, econtext);
fcache = ((Func *) node)->func_fcache;
@ -724,24 +772,28 @@ ExecMakeFunctionResult(Node *node,
* passing the function the evaluated parameter values.
* ----------------
*/
if (fcache->language == SQLlanguageId) {
if (fcache->language == SQLlanguageId)
{
Datum result;
Assert(funcNode);
result = postquel_function(funcNode, (char **) argv, isNull, isDone);
/*
* finagle the situation where we are iterating through all results
* in a nested dot function (whose argument function returns a set
* of tuples) and the current function finally finishes. We need to
* get the next argument in the set and run the function all over
* again. This is getting unclean.
* finagle the situation where we are iterating through all
* results in a nested dot function (whose argument function
* returns a set of tuples) and the current function finally
* finishes. We need to get the next argument in the set and run
* the function all over again. This is getting unclean.
*/
if ((*isDone) && (fcache->hasSetArg)) {
if ((*isDone) && (fcache->hasSetArg))
{
bool argDone;
ExecEvalFuncArgs(fcache, econtext, arguments, argv, &argDone);
if (argDone) {
if (argDone)
{
fcache->setArg = (char *) NULL;
*isDone = true;
result = (Datum) NULL;
@ -752,18 +804,23 @@ ExecMakeFunctionResult(Node *node,
isNull,
isDone);
}
if (funcisset) {
/* reset the funcid so that next call to this routine will
* still recognize this func as a set.
* Note that for now we assume that the set function in
* pg_proc must be a Postquel function - the funcid is
* not reset below for C functions.
if (funcisset)
{
/*
* reset the funcid so that next call to this routine will
* still recognize this func as a set. Note that for now we
* assume that the set function in pg_proc must be a Postquel
* function - the funcid is not reset below for C functions.
*/
((Func *) node)->funcid = SetEvalRegProcedure;
/* If we're done with the results of this function, get rid
* of its func cache.
/*
* If we're done with the results of this function, get rid of
* its func cache.
*/
if (*isDone) {
if (*isDone)
{
((Func *) node)->func_fcache = NULL;
}
}
@ -773,9 +830,11 @@ ExecMakeFunctionResult(Node *node,
{
int i;
if (isDone) *isDone = true;
if (isDone)
*isDone = true;
for (i = 0; i < fcache->nargs; i++)
if (fcache->nullVect[i] == true) *isNull = true;
if (fcache->nullVect[i] == true)
*isNull = true;
return ((Datum) fmgr_c(fcache->func, fcache->foid, fcache->nargs,
(FmgrValues *) argv, isNull));
@ -825,11 +884,12 @@ ExecEvalOper(Expr *opClause, ExprContext *econtext, bool *isNull)
argList = opClause->args;
/*
* get the fcache from the Oper node.
* If it is NULL, then initialize it
* get the fcache from the Oper node. If it is NULL, then initialize
* it
*/
fcache = op->op_fcache;
if (fcache == NULL) {
if (fcache == NULL)
{
setFcache((Node *) op, op->opid, argList, econtext);
fcache = op->op_fcache;
}
@ -873,11 +933,12 @@ ExecEvalFunc(Expr *funcClause,
argList = funcClause->args;
/*
* get the fcache from the Func node.
* If it is NULL, then initialize it
* get the fcache from the Func node. If it is NULL, then initialize
* it
*/
fcache = func->func_fcache;
if (fcache == NULL) {
if (fcache == NULL)
{
setFcache((Node *) func, func->funcid, argList, econtext);
fcache = func->func_fcache;
}
@ -965,7 +1026,8 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
* to NULL we set *isNull flag to true -
* ----------------
*/
foreach (clause, clauses) {
foreach(clause, clauses)
{
/* ----------------
* We don't iterate over sets in the quals, so pass in an isDone
@ -1025,7 +1087,8 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
* should be true.
* ----------------
*/
foreach (clause, clauses) {
foreach(clause, clauses)
{
/* ----------------
* We don't iterate over sets in the quals, so pass in an isDone
@ -1102,16 +1165,19 @@ ExecEvalExpr(Node *expression,
* of function given the type of our expression.
* ----------------
*/
if (expression == NULL) {
if (expression == NULL)
{
*isNull = true;
return (Datum) true;
}
switch(nodeTag(expression)) {
switch (nodeTag(expression))
{
case T_Var:
retDatum = (Datum) ExecEvalVar((Var *) expression, econtext, isNull);
break;
case T_Const: {
case T_Const:
{
Const *con = (Const *) expression;
if (con->constisnull)
@ -1139,9 +1205,12 @@ ExecEvalExpr(Node *expression,
isNull,
isDone);
break;
case T_Expr: {
case T_Expr:
{
Expr *expr = (Expr *) expression;
switch (expr->opType) {
switch (expr->opType)
{
case OP_EXPR:
retDatum = (Datum) ExecEvalOper(expr, econtext, isNull);
break;
@ -1199,8 +1268,8 @@ ExecQualClause(Node *clause, ExprContext *econtext)
return true;
/*
* pass isDone, but ignore it. We don't iterate over multiple
* returns in the qualifications.
* pass isDone, but ignore it. We don't iterate over multiple returns
* in the qualifications.
*/
expr_value = (Datum)
ExecEvalExpr(clause, econtext, &isNull, &isDone);
@ -1265,7 +1334,8 @@ ExecQual(List *qual, ExprContext *econtext)
* ----------------
*/
result = false;
foreach (clause, qual) {
foreach(clause, qual)
{
result = ExecQualClause((Node *) lfirst(clause), econtext);
if (result == true)
break;
@ -1291,7 +1361,8 @@ ExecTargetListLength(List *targetlist)
TargetEntry *curTle;
len = 0;
foreach (tl, targetlist) {
foreach(tl, targetlist)
{
curTle = lfirst(tl);
if (curTle->resdom != NULL)
@ -1344,7 +1415,8 @@ ExecTargetList(List *targetlist,
* between passing and failing the qualification.
* ----------------
*/
if (targetlist == NIL) {
if (targetlist == NIL)
{
/* ----------------
* I now think that the only time this makes
* any sense is when we run a delete query. Then
@ -1372,10 +1444,13 @@ ExecTargetList(List *targetlist,
* the stack.
* ----------------
*/
if (nodomains > 64) {
if (nodomains > 64)
{
null_head = (char *) palloc(nodomains + 1);
fjIsNull = (bool *) palloc(nodomains + 1);
} else {
}
else
{
null_head = &nulls_array[0];
fjIsNull = &fjNullArray[0];
}
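/*
 * Illustrative sketch -- not part of the pgindent commit above.  Same idea
 * as ExecTargetList: keep the null-flag scratch array on the stack for up
 * to 64 result domains and fall back to heap allocation for wider target
 * lists (malloc/free stand in for the executor's palloc/pfree).
 */
#include <stdlib.h>

#define TOY_MAX_STACK_DOMAINS 64

void
toy_fill_nulls(int nodomains)
{
	char		stack_nulls[TOY_MAX_STACK_DOMAINS + 1];
	char	   *nulls;
	int			i;

	if (nodomains > TOY_MAX_STACK_DOMAINS)
	{
		nulls = (char *) malloc(nodomains + 1);
		if (nulls == NULL)
			return;				/* the executor would elog() instead */
	}
	else
		nulls = stack_nulls;

	for (i = 0; i < nodomains; i++)
		nulls[i] = ' ';			/* ' ' = not null, 'n' = null, as above */

	/* ... use the array while building the result tuple ... */

	if (nodomains > TOY_MAX_STACK_DOMAINS)
		free(nulls);
}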
@ -1387,7 +1462,8 @@ ExecTargetList(List *targetlist,
EV_printf("ExecTargetList: setting target list values\n");
*isDone = true;
foreach (tl, targetlist) {
foreach(tl, targetlist)
{
/* ----------------
* remember, a target list is a list of lists:
*
@ -1399,7 +1475,8 @@ ExecTargetList(List *targetlist,
*/
tle = lfirst(tl);
if (tle->resdom != NULL) {
if (tle->resdom != NULL)
{
expr = tle->expr;
resdom = tle->resdom;
resind = resdom->resno - 1;
@ -1417,7 +1494,9 @@ ExecTargetList(List *targetlist,
null_head[resind] = ' ';
else
null_head[resind] = 'n';
}else {
}
else
{
int curNode;
Resdom *fjRes;
List *fjTlist = (List *) tle->expr;
@ -1436,7 +1515,8 @@ ExecTargetList(List *targetlist,
resind = fjRes->resno - 1;
if (fjIsNull[0])
null_head[resind] = 'n';
else {
else
{
null_head[resind] = ' ';
values[resind] = results[0];
}
@ -1450,12 +1530,16 @@ ExecTargetList(List *targetlist,
{
#if 0 /* what is this?? */
Node *outernode = lfirst(fjTlist);
fjRes = (Resdom *) outernode->iterexpr;
#endif
resind = fjRes->resno - 1;
if (fjIsNull[curNode]) {
if (fjIsNull[curNode])
{
null_head[resind] = 'n';
}else {
}
else
{
null_head[resind] = ' ';
values[resind] = results[curNode];
}
@ -1474,7 +1558,8 @@ ExecTargetList(List *targetlist,
* free the nulls array if we allocated one..
* ----------------
*/
if (nodomains > 64) pfree(null_head);
if (nodomains > 64)
pfree(null_head);
return
newTuple;
@ -1523,7 +1608,8 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
tupValue = projInfo->pi_tupValue;
econtext = projInfo->pi_exprContext;
if (targetlist == NIL) {
if (targetlist == NIL)
{
*isDone = true;
return (TupleTableSlot *) NULL;
}
@ -1553,4 +1639,3 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
InvalidBuffer, /* tuple has no buffer */
true);
}


@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.3 1997/07/28 00:53:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.4 1997/09/07 04:41:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -44,7 +44,8 @@
*/
TupleTableSlot *
ExecScan(Scan * node,
TupleTableSlot* (*accessMtd)()) /* function returning a tuple */
TupleTableSlot * (*accessMtd) ()) /* function returning a
* tuple */
{
CommonScanState *scanstate;
EState *estate;
@ -84,17 +85,20 @@ ExecScan(Scan *node,
econtext->ecxt_relation = scanstate->css_currentRelation;
econtext->ecxt_relid = node->scanrelid;
if (scanstate->cstate.cs_TupFromTlist) {
if (scanstate->cstate.cs_TupFromTlist)
{
projInfo = scanstate->cstate.cs_ProjInfo;
resultSlot = ExecProject(projInfo, &isDone);
if (!isDone)
return resultSlot;
}
/*
* get a tuple from the access method
* loop until we obtain a tuple which passes the qualification.
* get a tuple from the access method loop until we obtain a tuple
* which passes the qualification.
*/
for(;;) {
for (;;)
{
slot = (TupleTableSlot *) (*accessMtd) (node);
/* ----------------
@ -103,7 +107,8 @@ ExecScan(Scan *node,
* so we just return the empty slot.
* ----------------
*/
if (TupIsNull(slot)) return slot;
if (TupIsNull(slot))
return slot;
/* ----------------
* place the current tuple into the expr context
@ -118,8 +123,10 @@ ExecScan(Scan *node,
* ----------------
*/
/* add a check for non-nil qual here to avoid a
function call to ExecQual() when the qual is nil */
/*
* add a check for non-nil qual here to avoid a function call to
* ExecQual() when the qual is nil
*/
if (!qual || ExecQual(qual, econtext) == true)
break;
}
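/*
 * Illustrative sketch -- not part of the pgindent commit above.  The
 * ExecScan loop reduces to: fetch a row from the access method, stop at
 * end of scan, and keep fetching until a row satisfies the (possibly nil)
 * qualification.  ToyRow and the callbacks are stand-ins, not executor API.
 */
#include <stddef.h>

typedef struct ToyRow ToyRow;
typedef ToyRow *(*toy_fetch_fn) (void *scan_state);	/* NULL = end of scan */
typedef int (*toy_qual_fn) (const ToyRow *row);		/* nonzero = passes */

ToyRow *
toy_scan_next(void *scan_state, toy_fetch_fn fetch, toy_qual_fn qual)
{
	for (;;)
	{
		ToyRow	   *row = fetch(scan_state);

		if (row == NULL)
			return NULL;		/* end of scan: caller returns an empty slot */
		if (qual == NULL || qual(row))
			return row;			/* nil qual or qual passed */
	}
}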
@ -136,4 +143,3 @@ ExecScan(Scan *node,
return resultSlot;
}


@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.6 1997/08/19 21:31:05 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.7 1997/09/07 04:41:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -150,7 +150,8 @@ static TupleTableSlot *NodeGetResultTupleSlot(Plan *node);
* --------------------------------
*/
TupleTable /* return: address of table */
ExecCreateTupleTable(int initialSize) /* initial number of slots in table */
ExecCreateTupleTable(int initialSize) /* initial number of slots
* in table */
{
TupleTable newtable; /* newly allocated table */
TupleTableSlot *array; /* newly allocated slot array */
@ -196,7 +197,8 @@ ExecCreateTupleTable(int initialSize) /* initial number of slots in table */
*/
void
ExecDestroyTupleTable(TupleTable table, /* tuple table */
bool shouldFree) /* true if we should free slot contents */
bool shouldFree) /* true if we should free slot
* contents */
{
int next; /* next available slot */
TupleTableSlot *array; /* start of table array */
@ -225,16 +227,19 @@ ExecDestroyTupleTable(TupleTable table, /* tuple table */
* ----------------
*/
if (shouldFree)
for (i = 0; i < next; i++) {
for (i = 0; i < next; i++)
{
TupleTableSlot slot;
HeapTuple tuple;
slot = array[i];
tuple = slot.val;
if (tuple != NULL) {
if (tuple != NULL)
{
slot.val = (HeapTuple) NULL;
if (slot.ttc_shouldFree) {
if (slot.ttc_shouldFree)
{
/* ----------------
* since a tuple may contain a pointer to
* lock information allocated along with the
@ -271,7 +276,8 @@ ExecDestroyTupleTable(TupleTable table, /* tuple table */
* slots (some just pass tuples around).
* --------------------------------
*/
TupleTableSlot* /* return: the slot allocated in the tuple table */
TupleTableSlot * /* return: the slot allocated in the tuple
* table */
ExecAllocTableSlot(TupleTable table)
{
int slotnum; /* new slot number */
@ -298,13 +304,14 @@ ExecAllocTableSlot(TupleTable table)
* happen now. Give a WARN if it does. -mer 4 Aug 1992
* ----------------
*/
if (table->next >= table->size) {
if (table->next >= table->size)
{
/*
* int newsize = NewTableSize(table->size);
*
* pfree(table->array);
* table->array = (Pointer) palloc(newsize * TableSlotSize);
* bzero(table->array, newsize * TableSlotSize);
* pfree(table->array); table->array = (Pointer) palloc(newsize *
* TableSlotSize); bzero(table->array, newsize * TableSlotSize);
* table->size = newsize;
*/
elog(NOTICE, "Plan requires more slots than are available");
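/*
 * Illustrative sketch -- not part of the pgindent commit above.  The
 * commented-out block outlines growing the slot array when it fills up;
 * a common way to do that is to double the capacity, as below.  The toy
 * table and malloc/realloc are stand-ins for the real TupleTable and its
 * palloc-based management.
 */
#include <stdlib.h>
#include <string.h>

typedef struct
{
	void	  **slots;			/* stand-in for the slot array */
	int			size;			/* allocated slots */
	int			next;			/* first free slot */
} ToySlotTable;

int
toy_alloc_slot(ToySlotTable *table)
{
	if (table->next >= table->size)
	{
		int			newsize = table->size ? table->size * 2 : 8;
		void	  **newarray = realloc(table->slots,
									   newsize * sizeof(void *));

		if (newarray == NULL)
			return -1;			/* the executor would elog() instead */
		memset(newarray + table->size, 0,
			   (newsize - table->size) * sizeof(void *));
		table->slots = newarray;
		table->size = newsize;
	}
	return table->next++;		/* index of the newly allocated slot */
}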
@ -394,7 +401,8 @@ ExecClearTuple(TupleTableSlot* slot) /* slot in which to store tuple */
* free the old contents of the specified slot if necessary.
* ----------------
*/
if (slot->ttc_shouldFree && oldtuple != NULL) {
if (slot->ttc_shouldFree && oldtuple != NULL)
{
/* ----------------
* since a tuple may contain a pointer to
* lock information allocated along with the
@ -437,6 +445,7 @@ ExecSlotPolicy(TupleTableSlot* slot) /* slot to inspect */
{
return slot->ttc_shouldFree;
}
#endif
/* --------------------------------
@ -450,9 +459,11 @@ ExecSlotPolicy(TupleTableSlot* slot) /* slot to inspect */
*/
bool /* return: old slot policy */
ExecSetSlotPolicy(TupleTableSlot * slot, /* slot to change */
bool shouldFree) /* true if we call pfree() when we gc. */
bool shouldFree) /* true if we call pfree() when we
* gc. */
{
bool old_shouldFree = slot->ttc_shouldFree;
slot->ttc_shouldFree = shouldFree;
return old_shouldFree;
@ -511,11 +522,13 @@ ExecSetNewSlotDescriptor(TupleTableSlot *slot, /* slot to change */
TupleDesc tupdesc) /* tuple descriptor */
{
TupleDesc old_tupdesc = slot->ttc_tupleDescriptor;
slot->ttc_tupleDescriptor = tupdesc;
slot->ttc_descIsNew = true;
return old_tupdesc;
}
#endif
/* --------------------------------
@ -545,10 +558,12 @@ ExecSetSlotBuffer(TupleTableSlot *slot, /* slot to change */
Buffer b) /* tuple descriptor */
{
Buffer oldb = slot->ttc_buffer;
slot->ttc_buffer = b;
return oldb;
}
#endif
/* --------------------------------
@ -564,6 +579,7 @@ ExecIncrSlotBufferRefcnt(TupleTableSlot *slot) /* slot to bump refcnt */
{
/* Buffer b = SlotBuffer((TupleTableSlot*) slot); */
Buffer b = slot->ttc_buffer;
if (BufferIsValid(b))
IncrBufferRefCount(b);
}
@ -619,6 +635,7 @@ ExecSlotDescriptorIsNew(TupleTableSlot *slot) /* slot to inspect */
return isNew; */
return slot->ttc_descIsNew;
}
#endif
/* ----------------------------------------------------------------
@ -705,6 +722,7 @@ ExecInitHashTupleSlot(EState *estate, HashJoinState *hashstate)
INIT_SLOT_ALLOC;
hashstate->hj_HashTupleSlot = slot;
}
#endif
static TupleTableSlot *
@ -712,11 +730,13 @@ NodeGetResultTupleSlot(Plan *node)
{
TupleTableSlot *slot;
switch(nodeTag(node)) {
switch (nodeTag(node))
{
case T_Result:
{
ResultState *resstate = ((Result *) node)->resstate;
slot = resstate->cstate.cs_ResultTupleSlot;
}
break;
@ -724,6 +744,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_SeqScan:
{
CommonScanState *scanstate = ((SeqScan *) node)->scanstate;
slot = scanstate->cstate.cs_ResultTupleSlot;
}
break;
@ -731,6 +752,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_NestLoop:
{
NestLoopState *nlstate = ((NestLoop *) node)->nlstate;
slot = nlstate->jstate.cs_ResultTupleSlot;
}
break;
@ -755,6 +777,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_IndexScan:
{
CommonScanState *scanstate = ((IndexScan *) node)->scan.scanstate;
slot = scanstate->cstate.cs_ResultTupleSlot;
}
break;
@ -762,6 +785,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_Material:
{
MaterialState *matstate = ((Material *) node)->matstate;
slot = matstate->csstate.css_ScanTupleSlot;
}
break;
@ -769,6 +793,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_Sort:
{
SortState *sortstate = ((Sort *) node)->sortstate;
slot = sortstate->csstate.css_ScanTupleSlot;
}
break;
@ -776,6 +801,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_Agg:
{
AggState *aggstate = ((Agg *) node)->aggstate;
slot = aggstate->csstate.cstate.cs_ResultTupleSlot;
}
break;
@ -783,6 +809,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_Group:
{
GroupState *grpstate = ((Group *) node)->grpstate;
slot = grpstate->csstate.cstate.cs_ResultTupleSlot;
}
break;
@ -790,6 +817,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_Hash:
{
HashState *hashstate = ((Hash *) node)->hashstate;
slot = hashstate->cstate.cs_ResultTupleSlot;
}
break;
@ -797,6 +825,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_Unique:
{
UniqueState *uniquestate = ((Unique *) node)->uniquestate;
slot = uniquestate->cs_ResultTupleSlot;
}
break;
@ -804,6 +833,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_MergeJoin:
{
MergeJoinState *mergestate = ((MergeJoin *) node)->mergestate;
slot = mergestate->jstate.cs_ResultTupleSlot;
}
break;
@ -811,6 +841,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_HashJoin:
{
HashJoinState *hashjoinstate = ((HashJoin *) node)->hashjoinstate;
slot = hashjoinstate->jstate.cs_ResultTupleSlot;
}
break;
@ -818,6 +849,7 @@ NodeGetResultTupleSlot(Plan *node)
case T_Tee:
{
TeeState *teestate = ((Tee *) node)->teestate;
slot = teestate->cstate.cs_ResultTupleSlot;
}
break;
@ -943,9 +975,12 @@ ExecTypeFromTL(List *targetList)
* ----------------
*/
tlcdr = targetList;
while (tlcdr != NIL) {
while (tlcdr != NIL)
{
TargetEntry *tle = lfirst(tlcdr);
if (tle->resdom != NULL) {
if (tle->resdom != NULL)
{
resdom = tle->resdom;
restype = resdom->restype;
@ -968,10 +1003,12 @@ ExecTypeFromTL(List *targetList)
get_typalign(restype));
*/
}
else {
else
{
Resdom *fjRes;
List *fjTlistP;
List *fjList = lfirst(tlcdr);
#ifdef SETS_FIXED
TargetEntry *tle;
Fjoin *fjNode = ((TargetEntry *) lfirst(fjList))->fjoin;
@ -998,7 +1035,8 @@ ExecTypeFromTL(List *targetList)
get_typalign(restype));
*/
foreach(fjTlistP, lnext(fjList)) {
foreach(fjTlistP, lnext(fjList))
{
TargetEntry *fjTle = lfirst(fjTlistP);
fjRes = fjTle->resdom;
@ -1028,5 +1066,3 @@ ExecTypeFromTL(List *targetList)
return typeInfo;
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.14 1997/08/22 03:12:19 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.15 1997/09/07 04:41:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -58,7 +58,8 @@
#include "catalog/pg_type.h"
#include "parser/parsetree.h"
static void ExecGetIndexKeyInfo(IndexTupleForm indexTuple, int *numAttsOutP,
static void
ExecGetIndexKeyInfo(IndexTupleForm indexTuple, int *numAttsOutP,
AttrNumber ** attsOutP, FuncIndexInfoPtr fInfoP);
/* ----------------------------------------------------------------
@ -72,9 +73,9 @@ int NTupleReplaced;
int NTupleAppended;
int NTupleDeleted;
int NIndexTupleInserted;
extern int NIndexTupleProcessed; /* have to be defined in the access
method level so that the cinterface.a
will link ok. */
extern int NIndexTupleProcessed; /* have to be defined in the
* access method level so that the
* cinterface.a will link ok. */
/* ----------------------------------------------------------------
* statistic functions
@ -96,6 +97,7 @@ ResetTupleCount(void)
NTupleReplaced = 0;
NIndexTupleProcessed = 0;
}
#endif
/* ----------------------------------------------------------------
@ -109,7 +111,8 @@ DisplayTupleCount(FILE *statfp)
if (NTupleProcessed > 0)
fprintf(statfp, "!\t%d tuple%s processed, ", NTupleProcessed,
(NTupleProcessed == 1) ? "" : "s");
else {
else
{
fprintf(statfp, "!\tno tuples processed.\n");
return;
}
@ -133,6 +136,7 @@ DisplayTupleCount(FILE *statfp)
(NTupleReplaced == 1) ? "" : "s");
fprintf(statfp, "\n");
}
#endif
/* ----------------------------------------------------------------
@ -250,18 +254,23 @@ ExecAssignResultTypeFromTL(Plan *node, CommonState *commonstate)
fjtl = NIL;
tl = targetList;
i = 0;
while (tl != NIL || fjtl != NIL) {
if (fjtl != NIL) {
while (tl != NIL || fjtl != NIL)
{
if (fjtl != NIL)
{
tle = lfirst(fjtl);
fjtl = lnext(fjtl);
}
else {
else
{
tle = lfirst(tl);
tl = lnext(tl);
}
#ifdef SETS_FIXED
if (!tl_is_resdom(tle)) {
if (!tl_is_resdom(tle))
{
Fjoin *fj = (Fjoin *) lfirst(tle);
/* it is a FJoin */
fjtl = lnext(tle);
tle = fj->fj_innerNode;
@ -269,7 +278,8 @@ ExecAssignResultTypeFromTL(Plan *node, CommonState *commonstate)
#endif
i++;
}
if (len > 0) {
if (len > 0)
{
ExecAssignResultType(commonstate,
origTupDesc);
}
@ -307,6 +317,7 @@ ExecFreeResultType(CommonState *commonstate)
/* ExecFreeTypeInfo(tupType); */
pfree(tupType);
}
#endif
/* ----------------
@ -383,6 +394,7 @@ TupleDesc
ExecGetScanType(CommonScanState * csstate)
{
TupleTableSlot *slot = csstate->css_ScanTupleSlot;
return slot->ttc_tupleDescriptor;
}
@ -403,6 +415,7 @@ ExecFreeScanType(CommonScanState *csstate)
/* ExecFreeTypeInfo(tupType); */
pfree(tupType);
}
#endif
/* ----------------
@ -575,6 +588,7 @@ QueryDescGetTypeInfo(QueryDesc *queryDesc)
attinfo->attrs = tupleType->attrs;
return attinfo;
}
#endif
/* ----------------------------------------------------------------
@ -605,7 +619,8 @@ ExecGetIndexKeyInfo(IndexTupleForm indexTuple,
* check parameters
* ----------------
*/
if (numAttsOutP == NULL && attsOutP == NULL) {
if (numAttsOutP == NULL && attsOutP == NULL)
{
elog(DEBUG, "ExecGetIndexKeyInfo: %s",
"invalid parameters: numAttsOutP and attsOutP must be non-NULL");
}
@ -633,14 +648,16 @@ ExecGetIndexKeyInfo(IndexTupleForm indexTuple,
* single return value).
* ----------------
*/
if (FIgetProcOid(fInfoP) != InvalidOid) {
if (FIgetProcOid(fInfoP) != InvalidOid)
{
FIsetnArgs(fInfoP, numKeys);
(*numAttsOutP) = 1;
}
else
(*numAttsOutP) = numKeys;
if (numKeys < 1) {
if (numKeys < 1)
{
elog(DEBUG, "ExecGetIndexKeyInfo: %s",
"all index key attribute numbers are zero!");
(*attsOutP) = NULL;
@ -752,7 +769,8 @@ ExecOpenIndices(Oid resultRelationOid,
while (tuple = heap_getnext(indexSd, /* scan desc */
false, /* scan backward flag */
NULL), /* return: buffer */
HeapTupleIsValid(tuple)) {
HeapTupleIsValid(tuple))
{
/* ----------------
* For each index relation we find, extract the information
@ -783,11 +801,14 @@ ExecOpenIndices(Oid resultRelationOid,
* next get the index predicate from the tuple
* ----------------
*/
if (VARSIZE(&indexStruct->indpred) != 0) {
if (VARSIZE(&indexStruct->indpred) != 0)
{
predString = fmgr(F_TEXTOUT, &indexStruct->indpred);
predicate = (PredInfo *) stringToNode(predString);
pfree(predString);
} else {
}
else
{
predicate = NULL;
}
@ -816,7 +837,8 @@ ExecOpenIndices(Oid resultRelationOid,
* ----------------
*/
len = length(oidList);
if (len > 0) {
if (len > 0)
{
/* ----------------
* allocate space for relation descs
* ----------------
@ -833,8 +855,10 @@ ExecOpenIndices(Oid resultRelationOid,
indexInfoArray = (IndexInfo **)
palloc(len * sizeof(IndexInfo *));
for (i=0; i<len; i++) {
for (i = 0; i < len; i++)
{
IndexInfo *ii = makeNode(IndexInfo);
ii->ii_NumKeyAttributes = 0;
ii->ii_KeyAttributeNumbers = (AttrNumber *) NULL;
ii->ii_FuncIndexInfo = (FuncIndexInfoPtr) NULL;
@ -849,7 +873,8 @@ ExecOpenIndices(Oid resultRelationOid,
* ----------------
*/
i = 0;
foreach (indexoid, oidList) {
foreach(indexoid, oidList)
{
Relation indexDesc;
indexOid = lfirsti(indexoid);
@ -872,25 +897,30 @@ ExecOpenIndices(Oid resultRelationOid,
* ----------------
*/
i = 0;
foreach (numkeys, nkeyList) {
foreach(numkeys, nkeyList)
{
numKeyAtts = lfirsti(numkeys);
indexInfoArray[i++]->ii_NumKeyAttributes = numKeyAtts;
}
i = 0;
foreach (indexkeys, keyList) {
foreach(indexkeys, keyList)
{
indexKeyAtts = (AttrNumber *) lfirst(indexkeys);
indexInfoArray[i++]->ii_KeyAttributeNumbers = indexKeyAtts;
}
i = 0;
foreach (indexfuncs, fiList) {
foreach(indexfuncs, fiList)
{
FuncIndexInfoPtr fiP = (FuncIndexInfoPtr) lfirst(indexfuncs);
indexInfoArray[i++]->ii_FuncIndexInfo = fiP;
}
i = 0;
foreach (indexpreds, predList) {
foreach(indexpreds, predList)
{
indexInfoArray[i++]->ii_Predicate = lfirst(indexpreds);
}
/* ----------------
@ -934,6 +964,7 @@ ExecCloseIndices(RelationInfo *resultRelationInfo)
for (i = 0; i < numIndices; i++)
if (relationDescs[i] != NULL)
index_close(relationDescs[i]);
/*
* XXX should free indexInfo array here too.
*/
@ -999,7 +1030,8 @@ ExecFormIndexTuple(HeapTuple heapTuple,
keyAttributeNumbers, /* array of att nums to extract */
heapTuple, /* tuple from base relation */
heapDescriptor, /* heap tuple's descriptor */
InvalidBuffer, /* buffer associated with heap tuple */
InvalidBuffer, /* buffer associated with heap
* tuple */
datum, /* return: array of attributes */
nulls, /* return: array of char's */
fInfoP); /* functional index information */
@ -1022,6 +1054,7 @@ ExecFormIndexTuple(HeapTuple heapTuple,
return indexTuple;
}
#endif
/* ----------------------------------------------------------------
@ -1078,13 +1111,17 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
* ----------------
*/
econtext = NULL;
for (i=0; i<numIndices; i++) {
if (relationDescs[i] == NULL) continue;
for (i = 0; i < numIndices; i++)
{
if (relationDescs[i] == NULL)
continue;
indexInfo = indexInfoArray[i];
predicate = indexInfo->ii_Predicate;
if (predicate != NULL) {
if (econtext == NULL) {
if (predicate != NULL)
{
if (econtext == NULL)
{
econtext = makeNode(ExprContext);
}
econtext->ecxt_scantuple = slot;
@ -1107,10 +1144,12 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
heapDescriptor = (TupleDesc) RelationGetTupleDescriptor(heapRelation);
FormIndexDatum(numberOfAttributes, /* num attributes */
keyAttributeNumbers, /* array of att nums to extract */
keyAttributeNumbers, /* array of att nums to
* extract */
heapTuple, /* tuple from base relation */
heapDescriptor, /* heap tuple's descriptor */
InvalidBuffer, /* buffer associated with heap tuple */
InvalidBuffer, /* buffer associated with heap
* tuple */
datum, /* return: array of attributes */
nulls, /* return: array of char's */
fInfoP); /* functional index information */
@ -1132,9 +1171,11 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
* free index tuple after insertion
* ----------------
*/
if (result) pfree(result);
if (result)
pfree(result);
}
if (econtext != NULL) pfree(econtext);
if (econtext != NULL)
pfree(econtext);
}
/* ----------------------------------------------------------------
@ -1155,13 +1196,16 @@ setVarAttrLenForCreateTable(TupleDesc tupType, List *targetList,
tl = targetList;
for (varno = 0; varno < tupType->natts; varno++) {
for (varno = 0; varno < tupType->natts; varno++)
{
tle = lfirst(tl);
if (tupType->attrs[varno]->atttypid == BPCHAROID ||
tupType->attrs[varno]->atttypid == VARCHAROID) {
tupType->attrs[varno]->atttypid == VARCHAROID)
{
expr = tle->expr;
if (expr && IsA(expr,Var)) {
if (expr && IsA(expr, Var))
{
Var *var;
RangeTblEntry *rtentry;
Relation rd;
@ -1196,11 +1240,13 @@ resetVarAttrLenForCreateTable(TupleDesc tupType)
{
int varno;
for (varno = 0; varno < tupType->natts; varno++) {
for (varno = 0; varno < tupType->natts; varno++)
{
if (tupType->attrs[varno]->atttypid == BPCHAROID ||
tupType->attrs[varno]->atttypid == VARCHAROID)
/* set length to original -1 */
tupType->attrs[varno]->attlen = -1;
}
}
#endif


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.7 1997/08/29 09:02:50 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.8 1997/09/07 04:41:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -41,9 +41,13 @@
#undef new
typedef enum {F_EXEC_START, F_EXEC_RUN, F_EXEC_DONE} ExecStatus;
typedef enum
{
F_EXEC_START, F_EXEC_RUN, F_EXEC_DONE
} ExecStatus;
typedef struct local_es {
typedef struct local_es
{
QueryDesc *qd;
EState *estate;
struct local_es *next;
@ -54,13 +58,16 @@ typedef struct local_es {
/* non-export function prototypes */
static TupleDesc postquel_start(execution_state * es);
static execution_state *init_execution_state(FunctionCachePtr fcache,
static execution_state *
init_execution_state(FunctionCachePtr fcache,
char *args[]);
static TupleTableSlot *postquel_getnext(execution_state * es);
static void postquel_end(execution_state * es);
static void postquel_sub_params(execution_state *es, int nargs,
static void
postquel_sub_params(execution_state * es, int nargs,
char *args[], bool * nullV);
static Datum postquel_execute(execution_state *es, FunctionCachePtr fcache,
static Datum
postquel_execute(execution_state * es, FunctionCachePtr fcache,
List * fTlist, char **args, bool * isNull);
@ -70,7 +77,8 @@ ProjectAttribute(TupleDesc TD,
HeapTuple tup,
bool * isnullP)
{
Datum val,valueP;
Datum val,
valueP;
Var *attrVar = (Var *) tlist->expr;
AttrNumber attrno = attrVar->varattno;
@ -112,7 +120,8 @@ init_execution_state(FunctionCachePtr fcache,
planTree_list = (List *)
pg_plan(fcache->src, fcache->argOidVect, nargs, &queryTree_list, None);
for (i=0; i < queryTree_list->len; i++) {
for (i = 0; i < queryTree_list->len; i++)
{
EState *estate;
Query *queryTree = (Query *) (queryTree_list->qtrees[i]);
Plan *planTree = lfirst(planTree_list);
@ -129,7 +138,8 @@ init_execution_state(FunctionCachePtr fcache,
None);
estate = CreateExecutorState();
if (nargs > 0) {
if (nargs > 0)
{
int i;
ParamListInfo paramLI;
@ -140,7 +150,8 @@ init_execution_state(FunctionCachePtr fcache,
estate->es_param_list_info = paramLI;
for (i=0; i<nargs; paramLI++, i++) {
for (i = 0; i < nargs; paramLI++, i++)
{
paramLI->kind = PARAM_NUM;
paramLI->id = i + 1;
paramLI->isnull = false;
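/*
 * Illustrative sketch -- not part of the pgindent commit above.  The code
 * here creates one numbered parameter per SQL-function argument with
 * id = position + 1; postquel_sub_params later walks the same list and
 * fills in the actual argument values.  Toy stand-ins for both steps:
 */
#include <stdlib.h>

typedef struct
{
	int			id;				/* 1-based position, 0 terminates the list */
	long		value;
	int			isnull;
} ToyNumParam;

ToyNumParam *
toy_build_params(int nargs)
{
	ToyNumParam *params = calloc(nargs + 1, sizeof(ToyNumParam));
	int			i;

	if (params == NULL)
		return NULL;
	for (i = 0; i < nargs; i++)
		params[i].id = i + 1;	/* values are bound just before execution */
	return params;				/* params[nargs].id == 0 marks the end */
}

void
toy_bind_params(ToyNumParam *params, int nargs, const long *args,
				const int *argnulls)
{
	for (; params->id != 0; params++)
	{
		int			pos = params->id - 1;

		if (pos < nargs)
		{
			params->value = args[pos];
			params->isnull = argnulls[pos];
		}
	}
}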
@ -164,10 +175,13 @@ static TupleDesc
postquel_start(execution_state * es)
{
#ifdef FUNC_UTIL_PATCH
/*
* Do nothing for utility commands. (create, destroy...) DZ - 30-8-1996
* Do nothing for utility commands. (create, destroy...) DZ -
* 30-8-1996
*/
if (es->qd->operation == CMD_UTILITY) {
if (es->qd->operation == CMD_UTILITY)
{
return (TupleDesc) NULL;
}
#endif
@ -180,12 +194,16 @@ postquel_getnext(execution_state *es)
int feature;
#ifdef FUNC_UTIL_PATCH
if (es->qd->operation == CMD_UTILITY) {
if (es->qd->operation == CMD_UTILITY)
{
/*
* Process an utility command. (create, destroy...) DZ - 30-8-1996
* Process an utility command. (create, destroy...) DZ -
* 30-8-1996
*/
ProcessUtility(es->qd->parsetree->utilityStmt, es->qd->dest);
if (!LAST_POSTQUEL_COMMAND(es)) CommandCounterIncrement();
if (!LAST_POSTQUEL_COMMAND(es))
CommandCounterIncrement();
return (TupleTableSlot *) NULL;
}
#endif
@ -199,10 +217,13 @@ static void
postquel_end(execution_state * es)
{
#ifdef FUNC_UTIL_PATCH
/*
* Do nothing for utility commands. (create, destroy...) DZ - 30-8-1996
* Do nothing for utility commands. (create, destroy...) DZ -
* 30-8-1996
*/
if (es->qd->operation == CMD_UTILITY) {
if (es->qd->operation == CMD_UTILITY)
{
return;
}
#endif
@ -221,8 +242,10 @@ postquel_sub_params(execution_state *es,
estate = es->estate;
paramLI = estate->es_param_list_info;
while (paramLI->kind != PARAM_INVALID) {
if (paramLI->kind == PARAM_NUM) {
while (paramLI->kind != PARAM_INVALID)
{
if (paramLI->kind == PARAM_NUM)
{
Assert(paramLI->id <= nargs);
paramLI->value = (Datum) args[(paramLI->id - 1)];
paramLI->isnull = nullV[(paramLI->id - 1)];
@ -249,15 +272,18 @@ copy_function_result(FunctionCachePtr fcache,
return resultSlot;
resultTd = resultSlot->ttc_tupleDescriptor;
/*
* When the funcSlot is NULL we have to initialize the funcSlot's
* tuple descriptor.
*/
if (TupIsNull(funcSlot)) {
if (TupIsNull(funcSlot))
{
int i = 0;
TupleDesc funcTd = funcSlot->ttc_tupleDescriptor;
while (i < oldTuple->t_natts) {
while (i < oldTuple->t_natts)
{
funcTd->attrs[i] =
(AttributeTupleForm) palloc(ATTRIBUTE_TUPLE_SIZE);
memmove(funcTd->attrs[i],
@ -283,10 +309,12 @@ postquel_execute(execution_state *es,
Datum value;
#ifdef INDEXSCAN_PATCH
/*
* It's more right place to do it (before postquel_start->ExecutorStart).
* Now ExecutorStart->ExecInitIndexScan->ExecEvalParam works ok.
* (But note: I HOPE we can do it here). - vadim 01/22/97
* It's more right place to do it (before
* postquel_start->ExecutorStart). Now
* ExecutorStart->ExecInitIndexScan->ExecEvalParam works ok. (But
* note: I HOPE we can do it here). - vadim 01/22/97
*/
if (fcache->nargs > 0)
postquel_sub_params(es, fcache->nargs, args, fcache->nullVect);
@ -304,30 +332,34 @@ postquel_execute(execution_state *es,
slot = postquel_getnext(es);
if (TupIsNull(slot)) {
if (TupIsNull(slot))
{
postquel_end(es);
es->status = F_EXEC_DONE;
*isNull = true;
/*
* If this isn't the last command for the function
* we have to increment the command
* counter so that subsequent commands can see changes made
* by previous ones.
* If this isn't the last command for the function we have to
* increment the command counter so that subsequent commands can
* see changes made by previous ones.
*/
if (!LAST_POSTQUEL_COMMAND(es)) CommandCounterIncrement();
if (!LAST_POSTQUEL_COMMAND(es))
CommandCounterIncrement();
return (Datum) NULL;
}
if (LAST_POSTQUEL_COMMAND(es)) {
if (LAST_POSTQUEL_COMMAND(es))
{
TupleTableSlot *resSlot;
/*
* Copy the result. copy_function_result is smart enough
* to do nothing when no action is called for. This helps
* reduce the logic and code redundancy here.
* Copy the result. copy_function_result is smart enough to do
* nothing when no action is called for. This helps reduce the
* logic and code redundancy here.
*/
resSlot = copy_function_result(fcache, slot);
if (fTlist != NIL) {
if (fTlist != NIL)
{
HeapTuple tup;
TargetEntry *tle = lfirst(fTlist);
@ -336,27 +368,30 @@ postquel_execute(execution_state *es,
tle,
tup,
isNull);
}else {
}
else
{
value = (Datum) resSlot;
*isNull = false;
}
/*
* If this is a single valued function we have to end the
* function execution now.
* If this is a single valued function we have to end the function
* execution now.
*/
if (fcache->oneResult) {
if (fcache->oneResult)
{
postquel_end(es);
es->status = F_EXEC_DONE;
}
return value;
}
/*
* If this isn't the last command for the function, we don't
* return any results, but we have to increment the command
* counter so that subsequent commands can see changes made
* by previous ones.
* If this isn't the last command for the function, we don't return
* any results, but we have to increment the command counter so that
* subsequent commands can see changes made by previous ones.
*/
CommandCounterIncrement();
return (Datum) NULL;
@ -371,10 +406,9 @@ postquel_function(Func *funcNode, char **args, bool *isNull, bool *isDone)
CommandId savedId;
/*
* Before we start do anything we must save CurrentScanCommandId
* to restore it before return to upper Executor. Also, we have to
* set CurrentScanCommandId equal to CurrentCommandId.
* - vadim 08/29/97
* Before we start do anything we must save CurrentScanCommandId to
* restore it before return to upper Executor. Also, we have to set
* CurrentScanCommandId equal to CurrentCommandId. - vadim 08/29/97
*/
savedId = GetScanCommandId();
SetScanCommandId(GetCurrentCommandId());
@ -390,6 +424,7 @@ postquel_function(Func *funcNode, char **args, bool *isNull, bool *isDone)
es = es->next;
Assert(es);
/*
* Execute each command in the function one after another until we're
* executing the final command and get a result or we run out of
@ -412,6 +447,7 @@ postquel_function(Func *funcNode, char **args, bool *isNull, bool *isDone)
*/
if (es == (execution_state *) NULL)
{
/*
* Reset the execution states to start over again
*/
@ -421,6 +457,7 @@ postquel_function(Func *funcNode, char **args, bool *isNull, bool *isDone)
es->status = F_EXEC_START;
es = es->next;
}
/*
* Let caller know we're finished.
*/
@ -428,10 +465,10 @@ postquel_function(Func *funcNode, char **args, bool *isNull, bool *isDone)
SetScanCommandId(savedId);
return (fcache->oneResult) ? result : (Datum) NULL;
}
/*
* If we got a result from a command within the function it has
* to be the final command. All others shouldn't be returning
* anything.
* If we got a result from a command within the function it has to be
* the final command. All others shouldn't be returning anything.
*/
Assert(LAST_POSTQUEL_COMMAND(es));
*isDone = false;


@ -34,7 +34,8 @@
* AggFuncInfo -
* keeps the transition functions information around
*/
typedef struct AggFuncInfo {
typedef struct AggFuncInfo
{
Oid xfn1_oid;
Oid xfn2_oid;
Oid finalfn_oid;
@ -93,8 +94,10 @@ ExecAgg(Agg *node)
EState *estate;
Aggreg **aggregates;
Plan *outerPlan;
int i, nagg;
Datum *value1, *value2;
int i,
nagg;
Datum *value1,
*value2;
int *noInitValue;
AggFuncInfo *aggFuncInfo;
long nTuplesAgged = 0;
@ -104,7 +107,9 @@ ExecAgg(Agg *node)
HeapTuple oneTuple;
char *nulls;
bool isDone;
bool isNull = FALSE, isNull1 = FALSE, isNull2 = FALSE;
bool isNull = FALSE,
isNull1 = FALSE,
isNull2 = FALSE;
/* ---------------------
* get state info from node
@ -136,14 +141,21 @@ ExecAgg(Agg *node)
projInfo = aggstate->csstate.cstate.cs_ProjInfo;
for(i = 0; i < nagg; i++) {
for (i = 0; i < nagg; i++)
{
Aggreg *agg;
char *aggname;
HeapTuple aggTuple;
Form_pg_aggregate aggp;
Oid xfn1_oid, xfn2_oid, finalfn_oid;
func_ptr xfn1_ptr, xfn2_ptr, finalfn_ptr;
int xfn1_nargs, xfn2_nargs, finalfn_nargs;
Oid xfn1_oid,
xfn2_oid,
finalfn_oid;
func_ptr xfn1_ptr,
xfn2_ptr,
finalfn_ptr;
int xfn1_nargs,
xfn2_nargs,
finalfn_nargs;
agg = aggregates[i];
@ -167,14 +179,16 @@ ExecAgg(Agg *node)
xfn2_oid = aggp->aggtransfn2;
finalfn_oid = aggp->aggfinalfn;
if (OidIsValid(finalfn_oid)) {
if (OidIsValid(finalfn_oid))
{
fmgr_info(finalfn_oid, &finalfn_ptr, &finalfn_nargs);
aggFuncInfo[i].finalfn_oid = finalfn_oid;
aggFuncInfo[i].finalfn = finalfn_ptr;
aggFuncInfo[i].finalfn_nargs = finalfn_nargs;
}
if (OidIsValid(xfn2_oid)) {
if (OidIsValid(xfn2_oid))
{
fmgr_info(xfn2_oid, &xfn2_ptr, &xfn2_nargs);
aggFuncInfo[i].xfn2_oid = xfn2_oid;
aggFuncInfo[i].xfn2 = xfn2_ptr;
@ -193,7 +207,8 @@ ExecAgg(Agg *node)
elog(WARN, "ExecAgg: agginitval2 is null");
}
if (OidIsValid(xfn1_oid)) {
if (OidIsValid(xfn1_oid))
{
fmgr_info(xfn1_oid, &xfn1_ptr, &xfn1_nargs);
aggFuncInfo[i].xfn1_oid = xfn1_oid;
aggFuncInfo[i].xfn1 = xfn1_ptr;
@ -211,7 +226,8 @@ ExecAgg(Agg *node)
* max{} and min{}.)
* ------------------------------------------
*/
if (isNull1) {
if (isNull1)
{
noInitValue[i] = 1;
nulls[i] = 1;
}
@ -222,20 +238,26 @@ ExecAgg(Agg *node)
* for each tuple from the the outer plan, apply all the aggregates
* ----------------
*/
for (;;) {
for (;;)
{
HeapTuple outerTuple = NULL;
TupleTableSlot *outerslot;
isNull = isNull1 = isNull2 = 0;
outerslot = ExecProcNode(outerPlan, (Plan *) node);
if (outerslot) outerTuple = outerslot->val;
if (!HeapTupleIsValid(outerTuple)) {
/* when the outerplan doesn't return a single tuple,
create a dummy heaptuple anyway
because we still need to return a valid aggregate value.
The value returned will be the initial values of the
transition functions */
if (nTuplesAgged == 0) {
if (outerslot)
outerTuple = outerslot->val;
if (!HeapTupleIsValid(outerTuple))
{
/*
* when the outerplan doesn't return a single tuple, create a
* dummy heaptuple anyway because we still need to return a
* valid aggregate value. The value returned will be the
* initial values of the transition functions
*/
if (nTuplesAgged == 0)
{
TupleDesc tupType;
Datum *tupValue;
char *null_array;
@ -253,7 +275,8 @@ ExecAgg(Agg *node)
break;
}
for(i = 0; i < nagg; i++) {
for (i = 0; i < nagg; i++)
{
AttrNumber attnum;
int2 attlen;
Datum newVal = (Datum) NULL;
@ -282,19 +305,23 @@ ExecAgg(Agg *node)
if (isNull)
continue; /* ignore this tuple for this agg */
if (aggfns->xfn1) {
if (noInitValue[i]) {
if (aggfns->xfn1)
{
if (noInitValue[i])
{
int byVal;
/*
* value1 and value2 has not been initialized. This
* is the first non-NULL value. We use it as the
* initial value.
* value1 and value2 has not been initialized. This is
* the first non-NULL value. We use it as the initial
* value.
*/
/*
* but we can't just use it straight, we have to make
* a copy of it since the tuple from which it came
* will be freed on the next iteration of the scan
*/
/* but we can't just use it straight, we have
to make a copy of it since the tuple from which
it came will be freed on the next iteration
of the scan */
if (tagnode != NULL)
{
FunctionCachePtr fcache_ptr;
@ -312,7 +339,8 @@ ExecAgg(Agg *node)
attlen = outerslot->ttc_tupleDescriptor->attrs[attnum - 1]->attlen;
byVal = outerslot->ttc_tupleDescriptor->attrs[attnum - 1]->attbyval;
}
if (attlen == -1) {
if (attlen == -1)
{
/* variable length */
attlen = VARSIZE((struct varlena *) newVal);
}
@ -324,7 +352,10 @@ ExecAgg(Agg *node)
/* value1[i] = newVal; */
noInitValue[i] = 0;
nulls[i] = 0;
} else {
}
else
{
/*
* apply the transition functions.
*/
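/*
 * Illustrative sketch -- not part of the pgindent commit above.  In this
 * old aggregate scheme an aggregate may have two transition functions:
 * xfn1 folds each non-null input into value1, and xfn2 is applied once per
 * input to value2 (typically a counter).  Toy versions for an average-style
 * aggregate over doubles; the struct is a stand-in, not pg_aggregate:
 */
typedef struct
{
	double		sum;			/* plays the role of value1 */
	long		count;			/* plays the role of value2 */
} ToyAvgState;

void
toy_avg_transition(ToyAvgState *state, double input, int isnull)
{
	if (isnull)
		return;					/* null inputs are skipped, as above */
	state->sum += input;		/* "xfn1(value1, input)" */
	state->count += 1;			/* "xfn2(value2)" takes no input value */
}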
@ -338,7 +369,8 @@ ExecAgg(Agg *node)
}
}
if (aggfns->xfn2) {
if (aggfns->xfn2)
{
Datum xfn2_val = value2[i];
value2[i] =
@ -350,10 +382,11 @@ ExecAgg(Agg *node)
}
/*
* keep this for the projection (we only need one of these -
* all the tuples we aggregate over share the same group column)
* keep this for the projection (we only need one of these - all
* the tuples we aggregate over share the same group column)
*/
if (!oneTuple) {
if (!oneTuple)
{
oneTuple = heap_copytuple(outerslot->val);
}
@ -364,37 +397,54 @@ ExecAgg(Agg *node)
* finalize the aggregate (if necessary), and get the resultant value
* --------------
*/
for(i = 0; i < nagg; i++) {
for (i = 0; i < nagg; i++)
{
char *args[2];
AggFuncInfo *aggfns = &aggFuncInfo[i];
if (noInitValue[i]) {
if (noInitValue[i])
{
/*
* No values found for this agg; return current state.
* This seems to fix behavior for avg() aggregate. -tgl 12/96
* No values found for this agg; return current state. This
* seems to fix behavior for avg() aggregate. -tgl 12/96
*/
} else if (aggfns->finalfn && nTuplesAgged > 0) {
if (aggfns->finalfn_nargs > 1) {
}
else if (aggfns->finalfn && nTuplesAgged > 0)
{
if (aggfns->finalfn_nargs > 1)
{
args[0] = (char *) value1[i];
args[1] = (char *) value2[i];
} else if (aggfns->xfn1) {
}
else if (aggfns->xfn1)
{
args[0] = (char *) value1[i];
} else if (aggfns->xfn2) {
}
else if (aggfns->xfn2)
{
args[0] = (char *) value2[i];
} else
}
else
elog(WARN, "ExecAgg: no valid transition functions??");
value1[i] =
(Datum) fmgr_c(aggfns->finalfn, aggfns->finalfn_oid,
aggfns->finalfn_nargs, (FmgrValues *) args,
&(nulls[i]));
} else if (aggfns->xfn1) {
}
else if (aggfns->xfn1)
{
/*
* value in the right place, ignore. (If you remove this
* case, fix the else part. -ay 2/95)
* value in the right place, ignore. (If you remove this case,
* fix the else part. -ay 2/95)
*/
} else if (aggfns->xfn2) {
}
else if (aggfns->xfn2)
{
value1[i] = value2[i];
} else
}
else
elog(WARN, "ExecAgg: no valid transition functions??");
}
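/*
 * Illustrative sketch -- not part of the pgindent commit above.  The
 * finalization step picks value1, value2, or both as arguments to finalfn;
 * for an average-style aggregate the final function divides the accumulated
 * sum by the count.  When no rows were aggregated the state is left alone,
 * mirroring the noInitValue case.  Hypothetical helper, plain parameters:
 */
int
toy_avg_final(double sum, long count, double *result)
{
	if (count == 0)
		return 0;				/* no rows aggregated: result stays null */
	*result = sum / (double) count;
	return 1;
}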
@ -402,10 +452,13 @@ ExecAgg(Agg *node)
* whether the aggregation is done depends on whether we are doing
* aggregation over groups or the entire table
*/
if (nodeTag(outerPlan)==T_Group) {
if (nodeTag(outerPlan) == T_Group)
{
/* aggregation over groups */
aggstate->agg_done = ((Group *) outerPlan)->grpstate->grp_done;
} else {
}
else
{
aggstate->agg_done = TRUE;
}
@ -461,6 +514,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
ExecAssignExprContext(estate, &aggstate->csstate.cstate);
#define AGG_NSLOTS 2
/*
* tuple table initialization
*/
@ -487,8 +541,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
ExecAssignScanTypeFromOuterPlan((Plan *) node, &aggstate->csstate);
/*
* Initialize tuple type for both result and scan.
* This node does no projection
* Initialize tuple type for both result and scan. This node does no
* projection
*/
ExecAssignResultTypeFromTL((Plan *) node, &aggstate->csstate.cstate);
ExecAssignProjectionInfo((Plan *) node, &aggstate->csstate.cstate);
@ -558,11 +612,12 @@ aggGetAttr(TupleTableSlot *slot,
attnum = ((Var *) agg->target)->varattno;
/*
* If the attribute number is invalid, then we are supposed to
* return the entire tuple, we give back a whole slot so that
* callers know what the tuple looks like.
* If the attribute number is invalid, then we are supposed to return
* the entire tuple, we give back a whole slot so that callers know
* what the tuple looks like.
*/
if (attnum == InvalidAttrNumber) {
if (attnum == InvalidAttrNumber)
{
TupleTableSlot *tempSlot;
TupleDesc td;
HeapTuple tup;


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.5 1997/08/19 21:31:07 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.6 1997/09/07 04:41:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -103,7 +103,8 @@ exec_append_initialize_next(Append *node)
nplans = unionstate->as_nplans;
rtentries = node->unionrtentries;
if (whichplan < 0) {
if (whichplan < 0)
{
/* ----------------
* if scanning in reverse, we start at
* the last scan in the list and then
@ -115,7 +116,9 @@ exec_append_initialize_next(Append *node)
unionstate->as_whichplan = 0;
return FALSE;
} else if (whichplan >= nplans) {
}
else if (whichplan >= nplans)
{
/* ----------------
* as above, end the scan if we go beyond
* the last scan in our list..
@ -124,7 +127,9 @@ exec_append_initialize_next(Append *node)
unionstate->as_whichplan = nplans - 1;
return FALSE;
} else {
}
else
{
/* ----------------
* initialize the scan
* (and update the range table appropriately)
@ -132,7 +137,8 @@ exec_append_initialize_next(Append *node)
* of the Append node??? - jolly )
* ----------------
*/
if (node->unionrelid > 0) {
if (node->unionrelid > 0)
{
rtentry = nth(whichplan, rtentries);
if (rtentry == NULL)
elog(DEBUG, "exec_append_initialize_next: rtentry is nil");
@ -141,12 +147,14 @@ exec_append_initialize_next(Append *node)
rt_store(unionrelid, rangeTable, rtentry);
if (unionstate->as_junkFilter_list) {
if (unionstate->as_junkFilter_list)
{
estate->es_junkFilter =
(JunkFilter *) nth(whichplan,
unionstate->as_junkFilter_list);
}
if (unionstate->as_result_relation_info_list) {
if (unionstate->as_result_relation_info_list)
{
estate->es_result_relation_info =
(RelationInfo *) nth(whichplan,
unionstate->as_result_relation_info_list);
@ -234,9 +242,9 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
ExecInitResultTupleSlot(estate, &unionstate->cstate);
/*
* If the inherits rtentry is the result relation, we have to make
* a result relation info list for all inheritors so we can update
* their indices and put the result tuples in the right place etc.
* If the inherits rtentry is the result relation, we have to make a
* result relation info list for all inheritors so we can update their
* indices and put the result tuples in the right place etc.
*
* e.g. replace p (age = p.age + 1) from p in person*
*/
@ -271,9 +279,11 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
*/
junkList = NIL;
for(i = 0; i < nplans ; i++ ) {
for (i = 0; i < nplans; i++)
{
JunkFilter *j;
List *targetList;
/* ----------------
* NOTE: we first modify range table in
* exec_append_initialize_next() and
@ -295,7 +305,8 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
* ---------------
*/
if ((es_rri != (RelationInfo *) NULL) &&
(node->unionrelid == es_rri->ri_RangeTableIndex)) {
(node->unionrelid == es_rri->ri_RangeTableIndex))
{
targetList = initNode->targetlist;
j = (JunkFilter *) ExecInitJunkFilter(targetList);
@ -336,7 +347,8 @@ ExecCountSlotsAppend(Append *node)
List *unionplans = node->unionplans;
int nSlots = 0;
foreach (plan,unionplans) {
foreach(plan, unionplans)
{
nSlots += ExecCountSlotsNode((Plan *) lfirst(plan));
}
return nSlots + APPEND_NSLOTS;
@ -390,7 +402,8 @@ ExecProcAppend(Append *node)
*/
result = ExecProcNode(subnode, (Plan *) node);
if (! TupIsNull(result)) {
if (!TupIsNull(result))
{
/* ----------------
* if the subplan gave us something then place a copy of
* whatever we get into our result slot and return it, else..
@ -399,7 +412,9 @@ ExecProcAppend(Append *node)
return ExecStoreTuple(result->val,
result_slot, result->ttc_buffer, false);
} else {
}
else
{
/* ----------------
* .. go on to the "next" subplan in the appropriate
* direction and try processing again (recursively)
@ -421,11 +436,13 @@ ExecProcAppend(Append *node)
* all of our subplans have been exhausted.
* ----------------
*/
if (exec_append_initialize_next(node)) {
if (exec_append_initialize_next(node))
{
ExecSetSlotDescriptorIsNew(result_slot, true);
return
ExecProcAppend(node);
} else
}
else
return ExecClearTuple(result_slot);
}
}
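/*
 * Illustrative sketch -- not part of the pgindent commit above.
 * ExecProcAppend pulls from the current subplan and, when it is exhausted,
 * steps to the next subplan and tries again (recursively there, iteratively
 * here).  ToySource and the callback are stand-ins, not executor API.
 */
#include <stddef.h>

typedef struct ToySource ToySource;
typedef void *(*toy_next_fn) (ToySource *src);	/* NULL = source exhausted */

struct ToySource
{
	toy_next_fn next;
	void	   *state;
};

void *
toy_append_next(ToySource *sources, int nsources, int *whichsource)
{
	while (*whichsource < nsources)
	{
		void	   *row = sources[*whichsource].next(&sources[*whichsource]);

		if (row != NULL)
			return row;
		(*whichsource)++;		/* current source done: advance and retry */
	}
	return NULL;				/* all sources exhausted */
}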
@ -462,8 +479,10 @@ ExecEndAppend(Append *node)
* shut down each of the subscans
* ----------------
*/
for(i = 0; i < nplans; i++) {
if (initialized[i]==TRUE) {
for (i = 0; i < nplans; i++)
{
if (initialized[i] == TRUE)
{
ExecEndNode((Plan *) nth(i, unionplans), (Plan *) node);
}
}
@ -473,7 +492,8 @@ ExecEndAppend(Append *node)
* ----------------
*/
resultRelationInfoList = unionstate->as_result_relation_info_list;
while (resultRelationInfoList != NIL) {
while (resultRelationInfoList != NIL)
{
Relation resultRelationDesc;
resultRelationInfo = (RelationInfo *) lfirst(resultRelationInfoList);
@ -485,6 +505,8 @@ ExecEndAppend(Append *node)
if (unionstate->as_result_relation_info_list)
pfree(unionstate->as_result_relation_info_list);
/* XXX should free unionstate->as_rtentries and unionstate->as_junkfilter_list here */
/*
* XXX should free unionstate->as_rtentries and
* unionstate->as_junkfilter_list here
*/
}


@ -13,7 +13,7 @@
* columns. (ie. tuples from the same group are consecutive)
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.5 1997/01/10 20:17:35 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.6 1997/09/07 04:41:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -30,7 +30,8 @@
static TupleTableSlot *ExecGroupEveryTuple(Group * node);
static TupleTableSlot *ExecGroupOneTuple(Group * node);
static bool sameGroup(TupleTableSlot *oldslot, TupleTableSlot *newslot,
static bool
sameGroup(TupleTableSlot * oldslot, TupleTableSlot * newslot,
int numCols, AttrNumber * grpColIdx, TupleDesc tupdesc);
/* ---------------------------------------
@ -70,7 +71,8 @@ ExecGroupEveryTuple(Group *node)
ExprContext *econtext;
HeapTuple outerTuple = NULL;
TupleTableSlot *outerslot, *lastslot;
TupleTableSlot *outerslot,
*lastslot;
ProjectionInfo *projInfo;
TupleTableSlot *resultSlot;
@ -88,7 +90,9 @@ ExecGroupEveryTuple(Group *node)
econtext = grpstate->csstate.cstate.cs_ExprContext;
if (grpstate->grp_useLastTuple) {
if (grpstate->grp_useLastTuple)
{
/*
* we haven't returned last tuple yet because it is not of the
* same group
@ -99,11 +103,14 @@ ExecGroupEveryTuple(Group *node)
grpstate->csstate.css_ScanTupleSlot,
grpstate->grp_lastSlot->ttc_buffer,
false);
} else {
}
else
{
outerslot = ExecProcNode(outerPlan(node), (Plan *) node);
if (outerslot)
outerTuple = outerslot->val;
if (!HeapTupleIsValid(outerTuple)) {
if (!HeapTupleIsValid(outerTuple))
{
grpstate->grp_done = TRUE;
return NULL;
}
@ -118,7 +125,8 @@ ExecGroupEveryTuple(Group *node)
if (lastslot->val != NULL &&
(!sameGroup(lastslot, outerslot,
node->numCols, node->grpColIdx,
ExecGetScanType(&grpstate->csstate)))) {
ExecGetScanType(&grpstate->csstate))))
{
/* ExecGetResultType(&grpstate->csstate.cstate)))) {*/
grpstate->grp_useLastTuple = TRUE;
@ -164,7 +172,8 @@ ExecGroupOneTuple(Group *node)
ExprContext *econtext;
HeapTuple outerTuple = NULL;
TupleTableSlot *outerslot, *lastslot;
TupleTableSlot *outerslot,
*lastslot;
ProjectionInfo *projInfo;
TupleTableSlot *resultSlot;
@ -182,16 +191,21 @@ ExecGroupOneTuple(Group *node)
econtext = node->grpstate->csstate.cstate.cs_ExprContext;
if (grpstate->grp_useLastTuple) {
if (grpstate->grp_useLastTuple)
{
grpstate->grp_useLastTuple = FALSE;
ExecStoreTuple(grpstate->grp_lastSlot->val,
grpstate->csstate.css_ScanTupleSlot,
grpstate->grp_lastSlot->ttc_buffer,
false);
} else {
}
else
{
outerslot = ExecProcNode(outerPlan(node), (Plan *) node);
if (outerslot) outerTuple = outerslot->val;
if (!HeapTupleIsValid(outerTuple)) {
if (outerslot)
outerTuple = outerslot->val;
if (!HeapTupleIsValid(outerTuple))
{
grpstate->grp_done = TRUE;
return NULL;
}
@ -205,10 +219,13 @@ ExecGroupOneTuple(Group *node)
/*
* find all tuples that belong to a group
*/
for(;;) {
for (;;)
{
outerslot = ExecProcNode(outerPlan(node), (Plan *) node);
outerTuple = (outerslot) ? outerslot->val : NULL;
if (!HeapTupleIsValid(outerTuple)) {
if (!HeapTupleIsValid(outerTuple))
{
/*
* we have at least one tuple (lastslot) if we reach here
*/
@ -225,7 +242,8 @@ ExecGroupOneTuple(Group *node)
*/
if ((!sameGroup(lastslot, outerslot,
node->numCols, node->grpColIdx,
ExecGetScanType(&grpstate->csstate)))) {
ExecGetScanType(&grpstate->csstate))))
{
/* ExecGetResultType(&grpstate->csstate.cstate)))) {*/
grpstate->grp_useLastTuple = TRUE;
@ -297,6 +315,7 @@ ExecInitGroup(Group *node, EState *estate, Plan *parent)
ExecAssignExprContext(estate, &grpstate->csstate.cstate);
#define GROUP_NSLOTS 2
/*
* tuple table initialization
*/
@ -316,8 +335,8 @@ ExecInitGroup(Group *node, EState *estate, Plan *parent)
ExecAssignScanTypeFromOuterPlan((Plan *) node, &grpstate->csstate);
/*
* Initialize tuple type for both result and scan.
* This node does no projection
* Initialize tuple type for both result and scan. This node does no
* projection
*/
ExecAssignResultTypeFromTL((Plan *) node, &grpstate->csstate.cstate);
ExecAssignProjectionInfo((Plan *) node, &grpstate->csstate.cstate);
@ -367,14 +386,18 @@ sameGroup(TupleTableSlot *oldslot,
AttrNumber * grpColIdx,
TupleDesc tupdesc)
{
bool isNull1,isNull2;
char *attr1, *attr2;
char *val1, *val2;
bool isNull1,
isNull2;
char *attr1,
*attr2;
char *val1,
*val2;
int i;
AttrNumber att;
Oid typoutput;
for(i = 0; i < numCols; i++) {
for (i = 0; i < numCols; i++)
{
att = grpColIdx[i];
typoutput = typtoout((Oid) tupdesc->attrs[att - 1]->atttypid);
@ -390,7 +413,8 @@ sameGroup(TupleTableSlot *oldslot,
tupdesc,
&isNull2);
if (isNull1 == isNull2) {
if (isNull1 == isNull2)
{
if (isNull1) /* both are null, they are equal */
continue;
@ -399,11 +423,15 @@ sameGroup(TupleTableSlot *oldslot,
val2 = fmgr(typoutput, attr2,
gettypelem(tupdesc->attrs[att - 1]->atttypid));
/* now, val1 and val2 are ascii representations so we can
use strcmp for comparison */
/*
* now, val1 and val2 are ascii representations so we can use
* strcmp for comparison
*/
if (strcmp(val1, val2) != 0)
return FALSE;
} else {
}
else
{
/* one is null and the other isn't, they aren't equal */
return FALSE;
}
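/*
 * Illustrative sketch -- not part of the pgindent commit above.  sameGroup()
 * compares each grouping column by converting both values to their text
 * form (via the type's output function) and using strcmp; two NULLs count
 * as equal, NULL versus non-NULL does not.  Toy version over pre-rendered
 * strings, where NULL pointers stand in for SQL NULLs:
 */
#include <stddef.h>
#include <string.h>

int
toy_same_group(char *const *oldvals, char *const *newvals, int ncols)
{
	int			i;

	for (i = 0; i < ncols; i++)
	{
		if ((oldvals[i] == NULL) != (newvals[i] == NULL))
			return 0;			/* one null, one not: different groups */
		if (oldvals[i] == NULL)
			continue;			/* both null: equal for grouping purposes */
		if (strcmp(oldvals[i], newvals[i]) != 0)
			return 0;
	}
	return 1;
}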


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.10 1997/08/19 21:31:08 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.11 1997/09/07 04:41:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -49,7 +49,8 @@ static void mk_hj_temp(char *tempname);
static int hashFunc(char *key, int len);
static int ExecHashPartition(Hash * node);
static RelativeAddr hashTableAlloc(int size, HashJoinTable hashtable);
static void ExecHashOverflowInsert(HashJoinTable hashtable,
static void
ExecHashOverflowInsert(HashJoinTable hashtable,
HashBucket bucket,
HeapTuple heapTuple);
@ -93,7 +94,8 @@ ExecHash(Hash *node)
nbatch = hashtable->nbatch;
if (nbatch > 0) { /* if needs hash partition */
if (nbatch > 0)
{ /* if needs hash partition */
innerbatchNames = (RelativeAddr *) ABSADDR(hashtable->innerbatchNames);
/* --------------
@ -102,7 +104,8 @@ ExecHash(Hash *node)
* --------------
*/
batches = (File *) palloc(nbatch * sizeof(File));
for (i=0; i<nbatch; i++) {
for (i = 0; i < nbatch; i++)
{
batches[i] = FileNameOpenFile(ABSADDR(innerbatchNames[i]),
O_CREAT | O_RDWR, 0600);
}
@ -122,7 +125,8 @@ ExecHash(Hash *node)
* get tuple and insert into the hash table
* ----------------
*/
for (;;) {
for (;;)
{
slot = ExecProcNode(outerNode, (Plan *) node);
if (TupIsNull(slot))
break;
@ -137,7 +141,8 @@ ExecHash(Hash *node)
/*
* end of build phase, flush all the last pages of the batches.
*/
for (i=0; i<nbatch; i++) {
for (i = 0; i < nbatch; i++)
{
if (FileSeek(batches[i], 0L, SEEK_END) < 0)
perror("FileSeek");
if (FileWrite(batches[i], ABSADDR(hashtable->batch) + i * BLCKSZ, BLCKSZ) < 0)
@ -267,6 +272,7 @@ static RelativeAddr
hashTableAlloc(int size, HashJoinTable hashtable)
{
RelativeAddr p;
p = hashtable->top;
hashtable->top += size;
return p;
@ -304,7 +310,9 @@ ExecHashTableCreate(Hash *node)
nbatch = -1;
HashTBSize = NBuffers / 2;
while (nbatch < 0) {
while (nbatch < 0)
{
/*
* determine number of batches for the hashjoin
*/
@ -322,15 +330,15 @@ ExecHashTableCreate(Hash *node)
tupsize = outerNode->plan_width + sizeof(HeapTupleData);
/*
* totalbuckets is the total number of hash buckets needed for
* the entire relation
* totalbuckets is the total number of hash buckets needed for the
* entire relation
*/
totalbuckets = ceil((double) ntuples / NTUP_PER_BUCKET);
bucketsize = LONGALIGN(NTUP_PER_BUCKET * tupsize + sizeof(*bucket));
/*
* nbuckets is the number of hash buckets for the first pass
* of hybrid hashjoin
* nbuckets is the number of hash buckets for the first pass of hybrid
* hashjoin
*/
nbuckets = (HashTBSize - nbatch) * BLCKSZ / (bucketsize * FUDGE_FAC);
if (totalbuckets < nbuckets)
@ -349,7 +357,8 @@ ExecHashTableCreate(Hash *node)
hashtable = (HashJoinTable) palloc((HashTBSize + 1) * BLCKSZ);
shmid = 0;
if (hashtable == NULL) {
if (hashtable == NULL)
{
elog(WARN, "not enough memory for hashjoin.");
}
/* ----------------
@ -362,6 +371,7 @@ ExecHashTableCreate(Hash *node)
hashtable->shmid = shmid;
hashtable->top = sizeof(HashTableData);
hashtable->bottom = HashTBSize * BLCKSZ;
/*
* hashtable->readbuf has to be long aligned!!!
*/
@ -369,7 +379,8 @@ ExecHashTableCreate(Hash *node)
hashtable->nbatch = nbatch;
hashtable->curbatch = 0;
hashtable->pcount = hashtable->nprocess = 0;
if (nbatch > 0) {
if (nbatch > 0)
{
/* ---------------
* allocate and initialize the outer batches
* ---------------
@ -378,7 +389,8 @@ ExecHashTableCreate(Hash *node)
hashTableAlloc(nbatch * sizeof(RelativeAddr), hashtable));
outerbatchPos = (RelativeAddr *) ABSADDR(
hashTableAlloc(nbatch * sizeof(RelativeAddr), hashtable));
for (i=0; i<nbatch; i++) {
for (i = 0; i < nbatch; i++)
{
tempname = hashTableAlloc(12, hashtable);
mk_hj_temp(ABSADDR(tempname));
outerbatchNames[i] = tempname;
@ -396,7 +408,8 @@ ExecHashTableCreate(Hash *node)
hashTableAlloc(nbatch * sizeof(RelativeAddr), hashtable));
innerbatchSizes = (int *) ABSADDR(
hashTableAlloc(nbatch * sizeof(int), hashtable));
for (i=0; i<nbatch; i++) {
for (i = 0; i < nbatch; i++)
{
tempname = hashTableAlloc(12, hashtable);
mk_hj_temp(ABSADDR(tempname));
innerbatchNames[i] = tempname;
@ -407,7 +420,8 @@ ExecHashTableCreate(Hash *node)
hashtable->innerbatchPos = RELADDR(innerbatchPos);
hashtable->innerbatchSizes = RELADDR(innerbatchSizes);
}
else {
else
{
hashtable->outerbatchNames = (RelativeAddr) NULL;
hashtable->outerbatchPos = (RelativeAddr) NULL;
hashtable->innerbatchNames = (RelativeAddr) NULL;
@ -423,7 +437,8 @@ ExecHashTableCreate(Hash *node)
* ----------------
*/
bucket = (HashBucket) ABSADDR(hashtable->top);
for (i=0; i<nbuckets; i++) {
for (i = 0; i < nbuckets; i++)
{
bucket->top = RELADDR((char *) bucket + sizeof(*bucket));
bucket->bottom = bucket->top;
bucket->firstotuple = bucket->lastotuple = -1;
@ -473,7 +488,8 @@ ExecHashTableInsert(HashJoinTable hashtable,
* decide whether to put the tuple in the hash table or a tmp file
* ----------------
*/
if (bucketno < hashtable->nbuckets) {
if (bucketno < hashtable->nbuckets)
{
/* ---------------
* put the tuple in hash table
* ---------------
@ -483,7 +499,8 @@ ExecHashTableInsert(HashJoinTable hashtable,
if ((char *) LONGALIGN(ABSADDR(bucket->bottom))
- (char *) bucket + heapTuple->t_len > hashtable->bucketsize)
ExecHashOverflowInsert(hashtable, bucket, heapTuple);
else {
else
{
memmove((char *) LONGALIGN(ABSADDR(bucket->bottom)),
heapTuple,
heapTuple->t_len);
@ -491,7 +508,8 @@ ExecHashTableInsert(HashJoinTable hashtable,
((RelativeAddr) LONGALIGN(bucket->bottom) + heapTuple->t_len);
}
}
else {
else
{
/* -----------------
* put the tuple into a tmp file for other batches
* -----------------
@ -547,10 +565,11 @@ ExecHashGetBucket(HashJoinTable hashtable,
keyval = ExecEvalExpr((Node *) hashkey, econtext, &isNull, NULL);
/*
* keyval could be null, so we better point it to something
* valid before trying to run hashFunc on it. --djm 8/17/96
* keyval could be null, so we better point it to something valid
* before trying to run hashFunc on it. --djm 8/17/96
*/
if(isNull) {
if (isNull)
{
execConstByVal = 0;
execConstLen = 0;
keyval = (Datum) "";
@ -600,7 +619,8 @@ ExecHashOverflowInsert(HashJoinTable hashtable,
*/
newend = (RelativeAddr) LONGALIGN(hashtable->overflownext + sizeof(*otuple)
+ heapTuple->t_len);
if (newend > hashtable->bottom) {
if (newend > hashtable->bottom)
{
#if 0
elog(DEBUG, "hash table out of memory. expanding.");
/* ------------------
@ -612,7 +632,8 @@ ExecHashOverflowInsert(HashJoinTable hashtable,
hashtable->readbuf = hashtable->bottom = 2 * hashtable->bottom;
hashtable =
(HashJoinTable) repalloc(hashtable, hashtable->bottom + BLCKSZ);
if (hashtable == NULL) {
if (hashtable == NULL)
{
perror("repalloc");
elog(WARN, "can't expand hashtable.");
}
@ -636,7 +657,8 @@ ExecHashOverflowInsert(HashJoinTable hashtable,
hashtable->overflownext = newend;
if (firstotuple == NULL)
bucket->firstotuple = bucket->lastotuple = RELADDR(otuple);
else {
else
{
lastotuple->next = RELADDR(otuple);
bucket->lastotuple = RELADDR(otuple);
}
@ -682,7 +704,8 @@ ExecScanHashBucket(HashJoinState *hjstate,
* search the hash bucket
* ----------------
*/
if (curtuple == NULL || curtuple < (HeapTuple)ABSADDR(bucket->bottom)) {
if (curtuple == NULL || curtuple < (HeapTuple) ABSADDR(bucket->bottom))
{
if (curtuple == NULL)
heapTuple = (HeapTuple)
LONGALIGN(ABSADDR(bucket->top));
@ -690,7 +713,8 @@ ExecScanHashBucket(HashJoinState *hjstate,
heapTuple = (HeapTuple)
LONGALIGN(((char *) curtuple + curtuple->t_len));
while (heapTuple < (HeapTuple)ABSADDR(bucket->bottom)) {
while (heapTuple < (HeapTuple) ABSADDR(bucket->bottom))
{
inntuple = ExecStoreTuple(heapTuple, /* tuple to store */
hjstate->hj_HashTupleSlot, /* slot */
@ -716,23 +740,27 @@ ExecScanHashBucket(HashJoinState *hjstate,
* search the overflow area of the hash bucket
* ----------------
*/
if (otuple == NULL) {
if (otuple == NULL)
{
curotuple = hjstate->hj_CurOTuple;
otuple = (OverflowTuple) ABSADDR(curotuple->next);
}
while (otuple != NULL) {
while (otuple != NULL)
{
heapTuple = (HeapTuple) ABSADDR(otuple->tuple);
inntuple = ExecStoreTuple(heapTuple, /* tuple to store */
hjstate->hj_HashTupleSlot, /* slot */
InvalidBuffer, /* SP?? this tuple has no buffer */
InvalidBuffer, /* SP?? this tuple has
* no buffer */
false); /* do not pfree this tuple */
econtext->ecxt_innertuple = inntuple;
qualResult = ExecQual((List *) hjclauses, econtext);
if (qualResult) {
if (qualResult)
{
hjstate->hj_CurOTuple = otuple;
return heapTuple;
}
@ -761,19 +789,20 @@ hashFunc(char *key, int len)
register unsigned char *k;
/*
* If this is a variable length type, then 'k' points
* to a "struct varlena" and len == -1.
* NOTE:
* VARSIZE returns the "real" data length plus the sizeof the
* "vl_len" attribute of varlena (the length information).
* 'k' points to the beginning of the varlena struct, so
* we have to use "VARDATA" to find the beginning of the "real"
* data.
* If this is a variable length type, then 'k' points to a "struct
* varlena" and len == -1. NOTE: VARSIZE returns the "real" data
* length plus the sizeof the "vl_len" attribute of varlena (the
* length information). 'k' points to the beginning of the varlena
* struct, so we have to use "VARDATA" to find the beginning of the
* "real" data.
*/
if (len == -1) {
if (len == -1)
{
l = VARSIZE(key) - VARHDRSZ;
k = (unsigned char *) VARDATA(key);
} else {
}
else
{
l = len;
k = (unsigned char *) key;
}
@ -783,7 +812,8 @@ hashFunc(char *key, int len)
/*
* Convert string to integer
*/
while (l--) h = h * PRIME1 ^ (*k++);
while (l--)
h = h * PRIME1 ^ (*k++);
h %= PRIME2;
return (h);
@ -809,13 +839,13 @@ ExecHashPartition(Hash *node)
*/
outerNode = outerPlan(node);
ntuples = outerNode->plan_size;
if (ntuples == 0) ntuples = 1000;
if (ntuples == 0)
ntuples = 1000;
tupsize = outerNode->plan_width + sizeof(HeapTupleData);
pages = ceil((double) ntuples * tupsize * FUDGE_FAC / BLCKSZ);
/*
* if amount of buffer space below hashjoin threshold,
* return negative
* if amount of buffer space below hashjoin threshold, return negative
*/
if (ceil(sqrt((double) pages)) > HashTBSize)
return -1;
@ -846,7 +876,8 @@ ExecHashTableReset(HashJoinTable hashtable, int ntuples)
hashtable->nbuckets;
bucket = (HashBucket) ABSADDR(hashtable->top);
for (i=0; i<hashtable->nbuckets; i++) {
for (i = 0; i < hashtable->nbuckets; i++)
{
bucket->top = RELADDR((char *) bucket + sizeof(*bucket));
bucket->bottom = bucket->top;
bucket->firstotuple = bucket->lastotuple = -1;
@ -863,6 +894,3 @@ mk_hj_temp(char *tempname)
sprintf(tempname, "HJ%d.%d", (int) getpid(), hjtmpcnt);
hjtmpcnt = (hjtmpcnt + 1) % 1000;
}
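
Not part of the commit: a standalone sketch of the multiplicative string hash that hashFunc() applies above (h = h * PRIME1 ^ *k++, then h %= PRIME2). The prime values here are placeholders, not the backend's, and the varlena handling (hashing only the bytes after the length word) is ignored.

```c
#include <stdio.h>

/* Placeholder primes -- the backend's PRIME1/PRIME2 values are not shown in
 * the hunk above, so these are illustrative only. */
#define PRIME1 37
#define PRIME2 1048583

static int hash_bytes(const unsigned char *k, int len)
{
    unsigned int h = 0;

    while (len--)
        h = h * PRIME1 ^ (*k++);    /* same update step as hashFunc() */
    return (int) (h % PRIME2);
}

int main(void)
{
    const char *key = "hashjoin";
    int h = hash_bytes((const unsigned char *) key, 8);

    /* A bucket number would then be taken modulo the number of buckets. */
    printf("hash=%d bucket=%d\n", h, h % 1024);
    return 0;
}
```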

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.5 1997/08/19 21:31:09 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.6 1997/09/07 04:41:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,7 +39,8 @@ static TupleTableSlot *
ExecHashJoinGetSavedTuple(HashJoinState * hjstate, char *buffer,
File file, TupleTableSlot * tupleSlot, int *block, char **position);
static int ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable,
static int
ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable,
int nbatch);
static int ExecHashJoinNewBatch(HashJoinState * hjstate);
@ -121,7 +122,8 @@ ExecHashJoin(HashJoin *node)
*/
econtext = hjstate->jstate.cs_ExprContext;
if (hjstate->jstate.cs_TupFromTlist) {
if (hjstate->jstate.cs_TupFromTlist)
{
TupleTableSlot *result;
bool isDone;
@ -133,9 +135,11 @@ ExecHashJoin(HashJoin *node)
* if this is the first call, build the hash table for inner relation
* ----------------
*/
if (!hashPhaseDone) { /* if the hash phase not completed */
if (!hashPhaseDone)
{ /* if the hash phase not completed */
hashtable = node->hashjointable;
if (hashtable == NULL) { /* if the hash table has not been created */
if (hashtable == NULL)
{ /* if the hash table has not been created */
/* ----------------
* create the hash table
* ----------------
@ -159,7 +163,8 @@ ExecHashJoin(HashJoin *node)
}
nbatch = hashtable->nbatch;
outerbatches = hjstate->hj_OuterBatches;
if (nbatch > 0 && outerbatches == NULL) { /* if needs hash partition */
if (nbatch > 0 && outerbatches == NULL)
{ /* if needs hash partition */
/* -----------------
* allocate space for file descriptors of outer batch files
* then open the batch files in the current process
@ -171,7 +176,8 @@ ExecHashJoin(HashJoin *node)
ABSADDR(hashtable->outerbatchNames);
outerbatches = (File *)
palloc(nbatch * sizeof(File));
for (i=0; i<nbatch; i++) {
for (i = 0; i < nbatch; i++)
{
outerbatches[i] = FileNameOpenFile(
ABSADDR(outerbatchNames[i]),
O_CREAT | O_RDWR, 0600);
@ -197,20 +203,27 @@ ExecHashJoin(HashJoin *node)
outerTupleSlot = hjstate->jstate.cs_OuterTupleSlot;
outerVar = get_leftop(clause);
bucketno = -1; /* if bucketno remains -1, means use old outer tuple */
if (TupIsNull(outerTupleSlot)) {
bucketno = -1; /* if bucketno remains -1, means use old
* outer tuple */
if (TupIsNull(outerTupleSlot))
{
/*
* if the current outer tuple is nil, get a new one
*/
outerTupleSlot = (TupleTableSlot *)
ExecHashJoinOuterGetTuple(outerNode, (Plan *) node, hjstate);
while (curbatch <= nbatch && TupIsNull(outerTupleSlot)) {
while (curbatch <= nbatch && TupIsNull(outerTupleSlot))
{
/*
* if the current batch runs out, switch to new batch
*/
curbatch = ExecHashJoinNewBatch(hjstate);
if (curbatch > nbatch) {
if (curbatch > nbatch)
{
/*
* when the last batch runs out, clean up
*/
@ -222,6 +235,7 @@ ExecHashJoin(HashJoin *node)
outerTupleSlot = (TupleTableSlot *)
ExecHashJoinOuterGetTuple(outerNode, (Plan *) node, hjstate);
}
/*
* now we get an outer tuple, find the corresponding bucket for
* this tuple from the hash table
@ -236,21 +250,24 @@ ExecHashJoin(HashJoin *node)
+ bucketno * hashtable->bucketsize);
}
for (;;) {
for (;;)
{
/* ----------------
* Now we've got an outer tuple and the corresponding hash bucket,
* but this tuple may not belong to the current batch.
* ----------------
*/
if (curbatch == 0 && bucketno != -1) /* if this is the first pass */
if (curbatch == 0 && bucketno != -1) /* if this is the first
* pass */
batch = ExecHashJoinGetBatch(bucketno, hashtable, nbatch);
else
batch = 0;
if (batch > 0) {
if (batch > 0)
{
/*
* if the current outer tuple does not belong to
* the current batch, save to the tmp file for
* the corresponding batch.
* if the current outer tuple does not belong to the current
* batch, save to the tmp file for the corresponding batch.
*/
buffer = ABSADDR(hashtable->batch) + (batch - 1) * BLCKSZ;
batchno = batch - 1;
@ -261,8 +278,11 @@ ExecHashJoin(HashJoin *node)
outerbatchPos[batchno] = RELADDR(pos);
}
else if (bucket != NULL) {
do {
else if (bucket != NULL)
{
do
{
/*
* scan the hash bucket for matches
*/
@ -272,14 +292,17 @@ ExecHashJoin(HashJoin *node)
hjclauses,
econtext);
if (curtuple != NULL) {
if (curtuple != NULL)
{
/*
* we've got a match, but still need to test qpqual
*/
inntuple = ExecStoreTuple(curtuple,
hjstate->hj_HashTupleSlot,
InvalidBuffer,
false); /* don't pfree this tuple */
false); /* don't pfree this
* tuple */
econtext->ecxt_innertuple = inntuple;
@ -295,7 +318,8 @@ ExecHashJoin(HashJoin *node)
* in the tuple table, and return the slot.
* ----------------
*/
if (qualResult) {
if (qualResult)
{
ProjectionInfo *projInfo;
TupleTableSlot *result;
bool isDone;
@ -323,12 +347,16 @@ ExecHashJoin(HashJoin *node)
outerTupleSlot = (TupleTableSlot *)
ExecHashJoinOuterGetTuple(outerNode, (Plan *) node, hjstate);
while (curbatch <= nbatch && TupIsNull(outerTupleSlot)) {
while (curbatch <= nbatch && TupIsNull(outerTupleSlot))
{
/*
* if the current batch runs out, switch to new batch
*/
curbatch = ExecHashJoinNewBatch(hjstate);
if (curbatch > nbatch) {
if (curbatch > nbatch)
{
/*
* when the last batch runs out, clean up
*/
@ -426,6 +454,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, Plan *parent)
HashState *hashstate = hashNode->hashstate;
TupleTableSlot *slot =
hashstate->cstate.cs_ResultTupleSlot;
hjstate->hj_HashTupleSlot = slot;
}
hjstate->hj_OuterTupleSlot->ttc_tupleDescriptor =
@ -496,7 +525,8 @@ ExecEndHashJoin(HashJoin *node)
* free hash table in case we end plan before all tuples are retrieved
* ---------------
*/
if (hjstate->hj_HashTable) {
if (hjstate->hj_HashTable)
{
ExecHashTableDestroy(hjstate->hj_HashTable);
hjstate->hj_HashTable = NULL;
}
@ -553,7 +583,8 @@ ExecHashJoinOuterGetTuple(Plan *node, Plan* parent, HashJoinState *hjstate)
hashtable = hjstate->hj_HashTable;
curbatch = hashtable->curbatch;
if (curbatch == 0) { /* if it is the first pass */
if (curbatch == 0)
{ /* if it is the first pass */
slot = ExecProcNode(node, parent);
return slot;
}
@ -604,7 +635,8 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
hashtable = hjstate->hj_HashTable;
bufend = buffer + *(long *) buffer;
bufstart = (char *) (buffer + sizeof(long));
if ((*position == NULL) || (*position >= bufend)) {
if ((*position == NULL) || (*position >= bufend))
{
if (*position == NULL)
(*block) = 0;
else
@ -660,13 +692,16 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
* batch-switching.
* ------------------
*/
if (newbatch == 1) {
if (newbatch == 1)
{
/*
* if it is end of the first pass, flush all the last pages for
* the batches.
*/
outerBatches = hjstate->hj_OuterBatches;
for (i=0; i<nbatch; i++) {
for (i = 0; i < nbatch; i++)
{
cc = FileSeek(outerBatches[i], 0L, SEEK_END);
if (cc < 0)
perror("FileSeek");
@ -677,12 +712,15 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
perror("FileWrite");
}
}
if (newbatch > 1) {
if (newbatch > 1)
{
/*
* remove the previous outer batch
*/
FileUnlink(outerBatches[newbatch - 2]);
}
/*
* rebuild the hash table for the new inner batch
*/
@ -691,12 +729,14 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
* skip over empty inner batches
* --------------
*/
while (newbatch <= nbatch && innerBatchSizes[newbatch - 1] == 0) {
while (newbatch <= nbatch && innerBatchSizes[newbatch - 1] == 0)
{
FileUnlink(outerBatches[newbatch - 1]);
FileUnlink(innerBatches[newbatch - 1]);
newbatch++;
}
if (newbatch > nbatch) {
if (newbatch > nbatch)
{
hashtable->pcount = hashtable->nprocess;
return newbatch;
@ -716,7 +756,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
hjstate->hj_HashTupleSlot,
&readBlk,
&readPos))
&& ! TupIsNull(slot)) {
&& !TupIsNull(slot))
{
econtext->ecxt_innertuple = slot;
ExecHashTableInsert(hashtable, econtext, innerhashkey, NULL);
/* possible bug - glass */
@ -753,6 +794,7 @@ static int
ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable, int nbatch)
{
int b;
if (bucketno < hashtable->nbuckets || nbatch == 0)
return 0;
@ -788,7 +830,8 @@ ExecHashJoinSaveTuple(HeapTuple heapTuple,
if (position == NULL)
position = pagestart;
if (position + heapTuple->t_len >= pagebound) {
if (position + heapTuple->t_len >= pagebound)
{
cc = FileSeek(file, 0L, SEEK_END);
if (cc < 0)
perror("FileSeek");
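
Not part of the commit: a toy sketch of the batching flow the hunks above implement -- outer tuples that do not belong to the current batch are written to a per-batch temp file with ExecHashJoinSaveTuple() and re-read when ExecHashJoinNewBatch() switches to that batch. Plain arrays stand in for the temp files, and the batch-assignment rule here (key modulo the batch count) is a simplification, not the backend's bucket-range rule.

```c
#include <stdio.h>

#define NBATCH   3
#define MAXSAVED 16

/* Stand-ins for the per-batch temp files written during the first pass. */
static int saved[NBATCH][MAXSAVED];
static int nsaved[NBATCH];

static int batch_of(int key)
{
    /* Simplified rule: batch 0 is joined in memory now, 1..NBATCH later. */
    return key % (NBATCH + 1);
}

static void process_now(int key, int batch)
{
    printf("batch %d: probe hash table with key %d\n", batch, key);
}

int main(void)
{
    int keys[] = {4, 9, 2, 7, 11, 8, 3};
    int nkeys = sizeof(keys) / sizeof(keys[0]);

    /* First pass: handle batch 0 immediately, save everything else. */
    for (int i = 0; i < nkeys; i++)
    {
        int b = batch_of(keys[i]);

        if (b == 0)
            process_now(keys[i], 0);
        else if (nsaved[b - 1] < MAXSAVED)
            saved[b - 1][nsaved[b - 1]++] = keys[i];
    }

    /* Later passes: replay each saved batch in turn. */
    for (int b = 1; b <= NBATCH; b++)
        for (int i = 0; i < nsaved[b - 1]; i++)
            process_now(saved[b - 1][i], b);
    return 0;
}
```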

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.7 1997/03/12 20:58:26 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.8 1997/09/07 04:41:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -116,14 +116,16 @@ IndexNext(IndexScan *node)
* ----------------
*/
for(;;) {
for (;;)
{
result = index_getnext(scandesc, direction);
/* ----------------
* if scanning this index succeeded then return the
* appropriate heap tuple.. else return NULL.
* ----------------
*/
if (result) {
if (result)
{
iptr = &result->heap_iptr;
tuple = heap_fetch(heapRelation,
NowTimeQual,
@ -132,7 +134,8 @@ IndexNext(IndexScan *node)
/* be tidy */
pfree(result);
if (tuple == NULL) {
if (tuple == NULL)
{
/* ----------------
* we found a deleted tuple, so keep on scanning..
* ----------------
@ -251,10 +254,12 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan* parent)
runtimeKeyInfo = (Pointer *) indexstate->iss_RuntimeKeyInfo;
if (runtimeKeyInfo != NULL) {
if (runtimeKeyInfo != NULL)
{
/*
* get the index qualifications and
* recalculate the appropriate values
* get the index qualifications and recalculate the appropriate
* values
*/
indexPtr = indexstate->iss_IndexPtr;
indxqual = node->indxqual;
@ -264,24 +269,33 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan* parent)
run_keys = (int *) runtimeKeyInfo[indexPtr];
scan_keys = (ScanKey) scanKeys[indexPtr];
for (j=0; j < n_keys; j++) {
for (j = 0; j < n_keys; j++)
{
/*
* If we have a run-time key, then extract the run-time
* expression and evaluate it with respect to the current
* outer tuple. We then stick the result into the scan
* key.
* outer tuple. We then stick the result into the scan key.
*/
if (run_keys[j] != NO_OP) {
if (run_keys[j] != NO_OP)
{
clause = nth(j, qual);
scanexpr = (run_keys[j] == RIGHT_OP) ?
(Node *) get_rightop(clause) : (Node *) get_leftop(clause);
/* pass in isDone but ignore it. We don't iterate in quals */
/*
* pass in isDone but ignore it. We don't iterate in
* quals
*/
scanvalue = (Datum)
ExecEvalExpr(scanexpr, exprCtxt, &isNull, &isDone);
scan_keys[j].sk_argument = scanvalue;
if (isNull) {
if (isNull)
{
scan_keys[j].sk_flags |= SK_ISNULL;
} else {
}
else
{
scan_keys[j].sk_flags &= ~SK_ISNULL;
}
}
@ -291,10 +305,11 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan* parent)
/*
* rescans all indices
*
* note: AMrescan assumes only one scan key. This may have
* to change if we ever decide to support multiple keys.
* note: AMrescan assumes only one scan key. This may have to change if
* we ever decide to support multiple keys.
*/
for (i = 0; i < numIndices; i++) {
for (i = 0; i < numIndices; i++)
{
sdesc = scanDescs[i];
skey = scanKeys[i];
index_rescan(sdesc, direction, skey);
@ -355,7 +370,8 @@ ExecEndIndexScan(IndexScan *node)
* free the scan keys used in scanning the indices
* ----------------
*/
for (i=0; i<numIndices; i++) {
for (i = 0; i < numIndices; i++)
{
if (scanKeys[i] != NULL)
pfree(scanKeys[i]);
@ -582,7 +598,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
* build the index scan keys from the index qualification
* ----------------
*/
for (i=0; i < numIndices; i++) {
for (i = 0; i < numIndices; i++)
{
int j;
List *qual;
int n_keys;
@ -604,7 +621,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
* convert each qual's opclause into a single scan key
* ----------------
*/
for (j=0; j < n_keys; j++) {
for (j = 0; j < n_keys; j++)
{
Expr *clause; /* one part of index qual */
Oper *op; /* operator used in scan.. */
Node *leftop; /* expr on lhs of operator */
@ -614,7 +632,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
int scanvar; /* which var identifies varattno */
AttrNumber varattno = 0; /* att number used in scan */
Oid opid; /* operator id used in scan */
Datum scanvalue = 0; /* value used in scan (if const) */
Datum scanvalue = 0; /* value used in scan (if
* const) */
/* ----------------
* extract clause information from the qualification
@ -659,7 +678,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
*/
leftop = (Node *) get_leftop(clause);
if (IsA(leftop,Var) && var_is_rel((Var*)leftop)) {
if (IsA(leftop, Var) && var_is_rel((Var *) leftop))
{
/* ----------------
* if the leftop is a "rel-var", then it means
* that it is a var node which tells us which
@ -668,7 +688,9 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
*/
varattno = ((Var *) leftop)->varattno;
scanvar = LEFT_OP;
} else if (IsA(leftop,Const)) {
}
else if (IsA(leftop, Const))
{
/* ----------------
* if the leftop is a const node then it means
* it identifies the value to place in our scan key.
@ -677,8 +699,11 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
run_keys[j] = NO_OP;
scanvalue = ((Const *) leftop)->constvalue;
#ifdef INDEXSCAN_PATCH
} else if (IsA(leftop,Param)) {
}
else if (IsA(leftop, Param))
{
bool isnull;
/* ----------------
* if the leftop is a Param node then it means
* it identifies the value to place in our scan key.
@ -691,9 +716,11 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
if (isnull)
flags |= SK_ISNULL;
#endif
} else if (leftop != NULL &&
}
else if (leftop != NULL &&
is_funcclause(leftop) &&
var_is_rel(lfirst(((Expr*)leftop)->args))) {
var_is_rel(lfirst(((Expr *) leftop)->args)))
{
/* ----------------
* if the leftop is a func node then it means
* it identifies the value to place in our scan key.
@ -704,7 +731,9 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
varattno = 1;
scanvar = LEFT_OP;
} else {
}
else
{
/* ----------------
* otherwise, the leftop contains information usable
* at runtime to figure out the value to place in our
@ -722,7 +751,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
*/
rightop = (Node *) get_rightop(clause);
if (IsA(rightop,Var) && var_is_rel((Var*)rightop)) {
if (IsA(rightop, Var) && var_is_rel((Var *) rightop))
{
/* ----------------
* here we make sure only one op identifies the
* scan-attribute...
@ -741,7 +771,9 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
varattno = ((Var *) rightop)->varattno;
scanvar = RIGHT_OP;
} else if (IsA(rightop,Const)) {
}
else if (IsA(rightop, Const))
{
/* ----------------
* if the leftop is a const node then it means
* it identifies the value to place in our scan key.
@ -750,8 +782,11 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
run_keys[j] = NO_OP;
scanvalue = ((Const *) rightop)->constvalue;
#ifdef INDEXSCAN_PATCH
} else if (IsA(rightop,Param)) {
}
else if (IsA(rightop, Param))
{
bool isnull;
/* ----------------
* if the rightop is a Param node then it means
* it identifies the value to place in our scan key.
@ -764,9 +799,11 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
if (isnull)
flags |= SK_ISNULL;
#endif
} else if (rightop!=NULL &&
}
else if (rightop != NULL &&
is_funcclause(rightop) &&
var_is_rel(lfirst(((Expr*)rightop)->args))) {
var_is_rel(lfirst(((Expr *) rightop)->args)))
{
/* ----------------
* if the rightop is a func node then it means
* it identifies the value to place in our scan key.
@ -781,7 +818,9 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
varattno = 1;
scanvar = RIGHT_OP;
} else {
}
else
{
/* ----------------
* otherwise, the leftop contains information usable
* at runtime to figure out the value to place in our
@ -808,7 +847,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
*/
ScanKeyEntryInitialize(&scan_keys[j],
flags,
varattno, /* attribute number to scan */
varattno, /* attribute number to
* scan */
(RegProcedure) opid, /* reg proc to use */
(Datum) scanvalue); /* constant */
}
@ -840,11 +880,14 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
{
indexstate->iss_RuntimeKeyInfo = (Pointer) runtimeKeyInfo;
}
else {
else
{
indexstate->iss_RuntimeKeyInfo = NULL;
for (i=0; i < numIndices; i++) {
for (i = 0; i < numIndices; i++)
{
List *qual;
int n_keys;
qual = nth(i, indxqual);
n_keys = length(qual);
if (n_keys > 0)
@ -902,12 +945,14 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
* relation and scan descriptors.
* ----------------
*/
for (i=0; i < numIndices; i++) {
for (i = 0; i < numIndices; i++)
{
Oid indexOid;
indexOid = (Oid) nthi(i, indxid);
if (indexOid != 0) {
if (indexOid != 0)
{
ExecOpenScanR(indexOid, /* relation */
numScanKeys[i], /* nkeys */
scanKeys[i], /* scan key */
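
Not part of the commit: a simplified sketch of the run-time key handling ExecIndexReScan() performs above -- keys whose comparison value is not a plan-time constant (run_keys[j] != NO_OP) are re-evaluated against the current outer tuple on every rescan, and the result is stored back into sk_argument with SK_ISNULL toggled to match. The expression evaluator is faked, and the struct and constants are local to the sketch rather than the backend's definitions.

```c
#include <stdio.h>

#define NO_OP     0
#define RIGHT_OP  2
#define SK_ISNULL 0x1

typedef struct
{
    long sk_argument;           /* comparison value used by the index scan */
    int  sk_flags;
} ScanKeySketch;

/* Fake ExecEvalExpr(): the "expression" just reads a value from the outer tuple. */
static long eval_runtime_expr(long outer_value, int *isnull)
{
    *isnull = 0;
    return outer_value;
}

static void rescan_fill_keys(ScanKeySketch *keys, const int *run_keys,
                             int nkeys, long outer_value)
{
    for (int j = 0; j < nkeys; j++)
    {
        int isnull;

        if (run_keys[j] == NO_OP)
            continue;           /* constant was filled in at plan time */
        keys[j].sk_argument = eval_runtime_expr(outer_value, &isnull);
        if (isnull)
            keys[j].sk_flags |= SK_ISNULL;
        else
            keys[j].sk_flags &= ~SK_ISNULL;
    }
}

int main(void)
{
    ScanKeySketch keys[2] = {{42, 0}, {0, 0}};
    int run_keys[2] = {NO_OP, RIGHT_OP};

    rescan_fill_keys(keys, run_keys, 2, 7);     /* outer tuple supplies 7 */
    printf("key0=%ld key1=%ld\n", keys[0].sk_argument, keys[1].sk_argument);
    return 0;
}
```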

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.6 1997/08/20 14:53:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.7 1997/09/07 04:41:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -77,7 +77,8 @@ ExecMaterial(Material *node)
* ----------------
*/
if (matstate->mat_Flag == false) {
if (matstate->mat_Flag == false)
{
/* ----------------
* set all relations to be scanned in the forward direction
* while creating the temporary relation.
@ -91,13 +92,15 @@ ExecMaterial(Material *node)
* ----------------
*/
tempRelation = matstate->mat_TempRelation;
if (tempRelation == NULL) {
if (tempRelation == NULL)
{
elog(DEBUG, "ExecMaterial: temp relation is NULL! aborting...");
return NULL;
}
currentRelation = matstate->csstate.css_currentRelation;
if (currentRelation == NULL) {
if (currentRelation == NULL)
{
elog(DEBUG, "ExecMaterial: current relation is NULL! aborting...");
return NULL;
}
@ -108,7 +111,8 @@ ExecMaterial(Material *node)
* ----------------
*/
outerNode = outerPlan((Plan *) node);
for (;;) {
for (;;)
{
slot = ExecProcNode(outerNode, (Plan *) node);
heapTuple = slot->val;
@ -190,6 +194,7 @@ ExecInitMaterial(Material *node, EState *estate, Plan *parent)
Plan *outerPlan;
TupleDesc tupType;
Relation tempDesc;
/* int len; */
/* ----------------
@ -392,5 +397,5 @@ ExecMaterialRestrPos(Material node)
sdesc = get_css_currentScanDesc((CommonScanState) matstate);
heap_restrpos(sdesc);
}
#endif
#endif
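
Not part of the commit: a tiny sketch of the materialization pattern ExecMaterial() follows above -- on the first call (mat_Flag still false) the entire outer plan is drained into temporary storage, and every later request is served from that copy. A fixed array replaces the temp relation and an integer stream replaces the outer plan; all names are illustrative.

```c
#include <stdbool.h>
#include <stdio.h>

#define MAXROWS 8

static int  store[MAXROWS];     /* stands in for the temp relation */
static int  nstored;
static int  readpos;
static bool materialized;       /* the mat_Flag analogue */

/* Stand-in for ExecProcNode() on the outer plan: a finite stream of rows. */
static bool outer_next(int *row)
{
    static int i;

    if (i >= 5)
        return false;
    *row = i * i;
    i++;
    return true;
}

/* Returns false once the materialized stream is exhausted. */
static bool material_next(int *row)
{
    if (!materialized)
    {
        int r;

        while (nstored < MAXROWS && outer_next(&r))
            store[nstored++] = r;       /* drain the outer plan exactly once */
        materialized = true;
    }
    if (readpos >= nstored)
        return false;
    *row = store[readpos++];
    return true;
}

int main(void)
{
    int row;

    while (material_next(&row))
        printf("%d ", row);
    printf("\n");
    return 0;
}
```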

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.8 1997/08/19 21:31:10 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.9 1997/09/07 04:41:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -158,7 +158,8 @@ MJFormOSortopI(List *qualList, Oid sortOp)
*/
qualCopy = (List *) copyObject((Node *) qualList);
foreach (qualcdr, qualCopy) {
foreach(qualcdr, qualCopy)
{
/* ----------------
* first get the current (op .. ..) list
* ----------------
@ -170,7 +171,8 @@ MJFormOSortopI(List *qualList, Oid sortOp)
* ----------------
*/
op = (Oper *) qual->oper;
if (!IsA(op,Oper)) {
if (!IsA(op, Oper))
{
elog(DEBUG, "MJFormOSortopI: op not an Oper!");
return NIL;
}
@ -222,10 +224,12 @@ MJFormISortopO(List *qualList, Oid sortOp)
* ((op inner outer) (op inner outer) ... )
* ----------------
*/
foreach (qualcdr, ISortopO) {
foreach(qualcdr, ISortopO)
{
Expr *qual;
List *inner;
List *outer;
qual = lfirst(qualcdr);
inner = lfirst(qual->args);
@ -275,7 +279,8 @@ MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
* ----------------
*/
eqclause = eqQual;
foreach (clause, compareQual) {
foreach(clause, compareQual)
{
/* ----------------
* first test if our compare clause is satisified.
* if so then return true. ignore isDone, don't iterate in
@ -377,16 +382,20 @@ ExecMergeTupleDump(ExprContext *econtext, MergeJoinState *mergestate)
printf("******** \n");
}
#endif
static void
CleanUpSort(Plan *plan) {
CleanUpSort(Plan * plan)
{
if (plan == NULL)
return;
if (plan->type == T_Sort) {
if (plan->type == T_Sort)
{
Sort *sort = (Sort *) plan;
psort_end(sort);
}
}
@ -475,10 +484,13 @@ ExecMergeJoin(MergeJoin *node)
mergeclauses = node->mergeclauses;
qual = node->join.qual;
if (ScanDirectionIsForward(direction)) {
if (ScanDirectionIsForward(direction))
{
outerSkipQual = mergestate->mj_OSortopI;
innerSkipQual = mergestate->mj_ISortopO;
} else {
}
else
{
outerSkipQual = mergestate->mj_ISortopO;
innerSkipQual = mergestate->mj_OSortopI;
}
@ -487,7 +499,8 @@ ExecMergeJoin(MergeJoin *node)
* ok, everything is setup.. let's go to work
* ----------------
*/
if (mergestate->jstate.cs_TupFromTlist) {
if (mergestate->jstate.cs_TupFromTlist)
{
TupleTableSlot *result;
ProjectionInfo *projInfo;
bool isDone;
@ -497,7 +510,8 @@ ExecMergeJoin(MergeJoin *node)
if (!isDone)
return result;
}
for (;;) {
for (;;)
{
/* ----------------
* get the current state of the join and do things accordingly.
* Note: The join states are highlighted with 32-* comments for
@ -506,13 +520,15 @@ ExecMergeJoin(MergeJoin *node)
*/
MJ_dump(econtext, mergestate);
switch (mergestate->mj_JoinState) {
/* ********************************
* EXEC_MJ_INITIALIZE means that this is the first time
* ExecMergeJoin() has been called and so we have to
* initialize the inner, outer and marked tuples as well
* as various stuff in the expression context.
* ********************************
switch (mergestate->mj_JoinState)
{
/*
* ******************************** EXEC_MJ_INITIALIZE means
* that this is the first time ExecMergeJoin() has been called
* and so we have to initialize the inner, outer and marked
* tuples as well as various stuff in the expression context. ********************************
*
*/
case EXEC_MJ_INITIALIZE:
MJ_printf("ExecMergeJoin: EXEC_MJ_INITIALIZE\n");
@ -523,13 +539,15 @@ ExecMergeJoin(MergeJoin *node)
* ----------------
*/
innerTupleSlot = ExecProcNode(innerPlan, (Plan *) node);
if (TupIsNull(innerTupleSlot)) {
if (TupIsNull(innerTupleSlot))
{
MJ_printf("ExecMergeJoin: **** inner tuple is nil ****\n");
return NULL;
}
outerTupleSlot = ExecProcNode(outerPlan, (Plan *) node);
if (TupIsNull(outerTupleSlot)) {
if (TupIsNull(outerTupleSlot))
{
MJ_printf("ExecMergeJoin: **** outer tuple is nil ****\n");
return NULL;
}
@ -561,12 +579,12 @@ ExecMergeJoin(MergeJoin *node)
mergestate->mj_JoinState = EXEC_MJ_SKIPINNER;
break;
/* ********************************
* EXEC_MJ_JOINMARK means we have just found a new
* outer tuple and a possible matching inner tuple.
* This is the case after the INITIALIZE, SKIPOUTER
* or SKIPINNER states.
* ********************************
/*
* ******************************** EXEC_MJ_JOINMARK means we
* have just found a new outer tuple and a possible matching
* inner tuple. This is the case after the INITIALIZE,
* SKIPOUTER or SKIPINNER states. ********************************
*
*/
case EXEC_MJ_JOINMARK:
MJ_printf("ExecMergeJoin: EXEC_MJ_JOINMARK\n");
@ -578,15 +596,16 @@ ExecMergeJoin(MergeJoin *node)
mergestate->mj_JoinState = EXEC_MJ_JOINTEST;
break;
/* ********************************
* EXEC_MJ_JOINTEST means we have two tuples which
* might satisify the merge clause, so we test them.
/*
* ******************************** EXEC_MJ_JOINTEST means we
* have two tuples which might satisify the merge clause, so
* we test them.
*
* If they do satisify, then we join them and move
* on to the next inner tuple (EXEC_MJ_JOINTUPLES).
* If they do satisify, then we join them and move on to the next
* inner tuple (EXEC_MJ_JOINTUPLES).
*
* If they do not satisify then advance to next outer tuple. ********************************
*
* If they do not satisify then advance to next outer tuple.
* ********************************
*/
case EXEC_MJ_JOINTEST:
MJ_printf("ExecMergeJoin: EXEC_MJ_JOINTEST\n");
@ -604,11 +623,12 @@ ExecMergeJoin(MergeJoin *node)
}
break;
/* ********************************
* EXEC_MJ_JOINTUPLES means we have two tuples which
* satisified the merge clause so we join them and then
* proceed to get the next inner tuple (EXEC_NEXT_INNER).
* ********************************
/*
* ******************************** EXEC_MJ_JOINTUPLES means
* we have two tuples which satisified the merge clause so we
* join them and then proceed to get the next inner tuple
* (EXEC_NEXT_INNER). ********************************
*
*/
case EXEC_MJ_JOINTUPLES:
MJ_printf("ExecMergeJoin: EXEC_MJ_JOINTUPLES\n");
@ -617,7 +637,8 @@ ExecMergeJoin(MergeJoin *node)
qualResult = ExecQual((List *) qual, econtext);
MJ_DEBUG_QUAL(qual, qualResult);
if (qualResult) {
if (qualResult)
{
/* ----------------
* qualification succeeded. now form the desired
* projection tuple and return the slot containing it.
@ -637,11 +658,12 @@ ExecMergeJoin(MergeJoin *node)
}
break;
/* ********************************
* EXEC_MJ_NEXTINNER means advance the inner scan
* to the next tuple. If the tuple is not nil, we then
* proceed to test it against the join qualification.
* ********************************
/*
* ******************************** EXEC_MJ_NEXTINNER means
* advance the inner scan to the next tuple. If the tuple is
* not nil, we then proceed to test it against the join
* qualification. ********************************
*
*/
case EXEC_MJ_NEXTINNER:
MJ_printf("ExecMergeJoin: EXEC_MJ_NEXTINNER\n");
@ -664,20 +686,16 @@ ExecMergeJoin(MergeJoin *node)
}
break;
/* ********************************
* EXEC_MJ_NEXTOUTER means
/*
* ******************************** EXEC_MJ_NEXTOUTER means
*
* outer inner
* outer tuple - 5 5 - marked tuple
* 5 5
* 6 6 - inner tuple
* 7 7
* outer inner outer tuple - 5 5 - marked tuple 5 5 6
* 6 - inner tuple 7 7
*
* we know we just bumped into the first inner tuple > current
* outer tuple so get a new outer tuple and then proceed to
* test it against the marked tuple (EXEC_MJ_TESTOUTER) ********************************
*
* we know we just bumped into
* the first inner tuple > current outer tuple
* so get a new outer tuple and then proceed to test
* it against the marked tuple (EXEC_MJ_TESTOUTER)
* ********************************
*/
case EXEC_MJ_NEXTOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_NEXTOUTER\n");
@ -691,7 +709,8 @@ ExecMergeJoin(MergeJoin *node)
* we are done with the join
* ----------------
*/
if (TupIsNull(outerTupleSlot)) {
if (TupIsNull(outerTupleSlot))
{
MJ_printf("ExecMergeJoin: **** outer tuple is nil ****\n");
CleanUpSort(node->join.lefttree->lefttree);
CleanUpSort(node->join.righttree->lefttree);
@ -701,40 +720,34 @@ ExecMergeJoin(MergeJoin *node)
mergestate->mj_JoinState = EXEC_MJ_TESTOUTER;
break;
/* ********************************
* EXEC_MJ_TESTOUTER
* If the new outer tuple and the marked tuple satisify
* the merge clause then we know we have duplicates in
* the outer scan so we have to restore the inner scan
* to the marked tuple and proceed to join the new outer
* tuples with the inner tuples (EXEC_MJ_JOINTEST)
/*
* ******************************** EXEC_MJ_TESTOUTER If the
* new outer tuple and the marked tuple satisify the merge
* clause then we know we have duplicates in the outer scan so
* we have to restore the inner scan to the marked tuple and
* proceed to join the new outer tuples with the inner tuples
* (EXEC_MJ_JOINTEST)
*
* This is the case when
*
* outer inner
* 4 5 - marked tuple
* outer tuple - 5 5
* new outer tuple - 5 5
* 6 8 - inner tuple
* 7 12
* outer inner 4 5 - marked tuple outer tuple - 5 5 new
* outer tuple - 5 5 6 8 - inner tuple 7 12
*
* new outer tuple = marked tuple
*
* If the outer tuple fails the test, then we know we have
* to proceed to skip outer tuples until outer >= inner
* If the outer tuple fails the test, then we know we have to
* proceed to skip outer tuples until outer >= inner
* (EXEC_MJ_SKIPOUTER).
*
* This is the case when
*
* outer inner
* 5 5 - marked tuple
* outer tuple - 5 5
* new outer tuple - 6 8 - inner tuple
* 7 12
* outer inner 5 5 - marked tuple outer tuple - 5 5 new
* outer tuple - 6 8 - inner tuple 7 12
*
* new outer tuple > marked tuple
*
* ********************************
********************************
*
*/
case EXEC_MJ_TESTOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_TESTOUTER\n");
@ -751,7 +764,8 @@ ExecMergeJoin(MergeJoin *node)
qualResult = ExecQual((List *) mergeclauses, econtext);
MJ_DEBUG_QUAL(mergeclauses, qualResult);
if (qualResult) {
if (qualResult)
{
/* ----------------
* the merge clause matched so now we juggle the slots
* back the way they were and proceed to JOINTEST.
@ -764,7 +778,9 @@ ExecMergeJoin(MergeJoin *node)
ExecRestrPos(innerPlan);
mergestate->mj_JoinState = EXEC_MJ_JOINTEST;
} else {
}
else
{
/* ----------------
* if the inner tuple was nil and the new outer
* tuple didn't match the marked outer tuple then
@ -780,7 +796,8 @@ ExecMergeJoin(MergeJoin *node)
* larger than our inner tuples.
* ----------------
*/
if (TupIsNull(innerTupleSlot)) {
if (TupIsNull(innerTupleSlot))
{
MJ_printf("ExecMergeJoin: **** wierd case 1 ****\n");
return NULL;
}
@ -795,23 +812,20 @@ ExecMergeJoin(MergeJoin *node)
}
break;
/* ********************************
* EXEC_MJ_SKIPOUTER means skip over tuples in the outer plan
* until we find an outer tuple > current inner tuple.
/*
* ******************************** EXEC_MJ_SKIPOUTER means
* skip over tuples in the outer plan until we find an outer
* tuple > current inner tuple.
*
* For example:
*
* outer inner
* 5 5
* 5 5
* outer tuple - 6 8 - inner tuple
* 7 12
* 8 14
* outer inner 5 5 5 5 outer tuple - 6 8 - inner
* tuple 7 12 8 14
*
* we have to advance the outer scan
* until we find the outer 8.
* we have to advance the outer scan until we find the outer 8.
*
********************************
*
* ********************************
*/
case EXEC_MJ_SKIPOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPOUTER\n");
@ -824,7 +838,8 @@ ExecMergeJoin(MergeJoin *node)
qualResult = ExecQual((List *) mergeclauses, econtext);
MJ_DEBUG_QUAL(mergeclauses, qualResult);
if (qualResult) {
if (qualResult)
{
ExecMarkPos(innerPlan);
innerTupleSlot = econtext->ecxt_innertuple;
@ -849,7 +864,8 @@ ExecMergeJoin(MergeJoin *node)
* continue skipping tuples.
* ----------------
*/
if (compareResult) {
if (compareResult)
{
outerTupleSlot = ExecProcNode(outerPlan, (Plan *) node);
MJ_DEBUG_PROC_NODE(outerTupleSlot);
@ -860,7 +876,8 @@ ExecMergeJoin(MergeJoin *node)
* we are done with the join
* ----------------
*/
if (TupIsNull(outerTupleSlot)) {
if (TupIsNull(outerTupleSlot))
{
MJ_printf("ExecMergeJoin: **** outerTuple is nil ****\n");
return NULL;
}
@ -895,23 +912,20 @@ ExecMergeJoin(MergeJoin *node)
}
break;
/* ********************************
* EXEC_MJ_SKIPINNER means skip over tuples in the inner plan
* until we find an inner tuple > current outer tuple.
/*
* ******************************** EXEC_MJ_SKIPINNER means
* skip over tuples in the inner plan until we find an inner
* tuple > current outer tuple.
*
* For example:
*
* outer inner
* 5 5
* 5 5
* outer tuple - 12 8 - inner tuple
* 14 10
* 17 12
* outer inner 5 5 5 5 outer tuple - 12 8 - inner
* tuple 14 10 17 12
*
* we have to advance the inner scan
* until we find the inner 12.
* we have to advance the inner scan until we find the inner 12.
*
********************************
*
* ********************************
*/
case EXEC_MJ_SKIPINNER:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPINNER\n");
@ -924,7 +938,8 @@ ExecMergeJoin(MergeJoin *node)
qualResult = ExecQual((List *) mergeclauses, econtext);
MJ_DEBUG_QUAL(mergeclauses, qualResult);
if (qualResult) {
if (qualResult)
{
ExecMarkPos(innerPlan);
innerTupleSlot = econtext->ecxt_innertuple;
@ -949,7 +964,8 @@ ExecMergeJoin(MergeJoin *node)
* continue skipping tuples.
* ----------------
*/
if (compareResult) {
if (compareResult)
{
/* ----------------
* now try and get a new inner tuple
* ----------------
@ -964,7 +980,8 @@ ExecMergeJoin(MergeJoin *node)
* and advance to the next outer tuple
* ----------------
*/
if (TupIsNull(innerTupleSlot)) {
if (TupIsNull(innerTupleSlot))
{
/* ----------------
* this is an interesting case.. all our
* inner tuples are smaller then our outer
@ -1015,10 +1032,11 @@ ExecMergeJoin(MergeJoin *node)
break;
/* ********************************
* if we get here it means our code is fucked up and
* so we just end the join prematurely.
* ********************************
/*
* ******************************** if we get here it means
* our code is fucked up and so we just end the join
* prematurely. ********************************
*
*/
default:
elog(NOTICE, "ExecMergeJoin: invalid join state. aborting");
@ -1214,4 +1232,3 @@ ExecEndMergeJoin(MergeJoin *node)
MJ1_printf("ExecEndMergeJoin: %s\n",
"node processing ended");
}
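
Not part of the commit: a compact sketch of the merge loop that the EXEC_MJ_* state machine above implements for two sorted inputs -- advance whichever side compares smaller (the SKIPOUTER/SKIPINNER states), and on a match emit the joined pairs while marking and restoring the inner position so that duplicate outer keys can rejoin the same run of inner tuples (the JOINMARK/TESTOUTER behavior). Sorted integer arrays stand in for the sorted subplans, and the mark is just a saved array index.

```c
#include <stdio.h>

/* Merge-join two sorted integer arrays on equality, emitting all pairs.
 * "mark" plays the role of ExecMarkPos()/ExecRestrPos() on the inner plan. */
static void merge_join(const int *outer, int nouter,
                       const int *inner, int ninner)
{
    int o = 0, i = 0;

    while (o < nouter && i < ninner)
    {
        if (outer[o] < inner[i])
            o++;                        /* EXEC_MJ_SKIPOUTER */
        else if (outer[o] > inner[i])
            i++;                        /* EXEC_MJ_SKIPINNER */
        else
        {
            int mark = i;               /* remember start of the inner run */

            for (; i < ninner && inner[i] == outer[o]; i++)
                printf("join (%d, %d)\n", outer[o], inner[i]);
            o++;
            i = mark;                   /* restore for a duplicate outer key */
        }
    }
}

int main(void)
{
    int outer[] = {4, 5, 5, 6, 7};
    int inner[] = {5, 5, 6, 8, 12};

    merge_join(outer, 5, inner, 5);
    return 0;
}
```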
