pgindent run over code.
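For orientation: pgindent is PostgreSQL's automated source-code formatter, and the hunks below are intended as purely mechanical re-indentation with no behavioural change. The recurring transformations visible in this diff are opening braces moved onto their own line, braces dropped from single-statement blocks, multi-variable declarations split one per line, and block comments re-wrapped. A minimal before/after sketch of that style follows; the function and variable names are hypothetical, not taken from the diff.

	/* hypothetical example, not from the diff */

	/* before */
	int
	clamp_total(int total, int count) {
		if (count > 0) {
			total += count;	/* accumulate */
		} else {
			total = 0;
		}
		return total;
	}

	/* after pgindent: brace on its own line, single-statement braces dropped */
	int
	clamp_total(int total, int count)
	{
		if (count > 0)
			total += count;		/* accumulate */
		else
			total = 0;
		return total;
	}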
@@ -110,39 +110,27 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
 {
 result = (int) (*proc_fn) (p, value);
 if (typlen > 0)
-{
 p += typlen;
-}
 else
-{
 p += INTALIGN(*(int32 *) p);
 }
-}
 if (result)
 {
 if (!and)
-{
 return (1);
 }
-}
 else
 {
 if (and)
-{
 return (0);
 }
 }
-}

 if (and && result)
-{
 return (1);
-}
 else
-{
 return (0);
 }
-}

 /*
 * Iterator functions for type _text
@@ -1,8 +1,7 @@
 #ifndef ARRAY_ITERATOR_H
 #define ARRAY_ITERATOR_H

-static int32
-array_iterator(Oid elemtype, Oid proc, int and,
+static int32 array_iterator(Oid elemtype, Oid proc, int and,
 ArrayType *array, Datum value);
 int32 array_texteq(ArrayType *array, char *value);
 int32 array_all_texteq(ArrayType *array, char *value);
@@ -26,4 +25,5 @@ int32 array_int4le(ArrayType *array, int4 value);
 int32 array_all_int4le(ArrayType *array, int4 value);
 int32 array_oideq(ArrayType *array, Oid value);
 int32 array_all_oidne(ArrayType *array, Oid value);
+
 #endif
@@ -20,7 +20,8 @@ const TWO_PI = 2.0 * M_PI;
 ******************************************************/

 static double
-degtorad (double degrees) {
+degtorad(double degrees)
+{
 return (degrees / 360.0) * TWO_PI;
 }

@@ -39,9 +40,13 @@ degtorad (double degrees) {
 ******************************************************/

 double *
-geo_distance (Point *pt1, Point *pt2) {
+geo_distance(Point *pt1, Point *pt2)
+{

-double long1, lat1, long2, lat2;
+double long1,
+lat1,
+long2,
+lat2;
 double longdiff;
 double *resultp = palloc(sizeof(double));

@@ -9,7 +9,8 @@
 #include <libpq-fe.h>
 #include "pginterface.h"

-PGresult *attres, *relres;
+PGresult *attres,
+*relres;

 int
 main(int argc, char **argv)
@@ -67,13 +68,13 @@ main(int argc, char **argv)
 if (strcmp(typname, "oid") == 0)
 sprintf(query, "\
 DECLARE c_matches BINARY CURSOR FOR \
-SELECT count(*)
+SELECT count(*) \
 FROM % s t1, %s t2 \
 WHERE t1.% s = t2.oid ", relname, relname2, attname);
 else
 sprintf(query, "\
 DECLARE c_matches BINARY CURSOR FOR \
-SELECT count(*)
+SELECT count(*) \
 FROM % s t1, %s t2 \
 WHERE RegprocToOid(t1.% s) = t2.oid ", relname, relname2, attname);

@@ -109,14 +109,14 @@ fti()
 int ret;
 char query[8192];
 Oid oid;

 /*
-FILE *debug;
+* FILE *debug;
 */

 /*
-debug = fopen("/dev/xconsole", "w");
-fprintf(debug, "FTI: entered function\n");
-fflush(debug);
+* debug = fopen("/dev/xconsole", "w"); fprintf(debug, "FTI: entered
+* function\n"); fflush(debug);
 */

 if (!CurrentTriggerData)
@@ -129,7 +129,10 @@ fti()
 if (TRIGGER_FIRED_BY_INSERT(CurrentTriggerData->tg_event))
 isinsert = true;
 if (TRIGGER_FIRED_BY_UPDATE(CurrentTriggerData->tg_event))
-{ isdelete=true;isinsert=true;}
+{
+isdelete = true;
+isinsert = true;
+}
 if (TRIGGER_FIRED_BY_DELETE(CurrentTriggerData->tg_event))
 isdelete = true;

@@ -140,7 +143,8 @@ fti()
 if (isdelete && isinsert) /* is an UPDATE */
 rettuple = CurrentTriggerData->tg_newtuple;

-CurrentTriggerData = NULL; /* invalidate 'normal' calls to this function */
+CurrentTriggerData = NULL; /* invalidate 'normal' calls to this
+* function */

 if ((ret = SPI_connect()) < 0)
 elog(ERROR, "Full Text Indexing: SPI_connect failed, returned %d\n", ret);
@@ -158,7 +162,8 @@ fti()
 if (!OidIsValid(oid))
 elog(ERROR, "Full Text Indexing: oid of current tuple is NULL");

-if (isdelete) {
+if (isdelete)
+{
 void *pplan;
 Oid *argtypes;
 Datum values[1];
@@ -166,7 +171,8 @@ fti()

 sprintf(query, "D%s$%s", args[0], args[1]);
 plan = find_plan(query, &DeletePlans, &nDeletePlans);
-if (plan->nplans <= 0) {
+if (plan->nplans <= 0)
+{
 argtypes = (Oid *) palloc(sizeof(Oid));

 argtypes[0] = OIDOID;
@@ -193,8 +199,10 @@ fti()
 elog(ERROR, "Full Text Indexing: error executing plan in delete");
 }

-if (isinsert) {
-char *substring, *column;
+if (isinsert)
+{
+char *substring,
+*column;
 void *pplan;
 Oid *argtypes;
 Datum values[2];
|
|||||||
plan = find_plan(query, &InsertPlans, &nInsertPlans);
|
plan = find_plan(query, &InsertPlans, &nInsertPlans);
|
||||||
|
|
||||||
/* no plan yet, so allocate mem for argtypes */
|
/* no plan yet, so allocate mem for argtypes */
|
||||||
if (plan->nplans <= 0) {
|
if (plan->nplans <= 0)
|
||||||
|
{
|
||||||
argtypes = (Oid *) palloc(2 * sizeof(Oid));
|
argtypes = (Oid *) palloc(2 * sizeof(Oid));
|
||||||
|
|
||||||
argtypes[0] = VARCHAROID; /*create table t_name
|
argtypes[0] = VARCHAROID; /* create table t_name (string
|
||||||
(string varchar, */
|
* varchar, */
|
||||||
argtypes[1] = OIDOID; /* id oid); */
|
argtypes[1] = OIDOID; /* id oid); */
|
||||||
|
|
||||||
/* prepare plan to gain speed */
|
/* prepare plan to gain speed */
|
||||||
@@ -241,12 +250,15 @@ fti()
 /* Get the char* representation of the column with name args[1] */
 column = SPI_getvalue(rettuple, tupdesc, colnum);

-if (column) { /* make sure we don't try to index NULL's */
+if (column)
+{ /* make sure we don't try to index NULL's */
 char *buff;
 char *string = column;

-while(*string != '\0') { /* placed 'really' inline. */
-*string = tolower(*string); /* some compilers will choke */
+while (*string != '\0')
+{ /* placed 'really' inline. */
+*string = tolower(*string); /* some compilers will
+* choke */
 string++; /* on 'inline' keyword */
 }

@@ -255,7 +267,8 @@ fti()
 /* saves lots of calls in while-loop and in breakup() */

 new_tuple = true;
-while ((substring = breakup(column, buff))) {
+while ((substring = breakup(column, buff)))
+{
 int l;

 l = strlen(substring);
@@ -279,7 +292,8 @@ fti()
 return (rettuple);
 }

-char *breakup(char *string, char *substring)
+char *
+breakup(char *string, char *substring)
 {
 static char *last_start;
 static char *cur_pos;
@@ -292,24 +306,30 @@ char *breakup(char *string, char *substring)

 while (cur_pos > string) /* don't read before start of 'string' */
 {
-/* skip pieces at the end of a string that are not
-alfa-numeric (ie. 'string$%^&', last_start first points to
-'&', and after this to 'g' */
-if (!isalnum((int)*last_start)) {
+/*
+* skip pieces at the end of a string that are not alfa-numeric
+* (ie. 'string$%^&', last_start first points to '&', and after
+* this to 'g'
+*/
+if (!isalnum((int) *last_start))
+{
 while (!isalnum((int) *last_start) &&
 last_start > string)
 last_start--;
 cur_pos = last_start;
 }

-cur_pos--; /* substrings are at minimum 2 characters long */
+cur_pos--; /* substrings are at minimum 2 characters
+* long */

 if (isalnum((int) *cur_pos))
 {
 /* Houston, we have a substring! :) */
 memcpy(substring, cur_pos, last_start - cur_pos + 1);
 substring[last_start - cur_pos + 1] = '\0';
-if (!is_stopword(substring)) return substring;
+if (!is_stopword(substring))
+return substring;
 }
 else
 {
@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* PostgreSQL type definitions for ISBNs.
|
* PostgreSQL type definitions for ISBNs.
|
||||||
*
|
*
|
||||||
* $Id: isbn.c,v 1.1 1998/08/17 03:35:04 scrappy Exp $
|
* $Id: isbn.c,v 1.2 1999/05/25 16:05:40 momjian Exp $
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
@ -50,11 +50,13 @@ isbn_in(char *str)
|
|||||||
char *cp;
|
char *cp;
|
||||||
int count;
|
int count;
|
||||||
|
|
||||||
if (strlen(str) != 13) {
|
if (strlen(str) != 13)
|
||||||
|
{
|
||||||
elog(ERROR, "isbn_in: invalid ISBN \"%s\"", str);
|
elog(ERROR, "isbn_in: invalid ISBN \"%s\"", str);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
if (isbn_sum(str) != 0) {
|
if (isbn_sum(str) != 0)
|
||||||
|
{
|
||||||
elog(ERROR, "isbn_in: purported ISBN \"%s\" failed checksum",
|
elog(ERROR, "isbn_in: purported ISBN \"%s\" failed checksum",
|
||||||
str);
|
str);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
@ -84,23 +86,35 @@ isbn_in(char *str)
|
|||||||
int4
|
int4
|
||||||
isbn_sum(char *str)
|
isbn_sum(char *str)
|
||||||
{
|
{
|
||||||
int4 sum = 0, dashes = 0, val;
|
int4 sum = 0,
|
||||||
|
dashes = 0,
|
||||||
|
val;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; str[i] && i < 13; i++) {
|
for (i = 0; str[i] && i < 13; i++)
|
||||||
switch(str[i]) {
|
{
|
||||||
|
switch (str[i])
|
||||||
|
{
|
||||||
case '-':
|
case '-':
|
||||||
if (++dashes > 3)
|
if (++dashes > 3)
|
||||||
return 12;
|
return 12;
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
case '0': case '1': case '2': case '3':
|
case '0':
|
||||||
case '4': case '5': case '6': case '7':
|
case '1':
|
||||||
case '8': case '9':
|
case '2':
|
||||||
|
case '3':
|
||||||
|
case '4':
|
||||||
|
case '5':
|
||||||
|
case '6':
|
||||||
|
case '7':
|
||||||
|
case '8':
|
||||||
|
case '9':
|
||||||
val = str[i] - '0';
|
val = str[i] - '0';
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case 'X': case 'x':
|
case 'X':
|
||||||
|
case 'x':
|
||||||
val = 10;
|
val = 10;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* PostgreSQL type definitions for ISSNs.
|
* PostgreSQL type definitions for ISSNs.
|
||||||
*
|
*
|
||||||
* $Id: issn.c,v 1.1 1998/08/17 03:35:05 scrappy Exp $
|
* $Id: issn.c,v 1.2 1999/05/25 16:05:42 momjian Exp $
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
@ -50,11 +50,13 @@ issn_in(char *str)
|
|||||||
char *cp;
|
char *cp;
|
||||||
int count;
|
int count;
|
||||||
|
|
||||||
if (strlen(str) != 9) {
|
if (strlen(str) != 9)
|
||||||
|
{
|
||||||
elog(ERROR, "issn_in: invalid ISSN \"%s\"", str);
|
elog(ERROR, "issn_in: invalid ISSN \"%s\"", str);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
if (issn_sum(str) != 0) {
|
if (issn_sum(str) != 0)
|
||||||
|
{
|
||||||
elog(ERROR, "issn_in: purported ISSN \"%s\" failed checksum",
|
elog(ERROR, "issn_in: purported ISSN \"%s\" failed checksum",
|
||||||
str);
|
str);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
@ -75,23 +77,35 @@ issn_in(char *str)
|
|||||||
int4
|
int4
|
||||||
issn_sum(char *str)
|
issn_sum(char *str)
|
||||||
{
|
{
|
||||||
int4 sum = 0, dashes = 0, val;
|
int4 sum = 0,
|
||||||
|
dashes = 0,
|
||||||
|
val;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; str[i] && i < 9; i++) {
|
for (i = 0; str[i] && i < 9; i++)
|
||||||
switch(str[i]) {
|
{
|
||||||
|
switch (str[i])
|
||||||
|
{
|
||||||
case '-':
|
case '-':
|
||||||
if (++dashes > 1)
|
if (++dashes > 1)
|
||||||
return 12;
|
return 12;
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
case '0': case '1': case '2': case '3':
|
case '0':
|
||||||
case '4': case '5': case '6': case '7':
|
case '1':
|
||||||
case '8': case '9':
|
case '2':
|
||||||
|
case '3':
|
||||||
|
case '4':
|
||||||
|
case '5':
|
||||||
|
case '6':
|
||||||
|
case '7':
|
||||||
|
case '8':
|
||||||
|
case '9':
|
||||||
val = str[i] - '0';
|
val = str[i] - '0';
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case 'X': case 'x':
|
case 'X':
|
||||||
|
case 'x':
|
||||||
val = 10;
|
val = 10;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* PostgreSQL type definitions for managed LargeObjects.
|
* PostgreSQL type definitions for managed LargeObjects.
|
||||||
*
|
*
|
||||||
* $Id: lo.c,v 1.1 1998/06/16 07:07:11 momjian Exp $
|
* $Id: lo.c,v 1.2 1999/05/25 16:05:45 momjian Exp $
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -72,6 +72,7 @@ lo_in(char *str)
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* There is no Oid passed, so create a new one
|
* There is no Oid passed, so create a new one
|
||||||
*/
|
*/
|
||||||
@ -130,6 +131,7 @@ Blob *
|
|||||||
lo(Oid oid)
|
lo(Oid oid)
|
||||||
{
|
{
|
||||||
Blob *result = (Blob *) palloc(sizeof(Blob));
|
Blob *result = (Blob *) palloc(sizeof(Blob));
|
||||||
|
|
||||||
*result = oid;
|
*result = oid;
|
||||||
return (result);
|
return (result);
|
||||||
}
|
}
|
||||||
@ -177,10 +179,11 @@ lo_manage(void)
|
|||||||
/*
|
/*
|
||||||
* Handle updates
|
* Handle updates
|
||||||
*
|
*
|
||||||
* Here, if the value of the monitored attribute changes, then the
|
* Here, if the value of the monitored attribute changes, then the large
|
||||||
* large object associated with the original value is unlinked.
|
* object associated with the original value is unlinked.
|
||||||
*/
|
*/
|
||||||
if(newtuple!=NULL) {
|
if (newtuple != NULL)
|
||||||
|
{
|
||||||
char *orig = SPI_getvalue(trigtuple, tupdesc, attnum);
|
char *orig = SPI_getvalue(trigtuple, tupdesc, attnum);
|
||||||
char *newv = SPI_getvalue(newtuple, tupdesc, attnum);
|
char *newv = SPI_getvalue(newtuple, tupdesc, attnum);
|
||||||
|
|
||||||
@ -199,10 +202,12 @@ lo_manage(void)
|
|||||||
* Here, we unlink the large object associated with the managed attribute
|
* Here, we unlink the large object associated with the managed attribute
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
if(isdelete) {
|
if (isdelete)
|
||||||
|
{
|
||||||
char *orig = SPI_getvalue(trigtuple, tupdesc, attnum);
|
char *orig = SPI_getvalue(trigtuple, tupdesc, attnum);
|
||||||
|
|
||||||
if(orig != NULL) {
|
if (orig != NULL)
|
||||||
|
{
|
||||||
lo_unlink(atoi(orig));
|
lo_unlink(atoi(orig));
|
||||||
|
|
||||||
pfree(orig);
|
pfree(orig);
|
||||||
|
@ -17,16 +17,21 @@ char *msqlErrors[] = {
|
|||||||
"Out of database handlers."
|
"Out of database handlers."
|
||||||
};
|
};
|
||||||
|
|
||||||
char msqlErrMsg[BUFSIZ], *tfrom = "dunno";
|
char msqlErrMsg[BUFSIZ],
|
||||||
|
*tfrom = "dunno";
|
||||||
PGresult *queryres = NULL;
|
PGresult *queryres = NULL;
|
||||||
|
|
||||||
int msqlConnect (char *host) {
|
int
|
||||||
|
msqlConnect(char *host)
|
||||||
|
{
|
||||||
int count;
|
int count;
|
||||||
|
|
||||||
for (count = 0; count < HNDMAX; count++)
|
for (count = 0; count < HNDMAX; count++)
|
||||||
if (PGh[count] == NULL) break;
|
if (PGh[count] == NULL)
|
||||||
|
break;
|
||||||
|
|
||||||
if (count == HNDMAX) {
|
if (count == HNDMAX)
|
||||||
|
{
|
||||||
strncpy(msqlErrMsg, msqlErrors[E_NOHANDLERS], BUFSIZ);
|
strncpy(msqlErrMsg, msqlErrors[E_NOHANDLERS], BUFSIZ);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
@ -36,14 +41,17 @@ int msqlConnect (char *host) {
|
|||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
|
||||||
int msqlSelectDB(int handle, char *dbname) {
|
int
|
||||||
|
msqlSelectDB(int handle, char *dbname)
|
||||||
|
{
|
||||||
char *options = calloc(1, BUFSIZ);
|
char *options = calloc(1, BUFSIZ);
|
||||||
char *e = getenv("PG_OPTIONS");
|
char *e = getenv("PG_OPTIONS");
|
||||||
|
|
||||||
if (e == NULL)
|
if (e == NULL)
|
||||||
e = "";
|
e = "";
|
||||||
|
|
||||||
if (PGh[handle]->pghost) {
|
if (PGh[handle]->pghost)
|
||||||
|
{
|
||||||
strcat(options, "host=");
|
strcat(options, "host=");
|
||||||
strncat(options, PGh[handle]->pghost, BUFSIZ);
|
strncat(options, PGh[handle]->pghost, BUFSIZ);
|
||||||
strncat(options, " ", BUFSIZ);
|
strncat(options, " ", BUFSIZ);
|
||||||
@ -61,7 +69,9 @@ int msqlSelectDB(int handle, char *dbname) {
|
|||||||
return (PQstatus(PGh[handle]) == CONNECTION_BAD ? -1 : 0);
|
return (PQstatus(PGh[handle]) == CONNECTION_BAD ? -1 : 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
int msqlQuery(int handle, char *query) {
|
int
|
||||||
|
msqlQuery(int handle, char *query)
|
||||||
|
{
|
||||||
char *tq = strdup(query);
|
char *tq = strdup(query);
|
||||||
char *p = tq;
|
char *p = tq;
|
||||||
PGresult *res;
|
PGresult *res;
|
||||||
@ -72,84 +82,122 @@ int msqlQuery(int handle, char *query) {
|
|||||||
|
|
||||||
rcode = PQresultStatus(res);
|
rcode = PQresultStatus(res);
|
||||||
|
|
||||||
if (rcode == PGRES_TUPLES_OK) {
|
if (rcode == PGRES_TUPLES_OK)
|
||||||
|
{
|
||||||
queryres = res;
|
queryres = res;
|
||||||
return PQntuples(res);
|
return PQntuples(res);
|
||||||
} else if (rcode == PGRES_FATAL_ERROR || rcode == PGRES_NONFATAL_ERROR) {
|
}
|
||||||
|
else if (rcode == PGRES_FATAL_ERROR || rcode == PGRES_NONFATAL_ERROR)
|
||||||
|
{
|
||||||
PQclear(res);
|
PQclear(res);
|
||||||
queryres = NULL;
|
queryres = NULL;
|
||||||
return -1;
|
return -1;
|
||||||
} else {
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
PQclear(res);
|
PQclear(res);
|
||||||
queryres = NULL;
|
queryres = NULL;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int msqlCreateDB (int a, char*b) {
|
int
|
||||||
|
msqlCreateDB(int a, char *b)
|
||||||
|
{
|
||||||
char tbuf[BUFSIZ];
|
char tbuf[BUFSIZ];
|
||||||
|
|
||||||
sprintf(tbuf, "create database %s", b);
|
sprintf(tbuf, "create database %s", b);
|
||||||
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
|
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
int msqlDropDB (int a, char* b) {
|
int
|
||||||
|
msqlDropDB(int a, char *b)
|
||||||
|
{
|
||||||
char tbuf[BUFSIZ];
|
char tbuf[BUFSIZ];
|
||||||
|
|
||||||
sprintf(tbuf, "drop database %s", b);
|
sprintf(tbuf, "drop database %s", b);
|
||||||
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
|
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
int msqlShutdown(int a) {
|
int
|
||||||
|
msqlShutdown(int a)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
int msqlGetProtoInfo(void) {
|
int
|
||||||
|
msqlGetProtoInfo(void)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
int msqlReloadAcls(int a) {
|
int
|
||||||
|
msqlReloadAcls(int a)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
char *msqlGetServerInfo(void) {
|
char *
|
||||||
|
msqlGetServerInfo(void)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
char *msqlGetHostInfo(void) {
|
char *
|
||||||
|
msqlGetHostInfo(void)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
char *msqlUnixTimeToDate(time_t date) {
|
char *
|
||||||
|
msqlUnixTimeToDate(time_t date)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
char *msqlUnixTimeToTime(time_t time) {
|
char *
|
||||||
|
msqlUnixTimeToTime(time_t time)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
void msqlClose(int a) {
|
void
|
||||||
|
msqlClose(int a)
|
||||||
|
{
|
||||||
PQfinish(PGh[a]);
|
PQfinish(PGh[a]);
|
||||||
PGh[a] = NULL;
|
PGh[a] = NULL;
|
||||||
if (queryres) {
|
if (queryres)
|
||||||
|
{
|
||||||
free(queryres);
|
free(queryres);
|
||||||
queryres = NULL;
|
queryres = NULL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void msqlDataSeek(m_result *result, int count) {
|
void
|
||||||
|
msqlDataSeek(m_result * result, int count)
|
||||||
|
{
|
||||||
int c;
|
int c;
|
||||||
|
|
||||||
result->cursor = result->queryData;
|
result->cursor = result->queryData;
|
||||||
for (c = 1; c < count; c++)
|
for (c = 1; c < count; c++)
|
||||||
if (result->cursor->next)
|
if (result->cursor->next)
|
||||||
result->cursor = result->cursor->next;
|
result->cursor = result->cursor->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
void msqlFieldSeek(m_result *result, int count) {
|
void
|
||||||
|
msqlFieldSeek(m_result * result, int count)
|
||||||
|
{
|
||||||
int c;
|
int c;
|
||||||
|
|
||||||
result->fieldCursor = result->fieldData;
|
result->fieldCursor = result->fieldData;
|
||||||
for (c = 1; c < count; c++)
|
for (c = 1; c < count; c++)
|
||||||
if (result->fieldCursor->next)
|
if (result->fieldCursor->next)
|
||||||
result->fieldCursor = result->fieldCursor->next;
|
result->fieldCursor = result->fieldCursor->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
void msqlFreeResult(m_result *result) {
|
void
|
||||||
if (result) {
|
msqlFreeResult(m_result * result)
|
||||||
|
{
|
||||||
|
if (result)
|
||||||
|
{
|
||||||
/* Clears fields */
|
/* Clears fields */
|
||||||
free(result->fieldData);
|
free(result->fieldData);
|
||||||
result->cursor = result->queryData;
|
result->cursor = result->queryData;
|
||||||
while (result->cursor) {
|
while (result->cursor)
|
||||||
|
{
|
||||||
int c;
|
int c;
|
||||||
m_row m = result->cursor->data;
|
m_row m = result->cursor->data;
|
||||||
|
|
||||||
@ -163,73 +211,106 @@ void msqlFreeResult(m_result *result) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m_row msqlFetchRow(m_result *row) {
|
m_row
|
||||||
|
msqlFetchRow(m_result * row)
|
||||||
|
{
|
||||||
m_data *r = row->cursor;
|
m_data *r = row->cursor;
|
||||||
if (r) {
|
|
||||||
|
if (r)
|
||||||
|
{
|
||||||
row->cursor = row->cursor->next;
|
row->cursor = row->cursor->next;
|
||||||
return (m_row) r->data;
|
return (m_row) r->data;
|
||||||
}
|
}
|
||||||
return (m_row) NULL;
|
return (m_row) NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
m_seq *msqlGetSequenceInfo(int a, char *b) {
|
m_seq *
|
||||||
|
msqlGetSequenceInfo(int a, char *b)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
m_field *msqlFetchField (m_result *mr) {
|
m_field *
|
||||||
|
msqlFetchField(m_result * mr)
|
||||||
|
{
|
||||||
m_field *m = (m_field *) mr->fieldCursor;
|
m_field *m = (m_field *) mr->fieldCursor;
|
||||||
if (m) {
|
|
||||||
|
if (m)
|
||||||
|
{
|
||||||
mr->fieldCursor = mr->fieldCursor->next;
|
mr->fieldCursor = mr->fieldCursor->next;
|
||||||
return m;
|
return m;
|
||||||
}
|
}
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
m_result *msqlListDBs(int a) {
|
m_result *
|
||||||
|
msqlListDBs(int a)
|
||||||
|
{
|
||||||
m_result *m;
|
m_result *m;
|
||||||
if (msqlQuery(a, "select datname from pg_database") > 0) {
|
|
||||||
|
if (msqlQuery(a, "select datname from pg_database") > 0)
|
||||||
|
{
|
||||||
m = msqlStoreResult();
|
m = msqlStoreResult();
|
||||||
return m;
|
return m;
|
||||||
} else return NULL;
|
}
|
||||||
|
else
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
m_result *msqlListTables(int a) {
|
m_result *
|
||||||
|
msqlListTables(int a)
|
||||||
|
{
|
||||||
m_result *m;
|
m_result *m;
|
||||||
char tbuf[BUFSIZ];
|
char tbuf[BUFSIZ];
|
||||||
|
|
||||||
sprintf(tbuf, "select relname from pg_class where relkind='r' and relowner=%d", getuid());
|
sprintf(tbuf, "select relname from pg_class where relkind='r' and relowner=%d", getuid());
|
||||||
if (msqlQuery(a, tbuf) > 0) {
|
if (msqlQuery(a, tbuf) > 0)
|
||||||
|
{
|
||||||
m = msqlStoreResult();
|
m = msqlStoreResult();
|
||||||
return m;
|
return m;
|
||||||
} else return NULL;
|
}
|
||||||
|
else
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
m_result *msqlListFields(int a, char *b) {
|
m_result *
|
||||||
|
msqlListFields(int a, char *b)
|
||||||
|
{
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
m_result *msqlListIndex(int a, char *b, char *c) {
|
m_result *
|
||||||
|
msqlListIndex(int a, char *b, char *c)
|
||||||
|
{
|
||||||
m_result *m;
|
m_result *m;
|
||||||
char tbuf[BUFSIZ];
|
char tbuf[BUFSIZ];
|
||||||
|
|
||||||
sprintf(tbuf, "select relname from pg_class where relkind='i' and relowner=%d", getuid());
|
sprintf(tbuf, "select relname from pg_class where relkind='i' and relowner=%d", getuid());
|
||||||
if (msqlQuery(a, tbuf) > 0) {
|
if (msqlQuery(a, tbuf) > 0)
|
||||||
|
{
|
||||||
m = msqlStoreResult();
|
m = msqlStoreResult();
|
||||||
return m;
|
return m;
|
||||||
} else return NULL;
|
}
|
||||||
|
else
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
m_result *msqlStoreResult(void) {
|
m_result *
|
||||||
if (queryres) {
|
msqlStoreResult(void)
|
||||||
|
{
|
||||||
|
if (queryres)
|
||||||
|
{
|
||||||
m_result *mr = malloc(sizeof(m_result));
|
m_result *mr = malloc(sizeof(m_result));
|
||||||
m_fdata *mf;
|
m_fdata *mf;
|
||||||
m_data *md;
|
m_data *md;
|
||||||
int count;
|
int count;
|
||||||
|
|
||||||
mr->queryData = mr->cursor = NULL;
|
mr->queryData = mr->cursor = NULL;
|
||||||
mr->numRows = PQntuples(queryres);
|
mr->numRows = PQntuples(queryres);
|
||||||
mr->numFields = PQnfields(queryres);
|
mr->numFields = PQnfields(queryres);
|
||||||
|
|
||||||
mf = calloc(PQnfields(queryres), sizeof(m_fdata));
|
mf = calloc(PQnfields(queryres), sizeof(m_fdata));
|
||||||
for (count = 0; count < PQnfields(queryres); count++) {
|
for (count = 0; count < PQnfields(queryres); count++)
|
||||||
|
{
|
||||||
(m_fdata *) (mf + count)->field.name = strdup(PQfname(queryres, count));
|
(m_fdata *) (mf + count)->field.name = strdup(PQfname(queryres, count));
|
||||||
(m_fdata *) (mf + count)->field.table = tfrom;
|
(m_fdata *) (mf + count)->field.table = tfrom;
|
||||||
(m_fdata *) (mf + count)->field.type = CHAR_TYPE;
|
(m_fdata *) (mf + count)->field.type = CHAR_TYPE;
|
||||||
@ -239,13 +320,13 @@ m_result *msqlStoreResult(void) {
|
|||||||
(m_fdata *) (mf + count - 1)->next = NULL;
|
(m_fdata *) (mf + count - 1)->next = NULL;
|
||||||
|
|
||||||
md = calloc(PQntuples(queryres), sizeof(m_data));
|
md = calloc(PQntuples(queryres), sizeof(m_data));
|
||||||
for (count = 0; count < PQntuples(queryres); count++) {
|
for (count = 0; count < PQntuples(queryres); count++)
|
||||||
|
{
|
||||||
m_row rows = calloc(PQnfields(queryres) * sizeof(m_row) + 1, 1);
|
m_row rows = calloc(PQnfields(queryres) * sizeof(m_row) + 1, 1);
|
||||||
int c;
|
int c;
|
||||||
|
|
||||||
for (c = 0; c < PQnfields(queryres); c++) {
|
for (c = 0; c < PQnfields(queryres); c++)
|
||||||
rows[c] = strdup(PQgetvalue(queryres, count, c));
|
rows[c] = strdup(PQgetvalue(queryres, count, c));
|
||||||
}
|
|
||||||
(m_data *) (md + count)->data = rows;
|
(m_data *) (md + count)->data = rows;
|
||||||
|
|
||||||
(m_data *) (md + count)->width = PQnfields(queryres);
|
(m_data *) (md + count)->width = PQnfields(queryres);
|
||||||
@ -257,19 +338,28 @@ m_result *msqlStoreResult(void) {
|
|||||||
mr->fieldCursor = mr->fieldData = mf;
|
mr->fieldCursor = mr->fieldData = mf;
|
||||||
|
|
||||||
return mr;
|
return mr;
|
||||||
} else return NULL;
|
}
|
||||||
|
else
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
time_t msqlDateToUnixTime(char *a) {
|
time_t
|
||||||
|
msqlDateToUnixTime(char *a)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
time_t msqlTimeToUnixTime(char *b) {
|
time_t
|
||||||
|
msqlTimeToUnixTime(char *b)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
char *msql_tmpnam(void) {
|
char *
|
||||||
|
msql_tmpnam(void)
|
||||||
|
{
|
||||||
return tmpnam("/tmp/msql.XXXXXX");
|
return tmpnam("/tmp/msql.XXXXXX");
|
||||||
}
|
}
|
||||||
|
|
||||||
int msqlLoadConfigFile(char *a) {
|
int
|
||||||
|
msqlLoadConfigFile(char *a)
|
||||||
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -23,6 +23,7 @@ extern int assertTest(int val);
|
|||||||
|
|
||||||
#ifdef ASSERT_CHECKING_TEST
|
#ifdef ASSERT_CHECKING_TEST
|
||||||
extern int assertEnable(int val);
|
extern int assertEnable(int val);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
int
|
int
|
||||||
@ -68,6 +69,7 @@ assert_test(int val)
|
|||||||
{
|
{
|
||||||
return assertTest(val);
|
return assertTest(val);
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* end of file */
|
/* end of file */
|
||||||
|
@ -7,8 +7,10 @@ int unlisten(char *relname);
|
|||||||
int max(int x, int y);
|
int max(int x, int y);
|
||||||
int min(int x, int y);
|
int min(int x, int y);
|
||||||
int assert_enable(int val);
|
int assert_enable(int val);
|
||||||
|
|
||||||
#ifdef ASSERT_CHECKING_TEST
|
#ifdef ASSERT_CHECKING_TEST
|
||||||
int assert_test(int val);
|
int assert_test(int val);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -9,7 +9,7 @@
|
|||||||
*
|
*
|
||||||
* Copyright (c) 1994, Regents of the University of California
|
* Copyright (c) 1994, Regents of the University of California
|
||||||
*
|
*
|
||||||
* $Id: c.h,v 1.1 1998/10/31 04:10:53 scrappy Exp $
|
* $Id: c.h,v 1.2 1999/05/25 16:06:01 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
@ -65,6 +65,7 @@
|
|||||||
#ifndef __cplusplus
|
#ifndef __cplusplus
|
||||||
#ifndef bool
|
#ifndef bool
|
||||||
typedef char bool;
|
typedef char bool;
|
||||||
|
|
||||||
#endif /* ndef bool */
|
#endif /* ndef bool */
|
||||||
#endif /* not C++ */
|
#endif /* not C++ */
|
||||||
typedef bool *BoolPtr;
|
typedef bool *BoolPtr;
|
||||||
|
@ -1,11 +1,11 @@
|
|||||||
|
|
||||||
#ifndef TCPIPV4
|
#ifndef TCPIPV4
|
||||||
#define TCPIPV4
|
#define TCPIPV4
|
||||||
#endif
|
#endif /*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef MAXSOCKETS
|
#ifndef MAXSOCKETS
|
||||||
#endif
|
#define MAXSOCKETS 2048
|
||||||
#endif /*
|
#endif /*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -206,7 +206,8 @@ on_error_continue()
|
|||||||
** get_result
|
** get_result
|
||||||
**
|
**
|
||||||
*/
|
*/
|
||||||
PGresult *get_result()
|
PGresult *
|
||||||
|
get_result()
|
||||||
{
|
{
|
||||||
char *cmdstatus = PQcmdStatus(res);
|
char *cmdstatus = PQcmdStatus(res);
|
||||||
|
|
||||||
@ -224,7 +225,8 @@ PGresult *get_result()
|
|||||||
** set_result
|
** set_result
|
||||||
**
|
**
|
||||||
*/
|
*/
|
||||||
void set_result(PGresult *newres)
|
void
|
||||||
|
set_result(PGresult *newres)
|
||||||
{
|
{
|
||||||
|
|
||||||
char *cmdstatus = PQcmdStatus(res);
|
char *cmdstatus = PQcmdStatus(res);
|
||||||
@ -256,7 +258,8 @@ void set_result(PGresult *newres)
|
|||||||
** unset_result
|
** unset_result
|
||||||
**
|
**
|
||||||
*/
|
*/
|
||||||
void unset_result(PGresult *oldres)
|
void
|
||||||
|
unset_result(PGresult *oldres)
|
||||||
{
|
{
|
||||||
char *cmdstatus = PQcmdStatus(oldres);
|
char *cmdstatus = PQcmdStatus(oldres);
|
||||||
|
|
||||||
@ -277,8 +280,8 @@ void unset_result(PGresult *oldres)
|
|||||||
** reset_fetch
|
** reset_fetch
|
||||||
**
|
**
|
||||||
*/
|
*/
|
||||||
void reset_fetch()
|
void
|
||||||
|
reset_fetch()
|
||||||
{
|
{
|
||||||
tuple = 0;
|
tuple = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -17,7 +17,8 @@ OH, me, I'm Terry Mackintosh <terry@terrym.com>
|
|||||||
|
|
||||||
HeapTuple moddatetime(void);
|
HeapTuple moddatetime(void);
|
||||||
|
|
||||||
HeapTuple moddatetime()
|
HeapTuple
|
||||||
|
moddatetime()
|
||||||
{
|
{
|
||||||
Trigger *trigger; /* to get trigger name */
|
Trigger *trigger; /* to get trigger name */
|
||||||
int nargs; /* # of arguments */
|
int nargs; /* # of arguments */
|
||||||
@ -65,22 +66,25 @@ HeapTuple moddatetime()
|
|||||||
/* Get the current datetime. */
|
/* Get the current datetime. */
|
||||||
newdt = datetime_in("now");
|
newdt = datetime_in("now");
|
||||||
|
|
||||||
/* This gets the position in the turple of the field we want.
|
/*
|
||||||
args[0] being the name of the field to update, as passed in
|
* This gets the position in the turple of the field we want. args[0]
|
||||||
from the trigger.
|
* being the name of the field to update, as passed in from the
|
||||||
|
* trigger.
|
||||||
*/
|
*/
|
||||||
attnum = SPI_fnumber(tupdesc, args[0]);
|
attnum = SPI_fnumber(tupdesc, args[0]);
|
||||||
|
|
||||||
/* This is were we check to see if the feild we are suppost to update even
|
/*
|
||||||
exits. The above function must return -1 if name not found?
|
* This is were we check to see if the feild we are suppost to update
|
||||||
|
* even exits. The above function must return -1 if name not found?
|
||||||
*/
|
*/
|
||||||
if (attnum < 0)
|
if (attnum < 0)
|
||||||
elog(ERROR, "moddatetime (%s): there is no attribute %s", relname,
|
elog(ERROR, "moddatetime (%s): there is no attribute %s", relname,
|
||||||
args[0]);
|
args[0]);
|
||||||
|
|
||||||
/* OK, this is where we make sure the datetime field that we are
|
/*
|
||||||
modifying is really a datetime field.
|
* OK, this is where we make sure the datetime field that we are
|
||||||
Hay, error checking, what a novel idea !-)
|
* modifying is really a datetime field. Hay, error checking, what a
|
||||||
|
* novel idea !-)
|
||||||
*/
|
*/
|
||||||
if (SPI_gettypeid(tupdesc, attnum) != DATETIMEOID)
|
if (SPI_gettypeid(tupdesc, attnum) != DATETIMEOID)
|
||||||
elog(ERROR, "moddatetime (%s): attribute %s must be of DATETIME type",
|
elog(ERROR, "moddatetime (%s): attribute %s must be of DATETIME type",
|
||||||
|
@ -1,24 +1,27 @@
|
|||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
|
|
||||||
char *strtoupper(char *string)
|
char *
|
||||||
|
strtoupper(char *string)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < strlen(string); i++)
|
for (i = 0; i < strlen(string); i++)
|
||||||
{
|
|
||||||
string[i] = toupper(string[i]);
|
string[i] = toupper(string[i]);
|
||||||
}
|
|
||||||
return string;
|
return string;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
void main ( char argc , char **argv )
|
void
|
||||||
|
main(char argc, char **argv)
|
||||||
{
|
{
|
||||||
char str[250];
|
char str[250];
|
||||||
int sw = 0;
|
int sw = 0;
|
||||||
|
|
||||||
while (fgets(str, 240, stdin))
|
while (fgets(str, 240, stdin))
|
||||||
{
|
{
|
||||||
if ( sw == 0 ) printf("%s",strtoupper(str));
|
if (sw == 0)
|
||||||
|
printf("%s", strtoupper(str));
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -248,9 +248,11 @@ check_foreign_key()
|
|||||||
int ret;
|
int ret;
|
||||||
int i,
|
int i,
|
||||||
r;
|
r;
|
||||||
|
|
||||||
#ifdef DEBUG_QUERY
|
#ifdef DEBUG_QUERY
|
||||||
elog(NOTICE, "Check_foreign_key Enter Function");
|
elog(NOTICE, "Check_foreign_key Enter Function");
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Some checks first...
|
* Some checks first...
|
||||||
*/
|
*/
|
||||||
@ -404,6 +406,7 @@ check_foreign_key()
|
|||||||
for (r = 0; r < nrefs; r++)
|
for (r = 0; r < nrefs; r++)
|
||||||
{
|
{
|
||||||
relname = args2[0];
|
relname = args2[0];
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* For 'R'estrict action we construct SELECT query - SELECT 1
|
* For 'R'estrict action we construct SELECT query - SELECT 1
|
||||||
* FROM _referencing_relation_ WHERE Fkey1 = $1 [AND Fkey2 =
|
* FROM _referencing_relation_ WHERE Fkey1 = $1 [AND Fkey2 =
|
||||||
@ -418,17 +421,21 @@ check_foreign_key()
|
|||||||
* FROM _referencing_relation_ WHERE Fkey1 = $1 [AND Fkey2 =
|
* FROM _referencing_relation_ WHERE Fkey1 = $1 [AND Fkey2 =
|
||||||
* $2 [...]] - to delete all referencing tuples.
|
* $2 [...]] - to delete all referencing tuples.
|
||||||
*/
|
*/
|
||||||
/*Max : Cascade with UPDATE query i create update query that
|
|
||||||
updates new key values in referenced tables
|
/*
|
||||||
|
* Max : Cascade with UPDATE query i create update query that
|
||||||
|
* updates new key values in referenced tables
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
|
||||||
else if (action == 'c'){
|
else if (action == 'c')
|
||||||
|
{
|
||||||
if (is_update == 1)
|
if (is_update == 1)
|
||||||
{
|
{
|
||||||
int fn;
|
int fn;
|
||||||
char *nv;
|
char *nv;
|
||||||
int k;
|
int k;
|
||||||
|
|
||||||
sprintf(sql, "update %s set ", relname);
|
sprintf(sql, "update %s set ", relname);
|
||||||
for (k = 1; k <= nkeys; k++)
|
for (k = 1; k <= nkeys; k++)
|
||||||
{
|
{
|
||||||
@ -447,7 +454,10 @@ check_foreign_key()
|
|||||||
elog(NOTICE, "Check_foreign_key Debug value %s type %s %d",
|
elog(NOTICE, "Check_foreign_key Debug value %s type %s %d",
|
||||||
nv, type, is_char_type);
|
nv, type, is_char_type);
|
||||||
#endif
|
#endif
|
||||||
/* is_char_type =1 i set ' ' for define a new value
|
|
||||||
|
/*
|
||||||
|
* is_char_type =1 i set ' ' for define a new
|
||||||
|
* value
|
||||||
*/
|
*/
|
||||||
sprintf(sql + strlen(sql), " %s = %s%s%s %s ",
|
sprintf(sql + strlen(sql), " %s = %s%s%s %s ",
|
||||||
args2[k], (is_char_type > 0) ? "'" : "",
|
args2[k], (is_char_type > 0) ? "'" : "",
|
||||||
@ -457,10 +467,12 @@ check_foreign_key()
|
|||||||
strcat(sql, " where ");
|
strcat(sql, " where ");
|
||||||
|
|
||||||
}
|
}
|
||||||
else /* DELETE */
|
else
|
||||||
|
/* DELETE */
|
||||||
sprintf(sql, "delete from %s where ", relname);
|
sprintf(sql, "delete from %s where ", relname);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* For 'S'etnull action we construct UPDATE query - UPDATE
|
* For 'S'etnull action we construct UPDATE query - UPDATE
|
||||||
* _referencing_relation_ SET Fkey1 null [, Fkey2 null [...]]
|
* _referencing_relation_ SET Fkey1 null [, Fkey2 null [...]]
|
||||||
|
@ -352,6 +352,7 @@ c_charin(char *str)
|
|||||||
{
|
{
|
||||||
return (string_input(str, 1, 0, NULL));
|
return (string_input(str, 1, 0, NULL));
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* end of file */
|
/* end of file */
|
||||||
|
@ -14,6 +14,7 @@ char *c_varcharout(char *s);
|
|||||||
#if 0
|
#if 0
|
||||||
struct varlena *c_textin(char *str);
|
struct varlena *c_textin(char *str);
|
||||||
char *c_char16in(char *str);
|
char *c_char16in(char *str);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.1 1999/04/10 16:48:05 peter Exp $
|
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.2 1999/05/25 16:06:31 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
@ -31,10 +31,12 @@ int vacuumlo(char *,int);
|
|||||||
/*
|
/*
|
||||||
* This vacuums a database. It returns 1 on success, -1 on failure.
|
* This vacuums a database. It returns 1 on success, -1 on failure.
|
||||||
*/
|
*/
|
||||||
int vacuumlo(char *database,int verbose)
|
int
|
||||||
|
vacuumlo(char *database, int verbose)
|
||||||
{
|
{
|
||||||
PGconn *conn;
|
PGconn *conn;
|
||||||
PGresult *res, *res2;
|
PGresult *res,
|
||||||
|
*res2;
|
||||||
char buf[BUFSIZE];
|
char buf[BUFSIZE];
|
||||||
int matched = 0; /* Number matched per scan */
|
int matched = 0; /* Number matched per scan */
|
||||||
int i;
|
int i;
|
||||||
@ -60,7 +62,8 @@ int vacuumlo(char *database,int verbose)
|
|||||||
strcat(buf, "INTO TEMP TABLE vacuum_l ");
|
strcat(buf, "INTO TEMP TABLE vacuum_l ");
|
||||||
strcat(buf, "FROM pg_class ");
|
strcat(buf, "FROM pg_class ");
|
||||||
strcat(buf, "WHERE relkind='l'");
|
strcat(buf, "WHERE relkind='l'");
|
||||||
if(!(res = PQexec(conn,buf))) {
|
if (!(res = PQexec(conn, buf)))
|
||||||
|
{
|
||||||
fprintf(stderr, "Failed to create temp table.\n");
|
fprintf(stderr, "Failed to create temp table.\n");
|
||||||
PQfinish(conn);
|
PQfinish(conn);
|
||||||
return -1;
|
return -1;
|
||||||
@ -68,8 +71,8 @@ int vacuumlo(char *database,int verbose)
|
|||||||
PQclear(res);
|
PQclear(res);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Now find any candidate tables who have columns of type oid (the column
|
* Now find any candidate tables who have columns of type oid (the
|
||||||
* oid is ignored, as it has attnum < 1)
|
* column oid is ignored, as it has attnum < 1)
|
||||||
*/
|
*/
|
||||||
buf[0] = '\0';
|
buf[0] = '\0';
|
||||||
strcat(buf, "SELECT c.relname, a.attname ");
|
strcat(buf, "SELECT c.relname, a.attname ");
|
||||||
@ -79,19 +82,22 @@ int vacuumlo(char *database,int verbose)
|
|||||||
strcat(buf, " AND a.atttypid = t.oid ");
|
strcat(buf, " AND a.atttypid = t.oid ");
|
||||||
strcat(buf, " AND t.typname = 'oid' ");
|
strcat(buf, " AND t.typname = 'oid' ");
|
||||||
strcat(buf, " AND c.relname NOT LIKE 'pg_%'");
|
strcat(buf, " AND c.relname NOT LIKE 'pg_%'");
|
||||||
if(!(res = PQexec(conn,buf))) {
|
if (!(res = PQexec(conn, buf)))
|
||||||
|
{
|
||||||
fprintf(stderr, "Failed to create temp table.\n");
|
fprintf(stderr, "Failed to create temp table.\n");
|
||||||
PQfinish(conn);
|
PQfinish(conn);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
for (i = 0; i < PQntuples(res); i++)
|
for (i = 0; i < PQntuples(res); i++)
|
||||||
{
|
{
|
||||||
char *table,*field;
|
char *table,
|
||||||
|
*field;
|
||||||
|
|
||||||
table = PQgetvalue(res, i, 0);
|
table = PQgetvalue(res, i, 0);
|
||||||
field = PQgetvalue(res, i, 1);
|
field = PQgetvalue(res, i, 1);
|
||||||
|
|
||||||
if(verbose) {
|
if (verbose)
|
||||||
|
{
|
||||||
fprintf(stdout, "Checking %s in %s: ", field, table);
|
fprintf(stdout, "Checking %s in %s: ", field, table);
|
||||||
fflush(stdout);
|
fflush(stdout);
|
||||||
}
|
}
|
||||||
@ -107,13 +113,15 @@ int vacuumlo(char *database,int verbose)
|
|||||||
strcat(buf, " FROM ");
|
strcat(buf, " FROM ");
|
||||||
strcat(buf, table);
|
strcat(buf, table);
|
||||||
strcat(buf, ");");
|
strcat(buf, ");");
|
||||||
if(!(res2 = PQexec(conn,buf))) {
|
if (!(res2 = PQexec(conn, buf)))
|
||||||
|
{
|
||||||
fprintf(stderr, "Failed to check %s in table %s\n", field, table);
|
fprintf(stderr, "Failed to check %s in table %s\n", field, table);
|
||||||
PQclear(res);
|
PQclear(res);
|
||||||
PQfinish(conn);
|
PQfinish(conn);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
if(PQresultStatus(res2)!=PGRES_COMMAND_OK) {
|
if (PQresultStatus(res2) != PGRES_COMMAND_OK)
|
||||||
|
{
|
||||||
fprintf(stderr,
|
fprintf(stderr,
|
||||||
"Failed to check %s in table %s\n%s\n",
|
"Failed to check %s in table %s\n%s\n",
|
||||||
field, table,
|
field, table,
|
||||||
@ -142,7 +150,8 @@ int vacuumlo(char *database,int verbose)
|
|||||||
buf[0] = '\0';
|
buf[0] = '\0';
|
||||||
strcat(buf, "SELECT lo ");
|
strcat(buf, "SELECT lo ");
|
||||||
strcat(buf, "FROM vacuum_l");
|
strcat(buf, "FROM vacuum_l");
|
||||||
if(!(res = PQexec(conn,buf))) {
|
if (!(res = PQexec(conn, buf)))
|
||||||
|
{
|
||||||
fprintf(stderr, "Failed to read temp table.\n");
|
fprintf(stderr, "Failed to read temp table.\n");
|
||||||
PQfinish(conn);
|
PQfinish(conn);
|
||||||
return -1;
|
return -1;
|
||||||
@ -152,15 +161,15 @@ int vacuumlo(char *database,int verbose)
|
|||||||
{
|
{
|
||||||
Oid lo = (Oid) atoi(PQgetvalue(res, i, 0));
|
Oid lo = (Oid) atoi(PQgetvalue(res, i, 0));
|
||||||
|
|
||||||
if(verbose) {
|
if (verbose)
|
||||||
|
{
|
||||||
fprintf(stdout, "\rRemoving lo %6d \n", lo);
|
fprintf(stdout, "\rRemoving lo %6d \n", lo);
|
||||||
fflush(stdout);
|
fflush(stdout);
|
||||||
}
|
}
|
||||||
|
|
||||||
if(lo_unlink(conn,lo)<0) {
|
if (lo_unlink(conn, lo) < 0)
|
||||||
fprintf(stderr, "Failed to remove lo %d\n", lo);
|
fprintf(stderr, "Failed to remove lo %d\n", lo);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
PQclear(res);
|
PQclear(res);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -190,7 +199,8 @@ main(int argc, char **argv)
|
|||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
for(arg=1;arg<argc;arg++) {
|
for (arg = 1; arg < argc; arg++)
|
||||||
|
{
|
||||||
if (strcmp("-v", argv[arg]) == 0)
|
if (strcmp("-v", argv[arg]) == 0)
|
||||||
verbose = !verbose;
|
verbose = !verbose;
|
||||||
else
|
else
|
||||||
|
@ -8,7 +8,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.50 1999/03/14 20:17:20 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.51 1999/05/25 16:06:35 momjian Exp $
|
||||||
*
|
*
|
||||||
* NOTES
|
* NOTES
|
||||||
* The old interface functions have been converted to macros
|
* The old interface functions have been converted to macros
|
||||||
@ -301,6 +301,7 @@ heap_getsysattr(HeapTuple tup, Buffer b, int attnum)
|
|||||||
}
|
}
|
||||||
return (Datum) NULL;
|
return (Datum) NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* ----------------
|
/* ----------------
|
||||||
@ -376,6 +377,7 @@ nocachegetattr(HeapTuple tuple,
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* there's a null somewhere in the tuple
|
* there's a null somewhere in the tuple
|
||||||
*/
|
*/
|
||||||
@ -410,6 +412,7 @@ nocachegetattr(HeapTuple tuple,
|
|||||||
{
|
{
|
||||||
/* check for nulls in any "earlier" bytes */
|
/* check for nulls in any "earlier" bytes */
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < byte; i++)
|
for (i = 0; i < byte; i++)
|
||||||
{
|
{
|
||||||
if (bp[i] != 0xFF)
|
if (bp[i] != 0xFF)
|
||||||
@ -439,6 +442,7 @@ nocachegetattr(HeapTuple tuple,
|
|||||||
else if (!HeapTupleAllFixed(tuple))
|
else if (!HeapTupleAllFixed(tuple))
|
||||||
{
|
{
|
||||||
int j;
|
int j;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* In for(), we make this <= and not < because we want to test
|
* In for(), we make this <= and not < because we want to test
|
||||||
* if we can go past it in initializing offsets.
|
* if we can go past it in initializing offsets.
|
||||||
@ -456,9 +460,9 @@ nocachegetattr(HeapTuple tuple,
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* If slow is zero, and we got here, we know that we have a tuple with
|
* If slow is zero, and we got here, we know that we have a tuple with
|
||||||
* no nulls or varlenas before the target attribute.
|
* no nulls or varlenas before the target attribute. If possible, we
|
||||||
* If possible, we also want to initialize the remainder of the
|
* also want to initialize the remainder of the attribute cached
|
||||||
* attribute cached offset values.
|
* offset values.
|
||||||
*/
|
*/
|
||||||
if (!slow)
|
if (!slow)
|
||||||
{
|
{
|
||||||
|
@ -8,7 +8,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.45 1999/05/10 00:44:50 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.46 1999/05/25 16:06:39 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
@ -54,6 +54,7 @@ getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
|
|||||||
if (HeapTupleIsValid(typeTuple))
|
if (HeapTupleIsValid(typeTuple))
|
||||||
{
|
{
|
||||||
Form_pg_type pt = (Form_pg_type) GETSTRUCT(typeTuple);
|
Form_pg_type pt = (Form_pg_type) GETSTRUCT(typeTuple);
|
||||||
|
|
||||||
*typOutput = (Oid) pt->typoutput;
|
*typOutput = (Oid) pt->typoutput;
|
||||||
*typElem = (Oid) pt->typelem;
|
*typElem = (Oid) pt->typelem;
|
||||||
return OidIsValid(*typOutput);
|
return OidIsValid(*typOutput);
|
||||||
@ -70,13 +71,15 @@ getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
|
|||||||
* Private state for a printtup destination object
|
* Private state for a printtup destination object
|
||||||
* ----------------
|
* ----------------
|
||||||
*/
|
*/
|
||||||
typedef struct { /* Per-attribute information */
|
typedef struct
|
||||||
|
{ /* Per-attribute information */
|
||||||
Oid typoutput; /* Oid for the attribute's type output fn */
|
Oid typoutput; /* Oid for the attribute's type output fn */
|
||||||
Oid typelem; /* typelem value to pass to the output fn */
|
Oid typelem; /* typelem value to pass to the output fn */
|
||||||
FmgrInfo finfo; /* Precomputed call info for typoutput */
|
FmgrInfo finfo; /* Precomputed call info for typoutput */
|
||||||
} PrinttupAttrInfo;
|
} PrinttupAttrInfo;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct
|
||||||
|
{
|
||||||
DestReceiver pub; /* publicly-known function pointers */
|
DestReceiver pub; /* publicly-known function pointers */
|
||||||
TupleDesc attrinfo; /* The attr info we are set up for */
|
TupleDesc attrinfo; /* The attr info we are set up for */
|
||||||
int nattrs;
|
int nattrs;
|
||||||
@ -136,6 +139,7 @@ printtup_prepare_info(DR_printtup* myState, TupleDesc typeinfo, int numAttrs)
|
|||||||
for (i = 0; i < numAttrs; i++)
|
for (i = 0; i < numAttrs; i++)
|
||||||
{
|
{
|
||||||
PrinttupAttrInfo *thisState = myState->myinfo + i;
|
PrinttupAttrInfo *thisState = myState->myinfo + i;
|
||||||
|
|
||||||
if (getTypeOutAndElem((Oid) typeinfo->attrs[i]->atttypid,
|
if (getTypeOutAndElem((Oid) typeinfo->attrs[i]->atttypid,
|
||||||
&thisState->typoutput, &thisState->typelem))
|
&thisState->typoutput, &thisState->typelem))
|
||||||
fmgr_info(thisState->typoutput, &thisState->finfo);
|
fmgr_info(thisState->typoutput, &thisState->finfo);
|
||||||
@ -198,6 +202,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
|
|||||||
for (i = 0; i < tuple->t_data->t_natts; ++i)
|
for (i = 0; i < tuple->t_data->t_natts; ++i)
|
||||||
{
|
{
|
||||||
PrinttupAttrInfo *thisState = myState->myinfo + i;
|
PrinttupAttrInfo *thisState = myState->myinfo + i;
|
||||||
|
|
||||||
attr = heap_getattr(tuple, i + 1, typeinfo, &isnull);
|
attr = heap_getattr(tuple, i + 1, typeinfo, &isnull);
|
||||||
if (isnull)
|
if (isnull)
|
||||||
continue;
|
continue;
|
||||||
@ -226,6 +231,7 @@ static void
|
|||||||
printtup_cleanup(DestReceiver * self)
|
printtup_cleanup(DestReceiver * self)
|
||||||
{
|
{
|
||||||
DR_printtup *myState = (DR_printtup *) self;
|
DR_printtup *myState = (DR_printtup *) self;
|
||||||
|
|
||||||
if (myState->myinfo)
|
if (myState->myinfo)
|
||||||
pfree(myState->myinfo);
|
pfree(myState->myinfo);
|
||||||
pfree(myState);
|
pfree(myState);
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.13 1999/02/13 23:14:13 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.14 1999/05/25 16:06:41 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.48 1999/02/13 23:14:14 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.49 1999/05/25 16:06:42 momjian Exp $
|
||||||
*
|
*
|
||||||
* NOTES
|
* NOTES
|
||||||
* some of the executor utility code such as "ExecTypeFromTL" should be
|
* some of the executor utility code such as "ExecTypeFromTL" should be
|
||||||
|
@ -344,7 +344,7 @@ gistinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation
|
|||||||
/*
|
/*
|
||||||
* Notes in ExecUtils:ExecOpenIndices()
|
* Notes in ExecUtils:ExecOpenIndices()
|
||||||
*
|
*
|
||||||
RelationSetLockForWrite(r);
|
* RelationSetLockForWrite(r);
|
||||||
*/
|
*/
|
||||||
|
|
||||||
res = gistdoinsert(r, itup, &giststate);
|
res = gistdoinsert(r, itup, &giststate);
|
||||||
@ -1106,10 +1106,10 @@ gistdelete(Relation r, ItemPointer tid)
|
|||||||
Page page;
|
Page page;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Notes in ExecUtils:ExecOpenIndices()
|
* Notes in ExecUtils:ExecOpenIndices() Also note that only vacuum
|
||||||
* Also note that only vacuum deletes index tuples now...
|
* deletes index tuples now...
|
||||||
*
|
*
|
||||||
RelationSetLockForWrite(r);
|
* RelationSetLockForWrite(r);
|
||||||
*/
|
*/
|
||||||
|
|
||||||
blkno = ItemPointerGetBlockNumber(tid);
|
blkno = ItemPointerGetBlockNumber(tid);
|
||||||
|
@ -68,7 +68,7 @@ gistbeginscan(Relation r,
|
|||||||
/*
|
/*
|
||||||
* Let index_beginscan does its work...
|
* Let index_beginscan does its work...
|
||||||
*
|
*
|
||||||
RelationSetLockForRead(r);
|
* RelationSetLockForRead(r);
|
||||||
*/
|
*/
|
||||||
|
|
||||||
s = RelationGetIndexScan(r, fromEnd, nkeys, key);
|
s = RelationGetIndexScan(r, fromEnd, nkeys, key);
|
||||||
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.25 1999/02/13 23:14:17 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.26 1999/05/25 16:06:54 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.16 1999/03/14 16:27:59 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.17 1999/05/25 16:06:56 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.19 1999/02/13 23:14:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.20 1999/05/25 16:06:58 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.42 1999/03/28 20:31:56 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.43 1999/05/25 16:07:04 momjian Exp $
@@ -439,7 +439,8 @@ heapgettup(Relation relation,
- ++lpp; /* move forward in this page's ItemId array */
+ ++lpp; /* move forward in this page's ItemId
+ * array */
@@ -816,6 +817,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
  { /* NONTUP */
+
  /*
  * Don't release scan->rs_cbuf at this point, because
  * heapgettup doesn't increase PrivateRefCount if it is
@@ -897,6 +899,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
  { /* NONTUP */
+
  /*
  * Don't release scan->rs_cbuf at this point, because
  * heapgettup doesn't increase PrivateRefCount if it is
@@ -1093,9 +1096,7 @@ heap_insert(Relation relation, HeapTuple tup)
  if (IsSystemRelationName(RelationGetRelationName(relation)->data))
- {
  RelationInvalidateHeapTuple(relation, tup);
- }
@@ -1283,11 +1284,12 @@ l2:
  /*
- * New item won't fit on same page as old item, have to look
- * for a new place to put it. Note that we have to unlock
- * current buffer context - not good but RelationPutHeapTupleAtEnd
- * uses extend lock.
+ * New item won't fit on same page as old item, have to look for a
+ * new place to put it. Note that we have to unlock current buffer
+ * context - not good but RelationPutHeapTupleAtEnd uses extend
+ * lock.
  */
@@ -1295,8 +1297,8 @@ l2:
  /*
- * New item in place, now record address of new tuple in
- * t_ctid of old one.
+ * New item in place, now record address of new tuple in t_ctid of old
+ * one.
  */
  oldtup.t_data->t_ctid = newtup->t_self;
@@ -7,7 +7,7 @@
- * $Id: hio.c,v 1.19 1999/05/07 01:22:53 vadim Exp $
+ * $Id: hio.c,v 1.20 1999/05/25 16:07:07 momjian Exp $
@@ -68,7 +68,7 @@ RelationPutHeapTuple(Relation relation,
  /*
  * Let the caller do this!
  *
- WriteBuffer(buffer);
+ * WriteBuffer(buffer);
  */
@@ -111,8 +111,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
  /*
- * Lock relation for extention. We can use LockPage here as long as
- * in all other places we use page-level locking for indices only.
+ * Lock relation for extention. We can use LockPage here as long as in
+ * all other places we use page-level locking for indices only.
  * Alternatevely, we could define pseudo-table as we do for
  * transactions with XactLockTable.
  */
@@ -132,6 +132,7 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
  pageHeader = (Page) BufferGetPage(buffer);
+
  /*
  * There was IF instead of ASSERT here ?!
  */
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.16 1999/02/13 23:14:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.17 1999/05/25 16:07:12 momjian Exp $
@@ -270,5 +270,5 @@ IndexScanRestorePosition(IndexScanDesc scan)
  scan->flags = 0x0; /* XXX should have a symbolic name */
  }
- #endif
 
+ #endif
@ -8,7 +8,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.31 1999/02/13 23:14:30 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.32 1999/05/25 16:07:15 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.22 1999/03/14 05:08:56 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.23 1999/05/25 16:07:21 momjian Exp $
|
||||||
*
|
*
|
||||||
* NOTES
|
* NOTES
|
||||||
* These functions are stored in pg_amproc. For each operator class
|
* These functions are stored in pg_amproc. For each operator class
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.39 1999/05/01 16:09:45 vadim Exp $
|
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.40 1999/05/25 16:07:23 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
@ -122,9 +122,10 @@ l1:
|
|||||||
*/
|
*/
|
||||||
while (_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
|
while (_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
|
||||||
{ /* they're equal */
|
{ /* they're equal */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Have to check is inserted heap tuple deleted one
|
* Have to check is inserted heap tuple deleted one (i.e.
|
||||||
* (i.e. just moved to another place by vacuum)!
|
* just moved to another place by vacuum)!
|
||||||
*/
|
*/
|
||||||
if (chtup)
|
if (chtup)
|
||||||
{
|
{
|
||||||
@@ -8,7 +8,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.20 1999/04/22 08:19:59 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.21 1999/05/25 16:07:26 momjian Exp $
@@ -497,14 +497,13 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
  /*
- * _bt_insertonpg set bts_offset to InvalidOffsetNumber
- * in the case of concurrent ROOT page split
+ * _bt_insertonpg set bts_offset to InvalidOffsetNumber in the
+ * case of concurrent ROOT page split
  */
  if (stack->bts_offset == InvalidOffsetNumber)
- {
  i = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
- }
  else
@@ -8,7 +8,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.37 1999/03/28 20:31:58 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.38 1999/05/25 16:07:27 momjian Exp $
@@ -391,9 +391,10 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
  if (ItemPointerIsValid(&(scan->currentItemData)))
  {
+
  /*
- * Restore scan position using heap TID returned
- * by previous call to btgettuple().
+ * Restore scan position using heap TID returned by previous call
+ * to btgettuple().
  */
  _bt_restscan(scan);
@@ -623,10 +624,9 @@ _bt_restscan(IndexScanDesc scan)
  /*
- * We use this as flag when first index tuple on page
- * is deleted but we do not move left (this would
- * slowdown vacuum) - so we set current->ip_posid
- * before first index tuple on the current page
+ * We use this as flag when first index tuple on page is deleted but
+ * we do not move left (this would slowdown vacuum) - so we set
+ * current->ip_posid before first index tuple on the current page
  * (_bt_step will move it right)...
  */
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.20 1999/03/28 20:31:58 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.21 1999/05/25 16:07:29 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.43 1999/04/13 17:18:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.44 1999/05/25 16:07:31 momjian Exp $
@@ -1104,9 +1104,10 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
  current = &(scan->currentItemData);
+
  /*
- * Don't use ItemPointerGetOffsetNumber or you risk to get
- * assertion due to ability of ip_posid to be equal 0.
+ * Don't use ItemPointerGetOffsetNumber or you risk to get assertion
+ * due to ability of ip_posid to be equal 0.
  */
  offnum = current->ip_posid;
@@ -5,7 +5,7 @@
- * $Id: nbtsort.c,v 1.38 1999/05/09 00:53:19 tgl Exp $
+ * $Id: nbtsort.c,v 1.39 1999/05/25 16:07:34 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.31 1999/02/13 23:14:42 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.32 1999/05/25 16:07:38 momjian Exp $
@@ -307,7 +307,7 @@ rtinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation he
  /*
  * Notes in ExecUtils:ExecOpenIndices()
  *
- RelationSetLockForWrite(r);
+ * RelationSetLockForWrite(r);
  */
@@ -947,10 +947,10 @@ rtdelete(Relation r, ItemPointer tid)
  /*
- * Notes in ExecUtils:ExecOpenIndices()
- * Also note that only vacuum deletes index tuples now...
+ * Notes in ExecUtils:ExecOpenIndices() Also note that only vacuum
+ * deletes index tuples now...
  *
- RelationSetLockForWrite(r);
+ * RelationSetLockForWrite(r);
  */
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.22 1999/02/13 23:14:43 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.23 1999/05/25 16:07:40 momjian Exp $
@@ -69,7 +69,7 @@ rtbeginscan(Relation r,
  /*
  * Let index_beginscan does its work...
  *
- RelationSetLockForRead(r);
+ * RelationSetLockForRead(r);
  */
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.25 1999/03/30 01:37:21 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.26 1999/05/25 16:07:45 momjian Exp $
@@ -221,7 +221,7 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */
  /*
  * update (invalidate) our single item TransactionLogTest cache.
  *
- if (status != XID_COMMIT)
+ * if (status != XID_COMMIT)
  *
  * What's the hell ?! Why != XID_COMMIT ?!
  */
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.19 1999/02/13 23:14:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.20 1999/05/25 16:07:48 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.35 1999/05/13 00:34:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.36 1999/05/25 16:07:50 momjian Exp $
@@ -299,6 +299,7 @@ IsTransactionState(void)
  return false;
  }
+
  #endif
@@ -1509,6 +1510,7 @@ AbortOutOfAnyTransaction()
  if (s->state != TRANS_DEFAULT)
  AbortTransaction();
+
  /*
  * Now reset the high-level state
  */
@@ -5,7 +5,7 @@
- * $Id: xid.c,v 1.21 1999/02/13 23:14:49 momjian Exp $
+ * $Id: xid.c,v 1.22 1999/05/25 16:07:52 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.59 1999/05/10 00:44:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.60 1999/05/25 16:07:56 momjian Exp $
@@ -587,7 +587,9 @@ DefineAttr(char *name, char *type, int attnum)
  attlen = attrtypes[attnum]->attlen = Procid[typeoid].len;
- /* Cheat like mad to fill in these items from the length only.
+
+ /*
+ * Cheat like mad to fill in these items from the length only.
  * This only has to work for types used in the system catalogs...
  */
  switch (attlen)
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.20 1999/02/13 23:14:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.21 1999/05/25 16:08:01 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.84 1999/05/22 04:12:24 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.85 1999/05/25 16:08:03 momjian Exp $
@@ -240,9 +240,7 @@ heap_create(char *relname,
  else
- {
  relid = newoid();
- }
@@ -719,6 +717,7 @@ AddNewRelationTuple(Relation pg_class_desc,
  if (!isBootstrap)
  {
+
  /*
  * First, open the catalog indices and insert index tuples for the
  * new relation.
@@ -814,6 +813,7 @@ heap_create_with_catalog(char *relname,
  if (relid != InvalidOid)
  {
+
  /*
  * This is heavy-handed, but appears necessary bjm 1999/02/01
  * SystemCacheRelationFlushed(relid) is not enough either.
@@ -1516,7 +1516,9 @@ StoreAttrDefault(Relation rel, AttrDefault *attrdef)
  start:
- /* Surround table name with double quotes to allow mixed-case and
+
+ /*
+ * Surround table name with double quotes to allow mixed-case and
  * whitespaces in names. - BGA 1998-11-14
  */
@@ -1598,7 +1600,8 @@ StoreRelCheck(Relation rel, ConstrCheck *check)
- /* Check for table's existance. Surround table name with double-quotes
+ /*
+ * Check for table's existance. Surround table name with double-quotes
  * to allow mixed-case and whitespace names. - thomas 1998-11-12
  */
  snprintf(str, MAX_PARSE_BUFFER,
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.74 1999/05/17 00:27:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.75 1999/05/25 16:08:06 momjian Exp $
@@ -991,6 +991,7 @@ index_create(char *heapRelationName,
  if (relid != InvalidOid)
  {
+
  /*
  * This is heavy-handed, but appears necessary bjm 1999/02/01
  * SystemCacheRelationFlushed(relid) is not enough either.
@@ -1005,7 +1006,8 @@ index_create(char *heapRelationName,
  indexRelationName = palloc(NAMEDATALEN);
- strcpy(indexRelationName, temp_relname); /* heap_create will change this */
+ strcpy(indexRelationName, temp_relname); /* heap_create will
+ * change this */
@@ -8,7 +8,7 @@
- * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.37 1999/05/10 00:44:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.38 1999/05/25 16:08:07 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.36 1999/05/10 00:44:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.37 1999/05/25 16:08:09 momjian Exp $
@@ -135,6 +135,7 @@ OperatorGetWithOpenRelation(Relation pg_operator_desc,
  regproc oprcode = ((Form_pg_operator) GETSTRUCT(tup))->oprcode;
+
  operatorObjectId = tup->t_data->t_oid;
@@ -506,8 +507,9 @@ OperatorDef(char *operatorName,
- /* At this point, if operatorObjectId is not InvalidOid then
- * we are filling in a previously-created shell.
+ /*
+ * At this point, if operatorObjectId is not InvalidOid then we are
+ * filling in a previously-created shell.
  */
@@ -648,7 +650,8 @@ OperatorDef(char *operatorName,
- ++i; /* Skip "oprresult", it was filled in above */
+ ++i; /* Skip "oprresult", it was filled in
+ * above */
@@ -704,7 +707,8 @@ OperatorDef(char *operatorName,
- case 3: /* right sort op takes right-side data type */
+ case 3: /* right sort op takes right-side data
+ * type */
  otherLeftTypeName = rightTypeName;
@@ -737,8 +741,10 @@ OperatorDef(char *operatorName,
  {
- /* self-linkage to this operator; will fix below.
- * Note that only self-linkage for commutation makes sense.
+
+ /*
+ * self-linkage to this operator; will fix below. Note
+ * that only self-linkage for commutation makes sense.
  */
  if (j != 0)
@@ -804,15 +810,14 @@ OperatorDef(char *operatorName,
  * If a commutator and/or negator link is provided, update the other
- * operator(s) to point at this one, if they don't already have a link.
- * This supports an alternate style of operator definition wherein the
- * user first defines one operator without giving negator or
- * commutator, then defines the other operator of the pair with the
- * proper commutator or negator attribute. That style doesn't require
- * creation of a shell, and it's the only style that worked right before
- * Postgres version 6.5.
- * This code also takes care of the situation where the new operator
- * is its own commutator.
+ * operator(s) to point at this one, if they don't already have a
+ * link. This supports an alternate style of operator definition
+ * wherein the user first defines one operator without giving negator
+ * or commutator, then defines the other operator of the pair with the
+ * proper commutator or negator attribute. That style doesn't require
+ * creation of a shell, and it's the only style that worked right
+ * before Postgres version 6.5. This code also takes care of the
+ * situation where the new operator is its own commutator.
  */
  if (selfCommutator)
@@ -869,7 +874,8 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
- /* if the commutator and negator are the same operator, do one update.
+ /*
+ * if the commutator and negator are the same operator, do one update.
  * XXX this is probably useless code --- I doubt it ever makes sense
  * for commutator and negator to be the same thing...
  */
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.28 1999/05/13 07:28:27 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.29 1999/05/25 16:08:11 momjian Exp $
@@ -226,11 +226,11 @@ ProcedureCreate(char *procedureName,
  * NOTE: in Postgres versions before 6.5, the SQL name of the created
- * function could not be different from the internal name, and 'prosrc'
- * wasn't used. So there is code out there that does CREATE FUNCTION
- * xyz AS '' LANGUAGE 'internal'. To preserve some modicum of
- * backwards compatibility, accept an empty 'prosrc' value as meaning
- * the supplied SQL function name.
+ * function could not be different from the internal name, and
+ * 'prosrc' wasn't used. So there is code out there that does CREATE
+ * FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some modicum
+ * of backwards compatibility, accept an empty 'prosrc' value as
+ * meaning the supplied SQL function name.
  */
  if (strcmp(languageName, "internal") == 0)
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.36 1999/04/20 03:51:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.37 1999/05/25 16:08:12 momjian Exp $
@@ -400,8 +400,8 @@ TypeCreate(char *typeName,
  /*
- * First look for a 1-argument func with all argtypes 0.
- * This is valid for all four kinds of procedure.
+ * First look for a 1-argument func with all argtypes 0. This is
+ * valid for all four kinds of procedure.
  */
  MemSet(argList, 0, 8 * sizeof(Oid));
@@ -413,20 +413,23 @@ TypeCreate(char *typeName,
  if (!HeapTupleIsValid(tup))
  {
+
  /*
- * For array types, the input procedures may take 3 args
- * (data value, element OID, atttypmod); the pg_proc
- * argtype signature is 0,0,INT4OID. The output procedures
- * may take 2 args (data value, element OID).
+ * For array types, the input procedures may take 3 args (data
+ * value, element OID, atttypmod); the pg_proc argtype
+ * signature is 0,0,INT4OID. The output procedures may take 2
+ * args (data value, element OID).
  */
  /* output proc */
  nargs = 2;
- } else
+ }
+ else
  {
  /* input proc */
  nargs = 3;
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.2 1999/03/16 04:25:46 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.3 1999/05/25 16:08:30 momjian Exp $
@@ -9,7 +9,7 @@
- * $Id: version.c,v 1.18 1999/02/13 23:15:12 momjian Exp $
+ * $Id: version.c,v 1.19 1999/05/25 16:08:32 momjian Exp $
@@ -6,7 +6,7 @@
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.46 1999/04/25 19:27:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.47 1999/05/25 16:08:15 momjian Exp $
@@ -164,22 +164,23 @@ Async_Notify(char *relname)
  * We allocate list memory from the global malloc pool to ensure that
- * it will live until we want to use it. This is probably not necessary
- * any longer, since we will use it before the end of the transaction.
- * DLList only knows how to use malloc() anyway, but we could probably
- * palloc() the strings...
+ * it will live until we want to use it. This is probably not
+ * necessary any longer, since we will use it before the end of the
+ * transaction. DLList only knows how to use malloc() anyway, but we
+ * could probably palloc() the strings...
  */
  * NOTE: we could check to see if pendingNotifies already has an entry
- * for relname, and thus avoid making duplicate entries. However, most
- * apps probably don't notify the same name multiple times per transaction,
- * so we'd likely just be wasting cycles to make such a check.
- * AsyncExistsPendingNotify() doesn't really care whether the list
- * contains duplicates...
+ * for relname, and thus avoid making duplicate entries. However,
+ * most apps probably don't notify the same name multiple times per
+ * transaction, so we'd likely just be wasting cycles to make such a
+ * check. AsyncExistsPendingNotify() doesn't really care whether the
+ * list contains duplicates...
  */
@@ -326,7 +327,9 @@ Async_Unlisten(char *relname, int pid)
  }
- /* We do not complain about unlistening something not being listened;
+
+ /*
+ * We do not complain about unlistening something not being listened;
  * should we?
  */
@@ -398,11 +401,12 @@ Async_UnlistenAll()
  Async_UnlistenOnExit()
  {
+
  /*
- * We need to start/commit a transaction for the unlisten,
- * but if there is already an active transaction we had better
- * abort that one first. Otherwise we'd end up committing changes
- * that probably ought to be discarded.
+ * We need to start/commit a transaction for the unlisten, but if
+ * there is already an active transaction we had better abort that one
+ * first. Otherwise we'd end up committing changes that probably
+ * ought to be discarded.
  */
  AbortOutOfAnyTransaction();
@ -450,10 +454,12 @@ AtCommit_Notify()
|
|||||||
int32 listenerPID;
|
int32 listenerPID;
|
||||||
|
|
||||||
if (!pendingNotifies)
|
if (!pendingNotifies)
|
||||||
return; /* no NOTIFY statements in this transaction */
|
return; /* no NOTIFY statements in this
|
||||||
|
* transaction */
|
||||||
|
|
||||||
/* NOTIFY is disabled if not normal processing mode.
|
/*
|
||||||
* This test used to be in xact.c, but it seems cleaner to do it here.
|
* NOTIFY is disabled if not normal processing mode. This test used to
|
||||||
|
* be in xact.c, but it seems cleaner to do it here.
|
||||||
*/
|
*/
|
||||||
if (!IsNormalProcessingMode())
|
if (!IsNormalProcessingMode())
|
||||||
{
|
{
|
||||||
@ -487,10 +493,13 @@ AtCommit_Notify()
|
|||||||
|
|
||||||
if (listenerPID == MyProcPid)
|
if (listenerPID == MyProcPid)
|
||||||
{
|
{
|
||||||
/* Self-notify: no need to bother with table update.
|
|
||||||
|
/*
|
||||||
|
* Self-notify: no need to bother with table update.
|
||||||
* Indeed, we *must not* clear the notification field in
|
* Indeed, we *must not* clear the notification field in
|
||||||
* this path, or we could lose an outside notify, which'd be
|
* this path, or we could lose an outside notify, which'd
|
||||||
* bad for applications that ignore self-notify messages.
|
* be bad for applications that ignore self-notify
|
||||||
|
* messages.
|
||||||
*/
|
*/
|
||||||
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying self");
|
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying self");
|
||||||
NotifyMyFrontEnd(relname, listenerPID);
|
NotifyMyFrontEnd(relname, listenerPID);
|
||||||
@ -499,23 +508,27 @@ AtCommit_Notify()
|
|||||||
{
|
{
|
||||||
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying pid %d",
|
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying pid %d",
|
||||||
listenerPID);
|
listenerPID);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If someone has already notified this listener,
|
* If someone has already notified this listener, we don't
|
||||||
* we don't bother modifying the table, but we do still send
|
* bother modifying the table, but we do still send a
|
||||||
* a SIGUSR2 signal, just in case that backend missed the
|
* SIGUSR2 signal, just in case that backend missed the
|
||||||
* earlier signal for some reason. It's OK to send the signal
|
* earlier signal for some reason. It's OK to send the
|
||||||
* first, because the other guy can't read pg_listener until
|
* signal first, because the other guy can't read
|
||||||
* we unlock it.
|
* pg_listener until we unlock it.
|
||||||
*/
|
*/
|
||||||
#ifdef HAVE_KILL
|
#ifdef HAVE_KILL
|
||||||
if (kill(listenerPID, SIGUSR2) < 0)
|
if (kill(listenerPID, SIGUSR2) < 0)
|
||||||
{
|
{
|
||||||
/* Get rid of pg_listener entry if it refers to a PID
|
|
||||||
|
/*
|
||||||
|
* Get rid of pg_listener entry if it refers to a PID
|
||||||
* that no longer exists. Presumably, that backend
|
* that no longer exists. Presumably, that backend
|
||||||
* crashed without deleting its pg_listener entries.
|
* crashed without deleting its pg_listener entries.
|
||||||
* This code used to only delete the entry if errno==ESRCH,
|
* This code used to only delete the entry if
|
||||||
* but as far as I can see we should just do it for any
|
* errno==ESRCH, but as far as I can see we should
|
||||||
* failure (certainly at least for EPERM too...)
|
* just do it for any failure (certainly at least for
|
||||||
|
* EPERM too...)
|
||||||
*/
|
*/
|
||||||
heap_delete(lRel, &lTuple->t_self, NULL);
|
heap_delete(lRel, &lTuple->t_self, NULL);
|
||||||
}
|
}
|
||||||
@ -536,6 +549,7 @@ AtCommit_Notify()
|
|||||||
}
|
}
|
||||||
|
|
||||||
heap_endscan(sRel);
|
heap_endscan(sRel);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We do not do RelationUnsetLockForWrite(lRel) here, because the
|
* We do not do RelationUnsetLockForWrite(lRel) here, because the
|
||||||
* transaction is about to be committed anyway.
|
* transaction is about to be committed anyway.
|
||||||
@ -588,18 +602,23 @@ AtAbort_Notify()
|
|||||||
void
|
void
|
||||||
Async_NotifyHandler(SIGNAL_ARGS)
|
Async_NotifyHandler(SIGNAL_ARGS)
|
||||||
{
|
{
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Note: this is a SIGNAL HANDLER. You must be very wary what you do here.
|
* Note: this is a SIGNAL HANDLER. You must be very wary what you do
|
||||||
* Some helpful soul had this routine sprinkled with TPRINTFs, which would
|
* here. Some helpful soul had this routine sprinkled with TPRINTFs,
|
||||||
* likely lead to corruption of stdio buffers if they were ever turned on.
|
* which would likely lead to corruption of stdio buffers if they were
|
||||||
|
* ever turned on.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
if (notifyInterruptEnabled)
|
if (notifyInterruptEnabled)
|
||||||
{
|
{
|
||||||
/* I'm not sure whether some flavors of Unix might allow another
|
|
||||||
* SIGUSR2 occurrence to recursively interrupt this routine.
|
/*
|
||||||
* To cope with the possibility, we do the same sort of dance that
|
* I'm not sure whether some flavors of Unix might allow another
|
||||||
* EnableNotifyInterrupt must do --- see that routine for comments.
|
* SIGUSR2 occurrence to recursively interrupt this routine. To
|
||||||
|
* cope with the possibility, we do the same sort of dance that
|
||||||
|
* EnableNotifyInterrupt must do --- see that routine for
|
||||||
|
* comments.
|
||||||
*/
|
*/
|
||||||
notifyInterruptEnabled = 0; /* disable any recursive signal */
|
notifyInterruptEnabled = 0; /* disable any recursive signal */
|
||||||
notifyInterruptOccurred = 1; /* do at least one iteration */
|
notifyInterruptOccurred = 1; /* do at least one iteration */
|
||||||
@ -621,7 +640,11 @@ Async_NotifyHandler(SIGNAL_ARGS)
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
/* In this path it is NOT SAFE to do much of anything, except this: */
|
|
||||||
|
/*
|
||||||
|
* In this path it is NOT SAFE to do much of anything, except
|
||||||
|
* this:
|
||||||
|
*/
|
||||||
notifyInterruptOccurred = 1;
|
notifyInterruptOccurred = 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -653,20 +676,21 @@ EnableNotifyInterrupt(void)
|
|||||||
* could fail to respond promptly to a signal that happens in between
|
* could fail to respond promptly to a signal that happens in between
|
||||||
* those two steps. (A very small time window, perhaps, but Murphy's
|
* those two steps. (A very small time window, perhaps, but Murphy's
|
||||||
* Law says you can hit it...) Instead, we first set the enable flag,
|
* Law says you can hit it...) Instead, we first set the enable flag,
|
||||||
* then test the occurred flag. If we see an unserviced interrupt
|
* then test the occurred flag. If we see an unserviced interrupt has
|
||||||
* has occurred, we re-clear the enable flag before going off to do
|
* occurred, we re-clear the enable flag before going off to do the
|
||||||
* the service work. (That prevents re-entrant invocation of
|
* service work. (That prevents re-entrant invocation of
|
||||||
* ProcessIncomingNotify() if another interrupt occurs.)
|
* ProcessIncomingNotify() if another interrupt occurs.) If an
|
||||||
* If an interrupt comes in between the setting and clearing of
|
* interrupt comes in between the setting and clearing of
|
||||||
* notifyInterruptEnabled, then it will have done the service
|
* notifyInterruptEnabled, then it will have done the service work and
|
||||||
* work and left notifyInterruptOccurred zero, so we have to check
|
* left notifyInterruptOccurred zero, so we have to check again after
|
||||||
* again after clearing enable. The whole thing has to be in a loop
|
* clearing enable. The whole thing has to be in a loop in case
|
||||||
* in case another interrupt occurs while we're servicing the first.
|
* another interrupt occurs while we're servicing the first. Once we
|
||||||
* Once we get out of the loop, enable is set and we know there is no
|
* get out of the loop, enable is set and we know there is no
|
||||||
* unserviced interrupt.
|
* unserviced interrupt.
|
||||||
*
|
*
|
||||||
* NB: an overenthusiastic optimizing compiler could easily break this
|
* NB: an overenthusiastic optimizing compiler could easily break this
|
||||||
* code. Hopefully, they all understand what "volatile" means these days.
|
* code. Hopefully, they all understand what "volatile" means these
|
||||||
|
* days.
|
||||||
*/
|
*/
|
||||||
for (;;)
|
for (;;)
|
||||||
{
|
{
|
||||||
@ -777,6 +801,7 @@ ProcessIncomingNotify(void)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
heap_endscan(sRel);
|
heap_endscan(sRel);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We do not do RelationUnsetLockForWrite(lRel) here, because the
|
* We do not do RelationUnsetLockForWrite(lRel) here, because the
|
||||||
* transaction is about to be committed anyway.
|
* transaction is about to be committed anyway.
|
||||||
@ -785,7 +810,10 @@ ProcessIncomingNotify(void)
|
|||||||
|
|
||||||
CommitTransactionCommand();
|
CommitTransactionCommand();
|
||||||
|
|
||||||
/* Must flush the notify messages to ensure frontend gets them promptly. */
|
/*
|
||||||
|
* Must flush the notify messages to ensure frontend gets them
|
||||||
|
* promptly.
|
||||||
|
*/
|
||||||
pq_flush();
|
pq_flush();
|
||||||
|
|
||||||
PS_SET_STATUS("idle");
|
PS_SET_STATUS("idle");
|
||||||
@ -800,21 +828,23 @@ NotifyMyFrontEnd(char *relname, int32 listenerPID)
|
|||||||
if (whereToSendOutput == Remote)
|
if (whereToSendOutput == Remote)
|
||||||
{
|
{
|
||||||
StringInfoData buf;
|
StringInfoData buf;
|
||||||
|
|
||||||
pq_beginmessage(&buf);
|
pq_beginmessage(&buf);
|
||||||
pq_sendbyte(&buf, 'A');
|
pq_sendbyte(&buf, 'A');
|
||||||
pq_sendint(&buf, listenerPID, sizeof(int32));
|
pq_sendint(&buf, listenerPID, sizeof(int32));
|
||||||
pq_sendstring(&buf, relname);
|
pq_sendstring(&buf, relname);
|
||||||
pq_endmessage(&buf);
|
pq_endmessage(&buf);
|
||||||
/* NOTE: we do not do pq_flush() here. For a self-notify, it will
|
|
||||||
|
/*
|
||||||
|
* NOTE: we do not do pq_flush() here. For a self-notify, it will
|
||||||
* happen at the end of the transaction, and for incoming notifies
|
* happen at the end of the transaction, and for incoming notifies
|
||||||
* ProcessIncomingNotify will do it after finding all the notifies.
|
* ProcessIncomingNotify will do it after finding all the
|
||||||
|
* notifies.
|
||||||
*/
|
*/
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
|
||||||
elog(NOTICE, "NOTIFY for %s", relname);
|
elog(NOTICE, "NOTIFY for %s", relname);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
/* Does pendingNotifies include the given relname?
|
/* Does pendingNotifies include the given relname?
|
||||||
*
|
*
|
||||||
@ -847,10 +877,12 @@ ClearPendingNotifies()
|
|||||||
|
|
||||||
if (pendingNotifies)
|
if (pendingNotifies)
|
||||||
{
|
{
|
||||||
/* Since the referenced strings are malloc'd, we have to scan the
|
|
||||||
|
/*
|
||||||
|
* Since the referenced strings are malloc'd, we have to scan the
|
||||||
* list and delete them individually. If we used palloc for the
|
* list and delete them individually. If we used palloc for the
|
||||||
* strings then we could just do DLFreeList to get rid of both
|
* strings then we could just do DLFreeList to get rid of both the
|
||||||
* the list nodes and the list base...
|
* list nodes and the list base...
|
||||||
*/
|
*/
|
||||||
while ((p = DLRemHead(pendingNotifies)) != NULL)
|
while ((p = DLRemHead(pendingNotifies)) != NULL)
|
||||||
{
|
{
|
||||||
@@ -14,7 +14,7 @@
- * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.38 1999/02/13 23:15:02 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.39 1999/05/25 16:08:16 momjian Exp $
@@ -7,7 +7,7 @@
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.44 1999/05/10 00:44:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.45 1999/05/25 16:08:17 momjian Exp $
@@ -6,7 +6,7 @@
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.76 1999/05/10 00:44:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.77 1999/05/25 16:08:19 momjian Exp $
@@ -92,18 +92,24 @@ inline void CopyDonePeek(FILE *fp, int c, int pickup);
  * NB: no data conversion is applied by these functions
  */
- inline void CopySendData(void *databuf, int datasize, FILE *fp) {
+ inline void
+ CopySendData(void *databuf, int datasize, FILE *fp)
+ {
  if (!fp)
  pq_putbytes((char *) databuf, datasize);
  else
  fwrite(databuf, datasize, 1, fp);
  }
- inline void CopySendString(char *str, FILE *fp) {
+ inline void
+ CopySendString(char *str, FILE *fp)
+ {
  CopySendData(str, strlen(str), fp);
  }
- inline void CopySendChar(char c, FILE *fp) {
+ inline void
+ CopySendChar(char c, FILE *fp)
+ {
  CopySendData(&c, 1, fp);
  }
@@ -117,17 +123,22 @@ inline void CopySendChar(char c, FILE *fp) {
  * NB: no data conversion is applied by these functions
  */
- inline void CopyGetData(void *databuf, int datasize, FILE *fp) {
+ inline void
+ CopyGetData(void *databuf, int datasize, FILE *fp)
+ {
  if (!fp)
  pq_getbytes((char *) databuf, datasize);
  else
  fread(databuf, datasize, 1, fp);
  }
- inline int CopyGetChar(FILE *fp) {
+ inline int
+ CopyGetChar(FILE *fp)
+ {
  if (!fp)
  {
  unsigned char ch;
@@ -136,9 +147,12 @@ inline int CopyGetChar(FILE *fp) {
  return getc(fp);
  }
- inline int CopyGetEof(FILE *fp) {
+ inline int
+ CopyGetEof(FILE *fp)
+ {
  if (!fp)
- return 0; /* Never return EOF when talking to frontend ? */
+ return 0; /* Never return EOF when talking to
+ * frontend ? */
  else
  return feof(fp);
  }
@ -150,24 +164,37 @@ inline int CopyGetEof(FILE *fp) {
|
|||||||
* CopyDonePeek will either take the peeked char off the steam
|
* CopyDonePeek will either take the peeked char off the steam
|
||||||
* (if pickup is != 0) or leave it on the stream (if pickup == 0)
|
* (if pickup is != 0) or leave it on the stream (if pickup == 0)
|
||||||
*/
|
*/
|
||||||
inline int CopyPeekChar(FILE *fp) {
|
inline int
|
||||||
|
CopyPeekChar(FILE *fp)
|
||||||
|
{
|
||||||
if (!fp)
|
if (!fp)
|
||||||
return pq_peekbyte();
|
return pq_peekbyte();
|
||||||
else
|
else
|
||||||
return getc(fp);
|
return getc(fp);
|
||||||
}
|
}
|
||||||
|
|
||||||
inline void CopyDonePeek(FILE *fp, int c, int pickup) {
|
inline void
|
||||||
if (!fp) {
|
CopyDonePeek(FILE *fp, int c, int pickup)
|
||||||
if (pickup) {
|
{
|
||||||
/* We want to pick it up - just receive again into dummy buffer */
|
if (!fp)
|
||||||
|
{
|
||||||
|
if (pickup)
|
||||||
|
{
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We want to pick it up - just receive again into dummy
|
||||||
|
* buffer
|
||||||
|
*/
|
||||||
char c;
|
char c;
|
||||||
|
|
||||||
pq_getbytes(&c, 1);
|
pq_getbytes(&c, 1);
|
||||||
}
|
}
|
||||||
/* If we didn't want to pick it up, just leave it where it sits */
|
/* If we didn't want to pick it up, just leave it where it sits */
|
||||||
}
|
}
|
||||||
else {
|
else
|
||||||
if (!pickup) {
|
{
|
||||||
|
if (!pickup)
|
||||||
|
{
|
||||||
/* We don't want to pick it up - so put it back in there */
|
/* We don't want to pick it up - so put it back in there */
|
||||||
ungetc(c, fp);
|
ungetc(c, fp);
|
||||||
}
|
}
|
||||||
@ -1176,15 +1203,18 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim)
|
|||||||
if (ISOCTAL(c))
|
if (ISOCTAL(c))
|
||||||
{
|
{
|
||||||
val = (val << 3) + VALUE(c);
|
val = (val << 3) + VALUE(c);
|
||||||
CopyDonePeek(fp, c, 1); /* Pick up the character! */
|
CopyDonePeek(fp, c, 1); /* Pick up the
|
||||||
|
* character! */
|
||||||
c = CopyPeekChar(fp);
|
c = CopyPeekChar(fp);
|
||||||
if (ISOCTAL(c)) {
|
if (ISOCTAL(c))
|
||||||
|
{
|
||||||
CopyDonePeek(fp, c, 1); /* pick up! */
|
CopyDonePeek(fp, c, 1); /* pick up! */
|
||||||
val = (val << 3) + VALUE(c);
|
val = (val << 3) + VALUE(c);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
if (CopyGetEof(fp)) {
|
if (CopyGetEof(fp))
|
||||||
|
{
|
||||||
CopyDonePeek(fp, c, 1); /* pick up */
|
CopyDonePeek(fp, c, 1); /* pick up */
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.40 1999/02/13 23:15:05 momjian Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.41 1999/05/25 16:08:20 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.34 1999/05/10 00:44:59 momjian Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.35 1999/05/25 16:08:21 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -9,7 +9,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.28 1999/04/09 22:35:41 tgl Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.29 1999/05/25 16:08:22 momjian Exp $
 *
 * DESCRIPTION
 *	  The "DefineFoo" routines take the parse tree and pick out the
@ -4,7 +4,7 @@
 *
 * Copyright (c) 1994-5, Regents of the University of California
 *
 * $Id: explain.c,v 1.36 1999/05/09 23:31:45 tgl Exp $
 * $Id: explain.c,v 1.37 1999/05/25 16:08:23 momjian Exp $
 *
 */
#include <stdio.h>
@ -212,9 +212,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
		{
			relation = RelationIdCacheGetRelation((int) lfirst(l));
			if (++i > 1)
				appendStringInfo(str, ", ");
			appendStringInfo(str,
				stringStringInfo((RelationGetRelationName(relation))->data));
		}
@ -249,17 +247,13 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
		List	   *lst;

		for (i = 0; i < indent; i++)
			appendStringInfo(str, " ");
		appendStringInfo(str, " InitPlan\n");
		foreach(lst, plan->initPlan)
		{
			es->rtable = ((SubPlan *) lfirst(lst))->rtable;
			for (i = 0; i < indent; i++)
				appendStringInfo(str, " ");
			appendStringInfo(str, " -> ");
			explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, indent + 2, es);
		}
@ -270,9 +264,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
	if (outerPlan(plan))
	{
		for (i = 0; i < indent; i++)
			appendStringInfo(str, " ");
		appendStringInfo(str, " -> ");
		explain_outNode(str, outerPlan(plan), indent + 3, es);
	}
@ -281,9 +273,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
	if (innerPlan(plan))
	{
		for (i = 0; i < indent; i++)
			appendStringInfo(str, " ");
		appendStringInfo(str, " -> ");
		explain_outNode(str, innerPlan(plan), indent + 3, es);
	}
@ -295,17 +285,13 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
		List	   *lst;

		for (i = 0; i < indent; i++)
			appendStringInfo(str, " ");
		appendStringInfo(str, " SubPlan\n");
		foreach(lst, plan->subPlan)
		{
			es->rtable = ((SubPlan *) lfirst(lst))->rtable;
			for (i = 0; i < indent; i++)
				appendStringInfo(str, " ");
			appendStringInfo(str, " -> ");
			explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, indent + 4, es);
		}
@ -336,9 +322,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
			es->rtable = nth(whichplan, appendplan->unionrtables);

			for (i = 0; i < indent; i++)
				appendStringInfo(str, " ");
			appendStringInfo(str, " -> ");

			explain_outNode(str, subnode, indent + 4, es);
@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.3 1999/05/10 00:44:59 momjian Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.4 1999/05/25 16:08:24 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.32 1999/02/13 23:15:08 momjian Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.33 1999/05/25 16:08:25 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.24 1999/05/17 18:24:48 momjian Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.25 1999/05/25 16:08:26 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -218,8 +218,8 @@ nextval(struct varlena * seqin)
		return elm->last;
	}

	seq = read_info("nextval", elm, &buf);		/* lock page' buffer and
												 * read tuple */

	next = result = seq->last_value;
	incby = seq->increment_by;
@ -327,8 +327,8 @@ setval(struct varlena * seqin, int4 next)

	/* open and AccessShareLock sequence */
	elm = init_sequence("setval", seqname);
	seq = read_info("setval", elm, &buf);		/* lock page' buffer and
												 * read tuple */

	if (seq->cache_value != 1)
	{
@ -742,8 +742,8 @@ ExecBRUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
		return NULL;

	/*
	 * In READ COMMITTED isolevel it's possible that newtuple was changed
	 * due to concurrent update.
	 */
	if (newSlot != NULL)
		intuple = newtuple = ExecRemoveJunk(estate->es_junkFilter, newSlot);
@ -846,10 +846,10 @@ ltrmark:;
				goto ltrmark;
			}
		}

		/*
		 * if tuple was deleted or PlanQual failed for updated
		 * tuple - we have not process this tuple!
		 */
		return (NULL);
@ -5,7 +5,7 @@
 *
 * Copyright (c) 1994, Regents of the University of California
 *
 * $Id: user.c,v 1.27 1999/04/02 06:16:36 tgl Exp $
 * $Id: user.c,v 1.28 1999/05/25 16:08:27 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -169,8 +169,8 @@ DefineUser(CreateUserStmt *stmt, CommandDest dest)
	/*
	 * Build the insert statement to be executed.
	 *
	 * XXX Ugly as this code is, it still fails to cope with ' or \ in any of
	 * the provided strings.
	 */
	snprintf(sql, SQL_LENGTH,
			 "insert into %s (usename,usesysid,usecreatedb,usetrace,"
@ -272,9 +272,7 @@ AlterUser(AlterUserStmt *stmt, CommandDest dest)
	snprintf(sql, SQL_LENGTH, "update %s set", ShadowRelationName);

	if (stmt->password)
		snprintf(sql, SQL_LENGTH, "%s passwd = '%s'", pstrdup(sql), stmt->password);

	if (stmt->createdb)
	{
@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.103 1999/05/23 09:10:24 vadim Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.104 1999/05/25 16:08:27 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -222,14 +222,15 @@ vc_shutdown()
{
	/* on entry, we are not in a transaction */

	/*
	 * Flush the init file that relcache.c uses to save startup time. The
	 * next backend startup will rebuild the init file with up-to-date
	 * information from pg_class. This lets the optimizer see the stats
	 * that we've collected for certain critical system indexes. See
	 * relcache.c for more details.
	 *
	 * Ignore any failure to unlink the file, since it might not be there if
	 * no backend has been started since the last vacuum...
	 */
	unlink(RELCACHE_INIT_FILENAME);

@ -799,6 +800,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
			}
			else if (!TransactionIdIsInProgress(tuple.t_data->t_xmax))
			{

				/*
				 * Not Aborted, Not Committed, Not in Progress - so it
				 * from crashed process. - vadim 06/02/97
@ -812,9 +814,10 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
					 relname, blkno, offnum, tuple.t_data->t_xmax);
				do_shrinking = false;
			}

			/*
			 * If tuple is recently deleted then we must not remove it
			 * from relation.
			 */
			if (tupgone && tuple.t_data->t_xmax >= XmaxRecent &&
				tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
@ -826,6 +829,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
				tuple.t_data->t_infomask |= HEAP_XMAX_COMMITTED;
				pgchanged = true;
			}

			/*
			 * If we do shrinking and this tuple is updated one
			 * then remember it to construct updated tuple
@ -1128,7 +1132,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
		else
			Assert(!isempty);

		chain_tuple_moved = false;		/* no one chain-tuple was moved
										 * off this page, yet */
		vpc->vpd_blkno = blkno;
		maxoff = PageGetMaxOffsetNumber(page);
		for (offnum = FirstOffsetNumber;
@ -1150,16 +1155,18 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
				elog(ERROR, "Invalid XID in t_cmin");
			if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
				elog(ERROR, "HEAP_MOVED_IN was not expected");

			/*
			 * If this (chain) tuple is moved by me already then I
			 * have to check is it in vpc or not - i.e. is it moved
			 * while cleaning this page or some previous one.
			 */
			if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
			{
				if (keep_tuples == 0)
					continue;
				if (chain_tuple_moved)		/* some chains was moved
											 * while */
				{						/* cleaning this page */
					Assert(vpc->vpd_offsets_free > 0);
					for (i = 0; i < vpc->vpd_offsets_free; i++)
@ -1184,9 +1191,9 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
			}

			/*
			 * If this tuple is in the chain of tuples created in updates
			 * by "recent" transactions then we have to move all chain of
			 * tuples to another places.
			 */
			if ((tuple.t_data->t_infomask & HEAP_UPDATED &&
				 tuple.t_data->t_xmin >= XmaxRecent) ||
@ -1215,9 +1222,10 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
					WriteBuffer(cur_buffer);
					cur_buffer = InvalidBuffer;
				}

				/*
				 * If this tuple is in the begin/middle of the chain then
				 * we have to move to the end of chain.
				 */
				while (!(tp.t_data->t_infomask & HEAP_XMAX_INVALID) &&
					   !(ItemPointerEquals(&(tp.t_self), &(tp.t_data->t_ctid))))
@ -1257,7 +1265,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
						if (vc_enough_space(fraged_pages->vpl_pagedesc[i], tlen))
							break;
					}
					if (i == num_fraged_pages)	/* can't move item
												 * anywhere */
					{
						for (i = 0; i < num_vtmove; i++)
						{
@ -1289,12 +1298,14 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
					vtmove[num_vtmove].cleanVpd = false;
					free_vtmove--;
					num_vtmove++;

					/*
					 * All done ?
					 */
					if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
						tp.t_data->t_xmin < XmaxRecent)
						break;

					/*
					 * Well, try to find tuple with old row version
					 */
@ -1326,11 +1337,12 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
							elog(ERROR, "Parent itemid marked as unused");
						Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
						Assert(Ptp.t_data->t_xmax == tp.t_data->t_xmin);

						/*
						 * If this tuple is updated version of row and it
						 * was created by the same transaction then no one
						 * is interested in this tuple - mark it as
						 * removed.
						 */
						if (Ptp.t_data->t_infomask & HEAP_UPDATED &&
							Ptp.t_data->t_xmin == Ptp.t_data->t_xmax)
@ -1373,6 +1385,7 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
					tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
					/* Get page to move in */
					cur_buffer = ReadBuffer(onerel, vtmove[ti].vpd->vpd_blkno);

					/*
					 * We should LockBuffer(cur_buffer) but don't, at the
					 * moment. If you'll do LockBuffer then UNLOCK it
@ -1401,6 +1414,7 @@ moving chain: failed to add item with len = %u to page %u",
						pfree(newtup.t_data);
					newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
					ItemPointerSet(&(newtup.t_self), vtmove[ti].vpd->vpd_blkno, newoff);

					/*
					 * Set t_ctid pointing to itself for last tuple in
					 * chain and to next tuple in chain otherwise.
@ -1417,6 +1431,7 @@ moving chain: failed to add item with len = %u to page %u",
					tuple.t_data->t_infomask |= HEAP_MOVED_OFF;

					num_moved++;

					/*
					 * Remember that we moved tuple from the current page
					 * (corresponding index tuple will be cleaned).
@ -1508,8 +1523,8 @@ moving chain: failed to add item with len = %u to page %u",
				RelationInvalidateHeapTuple(onerel, &tuple);

				/*
				 * Mark new tuple as moved_in by vacuum and store vacuum XID
				 * in t_cmin !!!
				 */
				TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
				newtup.t_data->t_infomask &=
@ -1533,8 +1548,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
				newtup.t_self = newtup.t_data->t_ctid;

				/*
				 * Mark old tuple as moved_off by vacuum and store vacuum XID
				 * in t_cmin !!!
				 */
				TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
				tuple.t_data->t_infomask &=
@ -1590,7 +1605,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
					elog(ERROR, "HEAP_MOVED_IN was not expected (2)");
				if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
				{
					if (chain_tuple_moved)		/* some chains was moved
												 * while */
					{					/* cleaning this page */
						Assert(vpc->vpd_offsets_free > 0);
						for (i = 0; i < vpc->vpd_offsets_free; i++)
@ -1645,6 +1661,7 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",

	if (num_moved > 0)
	{

		/*
		 * We have to commit our tuple' movings before we'll truncate
		 * relation, but we shouldn't lose our locks. And so - quick hack:
@ -1657,8 +1674,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
	}

	/*
	 * Clean uncleaned reapped pages from vacuum_pages list list and set
	 * xmin committed for inserted tuples
	 */
	checked_moved = 0;
	for (i = 0, vpp = vacuum_pages->vpl_pagedesc; i < vacuumed_pages; i++, vpp++)
@ -1671,7 +1688,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
			if (!PageIsEmpty(page))
				vc_vacpage(page, *vpp);
		}
		else
			/* this page was used */
		{
			num_tuples = 0;
			max_offset = PageGetMaxOffsetNumber(page);
@ -2317,8 +2335,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
			attp->attdisbursion = selratio;

			/*
			 * Invalidate the cache for the tuple and write the buffer
			 */
			RelationInvalidateHeapTuple(ad, atup);
			WriteNoReleaseBuffer(abuffer);
@ -2375,8 +2392,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
	}

	/*
	 * Invalidate the cached pg_class tuple and write the buffer
	 */
	RelationInvalidateHeapTuple(rd, &rtup);
@ -2,7 +2,7 @@
 * Routines for handling of 'SET var TO',
 * 'SHOW var' and 'RESET var' statements.
 *
 * $Id: variable.c,v 1.19 1999/02/18 06:00:44 momjian Exp $
 * $Id: variable.c,v 1.20 1999/05/25 16:08:28 momjian Exp $
 *
 */

@ -45,10 +45,12 @@ static bool parse_ksqo(const char *);
static bool show_XactIsoLevel(void);
static bool reset_XactIsoLevel(void);
static bool parse_XactIsoLevel(const char *);

#ifdef QUERY_LIMIT
static bool show_query_limit(void);
static bool reset_query_limit(void);
static bool parse_query_limit(const char *);

#endif

extern Cost _cpu_page_wight_;
@ -547,15 +549,15 @@ parse_query_limit(const char *value)
{
	int32		limit;

	if (value == NULL)
	{
		reset_query_limit();
		return (TRUE);
	}
	/* why is pg_atoi's arg not declared "const char *" ? */
	limit = pg_atoi((char *) value, sizeof(int32), '\0');
	if (limit <= -1)
		elog(ERROR, "Bad value for # of query limit (%s)", value);
	ExecutorLimit(limit);
	return (TRUE);
}
@ -566,11 +568,10 @@ show_query_limit(void)
	int			limit;

	limit = ExecutorGetLimit();
	if (limit == ALL_TUPLES)
		elog(NOTICE, "No query limit is set");
	else
		elog(NOTICE, "query limit is %d", limit);
	return (TRUE);
}

@ -580,6 +581,7 @@ reset_query_limit(void)
	ExecutorLimit(ALL_TUPLES);
	return (TRUE);
}

#endif

/*-----------------------------------------------------------------------*/
@ -5,7 +5,7 @@
 *
 * Copyright (c) 1994, Regents of the University of California
 *
 * $Id: view.c,v 1.32 1999/02/13 23:15:12 momjian Exp $
 * $Id: view.c,v 1.33 1999/05/25 16:08:28 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -14,7 +14,7 @@
 *	 ExecInitTee
 *	 ExecEndTee
 *
 * $Id: nodeTee.c,v 1.1 1999/03/23 16:50:49 momjian Exp $
 * $Id: nodeTee.c,v 1.2 1999/05/25 16:08:50 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -339,6 +339,7 @@ ExecTee(Tee *node, Plan *parent)
		slot = ExecProcNode(childNode, (Plan *) node);
		if (!TupIsNull(slot))
		{

			/*
			 * heap_insert changes something...
			 */
@ -5,7 +5,7 @@
 *
 * Copyright (c) 1994, Regents of the University of California
 *
 * $Id: execAmi.c,v 1.34 1999/05/10 00:45:05 momjian Exp $
 * $Id: execAmi.c,v 1.35 1999/05/25 16:08:34 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -26,7 +26,7 @@
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.83 1999/05/10 00:45:06 momjian Exp $
 *	  $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.84 1999/05/25 16:08:36 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -210,8 +210,8 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
	Assert(queryDesc != NULL);

	/*
	 * extract information from the query descriptor and the query
	 * feature.
	 */
	operation = queryDesc->operation;
	plan = queryDesc->plantree;
@ -223,8 +223,8 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
	/*
	 * FIXME: the dest setup function ought to be handed the tuple desc
	 * for the tuples to be output, but I'm not quite sure how to get that
	 * info at this point. For now, passing NULL is OK because no
	 * existing dest setup function actually uses the pointer.
	 */
	(*destfunc->setup) (destfunc, (TupleDesc) NULL);

@ -352,8 +352,8 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
			break;

		/*
		 * return one tuple but don't "retrieve" it. (this is used by
		 * the rule manager..) -cim 9/14/89
		 */
		case EXEC_RETONE:
			result = ExecutePlan(estate,
@ -395,18 +395,23 @@ ExecutorEnd(QueryDesc *queryDesc, EState *estate)
	EndPlan(queryDesc->plantree, estate);

	/* XXX - clean up some more from ExecutorStart() - er1p */
	if (NULL == estate->es_snapshot)
	{
		/* nothing to free */
	}
	else
	{
		if (estate->es_snapshot->xcnt > 0)
			pfree(estate->es_snapshot->xip);
		pfree(estate->es_snapshot);
	}

	if (NULL == estate->es_param_exec_vals)
	{
		/* nothing to free */
	}
	else
	{
		pfree(estate->es_param_exec_vals);
		estate->es_param_exec_vals = NULL;
	}
@ -586,10 +591,9 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
	estate->es_range_table = rangeTable;

	/*
	 * initialize the BaseId counter so node base_id's are assigned
	 * correctly. Someday baseid's will have to be stored someplace other
	 * than estate because they should be unique per query planned.
	 */
	estate->es_BaseId = 1;

@ -599,9 +603,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)

	if (resultRelation != 0 && operation != CMD_SELECT)
	{

		/*
		 * if we have a result relation, open it and initialize the result
		 * relation info stuff.
		 */
		RelationInfo *resultRelationInfo;
		Index		resultRelationIndex;
@ -628,8 +633,8 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
		resultRelationInfo->ri_IndexRelationInfo = NULL;

		/*
		 * open indices on result relation and save descriptors in the
		 * result relation information..
		 */
		if (operation != CMD_DELETE)
			ExecOpenIndices(resultRelationOid, resultRelationInfo);
@ -638,6 +643,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
	}
	else
	{

		/*
		 * if no result relation, then set state appropriately
		 */
@ -683,31 +689,29 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
	}

	/*
	 * initialize the private state information for all the nodes in the
	 * query tree. This opens files, allocates storage and leaves us
	 * ready to start processing tuples..
	 */
	ExecInitNode(plan, estate, NULL);

	/*
	 * get the tuple descriptor describing the type of tuples to return..
	 * (this is especially important if we are creating a relation with
	 * "retrieve into")
	 */
	tupType = ExecGetTupType(plan);		/* tuple descriptor */
	targetList = plan->targetlist;
	len = ExecTargetListLength(targetList);		/* number of attributes */

	/*
	 * now that we have the target list, initialize the junk filter if
	 * this is a REPLACE or a DELETE query. We also init the junk filter
	 * if this is an append query (there might be some rule lock info
	 * there...) NOTE: in the future we might want to initialize the junk
	 * filter for all queries. SELECT added by daveh@insightdist.com
	 * 5/20/98 to allow ORDER/GROUP BY have an identifier missing from the
	 * target.
	 */
	{
		bool		junk_filter_needed = false;
@ -761,6 +765,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
	 */
	if (parseTree->into != NULL)
	{

		/*
		 * create the "into" relation
		 */
@ -778,8 +783,8 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)

		/*
		 * XXX rather than having to call setheapoverride(true)
		 * and then back to false, we should change the arguments
		 * to heap_open() instead..
		 */
		setheapoverride(true);

@ -901,12 +906,13 @@ ExecutePlan(EState *estate,
	estate->es_direction = direction;

	/*
	 * Loop until we've processed the proper number of tuples from the
	 * plan..
	 */

	for (;;)
	{

		/*
		 * Execute the plan and obtain a tuple
		 */
@ -922,9 +928,8 @@ lnext:;
			slot = ExecProcNode(plan, plan);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just return null...
		 */
		if (TupIsNull(slot))
		{
@ -933,11 +938,9 @@ lnext:;
		}

		/*
		 * For now we completely execute the plan and skip result tuples
		 * if requested by LIMIT offset. Finally we should try to do it in
		 * deeper levels if possible (during index scan) - Jan
		 */
		if (offsetTuples > 0)
		{
@ -946,11 +949,10 @@ lnext:;
		}

		/*
		 * if we have a junk filter, then project a new tuple with the
		 * junk removed.
		 *
		 * Store this new "clean" tuple in the place of the original tuple.
		 *
		 * Also, extract all the junk information we need.
		 */
@ -1029,10 +1031,11 @@ lmark:;
							goto lmark;
					}
				}

				/*
				 * if tuple was deleted or PlanQual failed for
				 * updated tuple - we have not return this
				 * tuple!
				 */
				goto lnext;
@ -1057,17 +1060,17 @@ lmark:;
		}						/* if (junkfilter... */

		/*
		 * now that we have a tuple, do the appropriate thing with it..
		 * either return it to the user, add it to a relation someplace,
		 * delete it from a relation, or modify some of it's attributes.
		 */

		switch (operation)
		{
			case CMD_SELECT:
				ExecRetrieve(slot,		/* slot containing tuple */
							 destfunc,	/* destination's tuple-receiver
										 * obj */
							 estate);	/* */
				result = slot;
				break;
@ -1092,10 +1095,10 @@ lmark:;
				result = NULL;
				break;
		}

		/*
		 * check our tuple count.. if we've returned the proper number
		 * then return, else loop again and process more tuples..
		 */
		current_tuple_count += 1;
		if (numberTuples == current_tuple_count)
@ -1103,8 +1106,8 @@ lmark:;
	}

	/*
	 * here, result is either a slot containing a tuple in the case of a
	 * RETRIEVE or NULL otherwise.
	 */
	return result;
}
@ -1182,8 +1185,7 @@ ExecAppend(TupleTableSlot *slot,
	resultRelationDesc = resultRelationInfo->ri_RelationDesc;

	/*
	 * have to add code to preform unique checking here. cim -12/1/89
	 */

	/* BEFORE ROW INSERT Triggers */
@ -1210,9 +1212,7 @@ ExecAppend(TupleTableSlot *slot,
	 */

	if (resultRelationDesc->rd_att->constr)
		ExecConstraints("ExecAppend", resultRelationDesc, tuple, estate);

	/*
	 * insert the tuple
@ -1224,9 +1224,9 @@ ExecAppend(TupleTableSlot *slot,
	/*
	 * process indices
	 *
	 * Note: heap_insert adds a new tuple to a relation. As a side effect,
	 * the tupleid of the new tuple is placed in the new tuple's t_ctid
	 * field.
	 */
	numIndices = resultRelationInfo->ri_NumIndices;
	if (numIndices > 0)
@ -1313,13 +1313,11 @@ ldelete:;
	(estate->es_processed)++;

	/*
	 * Note: Normally one would think that we have to delete index tuples
	 * associated with the heap tuple now..
	 *
	 * ... but in POSTGRES, we have no need to do this because the vacuum
	 * daemon automatically opens an index scan and deletes index tuples
	 * when it finds deleted heap tuples. -cim 9/27/89
	 */

@ -1374,10 +1372,9 @@ ExecReplace(TupleTableSlot *slot,
	resultRelationDesc = resultRelationInfo->ri_RelationDesc;

	/*
	 * have to add code to preform unique checking here. in the event of
	 * unique tuples, this becomes a deletion of the original tuple
	 * affected by the replace. cim -12/1/89
	 */

	/* BEFORE ROW UPDATE Triggers */
@ -1404,9 +1401,7 @@ ExecReplace(TupleTableSlot *slot,
	 */

	if (resultRelationDesc->rd_att->constr)
		ExecConstraints("ExecReplace", resultRelationDesc, tuple, estate);

	/*
	 * replace the heap tuple
@ -1448,23 +1443,21 @@ lreplace:;
	(estate->es_processed)++;

	/*
	 * Note: instead of having to update the old index tuples associated
	 * with the heap tuple, all we do is form and insert new index
	 * tuples.. This is because replaces are actually deletes and inserts
	 * and index tuple deletion is done automagically by the vaccuum
	 * deamon.. All we do is insert new index tuples. -cim 9/27/89
	 */

	/*
	 * process indices
	 *
	 * heap_replace updates a tuple in the base relation by invalidating it
	 * and then appending a new tuple to the relation. As a side effect,
	 * the tupleid of the new tuple is placed in the new tuple's t_ctid
	 * field. So we now insert index tuples using the new tupleid stored
	 * there.
	 */

	numIndices = resultRelationInfo->ri_NumIndices;
@ -1665,9 +1658,9 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* If this is request for another RTE - Ra, - then we have to check
|
* If this is request for another RTE - Ra, - then we have to check
|
||||||
* wasn't PlanQual requested for Ra already and if so then Ra' row
|
* wasn't PlanQual requested for Ra already and if so then Ra' row was
|
||||||
* was updated again and we have to re-start old execution for Ra
|
* updated again and we have to re-start old execution for Ra and
|
||||||
* and forget all what we done after Ra was suspended. Cool? -:))
|
* forget all what we done after Ra was suspended. Cool? -:))
|
||||||
*/
|
*/
|
||||||
if (epq != NULL && epq->rti != rti &&
|
if (epq != NULL && epq->rti != rti &&
|
||||||
epq->estate.es_evTuple[rti - 1] != NULL)
|
epq->estate.es_evTuple[rti - 1] != NULL)
|
||||||
@ -1729,14 +1722,10 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
|
|||||||
length(estate->es_range_table) * sizeof(HeapTuple));
|
length(estate->es_range_table) * sizeof(HeapTuple));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
|
||||||
epqstate->es_evTuple = epq->estate.es_evTuple;
|
epqstate->es_evTuple = epq->estate.es_evTuple;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
else
|
else
|
||||||
{
|
|
||||||
epqstate = &(newepq->estate);
|
epqstate = &(newepq->estate);
|
||||||
}
|
|
||||||
/* push current PQ to the stack */
|
/* push current PQ to the stack */
|
||||||
epqstate->es_evalPlanQual = (Pointer) epq;
|
epqstate->es_evalPlanQual = (Pointer) epq;
|
||||||
epq = newepq;
|
epq = newepq;
|
||||||
@ -1748,9 +1737,8 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
|
|||||||
epqstate = &(epq->estate);
|
epqstate = &(epq->estate);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Ok - we're requested for the same RTE (-:)).
|
* Ok - we're requested for the same RTE (-:)). I'm not sure about
|
||||||
* I'm not sure about ability to use ExecReScan instead of
|
* ability to use ExecReScan instead of ExecInitNode, so...
|
||||||
* ExecInitNode, so...
|
|
||||||
*/
|
*/
|
||||||
if (endNode)
|
if (endNode)
|
||||||
ExecEndNode(epq->plan, epq->plan);
|
ExecEndNode(epq->plan, epq->plan);
|
||||||
@ -1787,9 +1775,10 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
|
|||||||
|
|
||||||
 if (TransactionIdIsValid(SnapshotDirty->xmin))
 elog(ERROR, "EvalPlanQual: t_xmin is uncommitted ?!");

 /*
- * If tuple is being updated by other transaction then
- * we have to wait for its commit/abort.
+ * If tuple is being updated by other transaction then we have
+ * to wait for its commit/abort.
  */
 if (TransactionIdIsValid(xwait))
 {
@@ -1797,6 +1786,7 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
 XactLockTableWait(xwait);
 continue;
 }

 /*
  * Nice! We got tuple - now copy it.
  */
@@ -1806,10 +1796,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
 ReleaseBuffer(buffer);
 break;
 }

 /*
  * Ops! Invalid tuple. Have to check is it updated or deleted.
- * Note that it's possible to get invalid SnapshotDirty->tid
- * if tuple updated by this transaction. Have we to check this ?
+ * Note that it's possible to get invalid SnapshotDirty->tid if
+ * tuple updated by this transaction. Have we to check this ?
  */
 if (ItemPointerIsValid(&(SnapshotDirty->tid)) &&
 !(ItemPointerEquals(&(tuple.t_self), &(SnapshotDirty->tid))))
@@ -1817,9 +1808,10 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
 tuple.t_self = SnapshotDirty->tid; /* updated ... */
 continue;
 }

 /*
- * Deleted or updated by this transaction. Do not
- * (re-)start execution of this PQ. Continue previous PQ.
+ * Deleted or updated by this transaction. Do not (re-)start
+ * execution of this PQ. Continue previous PQ.
  */
 oldepq = (evalPlanQual *) epqstate->es_evalPlanQual;
 if (oldepq != NULL)
@@ -1832,10 +1824,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
 estate->es_evalPlanQual = (Pointer) epq;
 }
 else
- { /* this is the first (oldest) PQ
- epq->rti = 0; * - mark as free and
- estate->es_useEvalPlan = false; * continue Query execution
- return (NULL); */
+ { /* this is the first (oldest) PQ epq->rti
+ * = 0; * - mark as
+ * free and estate->es_useEvalPlan =
+ * false; * continue Query execution
+ * return (NULL); */
 }
 }

@@ -1847,8 +1840,8 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
 ExecInitNode(epq->plan, epqstate, NULL);

 /*
- * For UPDATE/DELETE we have to return tid of actual row
- * we're executing PQ for.
+ * For UPDATE/DELETE we have to return tid of actual row we're
+ * executing PQ for.
  */
 *tid = tuple.t_self;

@@ -1881,7 +1874,8 @@ lpqnext:;
 if (oldepq == (evalPlanQual *) NULL)
 { /* this is the first (oldest) */
 epq->rti = 0; /* PQ - mark as free and */
- estate->es_useEvalPlan = false; /* continue Query execution */
+ estate->es_useEvalPlan = false; /* continue Query
+ * execution */
 return (NULL);
 }
 Assert(oldepq->rti != 0);

@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.50 1999/03/20 02:07:31 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.51 1999/05/25 16:08:37 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -305,16 +305,15 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
 return (Datum) NULL;

 /*
- * get length and type information..
- * ??? what should we do about variable length attributes
- * - variable length attributes have their length stored
- * in the first 4 bytes of the memory pointed to by the
- * returned value.. If we can determine that the type
- * is a variable length type, we can do the right thing.
- * -cim 9/15/89
+ * get length and type information.. ??? what should we do about
+ * variable length attributes - variable length attributes have their
+ * length stored in the first 4 bytes of the memory pointed to by the
+ * returned value.. If we can determine that the type is a variable
+ * length type, we can do the right thing. -cim 9/15/89
  */
 if (attnum < 0)
 {

 /*
  * If this is a pseudo-att, we get the type and fake the length.
  * There ought to be a routine to return the real lengths, so
@@ -609,11 +608,11 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
 i = 0;
 foreach(arg, argList)
 {

 /*
- * evaluate the expression, in general functions cannot take
- * sets as arguments but we make an exception in the case of
- * nested dot expressions. We have to watch out for this case
- * here.
+ * evaluate the expression, in general functions cannot take sets
+ * as arguments but we make an exception in the case of nested dot
+ * expressions. We have to watch out for this case here.
  */
 argV[i] = (Datum)
 ExecEvalExpr((Node *) lfirst(arg),
@@ -671,10 +670,10 @@ ExecMakeFunctionResult(Node *node,
 }

 /*
- * arguments is a list of expressions to evaluate
- * before passing to the function manager.
- * We collect the results of evaluating the expressions
- * into a datum array (argV) and pass this array to arrayFmgr()
+ * arguments is a list of expressions to evaluate before passing to
+ * the function manager. We collect the results of evaluating the
+ * expressions into a datum array (argV) and pass this array to
+ * arrayFmgr()
  */
 if (fcache->nargs != 0)
 {
@@ -845,10 +844,10 @@ ExecEvalOper(Expr *opClause, ExprContext *econtext, bool *isNull)
 /*
  * an opclause is a list (op args). (I think)
  *
- * we extract the oid of the function associated with
- * the op and then pass the work onto ExecMakeFunctionResult
- * which evaluates the arguments and returns the result of
- * calling the function on the evaluated arguments.
+ * we extract the oid of the function associated with the op and then
+ * pass the work onto ExecMakeFunctionResult which evaluates the
+ * arguments and returns the result of calling the function on the
+ * evaluated arguments.
  */
 op = (Oper *) opClause->oper;
 argList = opClause->args;
@@ -889,10 +888,10 @@ ExecEvalFunc(Expr *funcClause,
 /*
  * an funcclause is a list (func args). (I think)
  *
- * we extract the oid of the function associated with
- * the func node and then pass the work onto ExecMakeFunctionResult
- * which evaluates the arguments and returns the result of
- * calling the function on the evaluated arguments.
+ * we extract the oid of the function associated with the func node and
+ * then pass the work onto ExecMakeFunctionResult which evaluates the
+ * arguments and returns the result of calling the function on the
+ * evaluated arguments.
  *
  * this is nearly identical to the ExecEvalOper code.
  */
@@ -939,21 +938,21 @@ ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull)
 clause = lfirst(notclause->args);

 /*
- * We don't iterate over sets in the quals, so pass in an isDone
- * flag, but ignore it.
+ * We don't iterate over sets in the quals, so pass in an isDone flag,
+ * but ignore it.
  */
 expr_value = ExecEvalExpr(clause, econtext, isNull, &isDone);

 /*
- * if the expression evaluates to null, then we just
- * cascade the null back to whoever called us.
+ * if the expression evaluates to null, then we just cascade the null
+ * back to whoever called us.
  */
 if (*isNull)
 return expr_value;

 /*
- * evaluation of 'not' is simple.. expr is false, then
- * return 'true' and vice versa.
+ * evaluation of 'not' is simple.. expr is false, then return 'true'
+ * and vice versa.
  */
 if (DatumGetInt32(expr_value) == 0)
 return (Datum) true;
@@ -978,15 +977,12 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
 clauses = orExpr->args;

 /*
- * we use three valued logic functions here...
- * we evaluate each of the clauses in turn,
- * as soon as one is true we return that
- * value. If none is true and none of the
- * clauses evaluate to NULL we return
- * the value of the last clause evaluated (which
- * should be false) with *isNull set to false else
- * if none is true and at least one clause evaluated
- * to NULL we set *isNull flag to true -
+ * we use three valued logic functions here... we evaluate each of the
+ * clauses in turn, as soon as one is true we return that value. If
+ * none is true and none of the clauses evaluate to NULL we return
+ * the value of the last clause evaluated (which should be false) with
+ * *isNull set to false else if none is true and at least one clause
+ * evaluated to NULL we set *isNull flag to true -
  */
 foreach(clause, clauses)
 {
@@ -1001,28 +997,26 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
 &isDone);

 /*
- * if the expression evaluates to null, then we
- * remember it in the local IsNull flag, if none of the
- * clauses are true then we need to set *isNull
- * to true again.
+ * if the expression evaluates to null, then we remember it in the
+ * local IsNull flag, if none of the clauses are true then we need
+ * to set *isNull to true again.
  */
 if (*isNull)
 {
 IsNull = *isNull;

 /*
- * Many functions don't (or can't!) check if an argument is NULL
- * or NOT_NULL and may return TRUE (1) with *isNull TRUE
- * (an_int4_column <> 1: int4ne returns TRUE for NULLs).
- * Not having time to fix the function manager I want to fix OR:
- * if we had 'x <> 1 OR x isnull' then when x is NULL
- * TRUE was returned by the 'x <> 1' clause ...
- * but ExecQualClause says that the qualification should *fail*
- * if isnull is TRUE for any value returned by ExecEvalExpr.
- * So, force this rule here:
- * if isnull is TRUE then the clause failed.
- * Note: nullvalue() & nonnullvalue() always sets isnull to FALSE for NULLs.
- * - vadim 09/22/97
+ * Many functions don't (or can't!) check if an argument is
+ * NULL or NOT_NULL and may return TRUE (1) with *isNull TRUE
+ * (an_int4_column <> 1: int4ne returns TRUE for NULLs). Not
+ * having time to fix the function manager I want to fix OR:
+ * if we had 'x <> 1 OR x isnull' then when x is NULL TRUE was
+ * returned by the 'x <> 1' clause ... but ExecQualClause says
+ * that the qualification should *fail* if isnull is TRUE for
+ * any value returned by ExecEvalExpr. So, force this rule
+ * here: if isnull is TRUE then the clause failed. Note:
+ * nullvalue() & nonnullvalue() always sets isnull to FALSE
+ * for NULLs. - vadim 09/22/97
  */
 const_value = 0;
 }
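
The ExecEvalOr comments above spell out three-valued OR: return true as soon as any clause is true; if none is true but at least one clause was NULL, the result is NULL; otherwise false. A minimal stand-alone sketch of that rule in plain C follows; the function and parameter names are hypothetical and it stands in for the executor's Datum machinery, not the actual code:

#include <stdbool.h>

/* Three-valued OR over precomputed clause results, per the comment above:
 * any TRUE wins; otherwise NULL if any input was NULL; otherwise FALSE. */
static bool
three_valued_or(const bool *values, const bool *nulls, int n, bool *isNull)
{
	bool		saw_null = false;
	int			i;

	for (i = 0; i < n; i++)
	{
		if (nulls[i])
		{
			saw_null = true;	/* remember the NULL, keep scanning */
			continue;
		}
		if (values[i])
		{
			*isNull = false;
			return true;		/* first true clause decides the OR */
		}
	}
	*isNull = saw_null;			/* none true: NULL if any clause was NULL */
	return false;
}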
@@ -1057,11 +1051,9 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
 clauses = andExpr->args;

 /*
- * we evaluate each of the clauses in turn,
- * as soon as one is false we return that
- * value. If none are false or NULL then we return
- * the value of the last clause evaluated, which
- * should be true.
+ * we evaluate each of the clauses in turn, as soon as one is false we
+ * return that value. If none are false or NULL then we return the
+ * value of the last clause evaluated, which should be true.
  */
 foreach(clause, clauses)
 {
@@ -1076,10 +1068,9 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
 &isDone);

 /*
- * if the expression evaluates to null, then we
- * remember it in IsNull, if none of the clauses after
- * this evaluates to false we will have to set *isNull
- * to true again.
+ * if the expression evaluates to null, then we remember it in
+ * IsNull, if none of the clauses after this evaluates to false we
+ * will have to set *isNull to true again.
  */
 if (*isNull)
 IsNull = *isNull;
@@ -1117,10 +1108,9 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
 clauses = caseExpr->args;

 /*
- * we evaluate each of the WHEN clauses in turn,
- * as soon as one is true we return the corresponding
- * result. If none are true then we return the value
- * of the default clause, or NULL.
+ * we evaluate each of the WHEN clauses in turn, as soon as one is
+ * true we return the corresponding result. If none are true then we
+ * return the value of the default clause, or NULL.
  */
 foreach(clause, clauses)
 {
@@ -1137,8 +1127,8 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
 &isDone);

 /*
- * if we have a true test, then we return the result,
- * since the case statement is satisfied.
+ * if we have a true test, then we return the result, since the
+ * case statement is satisfied.
  */
 if (DatumGetInt32(const_value) != 0)
 {
@@ -1159,9 +1149,7 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
 &isDone);
 }
 else
- {
 *isNull = true;
- }

 return const_value;
 }
@@ -1204,8 +1192,8 @@ ExecEvalExpr(Node *expression,
 *isDone = true;

 /*
- * here we dispatch the work to the appropriate type
- * of function given the type of our expression.
+ * here we dispatch the work to the appropriate type of function given
+ * the type of our expression.
  */
 if (expression == NULL)
 {
@@ -1325,10 +1313,9 @@ ExecQualClause(Node *clause, ExprContext *econtext)
 ExecEvalExpr(clause, econtext, &isNull, &isDone);

 /*
- * this is interesting behaviour here. When a clause evaluates
- * to null, then we consider this as passing the qualification.
- * it seems kind of like, if the qual is NULL, then there's no
- * qual..
+ * this is interesting behaviour here. When a clause evaluates to
+ * null, then we consider this as passing the qualification. it seems
+ * kind of like, if the qual is NULL, then there's no qual..
  */
 if (isNull)
 return true;
@@ -1371,12 +1358,12 @@ ExecQual(List *qual, ExprContext *econtext)
 return true;

 /*
- * a "qual" is a list of clauses. To evaluate the
- * qual, we evaluate each of the clauses in the list.
+ * a "qual" is a list of clauses. To evaluate the qual, we evaluate
+ * each of the clauses in the list.
  *
- * ExecQualClause returns true when we know the qualification
- * *failed* so we just pass each clause in qual to it until
- * we know the qual failed or there are no more clauses.
+ * ExecQualClause returns true when we know the qualification *failed* so
+ * we just pass each clause in qual to it until we know the qual
+ * failed or there are no more clauses.
  */
 result = false;

@@ -1388,9 +1375,9 @@ ExecQual(List *qual, ExprContext *econtext)
 }

 /*
- * if result is true, then it means a clause failed so we
- * return false. if result is false then it means no clause
- * failed so we return true.
+ * if result is true, then it means a clause failed so we return
+ * false. if result is false then it means no clause failed so we
+ * return true.
  */
 if (result == true)
 return false;
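
The ExecQual comments above describe the overall control flow: each clause is handed to a check that reports when the qualification is known to have failed, and the qual as a whole passes only when no clause failed. A stand-alone sketch of that loop over plain callbacks (hypothetical names, not the executor's node types):

#include <stdbool.h>
#include <stddef.h>

/* A clause check that, like ExecQualClause in the hunk above, returns true
 * when the qualification is known to have FAILED. */
typedef bool (*clause_failed_fn) (void *clause, void *context);

/* The qual passes only if no clause failed. */
static bool
eval_qual_sketch(void **clauses, size_t nclauses,
				 clause_failed_fn failed, void *context)
{
	size_t		i;

	for (i = 0; i < nclauses; i++)
	{
		if (failed(clauses[i], context))
			return false;		/* one failed clause fails the whole qual */
	}
	return true;				/* no clause failed */
}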
@@ -1454,41 +1441,39 @@ ExecTargetList(List *targetlist,
 EV_printf("\n");

 /*
- * Return a dummy tuple if the targetlist is empty.
- * the dummy tuple is necessary to differentiate
- * between passing and failing the qualification.
+ * Return a dummy tuple if the targetlist is empty. the dummy tuple is
+ * necessary to differentiate between passing and failing the
+ * qualification.
  */
 if (targetlist == NIL)
 {

 /*
- * I now think that the only time this makes
- * any sense is when we run a delete query. Then
- * we need to return something other than nil
- * so we know to delete the tuple associated
- * with the saved tupleid.. see what ExecutePlan
- * does with the returned tuple.. -cim 9/21/89
+ * I now think that the only time this makes any sense is when we
+ * run a delete query. Then we need to return something other
+ * than nil so we know to delete the tuple associated with the
+ * saved tupleid.. see what ExecutePlan does with the returned
+ * tuple.. -cim 9/21/89
  *
- * It could also happen in queries like:
- * retrieve (foo.all) where bar.a = 3
+ * It could also happen in queries like: retrieve (foo.all) where
+ * bar.a = 3
  *
- * is this a new phenomenon? it might cause bogus behavior
- * if we try to free this tuple later!! I put a hook in
- * ExecProject to watch out for this case -mer 24 Aug 1992
+ * is this a new phenomenon? it might cause bogus behavior if we try
+ * to free this tuple later!! I put a hook in ExecProject to watch
+ * out for this case -mer 24 Aug 1992
  *
- * We must return dummy tuple!!! Try
- * select t1.x from t1, t2 where t1.y = 1 and t2.y = 1
- * - t2 scan target list will be empty and so no one tuple
- * will be returned! But Mer was right - dummy tuple
- * must be palloced... - vadim 03/01/1999
+ * We must return dummy tuple!!! Try select t1.x from t1, t2 where
+ * t1.y = 1 and t2.y = 1 - t2 scan target list will be empty and
+ * so no one tuple will be returned! But Mer was right - dummy
+ * tuple must be palloced... - vadim 03/01/1999
  */
 *isDone = true;
 return (HeapTuple) palloc(1);
 }

 /*
- * allocate an array of char's to hold the "null" information
- * only if we have a really large targetlist. otherwise we use
- * the stack.
+ * allocate an array of char's to hold the "null" information only if
+ * we have a really large targetlist. otherwise we use the stack.
  */
 if (nodomains > 64)
 {
@@ -1509,13 +1494,14 @@ ExecTargetList(List *targetlist,
 *isDone = true;
 foreach(tl, targetlist)
 {

 /*
  * remember, a target list is a list of lists:
  *
  * ((<resdom | fjoin> expr) (<resdom | fjoin> expr) ...)
  *
- * tl is a pointer to successive cdr's of the targetlist
- * tle is a pointer to the target list entry in tl
+ * tl is a pointer to successive cdr's of the targetlist tle is a
+ * pointer to the target list entry in tl
  */
 tle = lfirst(tl);

@@ -1660,9 +1646,8 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
 /*
  * store the tuple in the projection slot and return the slot.
  *
- * If there's no projection target list we don't want to pfree
- * the bogus tuple that ExecTargetList passes back to us.
- * -mer 24 Aug 1992
+ * If there's no projection target list we don't want to pfree the bogus
+ * tuple that ExecTargetList passes back to us. -mer 24 Aug 1992
  */
 return (TupleTableSlot *)
 ExecStoreTuple(newTuple,/* tuple to store */
@@ -1670,4 +1655,3 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
 InvalidBuffer, /* tuple has no buffer */
 true);
 }
-

@@ -14,7 +14,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.24 1999/03/23 16:50:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.25 1999/05/25 16:08:39 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -467,6 +467,7 @@ ExecSetSlotPolicy(TupleTableSlot *slot, /* slot to change */

 return old_shouldFree;
 }

 #endif

 /* --------------------------------
@@ -650,6 +651,7 @@ ExecInitMarkedTupleSlot(EState *estate, MergeJoinState *mergestate)
 INIT_SLOT_ALLOC;
 mergestate->mj_MarkedTupleSlot = (TupleTableSlot *) slot;
 }

 #endif

 /* ----------------

@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.44 1999/03/20 01:13:22 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.45 1999/05/25 16:08:39 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -917,16 +917,17 @@ ExecOpenIndices(Oid resultRelationOid,
 if (indexDesc != NULL)
 {
 relationDescs[i++] = indexDesc;

 /*
- * Hack for not btree and hash indices: they use relation level
- * exclusive locking on updation (i.e. - they are not ready
- * for MVCC) and so we have to exclusively lock indices here
- * to prevent deadlocks if we will scan them - index_beginscan
- * places AccessShareLock, indices update methods don't use
- * locks at all. We release this lock in ExecCloseIndices.
- * Note, that hashes use page level locking - i.e. are not
- * deadlock-free, - let's them be on their way -:))
- * vadim 03-12-1998
+ * Hack for not btree and hash indices: they use relation
+ * level exclusive locking on updation (i.e. - they are
+ * not ready for MVCC) and so we have to exclusively lock
+ * indices here to prevent deadlocks if we will scan them
+ * - index_beginscan places AccessShareLock, indices
+ * update methods don't use locks at all. We release this
+ * lock in ExecCloseIndices. Note, that hashes use page
+ * level locking - i.e. are not deadlock-free, - let's
+ * them be on their way -:)) vadim 03-12-1998
  */
 if (indexDesc->rd_rel->relam != BTREE_AM_OID &&
 indexDesc->rd_rel->relam != HASH_AM_OID)
@@ -1014,6 +1015,7 @@ ExecCloseIndices(RelationInfo *resultRelationInfo)
 {
 if (relationDescs[i] == NULL)
 continue;

 /*
  * Notes in ExecOpenIndices.
  */
@@ -1023,6 +1025,7 @@ ExecCloseIndices(RelationInfo *resultRelationInfo)

 index_close(relationDescs[i]);
 }

 /*
  * XXX should free indexInfo array here too.
  */

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.25 1999/05/13 07:28:29 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.26 1999/05/25 16:08:39 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -121,7 +121,8 @@ ExecAgg(Agg *node)
 */

 /*
- * We loop retrieving groups until we find one matching node->plan.qual
+ * We loop retrieving groups until we find one matching
+ * node->plan.qual
  */
 do
 {
@@ -245,6 +246,7 @@ ExecAgg(Agg *node)
 outerslot = ExecProcNode(outerPlan, (Plan *) node);
 if (TupIsNull(outerslot))
 {

 /*
  * when the outerplan doesn't return a single tuple,
  * create a dummy heaptuple anyway because we still need
@@ -299,17 +301,19 @@ ExecAgg(Agg *node)
 {
 if (noInitValue[aggno])
 {

 /*
- * value1 has not been initialized.
- * This is the first non-NULL input value.
- * We use it as the initial value for value1.
+ * value1 has not been initialized. This is the
+ * first non-NULL input value. We use it as the
+ * initial value for value1.
  *
- * But we can't just use it straight, we have to
- * make a copy of it since the tuple from which it
- * came will be freed on the next iteration of the
+ * But we can't just use it straight, we have to make
+ * a copy of it since the tuple from which it came
+ * will be freed on the next iteration of the
  * scan. This requires finding out how to copy
  * the Datum. We assume the datum is of the agg's
- * basetype, or at least binary compatible with it.
+ * basetype, or at least binary compatible with
+ * it.
  */
 Type aggBaseType = typeidType(aggref->basetype);
 int attlen = typeLen(aggBaseType);
@@ -330,6 +334,7 @@ ExecAgg(Agg *node)
 }
 else
 {

 /*
  * apply the transition functions.
  */
@@ -443,7 +448,8 @@ ExecAgg(Agg *node)
 */
 if (node->plan.qual != NULL)
 qual_result = ExecQual(fix_opids(node->plan.qual), econtext);
- else qual_result = false;
+ else
+ qual_result = false;

 if (oneTuple)
 pfree(oneTuple);
@@ -623,7 +629,8 @@ aggGetAttr(TupleTableSlot *slot,
 }

 result = heap_getattr(heapTuple, /* tuple containing attribute */
- attnum, /* attribute number of desired attribute */
+ attnum, /* attribute number of desired
+ * attribute */
 tuple_type, /* tuple descriptor of tuple */
 isNull); /* return: is attribute null? */


@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.18 1999/02/21 03:48:40 scrappy Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.19 1999/05/25 16:08:40 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -13,7 +13,7 @@
 * columns. (ie. tuples from the same group are consecutive)
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.25 1999/02/13 23:15:21 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.26 1999/05/25 16:08:41 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

@@ -6,7 +6,7 @@
 * Copyright (c) 1994, Regents of the University of California
 *
 *
- * $Id: nodeHash.c,v 1.35 1999/05/18 21:33:06 tgl Exp $
+ * $Id: nodeHash.c,v 1.36 1999/05/25 16:08:41 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -81,6 +81,7 @@ ExecHash(Hash *node)
 for (i = 0; i < nbatch; i++)
 {
 File tfile = OpenTemporaryFile();

 Assert(tfile >= 0);
 hashtable->innerBatchFile[i] = BufFileCreate(tfile);
 }
@@ -261,8 +262,10 @@ ExecHashTableCreate(Hash *node)
 ntuples = outerNode->plan_size;
 if (ntuples <= 0) /* force a plausible size if no info */
 ntuples = 1000;
- /* estimate tupsize based on footprint of tuple in hashtable...
- * but what about palloc overhead?
+
+ /*
+ * estimate tupsize based on footprint of tuple in hashtable... but
+ * what about palloc overhead?
  */
 tupsize = MAXALIGN(outerNode->plan_width) +
 MAXALIGN(sizeof(HashJoinTupleData));
@@ -270,7 +273,8 @@ ExecHashTableCreate(Hash *node)

 /*
  * Target hashtable size is SortMem kilobytes, but not less than
- * sqrt(estimated inner rel size), so as to avoid horrible performance.
+ * sqrt(estimated inner rel size), so as to avoid horrible
+ * performance.
  */
 hash_table_bytes = sqrt(inner_rel_bytes);
 if (hash_table_bytes < (SortMem * 1024L))
@@ -278,17 +282,19 @@ ExecHashTableCreate(Hash *node)

 /*
  * Count the number of hash buckets we want for the whole relation,
- * for an average bucket load of NTUP_PER_BUCKET (per virtual bucket!).
+ * for an average bucket load of NTUP_PER_BUCKET (per virtual
+ * bucket!).
  */
 totalbuckets = (int) ceil((double) ntuples * FUDGE_FAC / NTUP_PER_BUCKET);

 /*
  * Count the number of buckets we think will actually fit in the
- * target memory size, at a loading of NTUP_PER_BUCKET (physical buckets).
- * NOTE: FUDGE_FAC here determines the fraction of the hashtable space
- * reserved to allow for nonuniform distribution of hash values.
- * Perhaps this should be a different number from the other uses of
- * FUDGE_FAC, but since we have no real good way to pick either one...
+ * target memory size, at a loading of NTUP_PER_BUCKET (physical
+ * buckets). NOTE: FUDGE_FAC here determines the fraction of the
+ * hashtable space reserved to allow for nonuniform distribution of
+ * hash values. Perhaps this should be a different number from the
+ * other uses of FUDGE_FAC, but since we have no real good way to pick
+ * either one...
  */
 bucketsize = NTUP_PER_BUCKET * tupsize;
 nbuckets = (int) (hash_table_bytes / (bucketsize * FUDGE_FAC));
@@ -297,21 +303,25 @@ ExecHashTableCreate(Hash *node)

 if (totalbuckets <= nbuckets)
 {
- /* We have enough space, so no batching. In theory we could
- * even reduce nbuckets, but since that could lead to poor
- * behavior if estimated ntuples is much less than reality,
- * it seems better to make more buckets instead of fewer.
+
+ /*
+ * We have enough space, so no batching. In theory we could even
+ * reduce nbuckets, but since that could lead to poor behavior if
+ * estimated ntuples is much less than reality, it seems better to
+ * make more buckets instead of fewer.
  */
 totalbuckets = nbuckets;
 nbatch = 0;
 }
 else
 {
- /* Need to batch; compute how many batches we want to use.
- * Note that nbatch doesn't have to have anything to do with
- * the ratio totalbuckets/nbuckets; in fact, it is the number
- * of groups we will use for the part of the data that doesn't
- * fall into the first nbuckets hash buckets.
+
+ /*
+ * Need to batch; compute how many batches we want to use. Note
+ * that nbatch doesn't have to have anything to do with the ratio
+ * totalbuckets/nbuckets; in fact, it is the number of groups we
+ * will use for the part of the data that doesn't fall into the
+ * first nbuckets hash buckets.
  */
 nbatch = (int) ceil((inner_rel_bytes - hash_table_bytes) /
 hash_table_bytes);
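
The comments in the hunks above describe how ExecHashTableCreate sizes the hash table: the memory target is SortMem kilobytes but at least sqrt of the estimated inner relation size, virtual buckets are chosen for an average load of NTUP_PER_BUCKET with a FUDGE_FAC allowance for skew, and data that does not fit the physical buckets is split into nbatch temp-file batches. A stand-alone sketch of that arithmetic, with assumed constant values and simplified types rather than the backend's actual definitions:

#include <math.h>

#define NTUP_PER_BUCKET 10		/* assumed value, illustration only */
#define FUDGE_FAC		2.0		/* assumed value, illustration only */

static void
plan_hash_table(double ntuples, double tupsize, double inner_rel_bytes,
				long sort_mem_kb,
				int *nbuckets, int *totalbuckets, int *nbatch)
{
	double		hash_table_bytes;
	double		bucketsize;

	/* target is SortMem KB, but not less than sqrt(estimated inner size) */
	hash_table_bytes = sqrt(inner_rel_bytes);
	if (hash_table_bytes < sort_mem_kb * 1024.0)
		hash_table_bytes = sort_mem_kb * 1024.0;

	/* virtual buckets for the whole relation, NTUP_PER_BUCKET tuples each */
	*totalbuckets = (int) ceil(ntuples * FUDGE_FAC / NTUP_PER_BUCKET);

	/* physical buckets that fit in the memory target */
	bucketsize = NTUP_PER_BUCKET * tupsize;
	*nbuckets = (int) (hash_table_bytes / (bucketsize * FUDGE_FAC));

	if (*totalbuckets <= *nbuckets)
	{
		/* everything fits: no batching, keep the larger bucket count */
		*totalbuckets = *nbuckets;
		*nbatch = 0;
	}
	else
	{
		/* overflow data is spread over nbatch temp-file batches */
		*nbatch = (int) ceil((inner_rel_bytes - hash_table_bytes) /
							 hash_table_bytes);
		if (*nbatch <= 0)
			*nbatch = 1;
	}
}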
@@ -319,12 +329,13 @@ ExecHashTableCreate(Hash *node)
 nbatch = 1;
 }

- /* Now, totalbuckets is the number of (virtual) hashbuckets for the
+ /*
+ * Now, totalbuckets is the number of (virtual) hashbuckets for the
  * whole relation, and nbuckets is the number of physical hashbuckets
- * we will use in the first pass. Data falling into the first nbuckets
- * virtual hashbuckets gets handled in the first pass; everything else
- * gets divided into nbatch batches to be processed in additional
- * passes.
+ * we will use in the first pass. Data falling into the first
+ * nbuckets virtual hashbuckets gets handled in the first pass;
+ * everything else gets divided into nbatch batches to be processed in
+ * additional passes.
  */
 #ifdef HJDEBUG
 printf("nbatch = %d, totalbuckets = %d, nbuckets = %d\n",
@@ -353,14 +364,16 @@ ExecHashTableCreate(Hash *node)
 * ----------------
 */
 i = 0;
- do {
+ do
+ {
 i++;
 sprintf(myPortalName, "<hashtable %d>", i);
 myPortal = GetPortalByName(myPortalName);
 } while (PortalIsValid(myPortal));
 myPortal = CreatePortal(myPortalName);
 Assert(PortalIsValid(myPortal));
- hashtable->myPortal = (void*) myPortal; /* kluge for circular includes */
+ hashtable->myPortal = (void *) myPortal; /* kluge for circular
+ * includes */
 hashtable->hashCxt = (MemoryContext) PortalGetVariableMemory(myPortal);
 hashtable->batchCxt = (MemoryContext) PortalGetHeapMemory(myPortal);

@@ -392,8 +405,9 @@ ExecHashTableCreate(Hash *node)
 /* The files will not be opened until later... */
 }

- /* Prepare portal for the first-scan space allocations;
- * allocate the hashbucket array therein, and set each bucket "empty".
+ /*
+ * Prepare portal for the first-scan space allocations; allocate the
+ * hashbucket array therein, and set each bucket "empty".
  */
 MemoryContextSwitchTo(hashtable->batchCxt);
 StartPortalAllocMode(DefaultAllocMode, 0);
@@ -405,9 +419,7 @@ ExecHashTableCreate(Hash *node)
 elog(ERROR, "Insufficient memory for hash table.");

 for (i = 0; i < nbuckets; i++)
- {
 hashtable->buckets[i] = NULL;
- }

 MemoryContextSwitchTo(oldcxt);

@@ -495,6 +507,7 @@ ExecHashTableInsert(HashJoinTable hashtable,
 */
 int batchno = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
 (hashtable->totalbuckets - hashtable->nbuckets);

 hashtable->innerBatchSize[batchno]++;
 ExecHashJoinSaveTuple(heapTuple,
 hashtable->innerBatchFile[batchno]);
@@ -566,17 +579,14 @@ ExecScanHashBucket(HashJoinState *hjstate,
 HashJoinTable hashtable = hjstate->hj_HashTable;
 HashJoinTuple hashTuple = hjstate->hj_CurTuple;

- /* hj_CurTuple is NULL to start scanning a new bucket, or the address
+ /*
+ * hj_CurTuple is NULL to start scanning a new bucket, or the address
  * of the last tuple returned from the current bucket.
  */
 if (hashTuple == NULL)
- {
 hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
- }
 else
- {
 hashTuple = hashTuple->next;
- }

 while (hashTuple != NULL)
 {
@@ -621,25 +631,31 @@ hashFunc(Datum key, int len, bool byVal)
 unsigned int h = 0;
 unsigned char *k;

- if (byVal) {
+ if (byVal)
+ {

 /*
- * If it's a by-value data type, use the 'len' least significant bytes
- * of the Datum value. This should do the right thing on either
- * bigendian or littleendian hardware --- see the Datum access
- * macros in c.h.
+ * If it's a by-value data type, use the 'len' least significant
+ * bytes of the Datum value. This should do the right thing on
+ * either bigendian or littleendian hardware --- see the Datum
+ * access macros in c.h.
  */
- while (len-- > 0) {
+ while (len-- > 0)
+ {
 h = (h * PRIME1) ^ (key & 0xFF);
 key >>= 8;
 }
- } else {
+ }
+ else
+ {

 /*
  * If this is a variable length type, then 'k' points to a "struct
  * varlena" and len == -1. NOTE: VARSIZE returns the "real" data
  * length plus the sizeof the "vl_len" attribute of varlena (the
  * length information). 'k' points to the beginning of the varlena
- * struct, so we have to use "VARDATA" to find the beginning of the
- * "real" data.
+ * struct, so we have to use "VARDATA" to find the beginning of
+ * the "real" data.
  */
 if (len == -1)
 {
@@ -647,9 +663,7 @@ hashFunc(Datum key, int len, bool byVal)
 k = (unsigned char *) VARDATA(key);
 }
 else
- {
 k = (unsigned char *) key;
- }
 while (len-- > 0)
 h = (h * PRIME1) ^ (*k++);
 }
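
The hashFunc hunk above spells out the hashing scheme: a by-value datum feeds its 'len' least-significant bytes into a multiply-and-XOR loop, while a variable-length (varlena) value is hashed over its VARDATA bytes. A stand-alone sketch of the same byte-at-a-time loop, with an assumed PRIME1 value and plain C types in place of Datum:

#include <stddef.h>

#define PRIME1 37				/* assumed multiplier, illustration only */

/* Hash a run of bytes with the multiply-and-XOR step described above. */
static unsigned int
hash_bytes(const unsigned char *k, size_t len)
{
	unsigned int h = 0;

	while (len-- > 0)
		h = (h * PRIME1) ^ (*k++);
	return h;
}

/* By-value variant: use the 'len' least significant bytes of the value. */
static unsigned int
hash_by_value(unsigned long key, int len)
{
	unsigned int h = 0;

	while (len-- > 0)
	{
		h = (h * PRIME1) ^ (unsigned int) (key & 0xFF);
		key >>= 8;
	}
	return h;
}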
@@ -682,13 +696,14 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
 StartPortalAllocMode(DefaultAllocMode, 0);

 /*
- * We still use the same number of physical buckets as in the first pass.
- * (It could be different; but we already decided how many buckets would
- * be appropriate for the allowed memory, so stick with that number.)
- * We MUST set totalbuckets to equal nbuckets, because from now on
- * no tuples will go out to temp files; there are no more virtual buckets,
- * only real buckets. (This implies that tuples will go into different
- * bucket numbers than they did on the first pass, but that's OK.)
+ * We still use the same number of physical buckets as in the first
+ * pass. (It could be different; but we already decided how many
+ * buckets would be appropriate for the allowed memory, so stick with
+ * that number.) We MUST set totalbuckets to equal nbuckets, because
+ * from now on no tuples will go out to temp files; there are no more
+ * virtual buckets, only real buckets. (This implies that tuples will
+ * go into different bucket numbers than they did on the first pass,
+ * but that's OK.)
  */
 hashtable->totalbuckets = nbuckets;

@@ -700,9 +715,7 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
 elog(ERROR, "Insufficient memory for hash table.");

 for (i = 0; i < nbuckets; i++)
- {
 hashtable->buckets[i] = NULL;
- }

 MemoryContextSwitchTo(oldcxt);
 }
@@ -710,6 +723,7 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
 void
 ExecReScanHash(Hash *node, ExprContext *exprCtxt, Plan *parent)
 {

 /*
  * if chgParam of subnode is not null then plan will be re-scanned by
  * first ExecProcNode.

@@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.20 1999/05/18 21:33:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.21 1999/05/25 16:08:42 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -133,6 +133,7 @@ ExecHashJoin(HashJoin *node)
 for (i = 0; i < hashtable->nbatch; i++)
 {
 File tfile = OpenTemporaryFile();

 Assert(tfile >= 0);
 hashtable->outerBatchFile[i] = BufFileCreate(tfile);
 }
@@ -149,6 +150,7 @@ ExecHashJoin(HashJoin *node)

 for (;;)
 {

 /*
  * if the current outer tuple is nil, get a new one
  */
@@ -159,6 +161,7 @@ ExecHashJoin(HashJoin *node)
 hjstate);
 if (TupIsNull(outerTupleSlot))
 {

 /*
  * when the last batch runs out, clean up and exit
  */
@@ -168,8 +171,8 @@ ExecHashJoin(HashJoin *node)
 }

 /*
- * now we have an outer tuple, find the corresponding bucket for
- * this tuple from the hash table
+ * now we have an outer tuple, find the corresponding bucket
+ * for this tuple from the hash table
  */
 econtext->ecxt_outertuple = outerTupleSlot;
 hjstate->hj_CurBucketNo = ExecHashGetBucket(hashtable, econtext,
@@ -186,13 +189,16 @@ ExecHashJoin(HashJoin *node)
 {
 int batch = ExecHashJoinGetBatch(hjstate->hj_CurBucketNo,
 hashtable);

 if (batch > 0)
 {

 /*
  * Need to postpone this outer tuple to a later batch.
  * Save it in the corresponding outer-batch file.
  */
 int batchno = batch - 1;

 hashtable->outerBatchSize[batchno]++;
 ExecHashJoinSaveTuple(outerTupleSlot->val,
 hashtable->outerBatchFile[batchno]);
@@ -212,6 +218,7 @@ ExecHashJoin(HashJoin *node)
 econtext);
 if (curtuple == NULL)
 break; /* out of matches */

 /*
  * we've got a match, but still need to test qpqual
  */
@@ -436,16 +443,17 @@ ExecHashJoinOuterGetTuple(Plan *node, Plan *parent, HashJoinState *hjstate)
 slot = ExecProcNode(node, parent);
 if (!TupIsNull(slot))
 return slot;

 /*
- * We have just reached the end of the first pass.
- * Try to switch to a saved batch.
+ * We have just reached the end of the first pass. Try to switch
+ * to a saved batch.
  */
 curbatch = ExecHashJoinNewBatch(hjstate);
 }

 /*
- * Try to read from a temp file.
- * Loop allows us to advance to new batch as needed.
+ * Try to read from a temp file. Loop allows us to advance to new
+ * batch as needed.
  */
 while (curbatch <= hashtable->nbatch)
 {
@@ -513,9 +521,10 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)

 if (newbatch > 1)
 {

 /*
- * We no longer need the previous outer batch file;
- * close it right away to free disk space.
+ * We no longer need the previous outer batch file; close it right
+ * away to free disk space.
  */
 BufFileClose(hashtable->outerBatchFile[newbatch - 2]);
 hashtable->outerBatchFile[newbatch - 2] = NULL;
@@ -541,8 +550,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
 return newbatch; /* no more batches */

 /*
- * Rewind inner and outer batch files for this batch,
- * so that we can start reading them.
+ * Rewind inner and outer batch files for this batch, so that we can
+ * start reading them.
  */
 if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0L,
 SEEK_SET) != 0L)
@@ -571,7 +580,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
 }

 /*
- * after we build the hash table, the inner batch file is no longer needed
+ * after we build the hash table, the inner batch file is no longer
+ * needed
  */
 BufFileClose(innerFile);
 hashtable->innerBatchFile[newbatch - 1] = NULL;
|
@ -7,7 +7,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.35 1999/05/10 00:45:06 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.36 1999/05/25 16:08:43 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
@ -98,6 +98,7 @@ IndexNext(IndexScan *node)
|
|||||||
|
|
||||||
bool bBackward;
|
bool bBackward;
|
||||||
int indexNumber;
|
int indexNumber;
|
||||||
|
|
||||||
/* ----------------
|
/* ----------------
|
||||||
* extract necessary information from index scan node
|
* extract necessary information from index scan node
|
||||||
* ----------------
|
* ----------------
|
||||||
@ -114,9 +115,9 @@ IndexNext(IndexScan *node)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* Check if we are evaluating PlanQual for tuple of this relation.
|
* Check if we are evaluating PlanQual for tuple of this relation.
|
||||||
* Additional checking is not good, but no other way for now.
|
* Additional checking is not good, but no other way for now. We could
|
||||||
* We could introduce new nodes for this case and handle
|
* introduce new nodes for this case and handle IndexScan --> NewNode
|
||||||
* IndexScan --> NewNode switching in Init/ReScan plan...
|
* switching in Init/ReScan plan...
|
||||||
*/
|
*/
|
||||||
if (estate->es_evTuple != NULL &&
|
if (estate->es_evTuple != NULL &&
|
||||||
estate->es_evTuple[node->scan.scanrelid - 1] != NULL)
|
estate->es_evTuple[node->scan.scanrelid - 1] != NULL)
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.21 1999/02/13 23:15:24 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.22 1999/05/25 16:08:44 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.26 1999/05/10 00:45:07 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.27 1999/05/25 16:08:45 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
|
@ -27,7 +27,7 @@
|
|||||||
* SeqScan (emp.all)
|
* SeqScan (emp.all)
|
||||||
*
|
*
|
||||||
* IDENTIFICATION
|
* IDENTIFICATION
|
||||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.10 1999/03/20 01:13:22 momjian Exp $
|
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.11 1999/05/25 16:08:46 momjian Exp $
|
||||||
*
|
*
|
||||||
*-------------------------------------------------------------------------
|
*-------------------------------------------------------------------------
|
||||||
*/
|
*/
|
||||||
@ -278,7 +278,8 @@ ExecEndResult(Result *node)
|
|||||||
* ----------------
|
* ----------------
|
||||||
*/
|
*/
|
||||||
ExecClearTuple(resstate->cstate.cs_ResultTupleSlot);
|
ExecClearTuple(resstate->cstate.cs_ResultTupleSlot);
|
||||||
pfree(resstate); node->resstate = NULL; /* XXX - new for us - er1p */
|
pfree(resstate);
|
||||||
|
node->resstate = NULL; /* XXX - new for us - er1p */
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.17 1999/02/13 23:15:26 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.18 1999/05/25 16:08:46 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@ -68,9 +68,9 @@ SeqNext(SeqScan *node)
 
     /*
      * Check if we are evaluating PlanQual for tuple of this relation.
-     * Additional checking is not good, but no other way for now.
-     * We could introduce new nodes for this case and handle
-     * SeqScan --> NewNode switching in Init/ReScan plan...
+     * Additional checking is not good, but no other way for now. We could
+     * introduce new nodes for this case and handle SeqScan --> NewNode
+     * switching in Init/ReScan plan...
      */
     if (estate->es_evTuple != NULL &&
         estate->es_evTuple[node->scanrelid - 1] != NULL)
@ -83,10 +83,11 @@ SeqNext(SeqScan *node)
             return (slot);
         }
         slot->val = estate->es_evTuple[node->scanrelid - 1];
 
         /*
-         * Note that unlike IndexScan, SeqScan never use keys
-         * in heap_beginscan (and this is very bad) - so, here
-         * we have not check are keys ok or not.
+         * Note that unlike IndexScan, SeqScan never use keys in
+         * heap_beginscan (and this is very bad) - so, here we have not
+         * check are keys ok or not.
          */
         /* Flag for the next call that no more tuples */
         estate->es_evTupleNull[node->scanrelid - 1] = true;
@ -401,7 +402,8 @@ ExecSeqReScan(SeqScan *node, ExprContext *exprCtxt, Plan *parent)
         outerPlan = outerPlan((Plan *) node);
         ExecReScan(outerPlan, exprCtxt, parent);
     }
-    else    /* otherwise, we are scanning a relation */
+    else
+        /* otherwise, we are scanning a relation */
     {
         /* If this is re-scanning of PlanQual ... */
         if (estate->es_evTuple != NULL &&
@ -58,15 +58,16 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext)
         ExecReScan(plan, (ExprContext *) NULL, plan);
 
     /*
-     * For all sublink types except EXPR_SUBLINK, the result type is boolean,
-     * and we have a fairly clear idea of how to combine multiple subitems
-     * and deal with NULL values or an empty subplan result.
+     * For all sublink types except EXPR_SUBLINK, the result type is
+     * boolean, and we have a fairly clear idea of how to combine multiple
+     * subitems and deal with NULL values or an empty subplan result.
      *
      * For EXPR_SUBLINK, the result type is whatever the combining operator
      * returns. We have no way to deal with more than one column in the
-     * subplan result --- hopefully the parser forbids that. More seriously,
-     * it's unclear what to do with NULL values or an empty subplan result.
-     * For now, we error out, but should something else happen?
+     * subplan result --- hopefully the parser forbids that. More
+     * seriously, it's unclear what to do with NULL values or an empty
+     * subplan result. For now, we error out, but should something else
+     * happen?
      */
 
     for (slot = ExecProcNode(plan, plan);
@ -3,7 +3,7 @@
  * spi.c
  *        Server Programming Interface
  *
- * $Id: spi.c,v 1.37 1999/05/13 07:28:30 tgl Exp $
+ * $Id: spi.c,v 1.38 1999/05/25 16:08:48 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@ -638,7 +638,8 @@ _SPI_execute(char *src, int tcount, _SPI_plan *plan)
         queryTree = (Query *) lfirst(queryTree_list_item);
         planTree = lfirst(planTree_list);
         planTree_list = lnext(planTree_list);
-        islastquery = (planTree_list == NIL);    /* assume lists are same len */
+        islastquery = (planTree_list == NIL);    /* assume lists are same
+                                                  * len */
 
         if (queryTree->commandType == CMD_UTILITY)
         {
@ -722,7 +723,8 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, char *Nulls, int tcount)
         queryTree = (Query *) lfirst(queryTree_list_item);
         planTree = lfirst(planTree_list);
         planTree_list = lnext(planTree_list);
-        islastquery = (planTree_list == NIL);    /* assume lists are same len */
+        islastquery = (planTree_list == NIL);    /* assume lists are same
+                                                  * len */
 
         if (queryTree->commandType == CMD_UTILITY)
         {
@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/lib/Attic/fstack.c,v 1.10 1999/02/13 23:15:34 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/lib/Attic/fstack.c,v 1.11 1999/05/25 16:08:52 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@ -8,7 +8,7 @@
  *
  * Copyright (c) 1994, Regents of the University of California
  *
- * $Id: stringinfo.c,v 1.15 1999/04/25 03:19:25 tgl Exp $
+ * $Id: stringinfo.c,v 1.16 1999/05/25 16:08:53 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@ -75,9 +75,10 @@ enlargeStringInfo(StringInfo str, int needed)
         return;                 /* got enough space already */
 
     /*
-     * We don't want to allocate just a little more space with each append;
-     * for efficiency, double the buffer size each time it overflows.
-     * Actually, we might need to more than double it if 'needed' is big...
+     * We don't want to allocate just a little more space with each
+     * append; for efficiency, double the buffer size each time it
+     * overflows. Actually, we might need to more than double it if
+     * 'needed' is big...
      */
     newlen = 2 * str->maxlen;
     while (needed > newlen)
@ -164,7 +165,8 @@ appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
     memcpy(str->data + str->len, data, datalen);
     str->len += datalen;
 
-    /* Keep a trailing null in place, even though it's probably useless
+    /*
+     * Keep a trailing null in place, even though it's probably useless
      * for binary data...
      */
     str->data[str->len] = '\0';
@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.35 1999/04/16 04:59:03 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.36 1999/05/25 16:08:55 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@ -449,9 +449,9 @@ be_recvauth(Port *port)
 
     /*
      * Get the authentication method to use for this frontend/database
-     * combination. Note: a failure return indicates a problem with
-     * the hba config file, not with the request. hba.c should have
-     * dropped an error message into the postmaster logfile if it failed.
+     * combination. Note: a failure return indicates a problem with the
+     * hba config file, not with the request. hba.c should have dropped
+     * an error message into the postmaster logfile if it failed.
      */
 
     if (hba_getauthmethod(&port->raddr, port->user, port->database,
@ -476,18 +476,19 @@ be_recvauth(Port *port)
     switch (port->auth_method)
     {
         case uaReject:
 
             /*
-             * This could have come from an explicit "reject" entry
-             * in pg_hba.conf, but more likely it means there was no
-             * matching entry. Take pity on the poor user and issue
-             * a helpful error message. NOTE: this is not a security
-             * breach, because all the info reported here is known
-             * at the frontend and must be assumed known to bad guys.
+             * This could have come from an explicit "reject" entry in
+             * pg_hba.conf, but more likely it means there was no
+             * matching entry. Take pity on the poor user and issue a
+             * helpful error message. NOTE: this is not a security
+             * breach, because all the info reported here is known at
+             * the frontend and must be assumed known to bad guys.
              * We're merely helping out the less clueful good guys.
              * NOTE 2: libpq-be.h defines the maximum error message
              * length as 99 characters. It probably wouldn't hurt
-             * anything to increase it, but there might be some
-             * client out there that will fail. So, be terse.
+             * anything to increase it, but there might be some client
+             * out there that will fail. So, be terse.
              */
             {
                 char        buffer[512];
@ -6,7 +6,7 @@
  *
  * Copyright (c) 1994, Regents of the University of California
  *
- * $Id: be-dumpdata.c,v 1.23 1999/05/10 00:45:08 momjian Exp $
+ * $Id: be-dumpdata.c,v 1.24 1999/05/25 16:08:57 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.32 1999/05/10 00:45:09 momjian Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.33 1999/05/25 16:08:57 momjian Exp $
  *
  * NOTES
  *    This should be moved to a more appropriate place. It is here
@ -374,9 +374,7 @@ lo_export(Oid lobjId, text *filename)
      */
     lobj = inv_open(lobjId, INV_READ);
     if (lobj == NULL)
-    {
         elog(ERROR, "lo_export: can't open inv object %u", lobjId);
-    }
 
     /*
      * open the file to be written to
@ -9,7 +9,7 @@
  *      Dec 17, 1997 - Todd A. Brandys
  *      Orignal Version Completed.
  *
- * $Id: crypt.c,v 1.16 1999/05/09 00:54:30 tgl Exp $
+ * $Id: crypt.c,v 1.17 1999/05/25 16:08:58 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@ -147,9 +147,7 @@ crypt_loadpwdfile()
     {                           /* free the old data only if this is a
                                  * reload */
         while (pwd_cache_count--)
-        {
             pfree((void *) pwd_cache[pwd_cache_count]);
-        }
         pfree((void *) pwd_cache);
         pwd_cache = NULL;
         pwd_cache_count = 0;
@ -269,20 +267,14 @@ crypt_verify(Port *port, const char *user, const char *pgpass)
                 current;
 
     if (crypt_getloginfo(user, &passwd, &valuntil) == STATUS_ERROR)
-    {
         return STATUS_ERROR;
-    }
 
     if (passwd == NULL || *passwd == '\0')
     {
         if (passwd)
-        {
             pfree((void *) passwd);
-        }
         if (valuntil)
-        {
             pfree((void *) valuntil);
-        }
         return STATUS_ERROR;
     }
 
@ -296,33 +288,24 @@ crypt_verify(Port *port, const char *user, const char *pgpass)
 
     if (!strcmp(pgpass, crypt_pwd))
     {
 
         /*
          * check here to be sure we are not past valuntil
          */
         if (!valuntil || strcmp(valuntil, "\\N") == 0)
-        {
             vuntil = INVALID_ABSTIME;
-        }
         else
-        {
             vuntil = nabstimein(valuntil);
-        }
         current = GetCurrentAbsoluteTime();
         if (vuntil != INVALID_ABSTIME && vuntil < current)
-        {
             retval = STATUS_ERROR;
-        }
         else
-        {
             retval = STATUS_OK;
         }
-    }
 
     pfree((void *) passwd);
     if (valuntil)
-    {
         pfree((void *) valuntil);
-    }
 
     return retval;
 }
@ -5,7 +5,7 @@
  * wherein you authenticate a user by seeing what IP address the system
  * says he comes from and possibly using ident).
  *
- * $Id: hba.c,v 1.42 1999/05/10 15:17:16 momjian Exp $
+ * $Id: hba.c,v 1.43 1999/05/25 16:08:59 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@ -313,6 +313,7 @@ process_open_config_file(FILE *file, SockAddr *raddr, const char *user,
     {
         /* Process a line from the config file */
         int         c = getc(file);
 
         if (c == EOF)
             eof = true;
         else
@ -394,7 +395,8 @@ find_hba_entry(SockAddr *raddr, const char *user, const char *database,
     }
     else
     {
-        char       *conf_file;  /* The name of the config file we have to read */
+        char       *conf_file;  /* The name of the config file we have to
+                                 * read */
 
         /* put together the full pathname to the config file */
         bufsize = (strlen(DataDir) + strlen(CONF_FILE) + 2) * sizeof(char);
@ -531,8 +533,10 @@ ident(const struct in_addr remote_ip_addr, const struct in_addr local_ip_addr,
 ----------------------------------------------------------------------------*/
 
 
-    int         sock_fd,        /* File descriptor for socket on which we talk to Ident */
-                rc;             /* Return code from a locally called function */
+    int         sock_fd,        /* File descriptor for socket on which we
+                                 * talk to Ident */
+                rc;             /* Return code from a locally called
+                                 * function */
 
     sock_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);
     if (sock_fd == -1)
@ -559,9 +563,9 @@ ident(const struct in_addr remote_ip_addr, const struct in_addr local_ip_addr,
 
     /*
      * Bind to the address which the client originally contacted,
-     * otherwise the ident server won't be able to match up the
-     * right connection. This is necessary if the PostgreSQL
-     * server is running on an IP alias.
+     * otherwise the ident server won't be able to match up the right
+     * connection. This is necessary if the PostgreSQL server is
+     * running on an IP alias.
      */
     memset(&la, 0, sizeof(la));
     la.sin_family = AF_INET;
@ -770,18 +774,15 @@ verify_against_usermap(const char *pguser,
     else if (strcmp(usermap_name, "sameuser") == 0)
     {
         if (strcmp(ident_username, pguser) == 0)
-        {
             *checks_out_p = true;
-        }
         else
-        {
             *checks_out_p = false;
         }
-    }
     else
     {
         FILE       *file;       /* The map file we have to read */
-        char       *map_file;   /* The name of the map file we have to read */
+        char       *map_file;   /* The name of the map file we have to
+                                 * read */
         int         bufsize;
 
         /* put together the full pathname to the map file */
@ -971,9 +972,7 @@ GetCharSetByHost(char *TableName, int host, const char *DataDir)
     file = AllocateFile(map_file, "rb");
 #endif
     if (file == NULL)
-    {
         return;
-    }
     while (!eof)
     {
         c = getc(file);
@ -1,7 +1,7 @@
 /*
  * Copyright (c) 1994, Regents of the University of California
  *
- * $Id: password.c,v 1.20 1999/01/17 06:18:26 momjian Exp $
+ * $Id: password.c,v 1.21 1999/05/25 16:09:00 momjian Exp $
  *
  */
 
Some files were not shown because too many files have changed in this diff