mirror of https://github.com/postgres/postgres.git
Ye-old pgindent run. Same 4-space tabs.
parent db4518729d
commit 52f77df613
@@ -34,6 +34,7 @@ int32 array_all_int4le(ArrayType *array, int4 value);
int32 array_oideq(ArrayType *array, Oid value);
int32 array_all_oidne(ArrayType *array, Oid value);
#endif
/*
@@ -4,7 +4,7 @@
* Functions for the built-in type bit() and varying bit().
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/contrib/bit/Attic/varbit.c,v 1.2 2000/04/03 20:56:40 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/bit/Attic/varbit.c,v 1.3 2000/04/12 17:14:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,8 +48,9 @@ zpbitin(char *s, int dummy, int32 atttypmod)
int len, /* Length of the whole data structure */
bitlen, /* Number of bits in the bit string */
slen; /* Length of the input string */
int bit_not_hex = 0; /* 0 = hex string 1=bit string */
int bc, ipad;
int bit_not_hex = 0;/* 0 = hex string 1=bit string */
int bc,
ipad;
bits8 x = 0;
@@ -57,12 +58,12 @@ zpbitin(char *s, int dummy, int32 atttypmod)
return (bits8 *) NULL;
/* Check that the first character is a b or an x */
if (s[0]=='b' || s[0]=='B')
if (s[0] == 'b' || s[0] == 'B')
bit_not_hex = 1;
else if (s[0]=='x' || s[0]=='X')
else if (s[0] == 'x' || s[0] == 'X')
bit_not_hex = 0;
else
elog(ERROR, "zpbitin: %s is not a valid bitstring",s);
elog(ERROR, "zpbitin: %s is not a valid bitstring", s);
slen = strlen(s) - 1;
/* Determine bitlength from input string */
@@ -70,25 +71,26 @@ zpbitin(char *s, int dummy, int32 atttypmod)
if (!bit_not_hex)
bitlen *= 4;
/* Sometimes atttypmod is not supplied. If it is supplied we need to make
sure that the bitstring fits. Note that the number of infered bits can
be larger than the number of actual bits needed, but only if we are
reading a hex string and not by more than 3 bits, as a hex string gives
and accurate length upto 4 bits */
/*
* Sometimes atttypmod is not supplied. If it is supplied we need to
* make sure that the bitstring fits. Note that the number of infered
* bits can be larger than the number of actual bits needed, but only
* if we are reading a hex string and not by more than 3 bits, as a
* hex string gives and accurate length upto 4 bits
*/
if (atttypmod == -1)
atttypmod = bitlen;
else
if ((bitlen>atttypmod && bit_not_hex) ||
(bitlen>atttypmod+3 && !bit_not_hex))
else if ((bitlen > atttypmod && bit_not_hex) ||
(bitlen > atttypmod + 3 && !bit_not_hex))
elog(ERROR, "zpbitin: bit string of size %d cannot be written into bits(%d)",
bitlen,atttypmod);
bitlen, atttypmod);
len = VARBITDATALEN(atttypmod);
if (len > MaxAttrSize)
elog(ERROR, "zpbitin: length of bit() must be less than %ld",
(MaxAttrSize-VARHDRSZ-VARBITHDRSZ)*BITSPERBYTE);
(MaxAttrSize - VARHDRSZ - VARBITHDRSZ) * BITSPERBYTE);
result = (bits8 *) palloc(len);
/* set to 0 so that *r is always initialised and strin is zero-padded */
@@ -96,60 +98,74 @@ zpbitin(char *s, int dummy, int32 atttypmod)
VARSIZE(result) = len;
VARBITLEN(result) = atttypmod;
/* We need to read the bitstring from the end, as we store it least
significant byte first. s points to the byte before the beginning
of the bitstring */
sp = s+1;
/*
* We need to read the bitstring from the end, as we store it least
* significant byte first. s points to the byte before the beginning
* of the bitstring
*/
sp = s + 1;
r = VARBITS(result);
if (bit_not_hex)
{
/* Parse the bit representation of the string */
/* We know it fits, as bitlen was compared to atttypmod */
x = BITHIGH;
for (bc = 0; sp != s+slen+1; sp++, bc++)
for (bc = 0; sp != s + slen + 1; sp++, bc++)
{
if (*sp=='1')
if (*sp == '1')
*r |= x;
if (bc==7) {
if (bc == 7)
{
bc = 0;
x = BITHIGH;
r++;
} else
}
else
x >>= 1;
}
}
else
{
/* Parse the hex representation of the string */
for (bc = 0; sp != s+slen+1; sp++)
for (bc = 0; sp != s + slen + 1; sp++)
{
if (*sp>='0' && *sp<='9')
if (*sp >= '0' && *sp <= '9')
x = (bits8) (*sp - '0');
else if (*sp>='A' && *sp<='F')
else if (*sp >= 'A' && *sp <= 'F')
x = (bits8) (*sp - 'A') + 10;
else if (*sp>='a' && *sp<='f')
else if (*sp >= 'a' && *sp <= 'f')
x = (bits8) (*sp - 'a') + 10;
else
elog(ERROR,"Cannot parse %c as a hex digit",*sp);
if (bc) {
elog(ERROR, "Cannot parse %c as a hex digit", *sp);
if (bc)
{
bc = 0;
*r++ |= x;
} else {
}
else
{
bc++;
*r = x<<4;
*r = x << 4;
}
}
}
if (bitlen > atttypmod) {
if (bitlen > atttypmod)
{
/* Check that this fitted */
r = (bits8 *) (result + len - 1);
ipad = VARBITPAD(result);
/* The bottom ipad bits of the byte pointed to by r need to be zero */
/* printf("Byte %X shift %X %d\n",*r,(*r << (8-ipad)) & BITMASK,
(*r << (8-ipad)) & BITMASK > 0);
/*
* The bottom ipad bits of the byte pointed to by r need to be
* zero
*/
if (((*r << (BITSPERBYTE-ipad)) & BITMASK) > 0)
/*
* printf("Byte %X shift %X %d\n",*r,(*r << (8-ipad)) & BITMASK,
* (*r << (8-ipad)) & BITMASK > 0);
*/
if (((*r << (BITSPERBYTE - ipad)) & BITMASK) > 0)
elog(ERROR, "zpbitin: bit string too large for bit(%d) data type",
atttypmod);
}
@@ -165,9 +181,12 @@ zpbitin(char *s, int dummy, int32 atttypmod)
char *
zpbitout(bits8 *s)
{
char *result, *r;
char *result,
*r;
bits8 *sp;
int i, len, bitlen;
int i,
len,
bitlen;
if (s == NULL)
{
@@ -178,20 +197,24 @@ zpbitout(bits8 *s)
else
{
bitlen = VARBITLEN(s);
len = bitlen/4 + (bitlen%4>0 ? 1 : 0);
len = bitlen / 4 + (bitlen % 4 > 0 ? 1 : 0);
result = (char *) palloc(len + 4);
sp = VARBITS(s);
r = result;
*r++ = 'X';
*r++ = '\'';
/* we cheat by knowing that we store full bytes zero padded */
for (i=0; i<len; i+=2, sp++) {
*r++ = HEXDIG((*sp)>>4);
for (i = 0; i < len; i += 2, sp++)
{
*r++ = HEXDIG((*sp) >> 4);
*r++ = HEXDIG((*sp) & 0xF);
}
/* Go back one step if we printed a hex number that was not part
of the bitstring anymore */
if (i==len+1)
/*
* Go back one step if we printed a hex number that was not part
* of the bitstring anymore
*/
if (i == len + 1)
r--;
*r++ = '\'';
*r = '\0';
@@ -205,10 +228,13 @@ zpbitout(bits8 *s)
char *
zpbitsout(bits8 *s)
{
char *result, *r;
char *result,
*r;
bits8 *sp;
bits8 x;
int i, k, len;
int i,
k,
len;
if (s == NULL)
{
@@ -224,16 +250,17 @@ zpbitsout(bits8 *s)
r = result;
*r++ = 'B';
*r++ = '\'';
for (i=0; i<len-BITSPERBYTE; i+=BITSPERBYTE, sp++) {
for (i = 0; i < len - BITSPERBYTE; i += BITSPERBYTE, sp++)
{
x = *sp;
for (k=0; k<BITSPERBYTE; k++)
for (k = 0; k < BITSPERBYTE; k++)
{
*r++ = (x & BITHIGH) ? '1' : '0';
x <<= 1;
}
}
x = *sp;
for (k=i; k<len; k++)
for (k = i; k < len; k++)
{
*r++ = (x & BITHIGH) ? '1' : '0';
x <<= 1;
@@ -259,7 +286,8 @@ varbitin(char *s, int dummy, int32 atttypmod)
bitlen, /* Number of bits in the bit string */
slen; /* Length of the input string */
int bit_not_hex = 0;
int bc, ipad;
int bc,
ipad;
bits8 x = 0;
@@ -267,12 +295,12 @@ varbitin(char *s, int dummy, int32 atttypmod)
return (bits8 *) NULL;
/* Check that the first character is a b or an x */
if (s[0]=='b' || s[0]=='B')
if (s[0] == 'b' || s[0] == 'B')
bit_not_hex = 1;
else if (s[0]=='x' || s[0]=='X')
else if (s[0] == 'x' || s[0] == 'X')
bit_not_hex = 0;
else
elog(ERROR, "zpbitin: %s is not a valid bitstring",s);
elog(ERROR, "zpbitin: %s is not a valid bitstring", s);
slen = strlen(s) - 1;
/* Determine bitlength from input string */
@@ -280,23 +308,25 @@ varbitin(char *s, int dummy, int32 atttypmod)
if (!bit_not_hex)
bitlen *= 4;
/* Sometimes atttypmod is not supplied. If it is supplied we need to make
sure that the bitstring fits. Note that the number of infered bits can
be larger than the number of actual bits needed, but only if we are
reading a hex string and not by more than 3 bits, as a hex string gives
and accurate length upto 4 bits */
/*
* Sometimes atttypmod is not supplied. If it is supplied we need to
* make sure that the bitstring fits. Note that the number of infered
* bits can be larger than the number of actual bits needed, but only
* if we are reading a hex string and not by more than 3 bits, as a
* hex string gives and accurate length upto 4 bits
*/
if (atttypmod > -1)
if ((bitlen>atttypmod && bit_not_hex) ||
(bitlen>atttypmod+3 && !bit_not_hex))
if ((bitlen > atttypmod && bit_not_hex) ||
(bitlen > atttypmod + 3 && !bit_not_hex))
elog(ERROR, "varbitin: bit string of size %d cannot be written into varying bits(%d)",
bitlen,atttypmod);
bitlen, atttypmod);
len = VARBITDATALEN(bitlen);
if (len > MaxAttrSize)
elog(ERROR, "varbitin: length of bit() must be less than %ld",
(MaxAttrSize-VARHDRSZ-VARBITHDRSZ)*BITSPERBYTE);
(MaxAttrSize - VARHDRSZ - VARBITHDRSZ) * BITSPERBYTE);
result = (bits8 *) palloc(len);
/* set to 0 so that *r is always initialised and strin is zero-padded */
@@ -304,55 +334,67 @@ varbitin(char *s, int dummy, int32 atttypmod)
VARSIZE(result) = len;
VARBITLEN(result) = bitlen;
/* We need to read the bitstring from the end, as we store it least
significant byte first. s points to the byte before the beginning
of the bitstring */
/*
* We need to read the bitstring from the end, as we store it least
* significant byte first. s points to the byte before the beginning
* of the bitstring
*/
sp = s + 1;
r = VARBITS(result);
if (bit_not_hex)
{
/* Parse the bit representation of the string */
x = BITHIGH;
for (bc = 0; sp != s+slen+1; sp++, bc++)
for (bc = 0; sp != s + slen + 1; sp++, bc++)
{
if (*sp=='1')
if (*sp == '1')
*r |= x;
if (bc==7) {
if (bc == 7)
{
bc = 0;
x = BITHIGH;
r++;
} else
}
else
x >>= 1;
}
}
else
{
for (bc = 0; sp != s+slen+1; sp++)
for (bc = 0; sp != s + slen + 1; sp++)
{
if (*sp>='0' && *sp<='9')
if (*sp >= '0' && *sp <= '9')
x = (bits8) (*sp - '0');
else if (*sp>='A' && *sp<='F')
else if (*sp >= 'A' && *sp <= 'F')
x = (bits8) (*sp - 'A') + 10;
else if (*sp>='a' && *sp<='f')
else if (*sp >= 'a' && *sp <= 'f')
x = (bits8) (*sp - 'a') + 10;
else
elog(ERROR,"Cannot parse %c as a hex digit",*sp);
if (bc) {
elog(ERROR, "Cannot parse %c as a hex digit", *sp);
if (bc)
{
bc = 0;
*r++ |= x;
} else {
}
else
{
bc++;
*r = x<<4;
*r = x << 4;
}
}
}
if (bitlen > atttypmod) {
if (bitlen > atttypmod)
{
/* Check that this fitted */
r = (bits8 *) (result + len - 1);
ipad = VARBITPAD(result);
/* The bottom ipad bits of the byte pointed to by r need to be zero */
if (((*r << (BITSPERBYTE-ipad)) & BITMASK) > 0)
/*
* The bottom ipad bits of the byte pointed to by r need to be
* zero
*/
if (((*r << (BITSPERBYTE - ipad)) & BITMASK) > 0)
elog(ERROR, "varbitin: bit string too large for varying bit(%d) data type",
atttypmod);
}
@@ -381,7 +423,7 @@ varbitin(char *s, int dummy, int32 atttypmod)
*/
bool
biteq (bits8 *arg1, bits8 *arg2)
biteq(bits8 *arg1, bits8 *arg2)
{
int bitlen1,
bitlen2;
@@ -394,12 +436,12 @@ biteq (bits8 *arg1, bits8 *arg2)
return (bool) 0;
/* bit strings are always stored in a full number of bytes */
return memcmp((void *)VARBITS(arg1),(void *)VARBITS(arg2),
return memcmp((void *) VARBITS(arg1), (void *) VARBITS(arg2),
VARBITBYTES(arg1)) == 0;
}
bool
bitne (bits8 *arg1, bits8 *arg2)
bitne(bits8 *arg1, bits8 *arg2)
{
int bitlen1,
bitlen2;
@@ -412,7 +454,7 @@ bitne (bits8 *arg1, bits8 *arg2)
return (bool) 1;
/* bit strings are always stored in a full number of bytes */
return memcmp((void *)VARBITS(arg1),(void *)VARBITS(arg2),
return memcmp((void *) VARBITS(arg1), (void *) VARBITS(arg2),
VARBITBYTES(arg1)) != 0;
}
@@ -425,10 +467,12 @@ bitne (bits8 *arg1, bits8 *arg2)
* Anything is equal to undefined.
*/
int
bitcmp (bits8 *arg1, bits8 *arg2)
bitcmp(bits8 *arg1, bits8 *arg2)
{
int bitlen1, bytelen1,
bitlen2, bytelen2;
int bitlen1,
bytelen1,
bitlen2,
bytelen2;
int cmp;
if (!PointerIsValid(arg1) || !PointerIsValid(arg2))
@@ -436,8 +480,9 @@ bitcmp (bits8 *arg1, bits8 *arg2)
bytelen1 = VARBITBYTES(arg1);
bytelen2 = VARBITBYTES(arg2);
cmp = memcmp(VARBITS(arg1),VARBITS(arg2),Min(bytelen1,bytelen2));
if (cmp==0) {
cmp = memcmp(VARBITS(arg1), VARBITS(arg2), Min(bytelen1, bytelen2));
if (cmp == 0)
{
bitlen1 = VARBITLEN(arg1);
bitlen2 = VARBITLEN(arg2);
if (bitlen1 != bitlen2)
@@ -447,38 +492,43 @@ bitcmp (bits8 *arg1, bits8 *arg2)
}
bool
bitlt (bits8 *arg1, bits8 *arg2)
bitlt(bits8 *arg1, bits8 *arg2)
{
return (bool) (bitcmp(arg1,arg2) == -1);
return (bool) (bitcmp(arg1, arg2) == -1);
}
bool
bitle (bits8 *arg1, bits8 *arg2)
bitle(bits8 *arg1, bits8 *arg2)
{
return (bool) (bitcmp(arg1,arg2) <= 0);
return (bool) (bitcmp(arg1, arg2) <= 0);
}
bool
bitge (bits8 *arg1, bits8 *arg2)
bitge(bits8 *arg1, bits8 *arg2)
{
return (bool) (bitcmp(arg1,arg2) >= 0);
return (bool) (bitcmp(arg1, arg2) >= 0);
}
bool
bitgt (bits8 *arg1, bits8 *arg2)
bitgt(bits8 *arg1, bits8 *arg2)
{
return (bool) (bitcmp(arg1,arg2) == 1);
return (bool) (bitcmp(arg1, arg2) == 1);
}
/* bitcat
* Concatenation of bit strings
*/
bits8 *
bitcat (bits8 *arg1, bits8 *arg2)
bitcat(bits8 *arg1, bits8 *arg2)
{
int bitlen1, bitlen2, bytelen, bit1pad, bit2shift;
int bitlen1,
bitlen2,
bytelen,
bit1pad,
bit2shift;
bits8 *result;
bits8 *pr, *pa;
bits8 *pr,
*pa;
if (!PointerIsValid(arg1) || !PointerIsValid(arg2))
return NULL;
@ -486,28 +536,29 @@ bitcat (bits8 *arg1, bits8 *arg2)
|
||||
bitlen1 = VARBITLEN(arg1);
|
||||
bitlen2 = VARBITLEN(arg2);
|
||||
|
||||
bytelen = VARBITDATALEN(bitlen1+bitlen2);
|
||||
bytelen = VARBITDATALEN(bitlen1 + bitlen2);
|
||||
|
||||
result = (bits8 *) palloc(bytelen*sizeof(bits8));
|
||||
result = (bits8 *) palloc(bytelen * sizeof(bits8));
|
||||
VARSIZE(result) = bytelen;
|
||||
VARBITLEN(result) = bitlen1+bitlen2;
|
||||
printf("%d %d %d \n",VARBITBYTES(arg1),VARBITLEN(arg1),VARBITPAD(arg1));
|
||||
VARBITLEN(result) = bitlen1 + bitlen2;
|
||||
printf("%d %d %d \n", VARBITBYTES(arg1), VARBITLEN(arg1), VARBITPAD(arg1));
|
||||
/* Copy the first bitstring in */
|
||||
memcpy(VARBITS(result),VARBITS(arg1),VARBITBYTES(arg1));
|
||||
memcpy(VARBITS(result), VARBITS(arg1), VARBITBYTES(arg1));
|
||||
/* Copy the second bit string */
|
||||
bit1pad = VARBITPAD(arg1);
|
||||
if (bit1pad==0)
|
||||
if (bit1pad == 0)
|
||||
{
|
||||
memcpy(VARBITS(result)+VARBITBYTES(arg1),VARBITS(arg2),
|
||||
memcpy(VARBITS(result) + VARBITBYTES(arg1), VARBITS(arg2),
|
||||
VARBITBYTES(arg2));
|
||||
}
|
||||
else if (bitlen2>0)
|
||||
else if (bitlen2 > 0)
|
||||
{
|
||||
/* We need to shift all the results to fit */
|
||||
bit2shift = BITSPERBYTE - bit1pad;
|
||||
pa = VARBITS(arg2);
|
||||
pr = VARBITS(result)+VARBITBYTES(arg1)-1;
|
||||
for ( ; pa < VARBITEND(arg2); pa++) {
|
||||
pr = VARBITS(result) + VARBITBYTES(arg1) - 1;
|
||||
for (; pa < VARBITEND(arg2); pa++)
|
||||
{
|
||||
*pr |= ((*pa >> bit2shift) & BITMASK);
|
||||
pr++;
|
||||
if (pr < VARBITEND(result))
|
||||
@ -524,7 +575,7 @@ bitcat (bits8 *arg1, bits8 *arg2)
|
||||
* SQL draft 6.10 9)
|
||||
*/
|
||||
bits8 *
|
||||
bitsubstr (bits8 *arg, int32 s, int32 l)
|
||||
bitsubstr(bits8 *arg, int32 s, int32 l)
|
||||
{
|
||||
int bitlen,
|
||||
rbitlen,
|
||||
@ -532,18 +583,22 @@ bitsubstr (bits8 *arg, int32 s, int32 l)
|
||||
ipad = 0,
|
||||
ishift,
|
||||
i;
|
||||
int e, s1, e1;
|
||||
bits8 * result;
|
||||
bits8 mask, *r, *ps;
|
||||
int e,
|
||||
s1,
|
||||
e1;
|
||||
bits8 *result;
|
||||
bits8 mask,
|
||||
*r,
|
||||
*ps;
|
||||
|
||||
if (!PointerIsValid(arg))
|
||||
return NULL;
|
||||
|
||||
bitlen = VARBITLEN(arg);
|
||||
e = s+l;
|
||||
s1 = Max(s,1);
|
||||
e1 = Min(e,bitlen+1);
|
||||
if (s1>bitlen || e1<1)
|
||||
e = s + l;
|
||||
s1 = Max(s, 1);
|
||||
e1 = Min(e, bitlen + 1);
|
||||
if (s1 > bitlen || e1 < 1)
|
||||
{
|
||||
/* Need to return a null string */
|
||||
len = VARBITDATALEN(0);
|
||||
@ -553,31 +608,34 @@ bitsubstr (bits8 *arg, int32 s, int32 l)
|
||||
}
|
||||
else
|
||||
{
|
||||
/* OK, we've got a true substring starting at position s1-1 and
|
||||
ending at position e1-1 */
|
||||
rbitlen = e1-s1;
|
||||
|
||||
/*
|
||||
* OK, we've got a true substring starting at position s1-1 and
|
||||
* ending at position e1-1
|
||||
*/
|
||||
rbitlen = e1 - s1;
|
||||
len = VARBITDATALEN(rbitlen);
|
||||
result = (bits8 *) palloc(len);
|
||||
VARBITLEN(result) = rbitlen;
|
||||
VARSIZE(result) = len;
|
||||
len -= VARHDRSZ + VARBITHDRSZ;
|
||||
/* Are we copying from a byte boundary? */
|
||||
if ((s1-1)%BITSPERBYTE==0)
|
||||
if ((s1 - 1) % BITSPERBYTE == 0)
|
||||
{
|
||||
/* Yep, we are copying bytes */
|
||||
memcpy(VARBITS(result),VARBITS(arg)+(s1-1)/BITSPERBYTE,len);
|
||||
memcpy(VARBITS(result), VARBITS(arg) + (s1 - 1) / BITSPERBYTE, len);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Figure out how much we need to shift the sequence by */
|
||||
ishift = (s1-1)%BITSPERBYTE;
|
||||
ishift = (s1 - 1) % BITSPERBYTE;
|
||||
r = VARBITS(result);
|
||||
ps = VARBITS(arg) + (s1-1)/BITSPERBYTE;
|
||||
for (i=0; i<len; i++)
|
||||
ps = VARBITS(arg) + (s1 - 1) / BITSPERBYTE;
|
||||
for (i = 0; i < len; i++)
|
||||
{
|
||||
*r = (*ps <<ishift) & BITMASK;
|
||||
*r = (*ps << ishift) & BITMASK;
|
||||
if ((++ps) < VARBITEND(arg))
|
||||
*r |= *ps >>(BITSPERBYTE-ishift);
|
||||
*r |= *ps >> (BITSPERBYTE - ishift);
|
||||
r++;
|
||||
}
|
||||
}
|
||||
@ -598,7 +656,7 @@ bitsubstr (bits8 *arg, int32 s, int32 l)
|
||||
* truncated to the shorter bit string
|
||||
*/
|
||||
bits8 *
|
||||
bitand (bits8 * arg1, bits8 * arg2)
|
||||
bitand(bits8 *arg1, bits8 *arg2)
|
||||
{
|
||||
int len,
|
||||
i;
|
||||
@ -610,15 +668,15 @@ bitand (bits8 * arg1, bits8 * arg2)
|
||||
if (!PointerIsValid(arg1) || !PointerIsValid(arg2))
|
||||
return (bool) 0;
|
||||
|
||||
len = Min(VARSIZE(arg1),VARSIZE(arg2));
|
||||
len = Min(VARSIZE(arg1), VARSIZE(arg2));
|
||||
result = (bits8 *) palloc(len);
|
||||
VARSIZE(result) = len;
|
||||
VARBITLEN(result) = Min(VARBITLEN(arg1),VARBITLEN(arg2));
|
||||
VARBITLEN(result) = Min(VARBITLEN(arg1), VARBITLEN(arg2));
|
||||
|
||||
p1 = (bits8 *) VARBITS(arg1);
|
||||
p2 = (bits8 *) VARBITS(arg2);
|
||||
r = (bits8 *) VARBITS(result);
|
||||
for (i=0; i<Min(VARBITBYTES(arg1),VARBITBYTES(arg2)); i++)
|
||||
for (i = 0; i < Min(VARBITBYTES(arg1), VARBITBYTES(arg2)); i++)
|
||||
*r++ = *p1++ & *p2++;
|
||||
|
||||
/* Padding is not needed as & of 0 pad is 0 */
|
||||
@ -631,7 +689,7 @@ bitand (bits8 * arg1, bits8 * arg2)
|
||||
* truncated to the shorter bit string.
|
||||
*/
|
||||
bits8 *
|
||||
bitor (bits8 * arg1, bits8 * arg2)
|
||||
bitor(bits8 *arg1, bits8 *arg2)
|
||||
{
|
||||
int len,
|
||||
i;
|
||||
@ -644,15 +702,15 @@ bitor (bits8 * arg1, bits8 * arg2)
|
||||
if (!PointerIsValid(arg1) || !PointerIsValid(arg2))
|
||||
return (bool) 0;
|
||||
|
||||
len = Min(VARSIZE(arg1),VARSIZE(arg2));
|
||||
len = Min(VARSIZE(arg1), VARSIZE(arg2));
|
||||
result = (bits8 *) palloc(len);
|
||||
VARSIZE(result) = len;
|
||||
VARBITLEN(result) = Min(VARBITLEN(arg1),VARBITLEN(arg2));
|
||||
VARBITLEN(result) = Min(VARBITLEN(arg1), VARBITLEN(arg2));
|
||||
|
||||
p1 = (bits8 *) VARBITS(arg1);
|
||||
p2 = (bits8 *) VARBITS(arg2);
|
||||
r = (bits8 *) VARBITS(result);
|
||||
for (i=0; i<Min(VARBITBYTES(arg1),VARBITBYTES(arg2)); i++)
|
||||
for (i = 0; i < Min(VARBITBYTES(arg1), VARBITBYTES(arg2)); i++)
|
||||
*r++ = *p1++ | *p2++;
|
||||
|
||||
/* Pad the result */
|
||||
@ -667,7 +725,7 @@ bitor (bits8 * arg1, bits8 * arg2)
|
||||
* truncated to the shorter bit string.
|
||||
*/
|
||||
bits8 *
|
||||
bitxor (bits8 * arg1, bits8 * arg2)
|
||||
bitxor(bits8 *arg1, bits8 *arg2)
|
||||
{
|
||||
int len,
|
||||
i;
|
||||
@ -680,18 +738,16 @@ bitxor (bits8 * arg1, bits8 * arg2)
|
||||
if (!PointerIsValid(arg1) || !PointerIsValid(arg2))
|
||||
return (bool) 0;
|
||||
|
||||
len = Min(VARSIZE(arg1),VARSIZE(arg2));
|
||||
len = Min(VARSIZE(arg1), VARSIZE(arg2));
|
||||
result = (bits8 *) palloc(len);
|
||||
VARSIZE(result) = len;
|
||||
VARBITLEN(result) = Min(VARBITLEN(arg1),VARBITLEN(arg2));
|
||||
VARBITLEN(result) = Min(VARBITLEN(arg1), VARBITLEN(arg2));
|
||||
|
||||
p1 = (bits8 *) VARBITS(arg1);
|
||||
p2 = (bits8 *) VARBITS(arg2);
|
||||
r = (bits8 *) VARBITS(result);
|
||||
for (i=0; i<Min(VARBITBYTES(arg1),VARBITBYTES(arg2)); i++)
|
||||
{
|
||||
for (i = 0; i < Min(VARBITBYTES(arg1), VARBITBYTES(arg2)); i++)
|
||||
*r++ = *p1++ ^ *p2++;
|
||||
}
|
||||
|
||||
/* Pad the result */
|
||||
mask = BITMASK << VARBITPAD(result);
|
||||
@ -704,7 +760,7 @@ bitxor (bits8 * arg1, bits8 * arg2)
|
||||
* perform a logical NOT on a bit strings.
|
||||
*/
|
||||
bits8 *
|
||||
bitnot (bits8 * arg)
|
||||
bitnot(bits8 *arg)
|
||||
{
|
||||
bits8 *result;
|
||||
bits8 *p,
|
||||
@ -720,7 +776,7 @@ bitnot (bits8 * arg)
|
||||
|
||||
p = (bits8 *) VARBITS(arg);
|
||||
r = (bits8 *) VARBITS(result);
|
||||
for ( ; p < VARBITEND(arg); p++, r++)
|
||||
for (; p < VARBITEND(arg); p++, r++)
|
||||
*r = ~*p;
|
||||
|
||||
/* Pad the result */
|
||||
@ -734,9 +790,11 @@ bitnot (bits8 * arg)
|
||||
* do a left shift (i.e. to the beginning of the string) of the bit string
|
||||
*/
|
||||
bits8 *
|
||||
bitshiftleft (bits8 * arg, int shft)
|
||||
bitshiftleft(bits8 *arg, int shft)
|
||||
{
|
||||
int byte_shift, ishift, len;
|
||||
int byte_shift,
|
||||
ishift,
|
||||
len;
|
||||
bits8 *result;
|
||||
bits8 *p,
|
||||
*r;
|
||||
@ -753,22 +811,26 @@ bitshiftleft (bits8 * arg, int shft)
|
||||
VARBITLEN(result) = VARBITLEN(arg);
|
||||
r = (bits8 *) VARBITS(result);
|
||||
|
||||
byte_shift = shft/BITSPERBYTE;
|
||||
byte_shift = shft / BITSPERBYTE;
|
||||
ishift = shft % BITSPERBYTE;
|
||||
p = ((bits8 *) VARBITS(arg)) + byte_shift;
|
||||
|
||||
if (ishift == 0) {
|
||||
if (ishift == 0)
|
||||
{
|
||||
/* Special case: we can do a memcpy */
|
||||
len = VARBITBYTES(arg) - byte_shift;
|
||||
memcpy(r, p, len);
|
||||
memset(r+len, 0, byte_shift);
|
||||
} else {
|
||||
for ( ; p < VARBITEND(arg); r++) {
|
||||
*r = *p <<ishift;
|
||||
if ((++p) < VARBITEND(arg))
|
||||
*r |= *p >>(BITSPERBYTE-ishift);
|
||||
memset(r + len, 0, byte_shift);
|
||||
}
|
||||
for ( ; r < VARBITEND(result) ; r++ )
|
||||
else
|
||||
{
|
||||
for (; p < VARBITEND(arg); r++)
|
||||
{
|
||||
*r = *p << ishift;
|
||||
if ((++p) < VARBITEND(arg))
|
||||
*r |= *p >> (BITSPERBYTE - ishift);
|
||||
}
|
||||
for (; r < VARBITEND(result); r++)
|
||||
*r = (bits8) 0;
|
||||
}
|
||||
|
||||
@ -779,9 +841,11 @@ bitshiftleft (bits8 * arg, int shft)
|
||||
* do a right shift (i.e. to the beginning of the string) of the bit string
|
||||
*/
|
||||
bits8 *
|
||||
bitshiftright (bits8 * arg, int shft)
|
||||
bitshiftright(bits8 *arg, int shft)
|
||||
{
|
||||
int byte_shift, ishift, len;
|
||||
int byte_shift,
|
||||
ishift,
|
||||
len;
|
||||
bits8 *result;
|
||||
bits8 *p,
|
||||
*r;
|
||||
@ -798,7 +862,7 @@ bitshiftright (bits8 * arg, int shft)
|
||||
VARBITLEN(result) = VARBITLEN(arg);
|
||||
r = (bits8 *) VARBITS(result);
|
||||
|
||||
byte_shift = shft/BITSPERBYTE;
|
||||
byte_shift = shft / BITSPERBYTE;
|
||||
ishift = shft % BITSPERBYTE;
|
||||
p = (bits8 *) VARBITS(arg);
|
||||
|
||||
@ -809,16 +873,17 @@ bitshiftright (bits8 * arg, int shft)
|
||||
{
|
||||
/* Special case: we can do a memcpy */
|
||||
len = VARBITBYTES(arg) - byte_shift;
|
||||
memcpy(r+byte_shift, p, len);
|
||||
memcpy(r + byte_shift, p, len);
|
||||
}
|
||||
else
|
||||
{
|
||||
r += byte_shift;
|
||||
*r = 0; /* Initialise first byte */
|
||||
for ( ; r < VARBITEND(result); p++) {
|
||||
for (; r < VARBITEND(result); p++)
|
||||
{
|
||||
*r |= *p >> ishift;
|
||||
if ((++r) < VARBITEND(result))
|
||||
*r = (*p <<(BITSPERBYTE-ishift)) & BITMASK;
|
||||
*r = (*p << (BITSPERBYTE - ishift)) & BITMASK;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -52,22 +52,22 @@ struct varbita
|
||||
#define BITHIGH 0x80
|
||||
|
||||
|
||||
bits8 * zpbitin(char *s, int dummy, int32 atttypmod);
|
||||
char * zpbitout(bits8 *s);
|
||||
char * zpbitsout(bits8 *s);
|
||||
bits8 * varbitin(char *s, int dummy, int32 atttypmod);
|
||||
bool biteq (bits8 *arg1, bits8 *arg2);
|
||||
bool bitne (bits8 *arg1, bits8 *arg2);
|
||||
bool bitge (bits8 *arg1, bits8 *arg2);
|
||||
bool bitgt (bits8 *arg1, bits8 *arg2);
|
||||
bool bitle (bits8 *arg1, bits8 *arg2);
|
||||
bool bitlt (bits8 *arg1, bits8 *arg2);
|
||||
int bitcmp (bits8 *arg1, bits8 *arg2);
|
||||
bits8 * bitand (bits8 * arg1, bits8 * arg2);
|
||||
bits8 * bitor (bits8 * arg1, bits8 * arg2);
|
||||
bits8 * bitxor (bits8 * arg1, bits8 * arg2);
|
||||
bits8 * bitnot (bits8 * arg);
|
||||
bits8 * bitshiftright (bits8 * arg, int shft);
|
||||
bits8 * bitshiftleft (bits8 * arg, int shft);
|
||||
bits8 * bitcat (bits8 *arg1, bits8 *arg2);
|
||||
bits8 * bitsubstr (bits8 *arg, int32 s, int32 l);
|
||||
bits8 *zpbitin(char *s, int dummy, int32 atttypmod);
|
||||
char *zpbitout(bits8 *s);
|
||||
char *zpbitsout(bits8 *s);
|
||||
bits8 *varbitin(char *s, int dummy, int32 atttypmod);
|
||||
bool biteq(bits8 *arg1, bits8 *arg2);
|
||||
bool bitne(bits8 *arg1, bits8 *arg2);
|
||||
bool bitge(bits8 *arg1, bits8 *arg2);
|
||||
bool bitgt(bits8 *arg1, bits8 *arg2);
|
||||
bool bitle(bits8 *arg1, bits8 *arg2);
|
||||
bool bitlt(bits8 *arg1, bits8 *arg2);
|
||||
int bitcmp(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitand(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitor(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitxor(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitnot(bits8 *arg);
|
||||
bits8 *bitshiftright(bits8 *arg, int shft);
|
||||
bits8 *bitshiftleft(bits8 *arg, int shft);
|
||||
bits8 *bitcat(bits8 *arg1, bits8 *arg2);
|
||||
bits8 *bitsubstr(bits8 *arg, int32 s, int32 l);
|
||||
|
@ -2,12 +2,13 @@
|
||||
|
||||
#include "varbit.h"
|
||||
|
||||
bits8 * varbit_in (char * s);
|
||||
char * varbit_out (bits8 *s);
|
||||
bits8 *varbit_in(char *s);
|
||||
char *varbit_out(bits8 *s);
|
||||
|
||||
bits8 *
|
||||
varbit_in (char * s) {
|
||||
return varbitin (s, 0, -1);
|
||||
varbit_in(char *s)
|
||||
{
|
||||
return varbitin(s, 0, -1);
|
||||
}
|
||||
|
||||
/*char *
|
||||
@ -17,6 +18,7 @@ varbit_out (bits8 *s) {
|
||||
*/
|
||||
|
||||
char *
|
||||
varbit_out (bits8 *s) {
|
||||
varbit_out(bits8 *s)
|
||||
{
|
||||
return zpbitsout(s);
|
||||
}
|
||||
|
@ -2,173 +2,183 @@
|
||||
#include "varbit.h"
|
||||
#include <stdio.h>
|
||||
|
||||
void print_details (unsigned char *s);
|
||||
void print_details(unsigned char *s);
|
||||
|
||||
const int numb = 8;
|
||||
|
||||
/*
|
||||
const char *b[] = { "B0010", "B11011011", "B0001", "X3F12", "X27", "B",
|
||||
"X11", "B100111"};
|
||||
int atttypmod[] = {-1, -1, -1,-1,-1,-1,-1,-1 };
|
||||
*/
|
||||
const char *b[] = { "B0010", "B11011011", "B10001", "X3D12", "X27", "B",
|
||||
"X11", "B100111"};
|
||||
int atttypmod[] = { 7, 9, 6, 18, 11, 6, -1, -1 };
|
||||
const char *b[] = {"B0010", "B11011011", "B10001", "X3D12", "X27", "B",
|
||||
"X11", "B100111"};
|
||||
int atttypmod[] = {7, 9, 6, 18, 11, 6, -1, -1};
|
||||
|
||||
|
||||
void print_details (unsigned char *s)
|
||||
void
|
||||
print_details(unsigned char *s)
|
||||
{
|
||||
int i;
|
||||
printf ("Length in bytes : %d\n",VARSIZE(s));
|
||||
printf ("Length of bitstring: %d\n",VARBITLEN(s));
|
||||
for (i=8; i<VARSIZE(s); i++)
|
||||
printf ("%X%X ",s[i]>>4,s[i]&0xF);
|
||||
|
||||
printf("Length in bytes : %d\n", VARSIZE(s));
|
||||
printf("Length of bitstring: %d\n", VARBITLEN(s));
|
||||
for (i = 8; i < VARSIZE(s); i++)
|
||||
printf("%X%X ", s[i] >> 4, s[i] & 0xF);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
int
|
||||
main ()
|
||||
main()
|
||||
{
|
||||
int i, j;
|
||||
int i,
|
||||
j;
|
||||
char *s[numb];
|
||||
|
||||
for (i=0; i<numb; i++) {
|
||||
printf ("Input: %s\n",b[i]);
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("Input: %s\n", b[i]);
|
||||
s[i] = zpbitin(b[i], 0, atttypmod[i]);
|
||||
//print_details(s[i]);
|
||||
printf ("%s = %s\n",zpbitout(s[i]),zpbitsout(s[i]));
|
||||
printf("%s = %s\n", zpbitout(s[i]), zpbitsout(s[i]));
|
||||
}
|
||||
|
||||
printf ("\nCOMPARISONS:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s <=> %s = %d\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
bitcmp(s[i],s[j]));
|
||||
printf("\nCOMPARISONS:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s <=> %s = %d\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
bitcmp(s[i], s[j]));
|
||||
|
||||
printf ("\nCONCATENATION:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s || %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitcat(s[i],s[j])));
|
||||
printf("\nCONCATENATION:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s || %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitcat(s[i], s[j])));
|
||||
|
||||
printf("\nSUBSTR:\n");
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),1,8,
|
||||
zpbitsout(bitsubstr(s[3],1,8)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),9,8,
|
||||
zpbitsout(bitsubstr(s[3],9,8)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),1,9,
|
||||
zpbitsout(bitsubstr(s[3],1,9)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,5,
|
||||
zpbitsout(bitsubstr(s[3],3,5)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,9,
|
||||
zpbitsout(bitsubstr(s[3],3,9)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,17,
|
||||
zpbitsout(bitsubstr(s[3],3,17)));
|
||||
printf ("\nLOGICAL AND:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s & %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitand(s[i],s[j])));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 1, 8,
|
||||
zpbitsout(bitsubstr(s[3], 1, 8)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 9, 8,
|
||||
zpbitsout(bitsubstr(s[3], 9, 8)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 1, 9,
|
||||
zpbitsout(bitsubstr(s[3], 1, 9)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 5,
|
||||
zpbitsout(bitsubstr(s[3], 3, 5)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 9,
|
||||
zpbitsout(bitsubstr(s[3], 3, 9)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 17,
|
||||
zpbitsout(bitsubstr(s[3], 3, 17)));
|
||||
printf("\nLOGICAL AND:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s & %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitand(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL OR:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s | %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitor(s[i],s[j])));
|
||||
printf("\nLOGICAL OR:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s | %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitor(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL XOR:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s ^ %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitxor(s[i],s[j])));
|
||||
printf("\nLOGICAL XOR:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s ^ %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitxor(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL NOT:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
printf("~%s = %s\n",zpbitsout(s[i]),zpbitsout(bitnot(s[i])));
|
||||
printf("\nLOGICAL NOT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
printf("~%s = %s\n", zpbitsout(s[i]), zpbitsout(bitnot(s[i])));
|
||||
|
||||
|
||||
printf ("\nSHIFT LEFT:\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf("%s\n",zpbitsout(s[i]));
|
||||
for (j=0; j<=VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n",j,zpbitsout(bitshiftleft(s[i],j)));
|
||||
printf("\nSHIFT LEFT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
for (j = 0; j <= VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n", j, zpbitsout(bitshiftleft(s[i], j)));
|
||||
}
|
||||
|
||||
printf ("\nSHIFT RIGHT:\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf("%s\n",zpbitsout(s[i]));
|
||||
for (j=0; j<=VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n",j,zpbitsout(bitshiftright(s[i],j)));
|
||||
printf("\nSHIFT RIGHT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
for (j = 0; j <= VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n", j, zpbitsout(bitshiftright(s[i], j)));
|
||||
}
|
||||
|
||||
printf ("\n\n ********** VARYING **********\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf ("Input: %s\n",b[i]);
|
||||
printf("\n\n ********** VARYING **********\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("Input: %s\n", b[i]);
|
||||
s[i] = varbitin(b[i], 0, atttypmod[i]);
|
||||
/*print_details(s);*/
|
||||
printf ("%s\n",zpbitout(s[i]));
|
||||
printf ("%s\n",zpbitsout(s[i]));
|
||||
/* print_details(s); */
|
||||
printf("%s\n", zpbitout(s[i]));
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
}
|
||||
|
||||
printf ("\nCOMPARISONS:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s <=> %s = %d\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
bitcmp(s[i],s[j]));
|
||||
printf("\nCOMPARISONS:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s <=> %s = %d\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
bitcmp(s[i], s[j]));
|
||||
|
||||
printf ("\nCONCATENATION:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s || %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitcat(s[i],s[j])));
|
||||
printf("\nCONCATENATION:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s || %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitcat(s[i], s[j])));
|
||||
|
||||
printf("\nSUBSTR:\n");
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),1,8,
|
||||
zpbitsout(bitsubstr(s[3],1,8)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),9,8,
|
||||
zpbitsout(bitsubstr(s[3],9,8)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),1,9,
|
||||
zpbitsout(bitsubstr(s[3],1,9)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,5,
|
||||
zpbitsout(bitsubstr(s[3],3,5)));
|
||||
printf("%s (%d,%d) => %s\n",zpbitsout(s[3]),3,9,
|
||||
zpbitsout(bitsubstr(s[3],3,9)));
|
||||
printf("%s (%d,%d) => %s (%s)\n",zpbitsout(s[3]),3,17,
|
||||
zpbitsout(bitsubstr(s[3],3,17)),zpbitsout(bitsubstr(s[3],3,17)));
|
||||
printf ("\nLOGICAL AND:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s & %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitand(s[i],s[j])));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 1, 8,
|
||||
zpbitsout(bitsubstr(s[3], 1, 8)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 9, 8,
|
||||
zpbitsout(bitsubstr(s[3], 9, 8)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 1, 9,
|
||||
zpbitsout(bitsubstr(s[3], 1, 9)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 5,
|
||||
zpbitsout(bitsubstr(s[3], 3, 5)));
|
||||
printf("%s (%d,%d) => %s\n", zpbitsout(s[3]), 3, 9,
|
||||
zpbitsout(bitsubstr(s[3], 3, 9)));
|
||||
printf("%s (%d,%d) => %s (%s)\n", zpbitsout(s[3]), 3, 17,
|
||||
zpbitsout(bitsubstr(s[3], 3, 17)), zpbitsout(bitsubstr(s[3], 3, 17)));
|
||||
printf("\nLOGICAL AND:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s & %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitand(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL OR:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s | %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitor(s[i],s[j])));
|
||||
printf("\nLOGICAL OR:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s | %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitor(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL XOR:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
for (j=i+1; j<numb; j++)
|
||||
printf("%s ^ %s = %s\n",zpbitsout(s[i]),zpbitsout(s[j]),
|
||||
zpbitsout(bitxor(s[i],s[j])));
|
||||
printf("\nLOGICAL XOR:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
for (j = i + 1; j < numb; j++)
|
||||
printf("%s ^ %s = %s\n", zpbitsout(s[i]), zpbitsout(s[j]),
|
||||
zpbitsout(bitxor(s[i], s[j])));
|
||||
|
||||
printf ("\nLOGICAL NOT:\n");
|
||||
for (i=0; i<numb; i++)
|
||||
printf("~%s = %s\n",zpbitsout(s[i]),zpbitsout(bitnot(s[i])));
|
||||
printf("\nLOGICAL NOT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
printf("~%s = %s\n", zpbitsout(s[i]), zpbitsout(bitnot(s[i])));
|
||||
|
||||
|
||||
printf ("\nSHIFT LEFT:\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf("%s\n",zpbitsout(s[i]));
|
||||
for (j=0; j<=VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n",j,zpbitsout(bitshiftleft(s[i],j)));
|
||||
printf("\nSHIFT LEFT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
for (j = 0; j <= VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n", j, zpbitsout(bitshiftleft(s[i], j)));
|
||||
}
|
||||
|
||||
printf ("\nSHIFT RIGHT:\n");
|
||||
for (i=0; i<numb; i++) {
|
||||
printf("%s\n",zpbitsout(s[i]));
|
||||
for (j=0; j<=VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n",j,zpbitsout(bitshiftright(s[i],j)));
|
||||
printf("\nSHIFT RIGHT:\n");
|
||||
for (i = 0; i < numb; i++)
|
||||
{
|
||||
printf("%s\n", zpbitsout(s[i]));
|
||||
for (j = 0; j <= VARBITLEN(s[i]); j++)
|
||||
printf("\t%3d\t%s\n", j, zpbitsout(bitshiftright(s[i], j)));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -36,7 +36,7 @@
|
||||
* Decode time string 00:00:00 through 24:00:00.
|
||||
*/
|
||||
static int
|
||||
decode_24h_time(char *str, struct tm *tm, double *fsec)
|
||||
decode_24h_time(char *str, struct tm * tm, double *fsec)
|
||||
{
|
||||
char *cp;
|
||||
|
||||
@ -51,9 +51,7 @@ decode_24h_time(char *str, struct tm *tm, double *fsec)
|
||||
*fsec = 0;
|
||||
}
|
||||
else if (*cp != ':')
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
str = cp + 1;
|
||||
@ -72,10 +70,10 @@ decode_24h_time(char *str, struct tm *tm, double *fsec)
|
||||
}
|
||||
|
||||
/* do a sanity check */
|
||||
if ( (tm->tm_hour < 0) || (tm->tm_hour > 24)
|
||||
if ((tm->tm_hour < 0) || (tm->tm_hour > 24)
|
||||
|| (tm->tm_min < 0) || (tm->tm_min > 59)
|
||||
|| (tm->tm_sec < 0) || (tm->tm_sec > 59)
|
||||
|| (*fsec < 0) )
|
||||
|| (*fsec < 0))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
|
@ -38,6 +38,7 @@ extern int assertTest(int val);
|
||||
|
||||
#ifdef ASSERT_CHECKING_TEST
|
||||
extern int assertEnable(int val);
|
||||
|
||||
#endif
|
||||
|
||||
int
|
||||
@ -84,7 +85,8 @@ active_listeners(text *relname)
|
||||
ScanKeyData key;
|
||||
Datum d;
|
||||
bool isnull;
|
||||
int len, pid;
|
||||
int len,
|
||||
pid;
|
||||
int count = 0;
|
||||
int ourpid = getpid();
|
||||
char listen_name[NAMEDATALEN];
|
||||
@ -92,8 +94,9 @@ active_listeners(text *relname)
|
||||
lRel = heap_openr(ListenerRelationName, AccessShareLock);
|
||||
tdesc = RelationGetDescr(lRel);
|
||||
|
||||
if (relname && (VARSIZE(relname) > VARHDRSZ)) {
|
||||
len = MIN(VARSIZE(relname)-VARHDRSZ, NAMEDATALEN-1);
|
||||
if (relname && (VARSIZE(relname) > VARHDRSZ))
|
||||
{
|
||||
len = MIN(VARSIZE(relname) - VARHDRSZ, NAMEDATALEN - 1);
|
||||
strncpy(listen_name, VARDATA(relname), len);
|
||||
listen_name[len] = '\0';
|
||||
ScanKeyEntryInitialize(&key, 0,
|
||||
@ -101,15 +104,16 @@ active_listeners(text *relname)
|
||||
F_NAMEEQ,
|
||||
PointerGetDatum(listen_name));
|
||||
sRel = heap_beginscan(lRel, 0, SnapshotNow, 1, &key);
|
||||
} else {
|
||||
sRel = heap_beginscan(lRel, 0, SnapshotNow, 0, (ScanKey)NULL);
|
||||
}
|
||||
else
|
||||
sRel = heap_beginscan(lRel, 0, SnapshotNow, 0, (ScanKey) NULL);
|
||||
|
||||
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0)))
|
||||
{
|
||||
d = heap_getattr(lTuple, Anum_pg_listener_pid, tdesc, &isnull);
|
||||
pid = DatumGetInt32(d);
|
||||
if ((pid == ourpid) || (kill(pid, SIGTSTP) == 0)) {
|
||||
if ((pid == ourpid) || (kill(pid, SIGTSTP) == 0))
|
||||
{
|
||||
/* elog(NOTICE, "%d ok", pid); */
|
||||
count++;
|
||||
}
|
||||
@ -134,6 +138,7 @@ assert_test(int val)
|
||||
{
|
||||
return assertTest(val);
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -10,8 +10,10 @@ int active_listeners(text *relname);
|
||||
|
||||
#ifdef USE_ASSERT_CHECKING
|
||||
int assert_enable(int val);
|
||||
|
||||
#ifdef ASSERT_CHECKING_TEST
|
||||
int assert_test(int val);
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* $Header: /cvsroot/pgsql/contrib/pgbench/pgbench.c,v 1.2 2000/04/08 18:32:24 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/contrib/pgbench/pgbench.c,v 1.3 2000/04/12 17:14:27 momjian Exp $
|
||||
*
|
||||
* pgbench: a simple TPC-B like benchmark program for PostgreSQL
|
||||
* written by Tatsuo Ishii
|
||||
@ -53,7 +53,8 @@
|
||||
#define MAXCLIENTS 1024 /* max number of clients allowed */
|
||||
|
||||
int nclients = 1; /* default number of simulated clients */
|
||||
int nxacts = 10; /* default number of transactions per clients */
|
||||
int nxacts = 10; /* default number of transactions per
|
||||
* clients */
|
||||
|
||||
/*
|
||||
* scaling factor. for example, tps = 10 will make 1000000 tuples of
|
||||
@ -71,12 +72,14 @@ int tps = 1;
|
||||
|
||||
int remains; /* number of remained clients */
|
||||
|
||||
typedef struct {
|
||||
typedef struct
|
||||
{
|
||||
PGconn *con; /* connection handle to DB */
|
||||
int state; /* state No. */
|
||||
int cnt; /* xacts count */
|
||||
int ecnt; /* error count */
|
||||
int listen; /* none 0 indicates that an async query has been sent */
|
||||
int listen; /* none 0 indicates that an async query
|
||||
* has been sent */
|
||||
int aid; /* account id for this transaction */
|
||||
int bid; /* branch id for this transaction */
|
||||
int tid; /* teller id for this transaction */
|
||||
@ -84,53 +87,67 @@ typedef struct {
|
||||
int abalance;
|
||||
} CState;
|
||||
|
||||
static void usage() {
|
||||
fprintf(stderr,"usage: pgbench [-h hostname][-p port][-c nclients][-t ntransactions][-s scaling_factor][-n][-v][-S][-d][dbname]\n");
|
||||
fprintf(stderr,"(initialize mode): pgbench -i [-h hostname][-p port][-s scaling_factor][-d][dbname]\n");
|
||||
static void
|
||||
usage()
|
||||
{
|
||||
fprintf(stderr, "usage: pgbench [-h hostname][-p port][-c nclients][-t ntransactions][-s scaling_factor][-n][-v][-S][-d][dbname]\n");
|
||||
fprintf(stderr, "(initialize mode): pgbench -i [-h hostname][-p port][-s scaling_factor][-d][dbname]\n");
|
||||
}
|
||||
|
||||
/* random number generator */
|
||||
static int getrand(int min, int max) {
|
||||
return(min+(int)(max*1.0*rand()/(RAND_MAX+1.0)));
|
||||
static int
|
||||
getrand(int min, int max)
|
||||
{
|
||||
return (min + (int) (max * 1.0 * rand() / (RAND_MAX + 1.0)));
|
||||
}
|
||||
|
||||
/* throw away response from backend */
|
||||
static void discard_response(CState *state) {
|
||||
static void
|
||||
discard_response(CState * state)
|
||||
{
|
||||
PGresult *res;
|
||||
do {
|
||||
|
||||
do
|
||||
{
|
||||
res = PQgetResult(state->con);
|
||||
if (res)
|
||||
PQclear(res);
|
||||
} while(res);
|
||||
} while (res);
|
||||
}
|
||||
|
||||
static int check(CState *state, PGresult *res, int n, int good)
|
||||
static int
|
||||
check(CState * state, PGresult *res, int n, int good)
|
||||
{
|
||||
CState *st = &state[n];
|
||||
|
||||
if (res && PQresultStatus(res) != good) {
|
||||
fprintf(stderr,"Client %d aborted in state %d: %s",n,st->state,PQerrorMessage(st->con));
|
||||
if (res && PQresultStatus(res) != good)
|
||||
{
|
||||
fprintf(stderr, "Client %d aborted in state %d: %s", n, st->state, PQerrorMessage(st->con));
|
||||
remains--; /* I've aborted */
|
||||
PQfinish(st->con);
|
||||
st->con = NULL;
|
||||
return(-1);
|
||||
return (-1);
|
||||
}
|
||||
return(0);
|
||||
return (0);
|
||||
}
|
||||
|
||||
/* process a transaction */
|
||||
static void doOne(CState *state, int n, int debug) {
|
||||
static void
|
||||
doOne(CState * state, int n, int debug)
|
||||
{
|
||||
char sql[256];
|
||||
PGresult *res;
|
||||
CState *st = &state[n];
|
||||
|
||||
if (st->listen) { /* are we receiver? */
|
||||
if (debug) {
|
||||
fprintf(stderr,"client %d receiving\n",n);
|
||||
}
|
||||
while (PQisBusy(st->con) == TRUE) {
|
||||
if (!PQconsumeInput(st->con)) { /* there's something wrong */
|
||||
fprintf(stderr, "Client %d aborted in state %d. Probably the backend died while processing.\n",n, st->state);
|
||||
if (st->listen)
|
||||
{ /* are we receiver? */
|
||||
if (debug)
|
||||
fprintf(stderr, "client %d receiving\n", n);
|
||||
while (PQisBusy(st->con) == TRUE)
|
||||
{
|
||||
if (!PQconsumeInput(st->con))
|
||||
{ /* there's something wrong */
|
||||
fprintf(stderr, "Client %d aborted in state %d. Probably the backend died while processing.\n", n, st->state);
|
||||
remains--; /* I've aborted */
|
||||
PQfinish(st->con);
|
||||
st->con = NULL;
|
||||
@ -138,64 +155,59 @@ static void doOne(CState *state, int n, int debug) {
|
||||
}
|
||||
}
|
||||
|
||||
switch (st->state) {
|
||||
switch (st->state)
|
||||
{
|
||||
case 0: /* response to "begin" */
|
||||
res = PQgetResult(st->con);
|
||||
if (check(state, res, n, PGRES_COMMAND_OK)) {
|
||||
if (check(state, res, n, PGRES_COMMAND_OK))
|
||||
return;
|
||||
}
|
||||
PQclear(res);
|
||||
discard_response(st);
|
||||
break;
|
||||
case 1: /* response to "update accounts..." */
|
||||
res = PQgetResult(st->con);
|
||||
if (check(state, res, n, PGRES_COMMAND_OK)) {
|
||||
if (check(state, res, n, PGRES_COMMAND_OK))
|
||||
return;
|
||||
}
|
||||
PQclear(res);
|
||||
discard_response(st);
|
||||
break;
|
||||
case 2: /* response to "select abalance ..." */
|
||||
res = PQgetResult(st->con);
|
||||
if (check(state, res, n, PGRES_TUPLES_OK)) {
|
||||
if (check(state, res, n, PGRES_TUPLES_OK))
|
||||
return;
|
||||
}
|
||||
PQclear(res);
|
||||
discard_response(st);
|
||||
break;
|
||||
case 3: /* response to "update tellers ..." */
|
||||
res = PQgetResult(st->con);
|
||||
if (check(state, res, n, PGRES_COMMAND_OK)) {
|
||||
if (check(state, res, n, PGRES_COMMAND_OK))
|
||||
return;
|
||||
}
|
||||
PQclear(res);
|
||||
discard_response(st);
|
||||
break;
|
||||
case 4: /* response to "update branches ..." */
|
||||
res = PQgetResult(st->con);
|
||||
if (check(state, res, n, PGRES_COMMAND_OK)) {
|
||||
if (check(state, res, n, PGRES_COMMAND_OK))
|
||||
return;
|
||||
}
|
||||
PQclear(res);
|
||||
discard_response(st);
|
||||
break;
|
||||
case 5: /* response to "insert into history ..." */
|
||||
res = PQgetResult(st->con);
|
||||
if (check(state, res, n, PGRES_COMMAND_OK)) {
|
||||
if (check(state, res, n, PGRES_COMMAND_OK))
|
||||
return;
|
||||
}
|
||||
PQclear(res);
|
||||
discard_response(st);
|
||||
break;
|
||||
case 6: /* response to "end" */
|
||||
res = PQgetResult(st->con);
|
||||
if (check(state, res, n, PGRES_COMMAND_OK)) {
|
||||
if (check(state, res, n, PGRES_COMMAND_OK))
|
||||
return;
|
||||
}
|
||||
PQclear(res);
|
||||
discard_response(st);
|
||||
|
||||
if (++st->cnt >= nxacts) {
|
||||
if (++st->cnt >= nxacts)
|
||||
{
|
||||
remains--; /* I've done */
|
||||
PQfinish(st->con);
|
||||
st->con = NULL;
|
||||
@ -206,67 +218,72 @@ static void doOne(CState *state, int n, int debug) {
|
||||
|
||||
/* increment state counter */
|
||||
st->state++;
|
||||
if (st->state > 6) {
|
||||
if (st->state > 6)
|
||||
st->state = 0;
|
||||
}
|
||||
}
|
||||
|
||||
switch (st->state) {
|
||||
switch (st->state)
|
||||
{
|
||||
case 0: /* about to start */
|
||||
strcpy(sql,"begin");
|
||||
st->aid = getrand(1,naccounts*tps);
|
||||
st->bid = getrand(1,nbranches*tps);
|
||||
st->tid = getrand(1,ntellers*tps);
|
||||
st->delta = getrand(1,1000);
|
||||
strcpy(sql, "begin");
|
||||
st->aid = getrand(1, naccounts * tps);
|
||||
st->bid = getrand(1, nbranches * tps);
|
||||
st->tid = getrand(1, ntellers * tps);
|
||||
st->delta = getrand(1, 1000);
|
||||
break;
|
||||
case 1:
|
||||
sprintf(sql,"update accounts set abalance = abalance + %d where aid = %d\n",st->delta,st->aid);
|
||||
sprintf(sql, "update accounts set abalance = abalance + %d where aid = %d\n", st->delta, st->aid);
|
||||
break;
|
||||
case 2:
|
||||
sprintf(sql,"select abalance from accounts where aid = %d",st->aid);
|
||||
sprintf(sql, "select abalance from accounts where aid = %d", st->aid);
|
||||
break;
|
||||
case 3:
|
||||
sprintf(sql,"update tellers set tbalance = tbalance + %d where tid = %d\n",
|
||||
st->delta,st->tid);
|
||||
sprintf(sql, "update tellers set tbalance = tbalance + %d where tid = %d\n",
|
||||
st->delta, st->tid);
|
||||
break;
|
||||
case 4:
|
||||
sprintf(sql,"update branches set bbalance = bbalance + %d where bid = %d",st->delta,st->bid);
|
||||
sprintf(sql, "update branches set bbalance = bbalance + %d where bid = %d", st->delta, st->bid);
|
||||
break;
|
||||
case 5:
|
||||
sprintf(sql,"insert into history(tid,bid,aid,delta,time) values(%d,%d,%d,%d,'now')",
|
||||
st->tid,st->bid,st->aid,st->delta);
|
||||
sprintf(sql, "insert into history(tid,bid,aid,delta,time) values(%d,%d,%d,%d,'now')",
|
||||
st->tid, st->bid, st->aid, st->delta);
|
||||
break;
|
||||
case 6:
|
||||
strcpy(sql,"end");
|
||||
strcpy(sql, "end");
|
||||
break;
|
||||
}
|
||||
|
||||
if (debug) {
|
||||
fprintf(stderr,"client %d sending %s\n",n,sql);
|
||||
}
|
||||
if (PQsendQuery(st->con, sql) == 0) {
|
||||
if (debug) {
|
||||
fprintf(stderr, "PQsendQuery(%s)failed\n",sql);
|
||||
}
|
||||
if (debug)
|
||||
fprintf(stderr, "client %d sending %s\n", n, sql);
|
||||
if (PQsendQuery(st->con, sql) == 0)
|
||||
{
|
||||
if (debug)
|
||||
fprintf(stderr, "PQsendQuery(%s)failed\n", sql);
|
||||
st->ecnt++;
|
||||
} else {
|
||||
}
|
||||
else
|
||||
{
|
||||
st->listen++; /* flags that should be listned */
|
||||
}
|
||||
}
|
||||
|
||||
/* process a select only transaction */
|
||||
static void doSelectOnly(CState *state, int n, int debug) {
|
||||
static void
|
||||
doSelectOnly(CState * state, int n, int debug)
|
||||
{
|
||||
char sql[256];
|
||||
PGresult *res;
|
||||
CState *st = &state[n];
|
||||
|
||||
if (st->listen) { /* are we receiver? */
|
||||
if (debug) {
|
||||
fprintf(stderr,"client %d receiving\n",n);
|
||||
}
|
||||
while (PQisBusy(st->con) == TRUE) {
|
||||
if (!PQconsumeInput(st->con)) { /* there's something wrong */
|
||||
fprintf(stderr, "Client %d aborted in state %d. Probably the backend died while processing.\n",n, st->state);
|
||||
if (st->listen)
|
||||
{ /* are we receiver? */
|
||||
if (debug)
|
||||
fprintf(stderr, "client %d receiving\n", n);
|
||||
while (PQisBusy(st->con) == TRUE)
|
||||
{
|
||||
if (!PQconsumeInput(st->con))
|
||||
{ /* there's something wrong */
|
||||
fprintf(stderr, "Client %d aborted in state %d. Probably the backend died while processing.\n", n, st->state);
|
||||
remains--; /* I've aborted */
|
||||
PQfinish(st->con);
|
||||
st->con = NULL;
|
||||
@ -274,16 +291,17 @@ static void doSelectOnly(CState *state, int n, int debug) {
|
||||
}
|
||||
}
|
||||
|
||||
switch (st->state) {
|
||||
switch (st->state)
|
||||
{
|
||||
case 0: /* response to "select abalance ..." */
|
||||
res = PQgetResult(st->con);
|
||||
if (check(state, res, n, PGRES_TUPLES_OK)) {
|
||||
if (check(state, res, n, PGRES_TUPLES_OK))
|
||||
return;
|
||||
}
|
||||
PQclear(res);
|
||||
discard_response(st);
|
||||
|
||||
if (++st->cnt >= nxacts) {
|
||||
if (++st->cnt >= nxacts)
|
||||
{
|
||||
remains--; /* I've done */
|
||||
PQfinish(st->con);
|
||||
st->con = NULL;
|
||||
@ -294,44 +312,50 @@ static void doSelectOnly(CState *state, int n, int debug) {
|
||||
|
||||
/* increment state counter */
|
||||
st->state++;
|
||||
if (st->state > 0) {
|
||||
if (st->state > 0)
|
||||
st->state = 0;
|
||||
}
|
||||
}
|
||||
|
||||
switch (st->state) {
|
||||
switch (st->state)
|
||||
{
|
||||
case 0:
|
||||
st->aid = getrand(1,naccounts*tps);
|
||||
sprintf(sql,"select abalance from accounts where aid = %d",st->aid);
|
||||
st->aid = getrand(1, naccounts * tps);
|
||||
sprintf(sql, "select abalance from accounts where aid = %d", st->aid);
|
||||
break;
|
||||
}
|
||||
|
||||
if (debug) {
|
||||
fprintf(stderr,"client %d sending %s\n",n,sql);
|
||||
}
|
||||
if (debug)
|
||||
fprintf(stderr, "client %d sending %s\n", n, sql);
|
||||
|
||||
if (PQsendQuery(st->con, sql) == 0) {
|
||||
if (debug) {
|
||||
fprintf(stderr, "PQsendQuery(%s)failed\n",sql);
|
||||
}
|
||||
if (PQsendQuery(st->con, sql) == 0)
|
||||
{
|
||||
if (debug)
|
||||
fprintf(stderr, "PQsendQuery(%s)failed\n", sql);
|
||||
st->ecnt++;
|
||||
} else {
|
||||
}
|
||||
else
|
||||
{
|
||||
st->listen++; /* flags that should be listned */
|
||||
}
|
||||
}
|
||||
|
||||
/* discard connections */
|
||||
static void disconnect_all(CState *state) {
|
||||
static void
|
||||
disconnect_all(CState * state)
|
||||
{
|
||||
int i;
|
||||
for (i=0;i<nclients;i++) {
|
||||
if (state[i].con) {
|
||||
|
||||
for (i = 0; i < nclients; i++)
|
||||
{
|
||||
if (state[i].con)
|
||||
PQfinish(state[i].con);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* create tables and setup data */
|
||||
static void init(char *pghost, char *pgport,char *dbName) {
|
||||
static void
|
||||
init(char *pghost, char *pgport, char *dbName)
|
||||
{
|
||||
PGconn *con;
|
||||
PGresult *res;
|
||||
static char *DDLs[] = {
|
||||
@ -348,140 +372,164 @@ static void init(char *pghost, char *pgport,char *dbName) {
|
||||
int i;
|
||||
|
||||
con = PQsetdb(pghost, pgport, NULL, NULL, dbName);
|
||||
if (PQstatus(con) == CONNECTION_BAD) {
|
||||
fprintf(stderr, "Connection to database '%s' on %s failed.\n", dbName,pghost);
|
||||
if (PQstatus(con) == CONNECTION_BAD)
|
||||
{
|
||||
fprintf(stderr, "Connection to database '%s' on %s failed.\n", dbName, pghost);
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
for (i=0;i<(sizeof(DDLs)/sizeof(char *));i++) {
|
||||
res = PQexec(con,DDLs[i]);
|
||||
if (strncmp(DDLs[i],"drop",4) && PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
for (i = 0; i < (sizeof(DDLs) / sizeof(char *)); i++)
|
||||
{
|
||||
res = PQexec(con, DDLs[i]);
|
||||
if (strncmp(DDLs[i], "drop", 4) && PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
}
|
||||
|
||||
res = PQexec(con,"begin");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
res = PQexec(con, "begin");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
for(i = 0; i < nbranches * tps; i++) {
|
||||
sprintf(sql,"insert into branches(bid,bbalance) values(%d,0)",i+1);
|
||||
res = PQexec(con,sql);
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
for (i = 0; i < nbranches * tps; i++)
|
||||
{
|
||||
sprintf(sql, "insert into branches(bid,bbalance) values(%d,0)", i + 1);
|
||||
res = PQexec(con, sql);
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
}
|
||||
|
||||
for(i = 0; i < ntellers * tps; i++) {
|
||||
sprintf(sql,"insert into tellers(tid,bid,tbalance) values (%d,%d,0)"
|
||||
,i+1,i/ntellers+1);
|
||||
res = PQexec(con,sql);
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
for (i = 0; i < ntellers * tps; i++)
|
||||
{
|
||||
sprintf(sql, "insert into tellers(tid,bid,tbalance) values (%d,%d,0)"
|
||||
,i + 1, i / ntellers + 1);
|
||||
res = PQexec(con, sql);
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
}
|
||||
|
||||
res = PQexec(con,"copy accounts from stdin");
|
||||
if (PQresultStatus(res) != PGRES_COPY_IN) {
|
||||
res = PQexec(con, "copy accounts from stdin");
|
||||
if (PQresultStatus(res) != PGRES_COPY_IN)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
|
||||
fprintf(stderr,"creating tables...\n");
|
||||
for(i = 0; i < naccounts*tps; i++) {
|
||||
fprintf(stderr, "creating tables...\n");
|
||||
for (i = 0; i < naccounts * tps; i++)
|
||||
{
|
||||
int j = i + 1;
|
||||
sprintf(sql,"%d\t%d\t%d\t\n",i+1,(i+1)/naccounts,0);
|
||||
if (PQputline(con,sql)) {
|
||||
fprintf(stderr,"PQputline failed\n");
|
||||
|
||||
sprintf(sql, "%d\t%d\t%d\t\n", i + 1, (i + 1) / naccounts, 0);
|
||||
if (PQputline(con, sql))
|
||||
{
|
||||
fprintf(stderr, "PQputline failed\n");
|
||||
exit(1);
|
||||
}
|
||||
if (j % 10000 == 0) {
|
||||
fprintf(stderr,"%d tuples done.\n",j);
|
||||
if (j % 10000 == 0)
|
||||
fprintf(stderr, "%d tuples done.\n", j);
|
||||
}
|
||||
}
|
||||
if (PQputline(con,"\\.\n")) {
|
||||
fprintf(stderr,"very last PQputline failed\n");
|
||||
if (PQputline(con, "\\.\n"))
|
||||
{
|
||||
fprintf(stderr, "very last PQputline failed\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (PQendcopy(con)) {
|
||||
fprintf(stderr,"PQendcopy failed\n");
|
||||
if (PQendcopy(con))
|
||||
{
|
||||
fprintf(stderr, "PQendcopy failed\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
res = PQexec(con,"end");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
res = PQexec(con, "end");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* vacuum */
|
||||
fprintf(stderr,"vacuum...");
|
||||
res = PQexec(con,"vacuum analyze");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
fprintf(stderr, "vacuum...");
|
||||
res = PQexec(con, "vacuum analyze");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
fprintf(stderr,"done.\n");
|
||||
fprintf(stderr, "done.\n");
|
||||
|
||||
PQfinish(con);
|
||||
}
|
||||
|
||||
/* print out results */
|
||||
static void printResults(
|
||||
int ttype, CState *state,
|
||||
struct timeval *tv1,struct timeval *tv2,
|
||||
struct timeval *tv3) {
|
||||
double t1,t2;
|
||||
static void
|
||||
printResults(
|
||||
int ttype, CState * state,
|
||||
struct timeval * tv1, struct timeval * tv2,
|
||||
struct timeval * tv3)
|
||||
{
|
||||
double t1,
|
||||
t2;
|
||||
int i;
|
||||
int normal_xacts = 0;
|
||||
|
||||
for (i=0;i<nclients;i++) {
|
||||
for (i = 0; i < nclients; i++)
|
||||
normal_xacts += state[i].cnt;
|
||||
}
|
||||
|
||||
t1 = (tv3->tv_sec - tv1->tv_sec)*1000000.0+(tv3->tv_usec - tv1->tv_usec);
|
||||
t1 = normal_xacts*1000000.0/t1;
|
||||
t1 = (tv3->tv_sec - tv1->tv_sec) * 1000000.0 + (tv3->tv_usec - tv1->tv_usec);
|
||||
t1 = normal_xacts * 1000000.0 / t1;
|
||||
|
||||
t2 = (tv3->tv_sec - tv2->tv_sec)*1000000.0+(tv3->tv_usec - tv2->tv_usec);
|
||||
t2 = normal_xacts*1000000.0/t2;
|
||||
t2 = (tv3->tv_sec - tv2->tv_sec) * 1000000.0 + (tv3->tv_usec - tv2->tv_usec);
|
||||
t2 = normal_xacts * 1000000.0 / t2;
|
||||
|
||||
printf("transaction type: %s\n",ttype==0?"TPC-B (sort of)":"SELECT only");
|
||||
printf("scaling factor: %d\n",tps);
|
||||
printf("number of clients: %d\n",nclients);
|
||||
printf("number of transactions per client: %d\n",nxacts);
|
||||
printf("number of transactions actually processed: %d/%d\n",normal_xacts,nxacts*nclients);
|
||||
printf("tps = %f(including connections establishing)\n",t1);
|
||||
printf("tps = %f(excluding connections establishing)\n",t2);
|
||||
printf("transaction type: %s\n", ttype == 0 ? "TPC-B (sort of)" : "SELECT only");
|
||||
printf("scaling factor: %d\n", tps);
|
||||
printf("number of clients: %d\n", nclients);
|
||||
printf("number of transactions per client: %d\n", nxacts);
|
||||
printf("number of transactions actually processed: %d/%d\n", normal_xacts, nxacts * nclients);
|
||||
printf("tps = %f(including connections establishing)\n", t1);
|
||||
printf("tps = %f(excluding connections establishing)\n", t2);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
int
|
||||
main(int argc, char **argv)
|
||||
{
|
||||
extern char *optarg;
|
||||
extern int optind, opterr, optopt;
|
||||
extern int optind,
|
||||
opterr,
|
||||
optopt;
|
||||
int c;
|
||||
char *pghost = "";
|
||||
char *pgport = "";
|
||||
char *dbName;
|
||||
int is_init_mode = 0; /* initialize mode? */
|
||||
int is_no_vacuum = 0; /* no vacuum at all before testing? */
|
||||
int is_no_vacuum = 0; /* no vacuum at all before
|
||||
* testing? */
|
||||
int is_full_vacuum = 0; /* do full vacuum before testing? */
|
||||
int debug = 0; /* debug flag */
|
||||
int ttype = 0; /* transaction type. 0: TPC-B, 1: SELECT only */
|
||||
int ttype = 0; /* transaction type. 0: TPC-B, 1: SELECT
|
||||
* only */
|
||||
|
||||
static CState state[MAXCLIENTS]; /* clients status */
|
||||
|
||||
struct timeval tv1; /* start up time */
|
||||
struct timeval tv2; /* after establishing all connections to the backend */
|
||||
struct timeval tv2; /* after establishing all connections to
|
||||
* the backend */
|
||||
struct timeval tv3; /* end time */
|
||||
|
||||
int i;
|
||||
@ -492,13 +540,16 @@ int main(int argc, char **argv) {
|
||||
|
||||
#ifndef __CYGWIN32__
|
||||
struct rlimit rlim;
|
||||
|
||||
#endif
|
||||
|
||||
PGconn *con;
|
||||
PGresult *res;
|
||||
|
||||
while ((c = getopt(argc, argv, "ih:nvp:dc:t:s:S")) != EOF) {
|
||||
switch (c) {
|
||||
while ((c = getopt(argc, argv, "ih:nvp:dc:t:s:S")) != EOF)
|
||||
{
|
||||
switch (c)
|
||||
{
|
||||
case 'i':
|
||||
is_init_mode++;
|
||||
break;
|
||||
@ -522,37 +573,43 @@ int main(int argc, char **argv) {
|
||||
break;
|
||||
case 'c':
|
||||
nclients = atoi(optarg);
|
||||
if (nclients <= 0 || nclients > MAXCLIENTS) {
|
||||
fprintf(stderr,"wrong number of clients: %d\n",nclients);
|
||||
if (nclients <= 0 || nclients > MAXCLIENTS)
|
||||
{
|
||||
fprintf(stderr, "wrong number of clients: %d\n", nclients);
|
||||
exit(1);
|
||||
}
|
||||
#ifndef __CYGWIN32__
|
||||
#ifdef RLIMIT_NOFILE /* most platform uses RLIMIT_NOFILE */
|
||||
if (getrlimit(RLIMIT_NOFILE,&rlim) == -1) {
|
||||
if (getrlimit(RLIMIT_NOFILE, &rlim) == -1)
|
||||
{
|
||||
#else /* but BSD doesn't ... */
|
||||
if (getrlimit(RLIMIT_OFILE,&rlim) == -1) {
|
||||
if (getrlimit(RLIMIT_OFILE, &rlim) == -1)
|
||||
{
|
||||
#endif /* HAVE_RLIMIT_NOFILE */
|
||||
fprintf(stderr,"getrlimit failed. reason: %s\n",strerror(errno));
|
||||
fprintf(stderr, "getrlimit failed. reason: %s\n", strerror(errno));
|
||||
exit(1);
|
||||
}
|
||||
if (rlim.rlim_cur <= (nclients+2)) {
|
||||
fprintf(stderr,"You need at least %d open files resource but you are only allowed to use %ld.\n",nclients+2,rlim.rlim_cur);
|
||||
fprintf(stderr,"Use limit/ulimt to increase the limit before using pgbench.\n");
|
||||
if (rlim.rlim_cur <= (nclients + 2))
|
||||
{
|
||||
fprintf(stderr, "You need at least %d open files resource but you are only allowed to use %ld.\n", nclients + 2, rlim.rlim_cur);
|
||||
fprintf(stderr, "Use limit/ulimt to increase the limit before using pgbench.\n");
|
||||
exit(1);
|
||||
}
|
||||
#endif /* #ifndef __CYGWIN32__ */
|
||||
break;
|
||||
case 's':
|
||||
tps = atoi(optarg);
|
||||
if (tps <= 0) {
|
||||
fprintf(stderr,"wrong scaling factor: %d\n",tps);
|
||||
if (tps <= 0)
|
||||
{
|
||||
fprintf(stderr, "wrong scaling factor: %d\n", tps);
|
||||
exit(1);
|
||||
}
|
||||
break;
|
||||
case 't':
|
||||
nxacts = atoi(optarg);
|
||||
if (nxacts <= 0) {
|
||||
fprintf(stderr,"wrong number of transactions: %d\n",nxacts);
|
||||
if (nxacts <= 0)
|
||||
{
|
||||
fprintf(stderr, "wrong number of transactions: %d\n", nxacts);
|
||||
exit(1);
|
||||
}
|
||||
break;
|
||||
@ -563,103 +620,120 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
}
|
||||
|
||||
if (argc > optind) {
|
||||
if (argc > optind)
|
||||
dbName = argv[optind];
|
||||
} else {
|
||||
else
|
||||
{
|
||||
dbName = getenv("USER");
|
||||
if (dbName == NULL) {
|
||||
if (dbName == NULL)
|
||||
dbName = "";
|
||||
}
|
||||
}
|
||||
|
||||
if (is_init_mode) {
|
||||
if (is_init_mode)
|
||||
{
|
||||
init(pghost, pgport, dbName);
|
||||
exit(0);
|
||||
}
|
||||
|
||||
remains = nclients;
|
||||
|
||||
if (debug) {
|
||||
if (debug)
|
||||
{
|
||||
printf("pghost: %s pgport: %s nclients: %d nxacts: %d dbName: %s\n",
|
||||
pghost, pgport, nclients, nxacts, dbName);
|
||||
}
|
||||
|
||||
/* opening connection... */
|
||||
con = PQsetdb(pghost, pgport, NULL, NULL, dbName);
|
||||
if (PQstatus(con) == CONNECTION_BAD) {
|
||||
if (PQstatus(con) == CONNECTION_BAD)
|
||||
{
|
||||
fprintf(stderr, "Connection to database '%s' failed.\n", dbName);
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* get the scaling factor that should be same as count(*) from branches... */
|
||||
res = PQexec(con,"select count(*) from branches");
|
||||
if (PQresultStatus(res) != PGRES_TUPLES_OK) {
|
||||
/*
|
||||
* get the scaling factor that should be same as count(*) from
|
||||
* branches...
|
||||
*/
|
||||
res = PQexec(con, "select count(*) from branches");
|
||||
if (PQresultStatus(res) != PGRES_TUPLES_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
tps = atoi(PQgetvalue(res, 0, 0));
|
||||
if (tps < 0) {
|
||||
fprintf(stderr,"count(*) from branches invalid (%d)\n",tps);
|
||||
if (tps < 0)
|
||||
{
|
||||
fprintf(stderr, "count(*) from branches invalid (%d)\n", tps);
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
|
||||
if (!is_no_vacuum) {
|
||||
fprintf(stderr,"starting vacuum...");
|
||||
res = PQexec(con,"vacuum branches");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
if (!is_no_vacuum)
|
||||
{
|
||||
fprintf(stderr, "starting vacuum...");
|
||||
res = PQexec(con, "vacuum branches");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
|
||||
res = PQexec(con,"vacuum tellers");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
res = PQexec(con, "vacuum tellers");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
|
||||
res = PQexec(con,"delete from history");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
res = PQexec(con, "delete from history");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
res = PQexec(con,"vacuum history");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
res = PQexec(con, "vacuum history");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
|
||||
fprintf(stderr,"end.\n");
|
||||
fprintf(stderr, "end.\n");
|
||||
|
||||
if (is_full_vacuum) {
|
||||
fprintf(stderr,"starting full vacuum...");
|
||||
res = PQexec(con,"vacuum analyze accounts");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK) {
|
||||
if (is_full_vacuum)
|
||||
{
|
||||
fprintf(stderr, "starting full vacuum...");
|
||||
res = PQexec(con, "vacuum analyze accounts");
|
||||
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
||||
{
|
||||
fprintf(stderr, "%s", PQerrorMessage(con));
|
||||
exit(1);
|
||||
}
|
||||
PQclear(res);
|
||||
fprintf(stderr,"end.\n");
|
||||
fprintf(stderr, "end.\n");
|
||||
}
|
||||
}
|
||||
PQfinish(con);
|
||||
|
||||
/* set random seed */
|
||||
gettimeofday(&tv1, 0);
|
||||
srand((uint)tv1.tv_usec);
|
||||
srand((uint) tv1.tv_usec);
|
||||
|
||||
/* get start up time */
|
||||
gettimeofday(&tv1, 0);
|
||||
|
||||
/* make connections to the database */
|
||||
for (i=0;i<nclients;i++) {
|
||||
for (i = 0; i < nclients; i++)
|
||||
{
|
||||
state[i].con = PQsetdb(pghost, pgport, NULL, NULL, dbName);
|
||||
if (PQstatus(state[i].con) == CONNECTION_BAD) {
|
||||
if (PQstatus(state[i].con) == CONNECTION_BAD)
|
||||
{
|
||||
fprintf(stderr, "Connection to database '%s' failed.\n", dbName);
|
||||
fprintf(stderr, "%s", PQerrorMessage(state[i].con));
|
||||
exit(1);
|
||||
@ -670,68 +744,77 @@ int main(int argc, char **argv) {
|
||||
gettimeofday(&tv2, 0);
|
||||
|
||||
/* send start up quries in async manner */
|
||||
for (i=0;i<nclients;i++) {
|
||||
if (ttype == 0) {
|
||||
for (i = 0; i < nclients; i++)
|
||||
{
|
||||
if (ttype == 0)
|
||||
doOne(state, i, debug);
|
||||
} else if (ttype == 1) {
|
||||
else if (ttype == 1)
|
||||
doSelectOnly(state, i, debug);
|
||||
}
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
if (remains <= 0) { /* all done ? */
|
||||
for (;;)
|
||||
{
|
||||
if (remains <= 0)
|
||||
{ /* all done ? */
|
||||
disconnect_all(state);
|
||||
/* get end time */
|
||||
gettimeofday(&tv3, 0);
|
||||
printResults(ttype, state, &tv1,&tv2,&tv3);
|
||||
printResults(ttype, state, &tv1, &tv2, &tv3);
|
||||
exit(0);
|
||||
}
|
||||
|
||||
FD_ZERO(&input_mask);
|
||||
|
||||
maxsock = 0;
|
||||
for (i=0;i<nclients;i++) {
|
||||
if (state[i].con) {
|
||||
for (i = 0; i < nclients; i++)
|
||||
{
|
||||
if (state[i].con)
|
||||
{
|
||||
int sock = PQsocket(state[i].con);
|
||||
if (sock < 0) {
|
||||
fprintf(stderr,"Client %d: PQsock failed\n",i);
|
||||
|
||||
if (sock < 0)
|
||||
{
|
||||
fprintf(stderr, "Client %d: PQsock failed\n", i);
|
||||
disconnect_all(state);
|
||||
exit(1);
|
||||
}
|
||||
FD_SET(sock, &input_mask);
|
||||
if (maxsock < sock) {
|
||||
if (maxsock < sock)
|
||||
maxsock = sock;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((nsocks = select(maxsock +1, &input_mask, (fd_set *)NULL,
|
||||
(fd_set *)NULL, (struct timeval *)NULL)) < 0) {
|
||||
if (errno == EINTR) {
|
||||
if ((nsocks = select(maxsock + 1, &input_mask, (fd_set *) NULL,
|
||||
(fd_set *) NULL, (struct timeval *) NULL)) < 0)
|
||||
{
|
||||
if (errno == EINTR)
|
||||
continue;
|
||||
}
|
||||
/* must be something wrong */
|
||||
disconnect_all(state);
|
||||
fprintf(stderr,"select failed: %s\n",strerror(errno));
|
||||
fprintf(stderr, "select failed: %s\n", strerror(errno));
|
||||
exit(1);
|
||||
} else if (nsocks == 0) { /* timeout */
|
||||
fprintf(stderr,"select timeout\n");
|
||||
for (i=0;i<nclients;i++) {
|
||||
fprintf(stderr,"client %d:state %d cnt %d ecnt %d listen %d\n",
|
||||
i,state[i].state,state[i].cnt,state[i].ecnt,state[i].listen);
|
||||
}
|
||||
else if (nsocks == 0)
|
||||
{ /* timeout */
|
||||
fprintf(stderr, "select timeout\n");
|
||||
for (i = 0; i < nclients; i++)
|
||||
{
|
||||
fprintf(stderr, "client %d:state %d cnt %d ecnt %d listen %d\n",
|
||||
i, state[i].state, state[i].cnt, state[i].ecnt, state[i].listen);
|
||||
}
|
||||
exit(0);
|
||||
}
|
||||
|
||||
/* ok, backend returns reply */
|
||||
for (i=0;i<nclients;i++) {
|
||||
if (state[i].con && FD_ISSET(PQsocket(state[i].con), &input_mask)) {
|
||||
if (ttype == 0) {
|
||||
for (i = 0; i < nclients; i++)
|
||||
{
|
||||
if (state[i].con && FD_ISSET(PQsocket(state[i].con), &input_mask))
|
||||
{
|
||||
if (ttype == 0)
|
||||
doOne(state, i, debug);
|
||||
} else if (ttype == 1) {
|
||||
else if (ttype == 1)
|
||||
doSelectOnly(state, i, debug);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -89,9 +89,8 @@ string_output(unsigned char *data, int size)
|
||||
break;
|
||||
case '{':
|
||||
/* Escape beginning of string, to distinguish from arrays */
|
||||
if (p == data) {
|
||||
if (p == data)
|
||||
len++;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
if (NOTPRINTABLE(*p))
|
||||
@ -137,9 +136,8 @@ string_output(unsigned char *data, int size)
|
||||
break;
|
||||
case '{':
|
||||
/* Escape beginning of string, to distinguish from arrays */
|
||||
if (p == data) {
|
||||
if (p == data)
|
||||
*r++ = '\\';
|
||||
}
|
||||
*r++ = c;
|
||||
break;
|
||||
default:
|
||||
@ -361,6 +359,7 @@ c_charin(unsigned char *str)
|
||||
{
|
||||
return (string_input(str, 1, 0, NULL));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* end of file */
|
||||
|
@ -1,16 +1,17 @@
|
||||
#ifndef STRING_IO_H
|
||||
#define STRING_IO_H
|
||||
|
||||
unsigned char* string_output(unsigned char *data, int size);
|
||||
unsigned char* string_input(unsigned char *str, int size, int hdrsize,
|
||||
unsigned char *string_output(unsigned char *data, int size);
|
||||
unsigned char *string_input(unsigned char *str, int size, int hdrsize,
|
||||
int *rtn_size);
|
||||
unsigned char* c_charout(int32 c);
|
||||
unsigned char* c_textout(struct varlena * vlena);
|
||||
unsigned char* c_varcharout(unsigned char *s);
|
||||
unsigned char *c_charout(int32 c);
|
||||
unsigned char *c_textout(struct varlena * vlena);
|
||||
unsigned char *c_varcharout(unsigned char *s);
|
||||
|
||||
#if 0
|
||||
struct varlena* c_textin(unsigned char *str);
|
||||
int32* c_charin(unsigned char *str)
|
||||
struct varlena *c_textin(unsigned char *str);
|
||||
int32 *
|
||||
c_charin(unsigned char *str)
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.61 2000/01/26 05:55:53 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.62 2000/04/12 17:14:36 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* The old interface functions have been converted to macros
|
||||
@ -139,7 +139,7 @@ DataFill(char *data,
|
||||
default:
|
||||
Assert(att[i]->attlen >= 0);
|
||||
memmove(data, DatumGetPointer(value[i]),
|
||||
(size_t)(att[i]->attlen));
|
||||
(size_t) (att[i]->attlen));
|
||||
break;
|
||||
}
|
||||
data = (char *) att_addlength((long) data, att[i]->attlen, value[i]);
|
||||
@ -326,7 +326,7 @@ nocachegetattr(HeapTuple tuple,
|
||||
Form_pg_attribute *att = tupleDesc->attrs;
|
||||
int slow = 0; /* do we have to walk nulls? */
|
||||
|
||||
(void)isnull; /*not used*/
|
||||
(void) isnull; /* not used */
|
||||
#ifdef IN_MACRO
|
||||
/* This is handled in the macro */
|
||||
Assert(attnum > 0);
|
||||
@ -806,11 +806,9 @@ void
|
||||
heap_freetuple(HeapTuple htup)
|
||||
{
|
||||
if (htup->t_data != NULL)
|
||||
if (htup->t_datamcxt != NULL && (char *)(htup->t_data) !=
|
||||
if (htup->t_datamcxt != NULL && (char *) (htup->t_data) !=
|
||||
((char *) htup + HEAPTUPLESIZE))
|
||||
{
|
||||
elog(NOTICE, "TELL Jan Wieck: heap_freetuple() found separate t_data");
|
||||
}
|
||||
|
||||
pfree(htup);
|
||||
}
|
||||
@ -851,7 +849,7 @@ heap_addheader(uint32 natts, /* max domain index */
|
||||
td->t_infomask |= HEAP_XMAX_INVALID;
|
||||
|
||||
if (structlen > 0)
|
||||
memmove((char *) td + hoff, structure, (size_t)structlen);
|
||||
memmove((char *) td + hoff, structure, (size_t) structlen);
|
||||
|
||||
return tuple;
|
||||
}
|
||||
|
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.42 2000/01/26 05:55:53 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.43 2000/04/12 17:14:37 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -134,7 +134,7 @@ nocache_index_getattr(IndexTuple tup,
|
||||
int data_off; /* tuple data offset */
|
||||
Form_pg_attribute *att = tupleDesc->attrs;
|
||||
|
||||
(void)isnull;
|
||||
(void) isnull;
|
||||
/* ----------------
|
||||
* sanity checks
|
||||
* ----------------
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.61 2000/01/31 04:35:48 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.62 2000/04/12 17:14:37 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* some of the executor utility code such as "ExecTypeFromTL" should be
|
||||
@ -238,8 +238,10 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
|
||||
Form_pg_attribute attr1 = tupdesc1->attrs[i];
|
||||
Form_pg_attribute attr2 = tupdesc2->attrs[i];
|
||||
|
||||
/* We do not need to check every single field here, and in fact
|
||||
* some fields such as attdisbursion probably shouldn't be compared.
|
||||
/*
|
||||
* We do not need to check every single field here, and in fact
|
||||
* some fields such as attdisbursion probably shouldn't be
|
||||
* compared.
|
||||
*/
|
||||
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
|
||||
return false;
|
||||
@ -585,8 +587,9 @@ BuildDescForRelation(List *schema, char *relname)
|
||||
constr->has_not_null = true;
|
||||
desc->attrs[attnum - 1]->attnotnull = entry->is_not_null;
|
||||
|
||||
/* Note we copy only pre-cooked default expressions.
|
||||
* Digestion of raw ones is someone else's problem.
|
||||
/*
|
||||
* Note we copy only pre-cooked default expressions. Digestion of
|
||||
* raw ones is someone else's problem.
|
||||
*/
|
||||
if (entry->cooked_default != NULL)
|
||||
{
|
||||
|
@ -6,7 +6,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.52 2000/03/17 02:36:00 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.53 2000/04/12 17:14:39 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -52,8 +52,10 @@ void gistdelete(Relation r, ItemPointer tid);
|
||||
static IndexTuple gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t);
|
||||
static void gistcentryinit(GISTSTATE *giststate, GISTENTRY *e, char *pr,
|
||||
Relation r, Page pg, OffsetNumber o, int b, bool l);
|
||||
|
||||
#ifdef GISTDEBUG
|
||||
static char *int_range_out(INTRANGE *r);
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -186,7 +188,7 @@ gistbuild(Relation heap,
|
||||
#ifndef OMIT_PARTIAL_INDEX
|
||||
/* SetSlotContents(slot, htup); */
|
||||
slot->val = htup;
|
||||
if (! ExecQual((List *) pred, econtext, false))
|
||||
if (!ExecQual((List *) pred, econtext, false))
|
||||
continue;
|
||||
#endif /* OMIT_PARTIAL_INDEX */
|
||||
}
|
||||
@ -272,13 +274,13 @@ gistbuild(Relation heap,
|
||||
/*
|
||||
* Since we just counted the tuples in the heap, we update its stats
|
||||
* in pg_class to guarantee that the planner takes advantage of the
|
||||
* index we just created. But, only update statistics during
|
||||
* normal index definitions, not for indices on system catalogs
|
||||
* created during bootstrap processing. We must close the relations
|
||||
* before updating statistics to guarantee that the relcache entries
|
||||
* are flushed when we increment the command counter in UpdateStats().
|
||||
* But we do not release any locks on the relations; those will be
|
||||
* held until end of transaction.
|
||||
* index we just created. But, only update statistics during normal
|
||||
* index definitions, not for indices on system catalogs created
|
||||
* during bootstrap processing. We must close the relations before
|
||||
* updating statistics to guarantee that the relcache entries are
|
||||
* flushed when we increment the command counter in UpdateStats(). But
|
||||
* we do not release any locks on the relations; those will be held
|
||||
* until end of transaction.
|
||||
*/
|
||||
if (IsNormalProcessingMode())
|
||||
{
|
||||
|
@ -266,7 +266,7 @@ gistdropscan(IndexScanDesc s)
|
||||
prev = l;
|
||||
|
||||
if (l == (GISTScanList) NULL)
|
||||
elog(ERROR, "GiST scan list corrupted -- cannot find 0x%p", (void*)s);
|
||||
elog(ERROR, "GiST scan list corrupted -- cannot find 0x%p", (void *) s);
|
||||
|
||||
if (prev == (GISTScanList) NULL)
|
||||
GISTScans = l->gsl_next;
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.36 2000/03/01 05:39:22 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.37 2000/04/12 17:14:43 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* This file contains only the public interface routines.
|
||||
@ -149,7 +149,7 @@ hashbuild(Relation heap,
|
||||
#ifndef OMIT_PARTIAL_INDEX
|
||||
/* SetSlotContents(slot, htup); */
|
||||
slot->val = htup;
|
||||
if (! ExecQual((List *) pred, econtext, false))
|
||||
if (!ExecQual((List *) pred, econtext, false))
|
||||
continue;
|
||||
#endif /* OMIT_PARTIAL_INDEX */
|
||||
}
|
||||
@ -230,13 +230,13 @@ hashbuild(Relation heap,
|
||||
/*
|
||||
* Since we just counted the tuples in the heap, we update its stats
|
||||
* in pg_class to guarantee that the planner takes advantage of the
|
||||
* index we just created. But, only update statistics during
|
||||
* normal index definitions, not for indices on system catalogs
|
||||
* created during bootstrap processing. We must close the relations
|
||||
* before updating statistics to guarantee that the relcache entries
|
||||
* are flushed when we increment the command counter in UpdateStats().
|
||||
* But we do not release any locks on the relations; those will be
|
||||
* held until end of transaction.
|
||||
* index we just created. But, only update statistics during normal
|
||||
* index definitions, not for indices on system catalogs created
|
||||
* during bootstrap processing. We must close the relations before
|
||||
* updating statistics to guarantee that the relcache entries are
|
||||
* flushed when we increment the command counter in UpdateStats(). But
|
||||
* we do not release any locks on the relations; those will be held
|
||||
* until end of transaction.
|
||||
*/
|
||||
if (IsNormalProcessingMode())
|
||||
{
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.24 2000/02/21 03:36:46 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.25 2000/04/12 17:14:44 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* These functions are stored in pg_amproc. For each operator class
|
||||
@ -146,7 +146,7 @@ hashoidvector(Oid *key)
|
||||
int i;
|
||||
uint32 result = 0;
|
||||
|
||||
for (i = INDEX_MAX_KEYS; --i >= 0; )
|
||||
for (i = INDEX_MAX_KEYS; --i >= 0;)
|
||||
result = (result << 1) ^ (~(uint32) key[i]);
|
||||
return result;
|
||||
}
|
||||
@ -162,7 +162,7 @@ hashint2vector(int16 *key)
|
||||
int i;
|
||||
uint32 result = 0;
|
||||
|
||||
for (i = INDEX_MAX_KEYS; --i >= 0; )
|
||||
for (i = INDEX_MAX_KEYS; --i >= 0;)
|
||||
result = (result << 1) ^ (~(uint32) key[i]);
|
||||
return result;
|
||||
}
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.22 2000/01/26 05:55:55 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.23 2000/04/12 17:14:44 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Because we can be doing an index scan on a relation while we
|
||||
@ -75,7 +75,7 @@ _hash_dropscan(IndexScanDesc scan)
|
||||
last = chk;
|
||||
|
||||
if (chk == (HashScanList) NULL)
|
||||
elog(ERROR, "hash scan list trashed; can't find 0x%p", (void*)scan);
|
||||
elog(ERROR, "hash scan list trashed; can't find 0x%p", (void *) scan);
|
||||
|
||||
if (last == (HashScanList) NULL)
|
||||
HashScans = chk->hashsl_next;
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.23 2000/03/17 02:36:02 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.24 2000/04/12 17:14:44 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.66 2000/02/09 03:49:47 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.67 2000/04/12 17:14:45 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
@ -188,8 +188,9 @@ unpinscan(HeapScanDesc scan)
|
||||
if (BufferIsValid(scan->rs_nbuf))
|
||||
ReleaseBuffer(scan->rs_nbuf);
|
||||
|
||||
/* we don't bother to clear rs_pbuf etc --- caller must
|
||||
* reinitialize them if scan descriptor is not being deleted.
|
||||
/*
|
||||
* we don't bother to clear rs_pbuf etc --- caller must reinitialize
|
||||
* them if scan descriptor is not being deleted.
|
||||
*/
|
||||
}
|
||||
|
||||
@ -544,7 +545,7 @@ heap_open(Oid relationId, LOCKMODE lockmode)
|
||||
if (lockmode == NoLock)
|
||||
return r; /* caller must check RelationIsValid! */
|
||||
|
||||
if (! RelationIsValid(r))
|
||||
if (!RelationIsValid(r))
|
||||
elog(ERROR, "Relation %u does not exist", relationId);
|
||||
|
||||
LockRelation(r, lockmode);
|
||||
@ -586,7 +587,7 @@ heap_openr(const char *relationName, LOCKMODE lockmode)
|
||||
if (lockmode == NoLock)
|
||||
return r; /* caller must check RelationIsValid! */
|
||||
|
||||
if (! RelationIsValid(r))
|
||||
if (!RelationIsValid(r))
|
||||
elog(ERROR, "Relation '%s' does not exist", relationName);
|
||||
|
||||
LockRelation(r, lockmode);
|
||||
@ -646,7 +647,7 @@ heap_beginscan(Relation relation,
|
||||
* sanity checks
|
||||
* ----------------
|
||||
*/
|
||||
if (! RelationIsValid(relation))
|
||||
if (!RelationIsValid(relation))
|
||||
elog(ERROR, "heap_beginscan: !RelationIsValid(relation)");
|
||||
|
||||
/* ----------------
|
||||
@ -681,6 +682,7 @@ heap_beginscan(Relation relation,
|
||||
scan->rs_nkeys = (short) nkeys;
|
||||
|
||||
if (nkeys)
|
||||
|
||||
/*
|
||||
* we do this here instead of in initscan() because heap_rescan
|
||||
* also calls initscan() and we don't want to allocate memory
|
||||
@ -847,9 +849,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
|
||||
|
||||
if (scan->rs_ptup.t_data == scan->rs_ctup.t_data &&
|
||||
BufferIsInvalid(scan->rs_pbuf))
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy the "current" tuple/buffer to "next". Pin/unpin the
|
||||
@ -1095,8 +1095,10 @@ heap_fetch(Relation relation,
|
||||
}
|
||||
else
|
||||
{
|
||||
/* All checks passed, so return the tuple as valid.
|
||||
* Caller is now responsible for releasing the buffer.
|
||||
|
||||
/*
|
||||
* All checks passed, so return the tuple as valid. Caller is now
|
||||
* responsible for releasing the buffer.
|
||||
*/
|
||||
*userbuf = buffer;
|
||||
}
|
||||
@ -1119,7 +1121,8 @@ heap_get_latest_tid(Relation relation,
|
||||
HeapTupleData tp;
|
||||
HeapTupleHeader t_data;
|
||||
ItemPointerData ctid;
|
||||
bool invalidBlock,linkend;
|
||||
bool invalidBlock,
|
||||
linkend;
|
||||
|
||||
/* ----------------
|
||||
* get the buffer from the relation descriptor
|
||||
@ -1300,10 +1303,11 @@ l1:
|
||||
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
|
||||
if (TransactionIdDidAbort(xwait))
|
||||
goto l1;
|
||||
|
||||
/*
|
||||
* xwait is committed but if xwait had just marked
|
||||
* the tuple for update then some other xaction could
|
||||
* update this tuple before we got to this point.
|
||||
* xwait is committed but if xwait had just marked the tuple for
|
||||
* update then some other xaction could update this tuple before
|
||||
* we got to this point.
|
||||
*/
|
||||
if (tp.t_data->t_xmax != xwait)
|
||||
goto l1;
|
||||
@ -1396,10 +1400,11 @@ l2:
|
||||
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
|
||||
if (TransactionIdDidAbort(xwait))
|
||||
goto l2;
|
||||
|
||||
/*
|
||||
* xwait is committed but if xwait had just marked
|
||||
* the tuple for update then some other xaction could
|
||||
* update this tuple before we got to this point.
|
||||
* xwait is committed but if xwait had just marked the tuple for
|
||||
* update then some other xaction could update this tuple before
|
||||
* we got to this point.
|
||||
*/
|
||||
if (oldtup.t_data->t_xmax != xwait)
|
||||
goto l2;
|
||||
@ -1521,10 +1526,11 @@ l3:
|
||||
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
|
||||
if (TransactionIdDidAbort(xwait))
|
||||
goto l3;
|
||||
|
||||
/*
|
||||
* xwait is committed but if xwait had just marked
|
||||
* the tuple for update then some other xaction could
|
||||
* update this tuple before we got to this point.
|
||||
* xwait is committed but if xwait had just marked the tuple for
|
||||
* update then some other xaction could update this tuple before
|
||||
* we got to this point.
|
||||
*/
|
||||
if (tuple->t_data->t_xmax != xwait)
|
||||
goto l3;
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Id: hio.c,v 1.30 2000/03/17 02:36:02 tgl Exp $
|
||||
* $Id: hio.c,v 1.31 2000/04/12 17:14:45 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -111,8 +111,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
|
||||
len = MAXALIGN(tuple->t_len); /* be conservative */
|
||||
|
||||
/*
|
||||
* If we're gonna fail for oversize tuple, do it right away...
|
||||
* this code should go away eventually.
|
||||
* If we're gonna fail for oversize tuple, do it right away... this
|
||||
* code should go away eventually.
|
||||
*/
|
||||
if (len > MaxTupleSize)
|
||||
elog(ERROR, "Tuple is too big: size %u, max size %ld",
|
||||
@ -136,8 +136,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
|
||||
lastblock = RelationGetNumberOfBlocks(relation);
|
||||
|
||||
/*
|
||||
* Get the last existing page --- may need to create the first one
|
||||
* if this is a virgin relation.
|
||||
* Get the last existing page --- may need to create the first one if
|
||||
* this is a virgin relation.
|
||||
*/
|
||||
if (lastblock == 0)
|
||||
{
|
||||
@ -168,12 +168,14 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
|
||||
|
||||
if (len > PageGetFreeSpace(pageHeader))
|
||||
{
|
||||
|
||||
/*
|
||||
* BUG: by elog'ing here, we leave the new buffer locked and not
|
||||
* marked dirty, which may result in an invalid page header
|
||||
* being left on disk. But we should not get here given the
|
||||
* test at the top of the routine, and the whole deal should
|
||||
* go away when we implement tuple splitting anyway...
|
||||
* BUG: by elog'ing here, we leave the new buffer locked and
|
||||
* not marked dirty, which may result in an invalid page
|
||||
* header being left on disk. But we should not get here
|
||||
* given the test at the top of the routine, and the whole
|
||||
* deal should go away when we implement tuple splitting
|
||||
* anyway...
|
||||
*/
|
||||
elog(ERROR, "Tuple is too big: size %u", len);
|
||||
}
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.2 2000/01/20 21:50:59 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.3 2000/04/12 17:14:45 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
@ -30,14 +30,14 @@
|
||||
#ifdef TUPLE_TOASTER_ACTIVE
|
||||
|
||||
void
|
||||
heap_tuple_toast_attrs (Relation rel, HeapTuple newtup, HeapTuple oldtup)
|
||||
heap_tuple_toast_attrs(Relation rel, HeapTuple newtup, HeapTuple oldtup)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
varattrib *
|
||||
heap_tuple_untoast_attr (varattrib *attr)
|
||||
heap_tuple_untoast_attr(varattrib * attr)
|
||||
{
|
||||
elog(ERROR, "heap_tuple_untoast_attr() called");
|
||||
}
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.24 2000/03/14 23:52:01 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.25 2000/04/12 17:14:47 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* many of the old access method routines have been turned into
|
||||
@ -114,7 +114,10 @@ RelationGetIndexScan(Relation relation,
|
||||
ItemPointerSetInvalid(&scan->currentMarkData);
|
||||
ItemPointerSetInvalid(&scan->nextMarkData);
|
||||
|
||||
/* mark cached function lookup data invalid; it will be set on first use */
|
||||
/*
|
||||
* mark cached function lookup data invalid; it will be set on first
|
||||
* use
|
||||
*/
|
||||
scan->fn_getnext.fn_oid = InvalidOid;
|
||||
|
||||
if (numberOfKeys > 0)
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.41 2000/03/14 23:52:01 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.42 2000/04/12 17:14:47 momjian Exp $
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
* index_open - open an index relation by relationId
|
||||
@ -129,7 +129,7 @@ index_open(Oid relationId)
|
||||
|
||||
r = RelationIdGetRelation(relationId);
|
||||
|
||||
if (! RelationIsValid(r))
|
||||
if (!RelationIsValid(r))
|
||||
elog(ERROR, "Index %u does not exist", relationId);
|
||||
|
||||
if (r->rd_rel->relkind != RELKIND_INDEX)
|
||||
@ -151,7 +151,7 @@ index_openr(char *relationName)
|
||||
|
||||
r = RelationNameGetRelation(relationName);
|
||||
|
||||
if (! RelationIsValid(r))
|
||||
if (!RelationIsValid(r))
|
||||
elog(ERROR, "Index '%s' does not exist", relationName);
|
||||
|
||||
if (r->rd_rel->relkind != RELKIND_INDEX)
|
||||
|
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.41 2000/02/18 09:29:16 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.42 2000/04/12 17:14:47 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.33 2000/02/10 19:51:38 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.34 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* These functions are stored in pg_amproc. For each operator class
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.56 2000/03/17 02:36:03 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.57 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -267,21 +267,20 @@ _bt_insertonpg(Relation rel,
|
||||
itemsz = IndexTupleDSize(btitem->bti_itup)
|
||||
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
|
||||
|
||||
itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do
|
||||
* this but we need to be
|
||||
* consistent */
|
||||
itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but
|
||||
* we need to be consistent */
|
||||
|
||||
/*
|
||||
* Check whether the item can fit on a btree page at all.
|
||||
* (Eventually, we ought to try to apply TOAST methods if not.)
|
||||
* We actually need to be able to fit three items on every page,
|
||||
* so restrict any one item to 1/3 the per-page available space.
|
||||
* Note that at this point, itemsz doesn't include the ItemId.
|
||||
* Check whether the item can fit on a btree page at all. (Eventually,
|
||||
* we ought to try to apply TOAST methods if not.) We actually need to
|
||||
* be able to fit three items on every page, so restrict any one item
|
||||
* to 1/3 the per-page available space. Note that at this point,
|
||||
* itemsz doesn't include the ItemId.
|
||||
*/
|
||||
if (itemsz > (PageGetPageSize(page)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData))
|
||||
if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
|
||||
elog(ERROR, "btree: index item size %u exceeds maximum %lu",
|
||||
itemsz,
|
||||
(PageGetPageSize(page)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData));
|
||||
(PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
|
||||
|
||||
/*
|
||||
* If we have to insert item on the leftmost page which is the first
|
||||
@ -415,8 +414,8 @@ _bt_insertonpg(Relation rel,
|
||||
bool is_root = lpageop->btpo_flags & BTP_ROOT;
|
||||
|
||||
/*
|
||||
* Instead of splitting leaf page in the chain of duplicates
|
||||
* by new duplicate, insert it into some right page.
|
||||
* Instead of splitting leaf page in the chain of duplicates by
|
||||
* new duplicate, insert it into some right page.
|
||||
*/
|
||||
if ((lpageop->btpo_flags & BTP_CHAIN) &&
|
||||
(lpageop->btpo_flags & BTP_LEAF) && keys_equal)
|
||||
@ -424,6 +423,7 @@ _bt_insertonpg(Relation rel,
|
||||
rbuf = _bt_getbuf(rel, lpageop->btpo_next, BT_WRITE);
|
||||
rpage = BufferGetPage(rbuf);
|
||||
rpageop = (BTPageOpaque) PageGetSpecialPointer(rpage);
|
||||
|
||||
/*
|
||||
* some checks
|
||||
*/
|
||||
@ -442,6 +442,7 @@ _bt_insertonpg(Relation rel,
|
||||
BTGreaterStrategyNumber))
|
||||
elog(FATAL, "btree: hikey is out of order");
|
||||
else if (rpageop->btpo_flags & BTP_CHAIN)
|
||||
|
||||
/*
|
||||
* If hikey > scankey then it's last page in chain and
|
||||
* BTP_CHAIN must be OFF
|
||||
@ -450,9 +451,7 @@ _bt_insertonpg(Relation rel,
|
||||
}
|
||||
else
|
||||
/* rightmost page */
|
||||
{
|
||||
Assert(!(rpageop->btpo_flags & BTP_CHAIN));
|
||||
}
|
||||
_bt_relbuf(rel, buf, BT_WRITE);
|
||||
return (_bt_insertonpg(rel, rbuf, stack, keysz,
|
||||
scankey, btitem, afteritem));
|
||||
|
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.35 2000/01/26 05:55:58 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.36 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Postgres btree pages look like ordinary relation pages. The opaque
|
||||
|
@ -12,7 +12,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.53 2000/02/18 09:29:54 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.54 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -185,7 +185,7 @@ btbuild(Relation heap,
|
||||
#ifndef OMIT_PARTIAL_INDEX
|
||||
/* SetSlotContents(slot, htup); */
|
||||
slot->val = htup;
|
||||
if (! ExecQual((List *) pred, econtext, false))
|
||||
if (!ExecQual((List *) pred, econtext, false))
|
||||
continue;
|
||||
#endif /* OMIT_PARTIAL_INDEX */
|
||||
}
|
||||
@ -276,9 +276,9 @@ btbuild(Relation heap,
|
||||
}
|
||||
|
||||
/*
|
||||
* if we are doing bottom-up btree build, finish the build by
|
||||
* (1) completing the sort of the spool file, (2) inserting the
|
||||
* sorted tuples into btree pages and (3) building the upper levels.
|
||||
* if we are doing bottom-up btree build, finish the build by (1)
|
||||
* completing the sort of the spool file, (2) inserting the sorted
|
||||
* tuples into btree pages and (3) building the upper levels.
|
||||
*/
|
||||
if (usefast)
|
||||
{
|
||||
@ -298,13 +298,13 @@ btbuild(Relation heap,
|
||||
/*
|
||||
* Since we just counted the tuples in the heap, we update its stats
|
||||
* in pg_class to guarantee that the planner takes advantage of the
|
||||
* index we just created. But, only update statistics during
|
||||
* normal index definitions, not for indices on system catalogs
|
||||
* created during bootstrap processing. We must close the relations
|
||||
* before updating statistics to guarantee that the relcache entries
|
||||
* are flushed when we increment the command counter in UpdateStats().
|
||||
* But we do not release any locks on the relations; those will be
|
||||
* held until end of transaction.
|
||||
* index we just created. But, only update statistics during normal
|
||||
* index definitions, not for indices on system catalogs created
|
||||
* during bootstrap processing. We must close the relations before
|
||||
* updating statistics to guarantee that the relcache entries are
|
||||
* flushed when we increment the command counter in UpdateStats(). But
|
||||
* we do not release any locks on the relations; those will be held
|
||||
* until end of transaction.
|
||||
*/
|
||||
if (IsNormalProcessingMode())
|
||||
{
|
||||
@ -314,9 +314,10 @@ btbuild(Relation heap,
|
||||
|
||||
heap_close(heap, NoLock);
|
||||
index_close(index);
|
||||
|
||||
/*
|
||||
UpdateStats(hrelid, nhtups, true);
|
||||
UpdateStats(irelid, nitups, false);
|
||||
* UpdateStats(hrelid, nhtups, true); UpdateStats(irelid, nitups,
|
||||
* false);
|
||||
*/
|
||||
UpdateStats(hrelid, nhtups, inplace);
|
||||
UpdateStats(irelid, nitups, inplace);
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.30 2000/01/26 05:55:58 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.31 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
*
|
||||
* NOTES
|
||||
@ -52,13 +52,16 @@ static void _bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offn
|
||||
void
|
||||
AtEOXact_nbtree(void)
|
||||
{
|
||||
/* Note: these actions should only be necessary during xact abort;
|
||||
* but they can't hurt during a commit.
|
||||
|
||||
/*
|
||||
* Note: these actions should only be necessary during xact abort; but
|
||||
* they can't hurt during a commit.
|
||||
*/
|
||||
|
||||
/* Reset the active-scans list to empty.
|
||||
* We do not need to free the list elements, because they're all
|
||||
* palloc()'d, so they'll go away at end of transaction anyway.
|
||||
/*
|
||||
* Reset the active-scans list to empty. We do not need to free the
|
||||
* list elements, because they're all palloc()'d, so they'll go away
|
||||
* at end of transaction anyway.
|
||||
*/
|
||||
BTScans = NULL;
|
||||
|
||||
@ -96,7 +99,7 @@ _bt_dropscan(IndexScanDesc scan)
|
||||
last = chk;
|
||||
|
||||
if (chk == (BTScanList) NULL)
|
||||
elog(ERROR, "btree scan list trashed; can't find 0x%p", (void*)scan);
|
||||
elog(ERROR, "btree scan list trashed; can't find 0x%p", (void *) scan);
|
||||
|
||||
if (last == (BTScanList) NULL)
|
||||
BTScans = chk->btsl_next;
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.58 2000/03/17 02:36:04 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.59 2000/04/12 17:14:49 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -299,9 +299,7 @@ _bt_skeycmp(Relation rel,
|
||||
compare = -1; /* not-NULL key "<" NULL datum */
|
||||
}
|
||||
else
|
||||
{
|
||||
compare = (int32) FMGR_PTR2(&entry->sk_func, keyDatum, attrDatum);
|
||||
}
|
||||
|
||||
if (compare != 0)
|
||||
break; /* done when we find unequal attributes */
|
||||
@ -368,19 +366,18 @@ _bt_binsrch(Relation rel,
|
||||
|
||||
/*
|
||||
* If there are no keys on the page, return the first available slot.
|
||||
* Note this covers two cases: the page is really empty (no keys),
|
||||
* or it contains only a high key. The latter case is possible after
|
||||
* Note this covers two cases: the page is really empty (no keys), or
|
||||
* it contains only a high key. The latter case is possible after
|
||||
* vacuuming.
|
||||
*/
|
||||
if (high < low)
|
||||
return low;
|
||||
|
||||
/*
|
||||
* Binary search to find the first key on the page >= scan key.
|
||||
* Loop invariant: all slots before 'low' are < scan key, all slots
|
||||
* at or after 'high' are >= scan key. Also, haveEq is true if the
|
||||
* tuple at 'high' is == scan key.
|
||||
* We can fall out when high == low.
|
||||
* Binary search to find the first key on the page >= scan key. Loop
|
||||
* invariant: all slots before 'low' are < scan key, all slots at or
|
||||
* after 'high' are >= scan key. Also, haveEq is true if the tuple at
|
||||
* 'high' is == scan key. We can fall out when high == low.
|
||||
*/
|
||||
high++; /* establish the loop invariant for high */
|
||||
haveEq = false;
|
||||
@ -388,6 +385,7 @@ _bt_binsrch(Relation rel,
|
||||
while (high > low)
|
||||
{
|
||||
OffsetNumber mid = low + ((high - low) / 2);
|
||||
|
||||
/* We have low <= mid < high, so mid points at a real slot */
|
||||
|
||||
result = _bt_compare(rel, itupdesc, page, keysz, scankey, mid);
|
||||
@ -443,18 +441,20 @@ _bt_binsrch(Relation rel,
|
||||
|
||||
if (haveEq)
|
||||
{
|
||||
|
||||
/*
|
||||
* There is an equal key. We return either the first equal key
|
||||
* (which we just found), or the last lesser key.
|
||||
*
|
||||
* We need not check srchtype != BT_DESCENT here, since if that
|
||||
* is true then natts == keysz by assumption.
|
||||
* We need not check srchtype != BT_DESCENT here, since if that is
|
||||
* true then natts == keysz by assumption.
|
||||
*/
|
||||
if (natts == keysz)
|
||||
return low; /* return first equal key */
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
/*
|
||||
* There is no equal key. We return either the first greater key
|
||||
* (which we just found), or the last lesser key.
|
||||
@ -524,6 +524,7 @@ _bt_compare(Relation rel,
|
||||
&& P_LEFTMOST(opaque)
|
||||
&& offnum == P_HIKEY)
|
||||
{
|
||||
|
||||
/*
|
||||
* we just have to believe that this will only be called with
|
||||
* offnum == P_HIKEY when P_HIKEY is the OffsetNumber of the first
|
||||
@ -704,7 +705,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
||||
ScanKey scankeys = 0;
|
||||
int keysCount = 0;
|
||||
int *nKeyIs = 0;
|
||||
int i, j;
|
||||
int i,
|
||||
j;
|
||||
StrategyNumber strat_total;
|
||||
|
||||
rel = scan->relation;
|
||||
@ -730,8 +732,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
||||
{
|
||||
AttrNumber attno;
|
||||
|
||||
nKeyIs = (int *)palloc(so->numberOfKeys*sizeof(int));
|
||||
for (i=0; i < so->numberOfKeys; i++)
|
||||
nKeyIs = (int *) palloc(so->numberOfKeys * sizeof(int));
|
||||
for (i = 0; i < so->numberOfKeys; i++)
|
||||
{
|
||||
attno = so->keyData[i].sk_attno;
|
||||
if (attno == keysCount)
|
||||
@ -748,7 +750,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
||||
}
|
||||
if (ScanDirectionIsBackward(dir) &&
|
||||
(strat == BTLessStrategyNumber ||
|
||||
strat == BTLessEqualStrategyNumber) )
|
||||
strat == BTLessEqualStrategyNumber))
|
||||
{
|
||||
nKeyIs[keysCount++] = i;
|
||||
strat_total = strat;
|
||||
@ -758,7 +760,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
||||
}
|
||||
if (ScanDirectionIsForward(dir) &&
|
||||
(strat == BTGreaterStrategyNumber ||
|
||||
strat == BTGreaterEqualStrategyNumber) )
|
||||
strat == BTGreaterEqualStrategyNumber))
|
||||
{
|
||||
nKeyIs[keysCount++] = i;
|
||||
strat_total = strat;
|
||||
@ -794,8 +796,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
||||
* at the right place in the scan.
|
||||
*/
|
||||
/* _bt_orderkeys disallows it, but it's place to add some code latter */
|
||||
scankeys = (ScanKey)palloc(keysCount*sizeof(ScanKeyData));
|
||||
for (i=0; i < keysCount; i++)
|
||||
scankeys = (ScanKey) palloc(keysCount * sizeof(ScanKeyData));
|
||||
for (i = 0; i < keysCount; i++)
|
||||
{
|
||||
j = nKeyIs[i];
|
||||
if (so->keyData[j].sk_flags & SK_ISNULL)
|
||||
@ -805,11 +807,12 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
|
||||
elog(ERROR, "_bt_first: btree doesn't support is(not)null, yet");
|
||||
return ((RetrieveIndexResult) NULL);
|
||||
}
|
||||
proc = index_getprocid(rel, i+1, BTORDER_PROC);
|
||||
ScanKeyEntryInitialize(scankeys+i, so->keyData[j].sk_flags,
|
||||
i+1, proc, so->keyData[j].sk_argument);
|
||||
proc = index_getprocid(rel, i + 1, BTORDER_PROC);
|
||||
ScanKeyEntryInitialize(scankeys + i, so->keyData[j].sk_flags,
|
||||
i + 1, proc, so->keyData[j].sk_argument);
|
||||
}
|
||||
if (nKeyIs) pfree(nKeyIs);
|
||||
if (nKeyIs)
|
||||
pfree(nKeyIs);
|
||||
|
||||
stack = _bt_search(rel, keysCount, scankeys, &buf);
|
||||
_bt_freestack(stack);
|
||||
|
@ -28,7 +28,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.51 2000/02/18 06:32:39 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.52 2000/04/12 17:14:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -99,9 +99,9 @@ _bt_spoolinit(Relation index, bool isunique)
btspool->sortstate = tuplesort_begin_index(index, isunique, false);

/*
* Currently, tuplesort provides sort functions on IndexTuples.
* If we kept anything in a BTItem other than a regular IndexTuple,
* we'd need to modify tuplesort to understand BTItems as such.
* Currently, tuplesort provides sort functions on IndexTuples. If we
* kept anything in a BTItem other than a regular IndexTuple, we'd
* need to modify tuplesort to understand BTItems as such.
*/
Assert(sizeof(BTItemData) == sizeof(IndexTupleData));

@ -306,20 +306,20 @@ _bt_buildadd(Relation index, Size keysz, ScanKey scankey,
btisz = MAXALIGN(btisz);

/*
* Check whether the item can fit on a btree page at all.
* (Eventually, we ought to try to apply TOAST methods if not.)
* We actually need to be able to fit three items on every page,
* so restrict any one item to 1/3 the per-page available space.
* Note that at this point, btisz doesn't include the ItemId.
* Check whether the item can fit on a btree page at all. (Eventually,
* we ought to try to apply TOAST methods if not.) We actually need to
* be able to fit three items on every page, so restrict any one item
* to 1/3 the per-page available space. Note that at this point, btisz
* doesn't include the ItemId.
*
* NOTE: similar code appears in _bt_insertonpg() to defend against
* oversize items being inserted into an already-existing index.
* But during creation of an index, we don't go through there.
* oversize items being inserted into an already-existing index. But
* during creation of an index, we don't go through there.
*/
if (btisz > (PageGetPageSize(npage)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData))
if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %d exceeds maximum %ld",
btisz,
(PageGetPageSize(npage)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData));
(PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));

if (pgspc < btisz)
{
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.35 2000/02/18 06:32:39 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.36 2000/04/12 17:14:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -141,7 +141,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
uint16 numberOfKeys = so->numberOfKeys;
uint16 new_numberOfKeys = 0;
AttrNumber attno = 1;
bool equalStrategyEnd, underEqualStrategy;
bool equalStrategyEnd,
underEqualStrategy;

if (numberOfKeys < 1)
return;
@ -194,6 +195,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
elog(ERROR, "_bt_orderkeys: key(s) for attribute %d missed", attno + 1);

underEqualStrategy = (!equalStrategyEnd);

/*
* If = has been specified, no other key will be used. In case
* of key < 2 && key == 1 and so on we have to set qual_ok to
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.44 2000/03/01 05:39:23 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.45 2000/04/12 17:14:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -181,7 +181,7 @@ rtbuild(Relation heap,
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
if (! ExecQual((List *) pred, econtext, false))
if (!ExecQual((List *) pred, econtext, false))
continue;
#endif /* OMIT_PARTIAL_INDEX */
}
@ -249,13 +249,13 @@ rtbuild(Relation heap,
/*
* Since we just counted the tuples in the heap, we update its stats
* in pg_class to guarantee that the planner takes advantage of the
* index we just created. But, only update statistics during
* normal index definitions, not for indices on system catalogs
* created during bootstrap processing. We must close the relations
* before updating statistics to guarantee that the relcache entries
* are flushed when we increment the command counter in UpdateStats().
* But we do not release any locks on the relations; those will be
* held until end of transaction.
* index we just created. But, only update statistics during normal
* index definitions, not for indices on system catalogs created
* during bootstrap processing. We must close the relations before
* updating statistics to guarantee that the relcache entries are
* flushed when we increment the command counter in UpdateStats(). But
* we do not release any locks on the relations; those will be held
* until end of transaction.
*/
if (IsNormalProcessingMode())
{
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.31 2000/01/26 05:56:00 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.32 2000/04/12 17:14:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -268,7 +268,7 @@ rtdropscan(IndexScanDesc s)
prev = l;

if (l == (RTScanList) NULL)
elog(ERROR, "rtree scan list corrupted -- cannot find 0x%p", (void*)s);
elog(ERROR, "rtree scan list corrupted -- cannot find 0x%p", (void *) s);

if (prev == (RTScanList) NULL)
RTScans = l->rtsl_next;
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.33 2000/01/26 05:56:03 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.34 2000/04/12 17:14:52 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@ -162,6 +162,7 @@ TransactionLogTest(TransactionId transactionId, /* transaction id to test */

if (!fail)
{

/*
* DO NOT cache status for transactions in unknown state !!!
*/
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.27 2000/03/31 02:43:31 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.28 2000/04/12 17:14:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -331,8 +331,8 @@ ReadNewTransactionId(TransactionId *xid)
SpinAcquire(OidGenLockId); /* not good for concurrency... */

/*
* Note that we don't check is ShmemVariableCache->xid_count equal
* to 0 or not. This will work as long as we don't call
* Note that we don't check is ShmemVariableCache->xid_count equal to
* 0 or not. This will work as long as we don't call
* ReadNewTransactionId() before GetNewTransactionId().
*/
if (ShmemVariableCache->nextXid == 0)
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.63 2000/04/09 04:43:16 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.64 2000/04/12 17:14:53 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@ -517,8 +517,8 @@ CommandCounterIncrement()
CurrentTransactionStateData.scanCommandId = CurrentTransactionStateData.commandId;

/*
* make cache changes visible to me. AtCommit_LocalCache()
* instead of AtCommit_Cache() is called here.
* make cache changes visible to me. AtCommit_LocalCache() instead of
* AtCommit_Cache() is called here.
*/
AtCommit_LocalCache();
AtStart_Cache();
@ -628,15 +628,14 @@ RecordTransactionCommit()
xid = GetCurrentTransactionId();

/*
* flush the buffer manager pages. Note: if we have stable
* main memory, dirty shared buffers are not flushed
* plai 8/7/90
* flush the buffer manager pages. Note: if we have stable main
* memory, dirty shared buffers are not flushed plai 8/7/90
*/
leak = BufferPoolCheckLeak();

/*
* If no one shared buffer was changed by this transaction then
* we don't flush shared buffers and don't record commit status.
* If no one shared buffer was changed by this transaction then we
* don't flush shared buffers and don't record commit status.
*/
if (SharedBufferChanged)
{
@ -645,8 +644,8 @@ RecordTransactionCommit()
ResetBufferPool(true);

/*
* have the transaction access methods record the status
* of this transaction id in the pg_log relation.
* have the transaction access methods record the status of this
* transaction id in the pg_log relation.
*/
TransactionIdCommit(xid);

@ -752,9 +751,9 @@ RecordTransactionAbort()
xid = GetCurrentTransactionId();

/*
* Have the transaction access methods record the status of
* this transaction id in the pg_log relation. We skip it
* if no one shared buffer was changed by this transaction.
* Have the transaction access methods record the status of this
* transaction id in the pg_log relation. We skip it if no one shared
* buffer was changed by this transaction.
*/
if (SharedBufferChanged && !TransactionIdDidCommit(xid))
TransactionIdAbort(xid);
@ -965,13 +964,13 @@ CommitTransaction()
RecordTransactionCommit();

/*
* Let others know about no transaction in progress by me.
* Note that this must be done _before_ releasing locks we hold
* and SpinAcquire(SInvalLock) is required: UPDATE with xid 0 is
* blocked by xid 1' UPDATE, xid 1 is doing commit while xid 2
* gets snapshot - if xid 2' GetSnapshotData sees xid 1 as running
* then it must see xid 0 as running as well or it will see two
* tuple versions - one deleted by xid 1 and one inserted by xid 0.
* Let others know about no transaction in progress by me. Note that
* this must be done _before_ releasing locks we hold and
* SpinAcquire(SInvalLock) is required: UPDATE with xid 0 is blocked
* by xid 1' UPDATE, xid 1 is doing commit while xid 2 gets snapshot -
* if xid 2' GetSnapshotData sees xid 1 as running then it must see
* xid 0 as running as well or it will see two tuple versions - one
* deleted by xid 1 and one inserted by xid 0.
*/
if (MyProc != (PROC *) NULL)
{
@ -995,7 +994,7 @@ CommitTransaction()
* ----------------
*/
s->state = TRANS_DEFAULT;
SharedBufferChanged = false; /* safest place to do it */
SharedBufferChanged = false;/* safest place to do it */

}

@ -1070,7 +1069,7 @@ AbortTransaction()
* ----------------
*/
s->state = TRANS_DEFAULT;
SharedBufferChanged = false; /* safest place to do it */
SharedBufferChanged = false;/* safest place to do it */
}

/* --------------------------------
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.12 2000/03/20 07:25:39 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.13 2000/04/12 17:14:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -121,16 +121,16 @@ typedef struct ControlFileData
DBState state; /* */

/*
* this data is used to make sure that configuration of this DB
* is compatible with the current backend
* this data is used to make sure that configuration of this DB is
* compatible with the current backend
*/
uint32 blcksz; /* block size for this DB */
uint32 relseg_size; /* blocks per segment of large relation */
uint32 catalog_version_no; /* internal version number */

/*
* MORE DATA FOLLOWS AT THE END OF THIS STRUCTURE
* - locations of data dirs
* MORE DATA FOLLOWS AT THE END OF THIS STRUCTURE - locations of data
* dirs
*/
} ControlFileData;

@ -242,7 +242,7 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
bool do_lgwr = true;
unsigned i = 0;

for ( ; ; )
for (;;)
{
/* try to read LgwrResult while waiting for insert lock */
if (!TAS(&(XLogCtl->info_lck)))
@ -250,6 +250,7 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
LgwrRqst = XLogCtl->LgwrRqst;
LgwrResult = XLogCtl->LgwrResult;
S_UNLOCK(&(XLogCtl->info_lck));

/*
* If cache is half filled then try to acquire lgwr lock
* and do LGWR work, but only once.
@ -282,7 +283,7 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
}
}

freespace = ((char*) Insert->currpage) + BLCKSZ - Insert->currpos;
freespace = ((char *) Insert->currpage) + BLCKSZ - Insert->currpos;
if (freespace < SizeOfXLogRecord)
{
curridx = NextBufIdx(Insert->curridx);
@ -296,7 +297,7 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
curridx = Insert->curridx;

freespace -= SizeOfXLogRecord;
record = (XLogRecord*) Insert->currpos;
record = (XLogRecord *) Insert->currpos;
record->xl_prev = Insert->PrevRecord;
if (rmid != RM_XLOG_ID)
record->xl_xact_prev = MyLastRecPtr;
@ -312,7 +313,7 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
RecPtr.xlogid = XLogCtl->xlblocks[curridx].xlogid;
RecPtr.xrecoff =
XLogCtl->xlblocks[curridx].xrecoff - BLCKSZ +
Insert->currpos - ((char*) Insert->currpage);
Insert->currpos - ((char *) Insert->currpage);
if (MyLastRecPtr.xrecoff == 0 && rmid != RM_XLOG_ID)
{
SpinAcquire(SInvalLock);
@ -339,8 +340,8 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
buf += wlen;
Insert->currpos += wlen;
}
Insert->currpos = ((char*)Insert->currpage) +
DOUBLEALIGN(Insert->currpos - ((char*)Insert->currpage));
Insert->currpos = ((char *) Insert->currpage) +
DOUBLEALIGN(Insert->currpos - ((char *) Insert->currpage));
len = hdrlen + buflen;
}

@ -360,7 +361,7 @@ nbuf:
}
freespace = BLCKSZ - SizeOfXLogPHD - SizeOfXLogSubRecord;
Insert->currpage->xlp_info |= XLP_FIRST_IS_SUBRECORD;
subrecord = (XLogSubRecord*) Insert->currpos;
subrecord = (XLogSubRecord *) Insert->currpos;
Insert->currpos += SizeOfXLogSubRecord;
if (hdrlen > freespace)
{
@ -400,15 +401,17 @@ nbuf:
RecPtr.xlogid = XLogCtl->xlblocks[curridx].xlogid;
RecPtr.xrecoff = XLogCtl->xlblocks[curridx].xrecoff -
BLCKSZ + SizeOfXLogPHD + subrecord->xl_len;
Insert->currpos = ((char*)Insert->currpage) +
DOUBLEALIGN(Insert->currpos - ((char*)Insert->currpage));
Insert->currpos = ((char *) Insert->currpage) +
DOUBLEALIGN(Insert->currpos - ((char *) Insert->currpage));
}
freespace = ((char*) Insert->currpage) + BLCKSZ - Insert->currpos;
freespace = ((char *) Insert->currpage) + BLCKSZ - Insert->currpos;

/*
* All done! Update global LgwrRqst if some block was filled up.
*/
if (freespace < SizeOfXLogRecord)
updrqst = true; /* curridx is filled and available for writing out */
updrqst = true; /* curridx is filled and available for
* writing out */
else
curridx = PrevBufIdx(curridx);
LgwrRqst.Write = XLogCtl->xlblocks[curridx];
@ -419,7 +422,7 @@ nbuf:
{
unsigned i = 0;

for ( ; ; )
for (;;)
{
if (!TAS(&(XLogCtl->info_lck)))
{
@ -447,7 +450,7 @@ XLogFlush(XLogRecPtr record)
if (XLByteLE(record, LgwrResult.Flush))
return;
WriteRqst = LgwrRqst.Write;
for ( ; ; )
for (;;)
{
/* try to read LgwrResult */
if (!TAS(&(XLogCtl->info_lck)))
@ -472,7 +475,7 @@ XLogFlush(XLogRecPtr record)
{
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint32 freespace =
((char*) Insert->currpage) + BLCKSZ - Insert->currpos;
((char *) Insert->currpage) + BLCKSZ - Insert->currpos;

if (freespace < SizeOfXLogRecord) /* buffer is full */
{
@ -486,7 +489,7 @@ XLogFlush(XLogRecPtr record)
memset(usebuf + BLCKSZ - freespace, 0, freespace);
WriteRqst = XLogCtl->xlblocks[Insert->curridx];
WriteRqst.xrecoff = WriteRqst.xrecoff - BLCKSZ +
Insert->currpos - ((char*) Insert->currpage);
Insert->currpos - ((char *) Insert->currpage);
}
S_UNLOCK(&(XLogCtl->insert_lck));
force_lgwr = true;
@ -540,7 +543,7 @@ XLogFlush(XLogRecPtr record)
logId, logSeg, errno);
LgwrResult.Flush = LgwrResult.Write;

for (i = 0; ; )
for (i = 0;;)
{
if (!TAS(&(XLogCtl->info_lck)))
{
@ -567,7 +570,7 @@ GetFreeXLBuffer()
uint16 curridx = NextBufIdx(Insert->curridx);

LgwrRqst.Write = XLogCtl->xlblocks[Insert->curridx];
for ( ; ; )
for (;;)
{
if (!TAS(&(XLogCtl->info_lck)))
{
@ -581,6 +584,7 @@ GetFreeXLBuffer()
return;
}
}

/*
* LgwrResult lock is busy or un-updated. Try to acquire lgwr lock
* and write full blocks.
@ -595,9 +599,10 @@ GetFreeXLBuffer()
InitXLBuffer(curridx);
return;
}

/*
* Have to write buffers while holding insert lock -
* not good...
* Have to write buffers while holding insert lock - not
* good...
*/
XLogWrite(NULL);
S_UNLOCK(&(XLogCtl->lgwr_lck));
@ -618,7 +623,7 @@ XLogWrite(char *buffer)
uint32 wcnt = 0;
int i = 0;

for ( ; XLByteLT(LgwrResult.Write, LgwrRqst.Write); )
for (; XLByteLT(LgwrResult.Write, LgwrRqst.Write);)
{
LgwrResult.Write = XLogCtl->xlblocks[Write->curridx];
if (LgwrResult.Write.xlogid != logId ||
@ -675,7 +680,7 @@ XLogWrite(char *buffer)
if (logOff != (LgwrResult.Write.xrecoff - BLCKSZ) % XLogSegSize)
{
logOff = (LgwrResult.Write.xrecoff - BLCKSZ) % XLogSegSize;
if (lseek(logFile, (off_t)logOff, SEEK_SET) < 0)
if (lseek(logFile, (off_t) logOff, SEEK_SET) < 0)
elog(STOP, "Lseek(logfile %u seg %u off %u) failed: %d",
logId, logSeg, logOff, errno);
}
@ -709,7 +714,7 @@ XLogWrite(char *buffer)
LgwrResult.Flush = LgwrResult.Write;
}

for ( ; ; )
for (;;)
{
if (!TAS(&(XLogCtl->info_lck)))
{
@ -735,9 +740,9 @@ XLogFileInit(uint32 log, uint32 seg)

tryAgain:
#ifndef __CYGWIN__
fd = open(path, O_RDWR|O_CREAT|O_EXCL, S_IRUSR|S_IWUSR);
fd = open(path, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
#else
fd = open(path, O_RDWR|O_CREAT|O_EXCL|O_BINARY, S_IRUSR|S_IWUSR);
fd = open(path, O_RDWR | O_CREAT | O_EXCL | O_BINARY, S_IRUSR | S_IWUSR);
#endif
if (fd < 0 && (errno == EMFILE || errno == ENFILE))
{
@ -767,7 +772,7 @@ tryAgain:
elog(STOP, "Lseek(logfile %u seg %u off %u) failed: %d",
log, seg, 0, errno);

return(fd);
return (fd);
}

static int
@ -804,10 +809,10 @@ tryAgain:
logId, logSeg, errno);
}

return(fd);
return (fd);
}

static XLogRecord*
static XLogRecord *
ReadRecord(XLogRecPtr *RecPtr, char *buffer)
{
XLogRecord *record;
@ -856,28 +861,28 @@ ReadRecord(XLogRecPtr *RecPtr, char *buffer)
if (noBlck || readOff != (RecPtr->xrecoff % XLogSegSize) / BLCKSZ)
{
readOff = (RecPtr->xrecoff % XLogSegSize) / BLCKSZ;
if (lseek(readFile, (off_t)(readOff * BLCKSZ), SEEK_SET) < 0)
if (lseek(readFile, (off_t) (readOff * BLCKSZ), SEEK_SET) < 0)
elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
readId, readSeg, readOff, errno);
if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
elog(STOP, "ReadRecord: read(logfile %u seg %u off %u) failed: %d",
readId, readSeg, readOff, errno);
if (((XLogPageHeader)readBuf)->xlp_magic != XLOG_PAGE_MAGIC)
if (((XLogPageHeader) readBuf)->xlp_magic != XLOG_PAGE_MAGIC)
{
elog(emode, "ReadRecord: invalid magic number %u in logfile %u seg %u off %u",
((XLogPageHeader)readBuf)->xlp_magic,
((XLogPageHeader) readBuf)->xlp_magic,
readId, readSeg, readOff);
goto next_record_is_invalid;
}
}
if ((((XLogPageHeader)readBuf)->xlp_info & XLP_FIRST_IS_SUBRECORD) &&
if ((((XLogPageHeader) readBuf)->xlp_info & XLP_FIRST_IS_SUBRECORD) &&
RecPtr->xrecoff % BLCKSZ == SizeOfXLogPHD)
{
elog(emode, "ReadRecord: subrecord is requested by (%u, %u)",
RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
record = (XLogRecord*)((char*) readBuf + RecPtr->xrecoff % BLCKSZ);
record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);

got_record:;
if (record->xl_len == 0 || record->xl_len >
@ -906,9 +911,9 @@ got_record:;
goto next_record_is_invalid;
}
memcpy(buffer, record, record->xl_len + SizeOfXLogRecord);
record = (XLogRecord*) buffer;
record = (XLogRecord *) buffer;
buffer += record->xl_len + SizeOfXLogRecord;
for ( ; ; )
for (;;)
{
readOff++;
if (readOff == XLogSegSize / BLCKSZ)
@ -928,20 +933,20 @@ got_record:;
if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
elog(STOP, "ReadRecord: read(logfile %u seg %u off %u) failed: %d",
readId, readSeg, readOff, errno);
if (((XLogPageHeader)readBuf)->xlp_magic != XLOG_PAGE_MAGIC)
if (((XLogPageHeader) readBuf)->xlp_magic != XLOG_PAGE_MAGIC)
{
elog(emode, "ReadRecord: invalid magic number %u in logfile %u seg %u off %u",
((XLogPageHeader)readBuf)->xlp_magic,
((XLogPageHeader) readBuf)->xlp_magic,
readId, readSeg, readOff);
goto next_record_is_invalid;
}
if (!(((XLogPageHeader)readBuf)->xlp_info & XLP_FIRST_IS_SUBRECORD))
if (!(((XLogPageHeader) readBuf)->xlp_info & XLP_FIRST_IS_SUBRECORD))
{
elog(emode, "ReadRecord: there is no subrecord flag in logfile %u seg %u off %u",
readId, readSeg, readOff);
goto next_record_is_invalid;
}
subrecord = (XLogSubRecord*)((char*) readBuf + SizeOfXLogPHD);
subrecord = (XLogSubRecord *) ((char *) readBuf + SizeOfXLogPHD);
if (subrecord->xl_len == 0 || subrecord->xl_len >
(BLCKSZ - SizeOfXLogPHD - SizeOfXLogSubRecord))
{
@ -956,7 +961,7 @@ got_record:;
len, RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
memcpy(buffer, (char*)subrecord + SizeOfXLogSubRecord, subrecord->xl_len);
memcpy(buffer, (char *) subrecord + SizeOfXLogSubRecord, subrecord->xl_len);
buffer += subrecord->xl_len;
if (subrecord->xl_info & XLR_TO_BE_CONTINUED)
{
@ -974,39 +979,38 @@ got_record:;
if (BLCKSZ - SizeOfXLogRecord >=
subrecord->xl_len + SizeOfXLogPHD + SizeOfXLogSubRecord)
{
nextRecord = (XLogRecord*)
((char*)subrecord + subrecord->xl_len + SizeOfXLogSubRecord);
nextRecord = (XLogRecord *)
((char *) subrecord + subrecord->xl_len + SizeOfXLogSubRecord);
}
EndRecPtr.xlogid = readId;
EndRecPtr.xrecoff = readSeg * XLogSegSize + readOff * BLCKSZ +
SizeOfXLogPHD + SizeOfXLogSubRecord + subrecord->xl_len;
ReadRecPtr = *RecPtr;
return(record);
return (record);
}
if (BLCKSZ - SizeOfXLogRecord >=
record->xl_len + RecPtr->xrecoff % BLCKSZ + SizeOfXLogRecord)
{
nextRecord = (XLogRecord*)((char*)record + record->xl_len + SizeOfXLogRecord);
}
nextRecord = (XLogRecord *) ((char *) record + record->xl_len + SizeOfXLogRecord);
EndRecPtr.xlogid = RecPtr->xlogid;
EndRecPtr.xrecoff = RecPtr->xrecoff + record->xl_len + SizeOfXLogRecord;
ReadRecPtr = *RecPtr;

return(record);
return (record);

next_record_is_invalid:;
close(readFile);
readFile = -1;
nextRecord = NULL;
memset(buffer, 0, SizeOfXLogRecord);
record = (XLogRecord*) buffer;
record = (XLogRecord *) buffer;

/*
* If we assumed that next record began on the same page where
* previous one ended - zero end of page.
*/
if (XLByteEQ(tmpRecPtr, EndRecPtr))
{
Assert (EndRecPtr.xrecoff % BLCKSZ > (SizeOfXLogPHD + SizeOfXLogSubRecord) &&
Assert(EndRecPtr.xrecoff % BLCKSZ > (SizeOfXLogPHD + SizeOfXLogSubRecord) &&
BLCKSZ - EndRecPtr.xrecoff % BLCKSZ >= SizeOfXLogRecord);
readId = EndRecPtr.xlogid;
readSeg = EndRecPtr.xrecoff / XLogSegSize;
@ -1014,7 +1018,7 @@ next_record_is_invalid:;
elog(LOG, "Formatting logfile %u seg %u block %u at offset %u",
readId, readSeg, readOff, EndRecPtr.xrecoff % BLCKSZ);
readFile = XLogFileOpen(readId, readSeg, false);
if (lseek(readFile, (off_t)(readOff * BLCKSZ), SEEK_SET) < 0)
if (lseek(readFile, (off_t) (readOff * BLCKSZ), SEEK_SET) < 0)
elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
readId, readSeg, readOff, errno);
if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
@ -1022,7 +1026,7 @@ next_record_is_invalid:;
readId, readSeg, readOff, errno);
memset(readBuf + EndRecPtr.xrecoff % BLCKSZ, 0,
BLCKSZ - EndRecPtr.xrecoff % BLCKSZ);
if (lseek(readFile, (off_t)(readOff * BLCKSZ), SEEK_SET) < 0)
if (lseek(readFile, (off_t) (readOff * BLCKSZ), SEEK_SET) < 0)
elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
readId, readSeg, readOff, errno);
if (write(readFile, readBuf, BLCKSZ) != BLCKSZ)
@ -1032,7 +1036,7 @@ next_record_is_invalid:;
}
else
{
Assert (EndRecPtr.xrecoff % BLCKSZ == 0 ||
Assert(EndRecPtr.xrecoff % BLCKSZ == 0 ||
BLCKSZ - EndRecPtr.xrecoff % BLCKSZ < SizeOfXLogRecord);
readId = tmpRecPtr.xlogid;
readSeg = tmpRecPtr.xrecoff / XLogSegSize;
@ -1047,7 +1051,7 @@ next_record_is_invalid:;
readOff *= BLCKSZ;
memset(readBuf, 0, BLCKSZ);
readFile = XLogFileOpen(readId, readSeg, false);
if (lseek(readFile, (off_t)readOff, SEEK_SET) < 0)
if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0)
elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
readId, readSeg, readOff, errno);
while (readOff < XLogSegSize)
@ -1090,7 +1094,7 @@ next_record_is_invalid:;
unlink(path);
}

return(record);
return (record);
}

void
@ -1132,7 +1136,7 @@ XLOGShmemSize()
if (XLOGbuffers < MinXLOGbuffers)
XLOGbuffers = MinXLOGbuffers;

return(sizeof(XLogCtlData) + BLCKSZ * XLOGbuffers +
return (sizeof(XLogCtlData) + BLCKSZ * XLOGbuffers +
sizeof(XLogRecPtr) * XLOGbuffers + BLCKSZ);
}

@ -1144,10 +1148,10 @@ XLOGShmemInit(void)
if (XLOGbuffers < MinXLOGbuffers)
XLOGbuffers = MinXLOGbuffers;

ControlFile = (ControlFileData*)
ControlFile = (ControlFileData *)
ShmemInitStruct("Control File", BLCKSZ, &found);
Assert(!found);
XLogCtl = (XLogCtlData*)
XLogCtl = (XLogCtlData *)
ShmemInitStruct("XLOG Ctl", sizeof(XLogCtlData) + BLCKSZ * XLOGbuffers +
sizeof(XLogRecPtr) * XLOGbuffers, &found);
Assert(!found);
@ -1164,14 +1168,15 @@ BootStrapXLOG()
CheckPoint checkPoint;

#ifdef NOT_USED
XLogPageHeader page = (XLogPageHeader)buffer;
XLogPageHeader page = (XLogPageHeader) buffer;
XLogRecord *record;

#endif

#ifndef __CYGWIN__
fd = open(ControlFilePath, O_RDWR|O_CREAT|O_EXCL, S_IRUSR|S_IWUSR);
fd = open(ControlFilePath, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
#else
fd = open(ControlFilePath, O_RDWR|O_CREAT|O_EXCL|O_BINARY, S_IRUSR|S_IWUSR);
fd = open(ControlFilePath, O_RDWR | O_CREAT | O_EXCL | O_BINARY, S_IRUSR | S_IWUSR);
#endif
if (fd < 0)
elog(STOP, "BootStrapXLOG failed to create control file (%s): %d",
@ -1188,14 +1193,15 @@ BootStrapXLOG()
memset(buffer, 0, BLCKSZ);
page->xlp_magic = XLOG_PAGE_MAGIC;
page->xlp_info = 0;
record = (XLogRecord*) ((char*)page + SizeOfXLogPHD);
record->xl_prev.xlogid = 0; record->xl_prev.xrecoff = 0;
record = (XLogRecord *) ((char *) page + SizeOfXLogPHD);
record->xl_prev.xlogid = 0;
record->xl_prev.xrecoff = 0;
record->xl_xact_prev = record->xl_prev;
record->xl_xid = InvalidTransactionId;
record->xl_len = sizeof(checkPoint);
record->xl_info = 0;
record->xl_rmid = RM_XLOG_ID;
memcpy((char*)record + SizeOfXLogRecord, &checkPoint, sizeof(checkPoint));
memcpy((char *) record + SizeOfXLogRecord, &checkPoint, sizeof(checkPoint));

logFile = XLogFileInit(0, 0);

@ -1211,7 +1217,7 @@ BootStrapXLOG()
#endif

memset(buffer, 0, BLCKSZ);
ControlFile = (ControlFileData*) buffer;
ControlFile = (ControlFileData *) buffer;
ControlFile->logId = 0;
ControlFile->logSeg = 1;
ControlFile->checkPoint = checkPoint.redo;
@ -1230,7 +1236,7 @@ BootStrapXLOG()
close(fd);
}

static char*
static char *
str_time(time_t tnow)
{
char *result = ctime(&tnow);
@ -1239,7 +1245,7 @@ str_time(time_t tnow)
if (p != NULL)
*p = 0;

return(result);
return (result);
}

/*
@ -1254,16 +1260,17 @@ StartupXLOG()
XLogRecPtr RecPtr,
LastRec;
XLogRecord *record;
char buffer[MAXLOGRECSZ+SizeOfXLogRecord];
char buffer[MAXLOGRECSZ + SizeOfXLogRecord];
int recovery = 0;
bool sie_saved = false;

#endif
int fd;

elog(LOG, "Data Base System is starting up at %s", str_time(time(NULL)));

XLogCtl->xlblocks = (XLogRecPtr*) (((char *)XLogCtl) + sizeof(XLogCtlData));
XLogCtl->pages = ((char *)XLogCtl->xlblocks + sizeof(XLogRecPtr) * XLOGbuffers);
XLogCtl->xlblocks = (XLogRecPtr *) (((char *) XLogCtl) + sizeof(XLogCtlData));
XLogCtl->pages = ((char *) XLogCtl->xlblocks + sizeof(XLogRecPtr) * XLOGbuffers);
XLogCtl->XLogCacheByte = BLCKSZ * XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
@ -1350,7 +1357,7 @@ tryAgain:
elog(STOP, "Invalid RMID in checkPoint record");
if (record->xl_len != sizeof(checkPoint))
elog(STOP, "Invalid length of checkPoint record");
checkPoint = *((CheckPoint*)((char*)record + SizeOfXLogRecord));
checkPoint = *((CheckPoint *) ((char *) record + SizeOfXLogRecord));

elog(LOG, "Redo record at (%u, %u); Undo record at (%u, %u)",
checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
@ -1400,7 +1407,8 @@ tryAgain:
/* Is REDO required ? */
if (XLByteLT(checkPoint.redo, RecPtr))
record = ReadRecord(&(checkPoint.redo), buffer);
else /* read past CheckPoint record */
else
/* read past CheckPoint record */
record = ReadRecord(NULL, buffer);

/* REDO */
@ -1461,8 +1469,8 @@ tryAgain:
XLogCtl->xlblocks[0].xrecoff =
((EndRecPtr.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
Insert = &XLogCtl->Insert;
memcpy((char*)(Insert->currpage), readBuf, BLCKSZ);
Insert->currpos = ((char*) Insert->currpage) +
memcpy((char *) (Insert->currpage), readBuf, BLCKSZ);
Insert->currpos = ((char *) Insert->currpage) +
(EndRecPtr.xrecoff + BLCKSZ - XLogCtl->xlblocks[0].xrecoff);
Insert->PrevRecord = ControlFile->checkPoint;

@ -1531,7 +1539,7 @@ CreateCheckPoint(bool shutdown)
elog(STOP, "XLog insert lock is busy while data base is shutting down");
(void) select(0, NULL, NULL, NULL, &delay);
}
freespace = ((char*) Insert->currpage) + BLCKSZ - Insert->currpos;
freespace = ((char *) Insert->currpage) + BLCKSZ - Insert->currpos;
if (freespace < SizeOfXLogRecord)
{
curridx = NextBufIdx(Insert->curridx);
@ -1545,7 +1553,7 @@ CreateCheckPoint(bool shutdown)
curridx = Insert->curridx;
checkPoint.redo.xlogid = XLogCtl->xlblocks[curridx].xlogid;
checkPoint.redo.xrecoff = XLogCtl->xlblocks[curridx].xrecoff - BLCKSZ +
Insert->currpos - ((char*) Insert->currpage);
Insert->currpos - ((char *) Insert->currpage);
S_UNLOCK(&(XLogCtl->insert_lck));

SpinAcquire(XidGenLockId);
@ -1563,7 +1571,7 @@ CreateCheckPoint(bool shutdown)
if (shutdown && checkPoint.undo.xrecoff != 0)
elog(STOP, "Active transaction while data base is shutting down");

recptr = XLogInsert(RM_XLOG_ID, (char*)&checkPoint, sizeof(checkPoint), NULL, 0);
recptr = XLogInsert(RM_XLOG_ID, (char *) &checkPoint, sizeof(checkPoint), NULL, 0);

if (shutdown && !XLByteEQ(checkPoint.redo, MyLastRecPtr))
elog(STOP, "XLog concurrent activity while data base is shutting down");
@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.80 2000/02/18 09:28:39 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.81 2000/04/12 17:14:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -107,7 +107,7 @@ static struct typinfo Procid[] = {
{"char", CHAROID, 0, 1, F_CHARIN, F_CHAROUT},
{"name", NAMEOID, 0, NAMEDATALEN, F_NAMEIN, F_NAMEOUT},
{"int2", INT2OID, 0, 2, F_INT2IN, F_INT2OUT},
{"int2vector", INT2VECTOROID, 0, INDEX_MAX_KEYS*2, F_INT2VECTORIN, F_INT2VECTOROUT},
{"int2vector", INT2VECTOROID, 0, INDEX_MAX_KEYS * 2, F_INT2VECTORIN, F_INT2VECTOROUT},
{"int4", INT4OID, 0, 4, F_INT4IN, F_INT4OUT},
{"regproc", REGPROCOID, 0, 4, F_REGPROCIN, F_REGPROCOUT},
{"text", TEXTOID, 0, -1, F_TEXTIN, F_TEXTOUT},
@ -115,7 +115,7 @@ static struct typinfo Procid[] = {
{"tid", TIDOID, 0, 6, F_TIDIN, F_TIDOUT},
{"xid", XIDOID, 0, 4, F_XIDIN, F_XIDOUT},
{"cid", CIDOID, 0, 4, F_CIDIN, F_CIDOUT},
{"oidvector", 30, 0, INDEX_MAX_KEYS*4, F_OIDVECTORIN, F_OIDVECTOROUT},
{"oidvector", 30, 0, INDEX_MAX_KEYS * 4, F_OIDVECTORIN, F_OIDVECTOROUT},
{"smgr", 210, 0, 2, F_SMGRIN, F_SMGROUT},
{"_int4", 1007, INT4OID, -1, F_ARRAY_IN, F_ARRAY_OUT},
{"_aclitem", 1034, 1033, -1, F_ARRAY_IN, F_ARRAY_OUT}
@ -325,8 +325,8 @@ BootstrapMain(int argc, char *argv[])
}

/*
* Bootstrap under Postmaster means two things:
* (xloginit) ? StartupXLOG : ShutdownXLOG
* Bootstrap under Postmaster means two things: (xloginit) ?
* StartupXLOG : ShutdownXLOG
*
* If !under Postmaster and xloginit then BootStrapXLOG.
*/
@ -345,9 +345,7 @@ BootstrapMain(int argc, char *argv[])
}

if (!IsUnderPostmaster && xloginit)
{
BootStrapXLOG();
}

/*
* backend initialization
@ -1153,8 +1151,10 @@ build_indices()
index_build(heap, ind, ILHead->il_natts, ILHead->il_attnos,
ILHead->il_nparams, ILHead->il_params, ILHead->il_finfo,
ILHead->il_predInfo);
/* In normal processing mode, index_build would close the heap
* and index, but in bootstrap mode it will not.

/*
* In normal processing mode, index_build would close the heap and
* index, but in bootstrap mode it will not.
*/

/*
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.37 2000/01/26 05:56:09 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.38 2000/04/12 17:14:55 momjian Exp $
*
* NOTES
* See acl.h.
@ -364,7 +364,7 @@ pg_aclcheck(char *relname, char *usename, AclMode mode)
*/
if (((mode & ACL_WR) || (mode & ACL_AP)) &&
!allowSystemTableMods && IsSystemRelationName(relname) &&
strncmp(relname,"pg_temp.", strlen("pg_temp.")) != 0 &&
strncmp(relname, "pg_temp.", strlen("pg_temp.")) != 0 &&
!((Form_pg_shadow) GETSTRUCT(tuple))->usecatupd)
{
elog(DEBUG, "pg_aclcheck: catalog update to \"%s\": permission denied",
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.31 2000/04/09 04:43:15 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.32 2000/04/12 17:14:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -44,6 +44,7 @@ relpath(const char *relname)
snprintf(path, bufsize, "%s%c%s", DataDir, SEP_CHAR, relname);
return path;
}

/*
* If it is in the current database, assume it is in current working
* directory. NB: this does not work during bootstrap!
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.124 2000/03/17 02:36:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.125 2000/04/12 17:14:55 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -273,6 +273,7 @@ heap_create(char *relname,
MemSet((char *) rel, 0, len);
rel->rd_fd = -1; /* table is not open */
rel->rd_unlinked = TRUE; /* table is not created yet */

/*
* create a new tuple descriptor from the one passed in
*/
@ -715,6 +716,7 @@ AddNewRelationTuple(Relation pg_class_desc,

if (!IsIgnoringSystemIndexes())
{

/*
* First, open the catalog indices and insert index tuples for the
* new relation.
@ -1096,18 +1098,26 @@ DeleteRelationTuple(Relation rel)
static void
RelationTruncateIndexes(Relation heapRelation)
{
Relation indexRelation, currentIndex;
Relation indexRelation,
currentIndex;
ScanKeyData entry;
HeapScanDesc scan;
HeapTuple indexTuple, procTuple, classTuple;
HeapTuple indexTuple,
procTuple,
classTuple;
Form_pg_index index;
Oid heapId, indexId, procId, accessMethodId;
Oid heapId,
indexId,
procId,
accessMethodId;
Node *oldPred = NULL;
PredInfo *predInfo;
List *cnfPred = NULL;
AttrNumber *attributeNumberA;
FuncIndexInfo fInfo, *funcInfo = NULL;
int i, numberOfAttributes;
FuncIndexInfo fInfo,
*funcInfo = NULL;
int i,
numberOfAttributes;
char *predString;

heapId = RelationGetRelid(heapRelation);
@ -1120,8 +1130,10 @@ RelationTruncateIndexes(Relation heapRelation)
scan = heap_beginscan(indexRelation, false, SnapshotNow, 1, &entry);
while (HeapTupleIsValid(indexTuple = heap_getnext(scan, 0)))
{

/*
* For each index, fetch index attributes so we can apply index_build
* For each index, fetch index attributes so we can apply
* index_build
*/
index = (Form_pg_index) GETSTRUCT(indexTuple);
indexId = index->indexrelid;
@ -1181,8 +1193,8 @@ RelationTruncateIndexes(Relation heapRelation)
LockRelation(currentIndex, AccessExclusiveLock);

/*
* Release any buffers associated with this index. If they're dirty,
* they're just dropped without bothering to flush to disk.
* Release any buffers associated with this index. If they're
* dirty, they're just dropped without bothering to flush to disk.
*/
ReleaseRelationBuffers(currentIndex);
if (FlushRelationBuffers(currentIndex, (BlockNumber) 0, false) < 0)
@ -1198,11 +1210,11 @@ RelationTruncateIndexes(Relation heapRelation)
attributeNumberA, 0, NULL, funcInfo, predInfo);

/*
* index_build will close both the heap and index relations
* (but not give up the locks we hold on them). That's fine
* for the index, but we need to open the heap again. We need
* no new lock, since this backend still has the exclusive lock
* grabbed by heap_truncate.
* index_build will close both the heap and index relations (but
* not give up the locks we hold on them). That's fine for the
* index, but we need to open the heap again. We need no new
* lock, since this backend still has the exclusive lock grabbed
* by heap_truncate.
*/
heapRelation = heap_open(heapId, NoLock);
Assert(heapRelation != NULL);
@ -1245,12 +1257,12 @@ heap_truncate(char *relname)
* they don't exist anyway. So, no warning in that case.
* ----------------
*/
if (IsTransactionBlock() && ! rel->rd_myxactonly)
if (IsTransactionBlock() && !rel->rd_myxactonly)
elog(NOTICE, "Caution: TRUNCATE TABLE cannot be rolled back, so don't abort now");

/*
* Release any buffers associated with this relation. If they're dirty,
* they're just dropped without bothering to flush to disk.
* Release any buffers associated with this relation. If they're
* dirty, they're just dropped without bothering to flush to disk.
*/

ReleaseRelationBuffers(rel);
@ -1477,7 +1489,7 @@ heap_drop_with_catalog(const char *relname)
* they don't exist anyway. So, no warning in that case.
* ----------------
*/
if (IsTransactionBlock() && ! rel->rd_myxactonly)
if (IsTransactionBlock() && !rel->rd_myxactonly)
elog(NOTICE, "Caution: DROP TABLE cannot be rolled back, so don't abort now");

/* ----------------
@ -1547,8 +1559,8 @@ heap_drop_with_catalog(const char *relname)

/*
* Close relcache entry, but *keep* AccessExclusiveLock on the
* relation until transaction commit. This ensures no one else
* will try to do something with the doomed relation.
* relation until transaction commit. This ensures no one else will
* try to do something with the doomed relation.
*/
heap_close(rel, NoLock);

@ -1714,6 +1726,7 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
* Need to construct source equivalent of given node-string.
*/
expr = stringToNode(adbin);

/*
* deparse_expression needs a RangeTblEntry list, so make one
*/
@ -1747,7 +1760,7 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
heap_freetuple(tuple);
pfree(adsrc);

if (! updatePgAttribute)
if (!updatePgAttribute)
return; /* done if pg_attribute is OK */

attrrel = heap_openr(AttributeRelationName, RowExclusiveLock);
@ -1758,7 +1771,7 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
elog(ERROR, "cache lookup of attribute %d in relation %u failed",
attnum, RelationGetRelid(rel));
attStruct = (Form_pg_attribute) GETSTRUCT(atttup);
if (! attStruct->atthasdef)
if (!attStruct->atthasdef)
{
attStruct->atthasdef = true;
heap_update(attrrel, &atttup->t_self, atttup, NULL);
@ -1796,6 +1809,7 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
*/
expr = stringToNode(ccbin);
expr = (Node *) make_ands_explicit((List *) expr);

/*
* deparse_expression needs a RangeTblEntry list, so make one
*/
@ -1850,9 +1864,10 @@ StoreConstraints(Relation rel)
if (!constr)
return;

/* deparsing of constraint expressions will fail unless the just-created
* pg_attribute tuples for this relation are made visible. So, bump
* the command counter.
/*
* deparsing of constraint expressions will fail unless the
* just-created pg_attribute tuples for this relation are made
* visible. So, bump the command counter.
*/
CommandCounterIncrement();

@ -1921,8 +1936,8 @@ AddRelationRawConstraints(Relation rel,
}

/*
* Create a dummy ParseState and insert the target relation as
* its sole rangetable entry. We need a ParseState for transformExpr.
* Create a dummy ParseState and insert the target relation as its
* sole rangetable entry. We need a ParseState for transformExpr.
*/
pstate = make_parsestate(NULL);
makeRangeTable(pstate, NULL);
@ -1938,25 +1953,28 @@ AddRelationRawConstraints(Relation rel,
Oid type_id;

Assert(colDef->raw_default != NULL);

/*
* Transform raw parsetree to executable expression.
*/
expr = transformExpr(pstate, colDef->raw_default, EXPR_COLUMN_FIRST);

/*
* Make sure default expr does not refer to any vars.
*/
if (contain_var_clause(expr))
elog(ERROR, "Cannot use attribute(s) in DEFAULT clause");

/*
* Check that it will be possible to coerce the expression
* to the column's type. We store the expression without
* coercion, however, to avoid premature coercion in cases like
* Check that it will be possible to coerce the expression to the
* column's type. We store the expression without coercion,
* however, to avoid premature coercion in cases like
*
* CREATE TABLE tbl (fld datetime DEFAULT 'now');
*
* NB: this should match the code in updateTargetListEntry()
* that will actually do the coercion, to ensure we don't accept
* an unusable default expression.
* NB: this should match the code in updateTargetListEntry() that
* will actually do the coercion, to ensure we don't accept an
* unusable default expression.
*/
type_id = exprType(expr);
if (type_id != InvalidOid)
@ -1975,14 +1993,17 @@ AddRelationRawConstraints(Relation rel,
typeidTypeName(type_id));
}
}

/*
* Might as well try to reduce any constant expressions.
*/
expr = eval_const_expressions(expr);

/*
* Must fix opids, in case any operators remain...
*/
fix_opids(expr);

/*
* OK, store it.
*/
@ -2037,26 +2058,31 @@ AddRelationRawConstraints(Relation rel,
ccname = (char *) palloc(NAMEDATALEN);
snprintf(ccname, NAMEDATALEN, "$%d", numchecks + 1);
}

/*
* Transform raw parsetree to executable expression.
*/
expr = transformExpr(pstate, cdef->raw_expr, EXPR_COLUMN_FIRST);

/*
* Make sure it yields a boolean result.
*/
if (exprType(expr) != BOOLOID)
elog(ERROR, "CHECK '%s' does not yield boolean result",
ccname);

/*
* Make sure no outside relations are referred to.
*/
if (length(pstate->p_rtable) != 1)
elog(ERROR, "Only relation '%s' can be referenced in CHECK",
relname);

/*
* Might as well try to reduce any constant expressions.
*/
expr = eval_const_expressions(expr);

/*
* Constraints are evaluated with execQual, which expects an
* implicit-AND list, so convert expression to implicit-AND form.
@ -2064,10 +2090,12 @@ AddRelationRawConstraints(Relation rel,
* overkill...)
*/
expr = (Node *) make_ands_implicit((Expr *) expr);

/*
* Must fix opids in operator clauses.
*/
fix_opids(expr);

/*
* OK, store it.
*/
@ -2081,8 +2109,8 @@ AddRelationRawConstraints(Relation rel,
* We do this even if there was no change, in order to ensure that an
* SI update message is sent out for the pg_class tuple, which will
* force other backends to rebuild their relcache entries for the rel.
* (Of course, for a newly created rel there is no need for an SI message,
* but for ALTER TABLE ADD ATTRIBUTE this'd be important.)
* (Of course, for a newly created rel there is no need for an SI
* message, but for ALTER TABLE ADD ATTRIBUTE this'd be important.)
*/
relrel = heap_openr(RelationRelationName, RowExclusiveLock);
reltup = SearchSysCacheTupleCopy(RELOID,
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.107 2000/03/01 05:39:24 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.108 2000/04/12 17:14:55 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -77,16 +77,20 @@ static void DefaultBuild(Relation heapRelation, Relation indexRelation,
static Oid IndexGetRelation(Oid indexId);

static bool reindexing = false;
extern bool SetReindexProcessing(bool reindexmode)
extern bool
SetReindexProcessing(bool reindexmode)
{
bool old = reindexing;

reindexing = reindexmode;
return old;
}
extern bool IsReindexProcessing(void)
extern bool
IsReindexProcessing(void)
{
return reindexing;
}

/* ----------------------------------------------------------------
* sysatts is a structure containing attribute tuple forms
* for system attributes (numbered -1, -2, ...). This really
@ -1075,9 +1079,9 @@ index_create(char *heapRelationName,
* bootstrapping. Otherwise, we call the routine that constructs the
* index.
*
* In normal processing mode, the heap and index relations are closed
* by index_build() --- but we continue to hold the ShareLock on the
* heap that we acquired above, until end of transaction.
* In normal processing mode, the heap and index relations are closed by
* index_build() --- but we continue to hold the ShareLock on the heap
* that we acquired above, until end of transaction.
*/
if (IsBootstrapProcessingMode())
{
@ -1139,7 +1143,7 @@ index_drop(Oid indexId)
* they don't exist anyway. So, no warning in that case.
* ----------------
*/
if (IsTransactionBlock() && ! userIndexRelation->rd_myxactonly)
if (IsTransactionBlock() && !userIndexRelation->rd_myxactonly)
elog(NOTICE, "Caution: DROP INDEX cannot be rolled back, so don't abort now");

/* ----------------
@ -1267,7 +1271,8 @@ FormIndexDatum(int numberOfAttributes,
* --------------------------------------------
*/
static
bool LockClassinfoForUpdate(Oid relid, HeapTuple rtup, Buffer *buffer, bool confirmCommitted)
bool
LockClassinfoForUpdate(Oid relid, HeapTuple rtup, Buffer *buffer, bool confirmCommitted)
{
HeapTuple classTuple;
Form_pg_class pgcform;
@ -1295,6 +1300,7 @@ bool LockClassinfoForUpdate(Oid relid, HeapTuple rtup, Buffer *buffer, bool conf
if (confirmCommitted)
{
HeapTupleHeader th = rtup->t_data;

if (!(th->t_infomask & HEAP_XMIN_COMMITTED))
elog(ERROR, "The tuple isn't committed");
if (th->t_infomask & HEAP_XMAX_COMMITTED)
@ -1309,7 +1315,8 @@ bool LockClassinfoForUpdate(Oid relid, HeapTuple rtup, Buffer *buffer, bool conf
* Indexes of the relation active ?
* ---------------------------------------------
*/
bool IndexesAreActive(Oid relid, bool confirmCommitted)
bool
IndexesAreActive(Oid relid, bool confirmCommitted)
{
HeapTupleData tuple;
Relation indexRelation;
@ -1406,13 +1413,15 @@ setRelhasindexInplace(Oid relid, bool hasindex, bool immediate)
heap_close(pg_class, RowExclusiveLock);
elog(ERROR, "setRelhasindexInplace: cannot scan RELATION relation");
}

/*
* Confirm that target tuple is locked by this transaction
* in case of immedaite updation.
* Confirm that target tuple is locked by this transaction in case of
* immedaite updation.
*/
if (immediate)
{
HeapTupleHeader th = tuple->t_data;

if (!(th->t_infomask & HEAP_XMIN_COMMITTED))
elog(ERROR, "Immediate hasindex updation can be done only for committed tuples %x", th->t_infomask);
if (th->t_infomask & HEAP_XMAX_INVALID)
@ -1697,10 +1706,12 @@ DefaultBuild(Relation heapRelation,
char *nullv;
long reltuples,
indtuples;

#ifndef OMIT_PARTIAL_INDEX
ExprContext *econtext;
TupleTable tupleTable;
TupleTableSlot *slot;

#endif
Node *predicate;
Node *oldPred;
@ -1781,6 +1792,7 @@ DefaultBuild(Relation heapRelation,
reltuples++;

#ifndef OMIT_PARTIAL_INDEX

/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -1804,7 +1816,7 @@ DefaultBuild(Relation heapRelation,
{
/* SetSlotContents(slot, heapTuple); */
slot->val = heapTuple;
if (! ExecQual((List *) predicate, econtext, false))
if (!ExecQual((List *) predicate, econtext, false))
continue;
}
#endif /* OMIT_PARTIAL_INDEX */
@ -1854,13 +1866,13 @@ DefaultBuild(Relation heapRelation,
/*
* Since we just counted the tuples in the heap, we update its stats
* in pg_class to guarantee that the planner takes advantage of the
* index we just created. But, only update statistics during
* normal index definitions, not for indices on system catalogs
* created during bootstrap processing. We must close the relations
* before updating statistics to guarantee that the relcache entries
* are flushed when we increment the command counter in UpdateStats().
* But we do not release any locks on the relations; those will be
* held until end of transaction.
* index we just created. But, only update statistics during normal
* index definitions, not for indices on system catalogs created
* during bootstrap processing. We must close the relations before
* updating statistics to guarantee that the relcache entries are
* flushed when we increment the command counter in UpdateStats(). But
* we do not release any locks on the relations; those will be held
* until end of transaction.
*/
if (IsNormalProcessingMode())
{
@ -2049,17 +2061,25 @@ activate_index(Oid indexId, bool activate)
bool
reindex_index(Oid indexId, bool force)
{
Relation iRel, indexRelation, heapRelation;
Relation iRel,
indexRelation,
heapRelation;
ScanKeyData entry;
HeapScanDesc scan;
HeapTuple indexTuple, procTuple, classTuple;
HeapTuple indexTuple,
procTuple,
classTuple;
Form_pg_index index;
Oid heapId, procId, accessMethodId;
Oid heapId,
procId,
accessMethodId;
Node *oldPred = NULL;
PredInfo *predInfo;
AttrNumber *attributeNumberA;
FuncIndexInfo fInfo, *funcInfo = NULL;
int i, numberOfAttributes;
FuncIndexInfo fInfo,
*funcInfo = NULL;
int i,
numberOfAttributes;
char *predString;
bool old;

@ -2152,11 +2172,10 @@ reindex_index(Oid indexId, bool force)
attributeNumberA, 0, NULL, funcInfo, predInfo);

/*
* index_build will close both the heap and index relations
* (but not give up the locks we hold on them). That's fine
* for the index, but we need to open the heap again. We need
* no new lock, since this backend still has the exclusive lock
* grabbed by heap_truncate.
* index_build will close both the heap and index relations (but not
* give up the locks we hold on them). That's fine for the index, but
* we need to open the heap again. We need no new lock, since this
* backend still has the exclusive lock grabbed by heap_truncate.
*/
iRel = index_open(indexId);
Assert(iRel != NULL);
@ -2182,21 +2201,18 @@ activate_indexes_of_a_table(Oid relid, bool activate)
if (!activate)
setRelhasindexInplace(relid, false, true);
else
{
return false;
}
}
else
{
if (activate)
reindex_relation(relid, false);
else
{
return false;
}
}
return true;
}

/* --------------------------------
* reindex_relation - This routine is used to recreate indexes
* of a relation.
@ -2209,7 +2225,8 @@ reindex_relation(Oid relid, bool force)
ScanKeyData entry;
HeapScanDesc scan;
HeapTuple indexTuple;
bool old, reindexed;
bool old,
reindexed;

old = SetReindexProcessing(true);
if (IndexesAreActive(relid, true))
@ -2231,6 +2248,7 @@ reindex_relation(Oid relid, bool force)
while (HeapTupleIsValid(indexTuple = heap_getnext(scan, 0)))
{
Form_pg_index index = (Form_pg_index) GETSTRUCT(indexTuple);

if (activate_index(index->indexrelid, true))
reindexed = true;
else
@ -2242,9 +2260,7 @@ reindex_relation(Oid relid, bool force)
heap_endscan(scan);
heap_close(indexRelation, AccessShareLock);
if (reindexed)
{
setRelhasindexInplace(relid, true, false);
}
SetReindexProcessing(old);
return reindexed;
}
@ -9,7 +9,7 @@
 *
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.59 2000/02/18 09:28:41 inoue Exp $
 * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.60 2000/04/12 17:14:56 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -31,47 +31,47 @@
 */

char	   *Name_pg_aggregate_indices[Num_pg_aggregate_indices] =
{AggregateNameTypeIndex};
	{AggregateNameTypeIndex};
char	   *Name_pg_am_indices[Num_pg_am_indices] =
{AmNameIndex};
	{AmNameIndex};
char	   *Name_pg_amop_indices[Num_pg_amop_indices] =
{AccessMethodOpidIndex, AccessMethodStrategyIndex};
	{AccessMethodOpidIndex, AccessMethodStrategyIndex};
char	   *Name_pg_attr_indices[Num_pg_attr_indices] =
{AttributeRelidNameIndex, AttributeRelidNumIndex};
	{AttributeRelidNameIndex, AttributeRelidNumIndex};
char	   *Name_pg_attrdef_indices[Num_pg_attrdef_indices] =
{AttrDefaultIndex};
	{AttrDefaultIndex};
char	   *Name_pg_class_indices[Num_pg_class_indices] =
{ClassNameIndex, ClassOidIndex};
	{ClassNameIndex, ClassOidIndex};
char	   *Name_pg_group_indices[Num_pg_group_indices] =
{GroupNameIndex, GroupSysidIndex};
	{GroupNameIndex, GroupSysidIndex};
char	   *Name_pg_index_indices[Num_pg_index_indices] =
{IndexRelidIndex};
	{IndexRelidIndex};
char	   *Name_pg_inherits_indices[Num_pg_inherits_indices] =
{InheritsRelidSeqnoIndex};
	{InheritsRelidSeqnoIndex};
char	   *Name_pg_language_indices[Num_pg_language_indices] =
{LanguageOidIndex, LanguageNameIndex};
	{LanguageOidIndex, LanguageNameIndex};
char	   *Name_pg_listener_indices[Num_pg_listener_indices] =
{ListenerRelnamePidIndex};
	{ListenerRelnamePidIndex};
char	   *Name_pg_opclass_indices[Num_pg_opclass_indices] =
{OpclassNameIndex, OpclassDeftypeIndex};
	{OpclassNameIndex, OpclassDeftypeIndex};
char	   *Name_pg_operator_indices[Num_pg_operator_indices] =
{OperatorOidIndex, OperatorNameIndex};
	{OperatorOidIndex, OperatorNameIndex};
char	   *Name_pg_proc_indices[Num_pg_proc_indices] =
{ProcedureOidIndex, ProcedureNameIndex};
	{ProcedureOidIndex, ProcedureNameIndex};
char	   *Name_pg_relcheck_indices[Num_pg_relcheck_indices] =
{RelCheckIndex};
	{RelCheckIndex};
char	   *Name_pg_rewrite_indices[Num_pg_rewrite_indices] =
{RewriteOidIndex, RewriteRulenameIndex};
	{RewriteOidIndex, RewriteRulenameIndex};
char	   *Name_pg_shadow_indices[Num_pg_shadow_indices] =
{ShadowNameIndex, ShadowSysidIndex};
	{ShadowNameIndex, ShadowSysidIndex};
char	   *Name_pg_statistic_indices[Num_pg_statistic_indices] =
{StatisticRelidAttnumIndex};
	{StatisticRelidAttnumIndex};
char	   *Name_pg_trigger_indices[Num_pg_trigger_indices] =
{TriggerRelidIndex, TriggerConstrNameIndex, TriggerConstrRelidIndex};
	{TriggerRelidIndex, TriggerConstrNameIndex, TriggerConstrRelidIndex};
char	   *Name_pg_type_indices[Num_pg_type_indices] =
{TypeNameIndex, TypeOidIndex};
	{TypeNameIndex, TypeOidIndex};
char	   *Name_pg_description_indices[Num_pg_description_indices] =
{DescriptionObjIndex};
	{DescriptionObjIndex};



@ -1004,4 +1004,3 @@ TypeOidIndexScan(Relation heapRelation, Oid typeId)

	return tuple;
}

@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.30 2000/03/26 19:43:58 tgl Exp $
 * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.31 2000/04/12 17:14:56 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -200,8 +200,10 @@ AggregateCreate(char *aggName,
	}
	else
	{
		/* If no finalfn, aggregate result type is type of the sole
		 * state value (we already checked there is only one)

		/*
		 * If no finalfn, aggregate result type is type of the sole state
		 * value (we already checked there is only one)
		 */
		if (OidIsValid(xret1))
			fret = xret1;
@ -284,9 +286,9 @@ AggNameGetInitVal(char *aggName, Oid basetype, int xfuncno, bool *isNull)
	Assert(xfuncno == 1 || xfuncno == 2);

	/*
	 * since we will have to use fastgetattr (in case one or both init vals
	 * are NULL), we will need to open the relation. Do that first to
	 * ensure we don't get a stale tuple from the cache.
	 * since we will have to use fastgetattr (in case one or both init
	 * vals are NULL), we will need to open the relation. Do that first
	 * to ensure we don't get a stale tuple from the cache.
	 */

	aggRel = heap_openr(AggregateRelationName, AccessShareLock);
@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.41 2000/04/04 21:44:37 tgl Exp $
 * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.42 2000/04/12 17:14:56 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.49 2000/01/26 05:56:11 momjian Exp $
 * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.50 2000/04/12 17:14:56 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -374,7 +374,7 @@ TypeCreate(char *typeName,
	values[i++] = (Datum) GetUserId();	/* 2 */
	values[i++] = (Datum) internalSize; /* 3 */
	values[i++] = (Datum) externalSize; /* 4 */
	values[i++] = (Datum) passedByValue;/* 5 */
	values[i++] = (Datum) passedByValue;		/* 5 */
	values[i++] = (Datum) typeType; /* 6 */
	values[i++] = (Datum) (bool) 1; /* 7 */
	values[i++] = (Datum) typDelim; /* 8 */
@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.10 2000/01/26 05:56:17 momjian Exp $
 * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.11 2000/04/12 17:15:06 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -702,7 +702,7 @@ getParamTypes(TgElement * elem, Oid *typev)
		if (parameterCount == FUNC_MAX_ARGS)
		{
			elog(ERROR,
				 "getParamTypes: Ingredients cannot take > %d arguments",FUNC_MAX_ARGS);
				 "getParamTypes: Ingredients cannot take > %d arguments", FUNC_MAX_ARGS);
		}
		t = elem->inTypes->val[j];
		if (strcmp(t, "opaque") == 0)
@ -7,7 +7,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.58 2000/01/26 05:56:12 momjian Exp $
 * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.59 2000/04/12 17:14:57 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -155,12 +155,13 @@ Async_Notify(char *relname)
	/* no point in making duplicate entries in the list ... */
	if (!AsyncExistsPendingNotify(relname))
	{

		/*
		 * We allocate list memory from the global malloc pool to ensure
		 * that it will live until we want to use it. This is probably not
		 * necessary any longer, since we will use it before the end of the
		 * transaction. DLList only knows how to use malloc() anyway, but we
		 * could probably palloc() the strings...
		 * that it will live until we want to use it. This is probably
		 * not necessary any longer, since we will use it before the end
		 * of the transaction. DLList only knows how to use malloc()
		 * anyway, but we could probably palloc() the strings...
		 */
		notifyName = strdup(relname);
		DLAddHead(pendingNotifies, DLNewElem(notifyName));
@ -466,6 +467,7 @@ AtCommit_Notify()

		if (listenerPID == MyProcPid)
		{

			/*
			 * Self-notify: no need to bother with table update.
			 * Indeed, we *must not* clear the notification field in
@ -491,6 +493,7 @@ AtCommit_Notify()
			 */
			if (kill(listenerPID, SIGUSR2) < 0)
			{

				/*
				 * Get rid of pg_listener entry if it refers to a PID
				 * that no longer exists. Presumably, that backend
@ -15,7 +15,7 @@
 *
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.50 2000/01/26 05:56:13 momjian Exp $
 * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.51 2000/04/12 17:14:57 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -104,8 +104,8 @@ cluster(char *oldrelname, char *oldindexname)
	 * Like vacuum, cluster spans transactions, so I'm going to handle it
	 * in the same way: commit and restart transactions where needed.
	 *
	 * We grab exclusive access to the target rel and index for the
	 * duration of the initial transaction.
	 * We grab exclusive access to the target rel and index for the duration
	 * of the initial transaction.
	 */

	OldHeap = heap_openr(oldrelname, AccessExclusiveLock);
@ -115,7 +115,7 @@ cluster(char *oldrelname, char *oldindexname)
	LockRelation(OldIndex, AccessExclusiveLock);
	OIDOldIndex = RelationGetRelid(OldIndex);

	heap_close(OldHeap, NoLock); /* do NOT give up the locks */
	heap_close(OldHeap, NoLock);/* do NOT give up the locks */
	index_close(OldIndex);

	/*
@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.70 2000/03/09 05:00:23 inoue Exp $
 * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.71 2000/04/12 17:14:57 momjian Exp $
 *
 * NOTES
 * The PortalExecutorHeapMemory crap needs to be eliminated
@ -327,8 +327,8 @@ AlterTableAddColumn(const char *relationName,
#endif

	/*
	 * Grab an exclusive lock on the target table, which we will NOT release
	 * until end of transaction.
	 * Grab an exclusive lock on the target table, which we will NOT
	 * release until end of transaction.
	 */
	rel = heap_openr(relationName, AccessExclusiveLock);
	myrelid = RelationGetRelid(rel);
@ -547,8 +547,8 @@ AlterTableAlterColumn(const char *relationName,

		/*
		 * find_all_inheritors does the recursive search of the
		 * inheritance hierarchy, so all we have to do is process all
		 * of the relids in the list that it returns.
		 * inheritance hierarchy, so all we have to do is process all of
		 * the relids in the list that it returns.
		 */
		foreach(child, children)
		{
@ -566,14 +566,14 @@ AlterTableAlterColumn(const char *relationName,
	/* -= now do the thing on this relation =- */

	/* reopen the business */
	rel = heap_openr((char *)relationName, AccessExclusiveLock);
	rel = heap_openr((char *) relationName, AccessExclusiveLock);

	/*
	 * get the number of the attribute
	 */
	tuple = SearchSysCacheTuple(ATTNAME,
								ObjectIdGetDatum(myrelid),
								NameGetDatum(namein((char *)colName)),
								NameGetDatum(namein((char *) colName)),
								0, 0);

	if (!HeapTupleIsValid(tuple))
@ -587,7 +587,7 @@ AlterTableAlterColumn(const char *relationName,

	if (newDefault) /* SET DEFAULT */
	{
		List* rawDefaults = NIL;
		List	   *rawDefaults = NIL;
		RawColumnDefault *rawEnt;

		/* Get rid of the old one first */
@ -599,13 +599,14 @@ AlterTableAlterColumn(const char *relationName,
		rawDefaults = lappend(rawDefaults, rawEnt);

		/*
		 * This function is intended for CREATE TABLE,
		 * so it processes a _list_ of defaults, but we just do one.
		 * This function is intended for CREATE TABLE, so it processes a
		 * _list_ of defaults, but we just do one.
		 */
		AddRelationRawConstraints(rel, rawDefaults, NIL);
	}

	else /* DROP DEFAULT */
	else
/* DROP DEFAULT */
	{
		Relation	attr_rel;
		ScanKeyData scankeys[3];
@ -621,7 +622,7 @@ AlterTableAlterColumn(const char *relationName,
							   TRUE);

		scan = heap_beginscan(attr_rel, false, SnapshotNow, 3, scankeys);
		AssertState(scan!=NULL);
		AssertState(scan != NULL);

		if (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
		{
@ -666,7 +667,7 @@ drop_default(Oid relid, int16 attnum)
						   Int16GetDatum(attnum));

	scan = heap_beginscan(attrdef_rel, false, SnapshotNow, 2, scankeys);
	AssertState(scan!=NULL);
	AssertState(scan != NULL);

	if (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
		heap_delete(attrdef_rel, &tuple->t_self, NULL);
@ -710,7 +711,7 @@ systable_beginscan(Relation rel, const char *indexRelname, int nkeys, ScanKey en
	sysscan->buffer = InvalidBuffer;
	if (hasindex)
	{
		sysscan->irel = index_openr((char *)indexRelname);
		sysscan->irel = index_openr((char *) indexRelname);
		sysscan->iscan = index_beginscan(sysscan->irel, false, nkeys, entry);
	}
	else
@ -775,28 +776,32 @@ find_attribute_walker(Node *node, int attnum)
	if (IsA(node, Var))
	{
		Var		   *var = (Var *) node;

		if (var->varlevelsup == 0 && var->varno == 1 &&
			var->varattno == attnum)
			return true;
	}
	return expression_tree_walker(node, find_attribute_walker, (void *)attnum);
	return expression_tree_walker(node, find_attribute_walker, (void *) attnum);
}
static bool
find_attribute_in_node(Node *node, int attnum)
{
	return expression_tree_walker(node, find_attribute_walker, (void *)attnum);
	return expression_tree_walker(node, find_attribute_walker, (void *) attnum);
}

/*
 * Remove/check references for the column
 */
static bool
RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
{
	Relation	indexRelation, rcrel;
	Relation	indexRelation,
				rcrel;
	ScanKeyData entry;
	HeapScanDesc scan;
	void	   *sysscan;
	HeapTuple	htup, indexTuple;
	HeapTuple	htup,
				indexTuple;
	Form_pg_index index;
	Form_pg_relcheck relcheck;
	Form_pg_class pgcform = (Form_pg_class) NULL;
@ -805,14 +810,15 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)


	if (!checkonly)
		pgcform = (Form_pg_class) GETSTRUCT (reltup);
		pgcform = (Form_pg_class) GETSTRUCT(reltup);

	/*
	 * Remove/check constraints here
	 */
	ScanKeyEntryInitialize(&entry, (bits16) 0x0, Anum_pg_relcheck_rcrelid,
						   (RegProcedure) F_OIDEQ, ObjectIdGetDatum(reloid));
	rcrel = heap_openr(RelCheckRelationName, RowExclusiveLock);
	sysscan = systable_beginscan(rcrel, RelCheckIndex,1 ,&entry);
	sysscan = systable_beginscan(rcrel, RelCheckIndex, 1, &entry);

	while (HeapTupleIsValid(htup = systable_getnext(sysscan)))
	{
@ -883,6 +889,7 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)

	return checkok;
}

#endif	 /* _DROP_COLUMN_HACK__ */

/*
@ -894,8 +901,11 @@ AlterTableDropColumn(const char *relationName,
					 int behavior)
{
#ifdef _DROP_COLUMN_HACK__
	Relation	rel, attrdesc, adrel;
	Oid			myrelid, attoid;
	Relation	rel,
				attrdesc,
				adrel;
	Oid			myrelid,
				attoid;
	HeapTuple	reltup;
	HeapTupleData classtuple;
	Buffer		buffer;
@ -910,6 +920,7 @@ AlterTableDropColumn(const char *relationName,

	if (inh)
		elog(ERROR, "ALTER TABLE / DROP COLUMN with inherit option is not supported yet");

	/*
	 * permissions checking. this would normally be done in utility.c,
	 * but this particular routine is recursive.
@ -925,8 +936,8 @@ AlterTableDropColumn(const char *relationName,
#endif

	/*
	 * Grab an exclusive lock on the target table, which we will NOT release
	 * until end of transaction.
	 * Grab an exclusive lock on the target table, which we will NOT
	 * release until end of transaction.
	 */
	rel = heap_openr(relationName, AccessExclusiveLock);
	myrelid = RelationGetRelid(rel);
@ -987,6 +998,7 @@ AlterTableDropColumn(const char *relationName,
		elog(ERROR, "ALTER TABLE: column name \"%s\" was already dropped", colName);
	attnum = attribute->attnum;
	attoid = tup->t_data->t_oid;

	/*
	 * Check constraints/indices etc here
	 */
@ -1021,10 +1033,12 @@ AlterTableDropColumn(const char *relationName,
	adrel = heap_openr(AttrDefaultRelationName, RowExclusiveLock);
	ScanKeyEntryInitialize(&scankeys[0], 0x0, Anum_pg_attrdef_adrelid,
						   F_OIDEQ, ObjectIdGetDatum(myrelid));
	/* Oops pg_attrdef doesn't have (adrelid,adnum) index
	ScanKeyEntryInitialize(&scankeys[1], 0x0, Anum_pg_attrdef_adnum,
						   F_INT2EQ, Int16GetDatum(attnum));
	sysscan = systable_beginscan(adrel, AttrDefaultIndex, 2, scankeys);

	/*
	 * Oops pg_attrdef doesn't have (adrelid,adnum) index
	 * ScanKeyEntryInitialize(&scankeys[1], 0x0, Anum_pg_attrdef_adnum,
	 * F_INT2EQ, Int16GetDatum(attnum)); sysscan =
	 * systable_beginscan(adrel, AttrDefaultIndex, 2, scankeys);
	 */
	sysscan = systable_beginscan(adrel, AttrDefaultIndex, 1, scankeys);
	while (HeapTupleIsValid(tup = systable_getnext(sysscan)))
@ -1037,6 +1051,7 @@ AlterTableDropColumn(const char *relationName,
	}
	systable_endscan(sysscan);
	heap_close(adrel, NoLock);

	/*
	 * Remove objects which reference this column
	 */
@ -1077,7 +1092,7 @@ AlterTableAddConstraint(const char *relationName,
			elog(ERROR, "ALTER TABLE / ADD CONSTRAINT is not implemented");
		case T_FkConstraint:
			{
				FkConstraint *fkconstraint=(FkConstraint *)newConstraint;
				FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
				Relation	rel;
				HeapScanDesc scan;
				HeapTuple	tuple;
@ -1094,10 +1109,11 @@ AlterTableAddConstraint(const char *relationName,
				heap_close(rel, NoLock);

				/*
				 * Grab an exclusive lock on the fk table, and then scan through
				 * each tuple, calling the RI_FKey_Match_Ins (insert trigger)
				 * as if that tuple had just been inserted. If any of those
				 * fail, it should elog(ERROR) and that's that.
				 * Grab an exclusive lock on the fk table, and then scan
				 * through each tuple, calling the RI_FKey_Match_Ins
				 * (insert trigger) as if that tuple had just been
				 * inserted. If any of those fail, it should elog(ERROR)
				 * and that's that.
				 */
				rel = heap_openr(relationName, AccessExclusiveLock);
				trig.tgoid = 0;
@ -1109,33 +1125,36 @@ AlterTableAddConstraint(const char *relationName,
				trig.tginitdeferred = FALSE;
				trig.tgdeferrable = FALSE;

				trig.tgargs = (char **)palloc(
				trig.tgargs = (char **) palloc(
					sizeof(char *) * (4 + length(fkconstraint->fk_attrs)
									  + length(fkconstraint->pk_attrs)));

				trig.tgargs[0] = "<unnamed>";
				trig.tgargs[1] = (char *)relationName;
				trig.tgargs[1] = (char *) relationName;
				trig.tgargs[2] = fkconstraint->pktable_name;
				trig.tgargs[3] = fkconstraint->match_type;
				count = 4;
				foreach (list, fkconstraint->fk_attrs)
				foreach(list, fkconstraint->fk_attrs)
				{
					Ident	   *fk_at = lfirst(list);

					trig.tgargs[count++] = fk_at->name;
				}
				foreach (list, fkconstraint->pk_attrs)
				foreach(list, fkconstraint->pk_attrs)
				{
					Ident	   *pk_at = lfirst(list);

					trig.tgargs[count++] = pk_at->name;
				}
				trig.tgnargs = count;

				scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
				AssertState(scan!=NULL);
				AssertState(scan != NULL);

				while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
				{
					TriggerData newtrigdata;

					newtrigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW;
					newtrigdata.tg_relation = rel;
					newtrigdata.tg_trigtuple = tuple;
@ -1149,7 +1168,8 @@ AlterTableAddConstraint(const char *relationName,
					/* Make a call to the check function */
				}
				heap_endscan(scan);
				heap_close(rel, NoLock); /* close rel but keep lock! */
				heap_close(rel, NoLock);	/* close rel but keep
											 * lock! */

				pfree(trig.tgargs);
			}
@ -1186,7 +1206,7 @@ LockTableCommand(LockStmt *lockstmt)
	int			aclresult;

	rel = heap_openr(lockstmt->relname, NoLock);
	if (! RelationIsValid(rel))
	if (!RelationIsValid(rel))
		elog(ERROR, "Relation '%s' does not exist", lockstmt->relname);

	if (lockstmt->mode == AccessShareLock)
@ -67,10 +67,13 @@ static void CommentTrigger(char *trigger, char *relation, char *comments);
 *------------------------------------------------------------------
 */

void CommentObject(int objtype, char *objname, char *objproperty,
					List *objlist, char *comment) {
void
CommentObject(int objtype, char *objname, char *objproperty,
			  List *objlist, char *comment)
{

	switch (objtype) {
	switch (objtype)
	{
		case (INDEX):
		case (SEQUENCE):
		case (TABLE):
@ -120,13 +123,16 @@ void CommentObject(int objtype, char *objname, char *objproperty,
 *------------------------------------------------------------------
 */

void CreateComments(Oid oid, char *comment) {
void
CreateComments(Oid oid, char *comment)
{

	Relation	description;
	TupleDesc	tupDesc;
	HeapScanDesc scan;
	ScanKeyData entry;
	HeapTuple	desctuple = NULL, searchtuple;
	HeapTuple	desctuple = NULL,
				searchtuple;
	Datum		values[Natts_pg_description];
	char		nulls[Natts_pg_description];
	char		replaces[Natts_pg_description];
@ -137,8 +143,10 @@ void CreateComments(Oid oid, char *comment) {

	description = heap_openr(DescriptionRelationName, RowExclusiveLock);
	tupDesc = description->rd_att;
	if ((comment != NULL) && (strlen(comment) > 0)) {
		for (i = 0; i < Natts_pg_description; i++) {
	if ((comment != NULL) && (strlen(comment) > 0))
	{
		for (i = 0; i < Natts_pg_description; i++)
		{
			nulls[i] = ' ';
			replaces[i] = 'r';
			values[i] = (Datum) NULL;
@ -157,24 +165,29 @@ void CreateComments(Oid oid, char *comment) {

	/*** If a previous tuple exists, either delete or prep replacement ***/

	if (HeapTupleIsValid(searchtuple)) {
	if (HeapTupleIsValid(searchtuple))
	{

		/*** If the comment is blank, call heap_delete, else heap_update ***/

		if ((comment == NULL) || (strlen(comment) == 0)) {
		if ((comment == NULL) || (strlen(comment) == 0))
			heap_delete(description, &searchtuple->t_self, NULL);
		} else {
		else
		{
			desctuple = heap_modifytuple(searchtuple, description, values,
										 nulls, replaces);
			heap_update(description, &searchtuple->t_self, desctuple, NULL);
			modified = TRUE;
		}

	} else {
	}
	else
	{

		/*** Only if comment is non-blank do we form a new tuple ***/

		if ((comment != NULL) && (strlen(comment) > 0)) {
		if ((comment != NULL) && (strlen(comment) > 0))
		{
			desctuple = heap_formtuple(tupDesc, values, nulls);
			heap_insert(description, desctuple);
			modified = TRUE;
@ -186,8 +199,10 @@ void CreateComments(Oid oid, char *comment) {

	heap_endscan(scan);

	if (modified) {
		if (RelationGetForm(description)->relhasindex) {
	if (modified)
	{
		if (RelationGetForm(description)->relhasindex)
		{
			Relation	idescs[Num_pg_description_indices];

			CatalogOpenIndices(Num_pg_description_indices,
@ -214,7 +229,9 @@ void CreateComments(Oid oid, char *comment) {
 *------------------------------------------------------------------
 */

void DeleteComments(Oid oid) {
void
DeleteComments(Oid oid)
{

	Relation	description;
	TupleDesc	tupDesc;
@ -234,9 +251,8 @@ void DeleteComments(Oid oid) {

	/*** If a previous tuple exists, delete it ***/

	if (HeapTupleIsValid(searchtuple)) {
	if (HeapTupleIsValid(searchtuple))
		heap_delete(description, &searchtuple->t_self, NULL);
	}

	/*** Complete the scan, update indices, if necessary ***/

@ -256,7 +272,9 @@ void DeleteComments(Oid oid) {
 *------------------------------------------------------------------
 */

static void CommentRelation(int reltype, char *relname, char *comment) {
static void
CommentRelation(int reltype, char *relname, char *comment)
{

	HeapTuple	reltuple;
	Oid			oid;
@ -264,19 +282,17 @@ static void CommentRelation(int reltype, char *relname, char *comment) {

	/*** First, check object security ***/

#ifndef NO_SECURITY
	if (!pg_ownercheck(GetPgUserName(), relname, RELNAME)) {
#ifndef NO_SECURITY
	if (!pg_ownercheck(GetPgUserName(), relname, RELNAME))
		elog(ERROR, "you are not permitted to comment on class '%s'", relname);
	}
#endif
#endif

	/*** Now, attempt to find the oid in the cached version of pg_class ***/

	reltuple = SearchSysCacheTuple(RELNAME, PointerGetDatum(relname),
								   0, 0, 0);
	if (!HeapTupleIsValid(reltuple)) {
	if (!HeapTupleIsValid(reltuple))
		elog(ERROR, "relation '%s' does not exist", relname);
	}

	oid = reltuple->t_data->t_oid;

@ -284,26 +300,23 @@ static void CommentRelation(int reltype, char *relname, char *comment) {

	relkind = ((Form_pg_class) GETSTRUCT(reltuple))->relkind;

	switch (reltype) {
	switch (reltype)
	{
		case (INDEX):
			if (relkind != 'i') {
			if (relkind != 'i')
				elog(ERROR, "relation '%s' is not an index", relname);
			}
			break;
		case (TABLE):
			if (relkind != 'r') {
			if (relkind != 'r')
				elog(ERROR, "relation '%s' is not a table", relname);
			}
			break;
		case (VIEW):
			if (relkind != 'r') {
			if (relkind != 'r')
				elog(ERROR, "relation '%s' is not a view", relname);
			}
			break;
		case (SEQUENCE):
			if (relkind != 'S') {
			if (relkind != 'S')
				elog(ERROR, "relation '%s' is not a sequence", relname);
			}
			break;
	}

@ -325,7 +338,9 @@ static void CommentRelation(int reltype, char *relname, char *comment) {
 *------------------------------------------------------------------
 */

static void CommentAttribute(char *relname, char *attrname, char *comment) {
static void
CommentAttribute(char *relname, char *attrname, char *comment)
{

	Relation	relation;
	HeapTuple	attrtuple;
@ -333,18 +348,18 @@ static void CommentAttribute(char *relname, char *attrname, char *comment) {

	/*** First, check object security ***/

#ifndef NO_SECURITY
	if (!pg_ownercheck(GetPgUserName(), relname, RELNAME)) {
#ifndef NO_SECURITY
	if (!pg_ownercheck(GetPgUserName(), relname, RELNAME))
		elog(ERROR, "you are not permitted to comment on class '%s\'", relname);
	}
#endif
#endif

	/*** Now, fetch the attribute oid from the system cache ***/

	relation = heap_openr(relname, AccessShareLock);
	attrtuple = SearchSysCacheTuple(ATTNAME, ObjectIdGetDatum(relation->rd_id),
									PointerGetDatum(attrname), 0, 0);
	if (!HeapTupleIsValid(attrtuple)) {
	if (!HeapTupleIsValid(attrtuple))
	{
		elog(ERROR, "'%s' is not an attribute of class '%s'",
			 attrname, relname);
	}
@ -371,15 +386,19 @@ static void CommentAttribute(char *relname, char *attrname, char *comment) {
 *------------------------------------------------------------------
 */

static void CommentDatabase(char *database, char *comment) {
static void
CommentDatabase(char *database, char *comment)
{

	Relation	pg_database;
	HeapTuple	dbtuple, usertuple;
	HeapTuple	dbtuple,
				usertuple;
	ScanKeyData entry;
	HeapScanDesc scan;
	Oid			oid;
	bool		superuser;
	int4		dba, userid;
	int4		dba,
				userid;
	char	   *username;

	/*** First find the tuple in pg_database for the database ***/
@ -392,9 +411,8 @@ static void CommentDatabase(char *database, char *comment) {

	/*** Validate database exists, and fetch the dba id and oid ***/

	if (!HeapTupleIsValid(dbtuple)) {
	if (!HeapTupleIsValid(dbtuple))
		elog(ERROR, "database '%s' does not exist", database);
	}
	dba = ((Form_pg_database) GETSTRUCT(dbtuple))->datdba;
	oid = dbtuple->t_data->t_oid;

@ -403,20 +421,20 @@ static void CommentDatabase(char *database, char *comment) {
	username = GetPgUserName();
	usertuple = SearchSysCacheTuple(SHADOWNAME, PointerGetDatum(username),
									0, 0, 0);
	if (!HeapTupleIsValid(usertuple)) {
	if (!HeapTupleIsValid(usertuple))
		elog(ERROR, "current user '%s' does not exist", username);
	}
	userid = ((Form_pg_shadow) GETSTRUCT(usertuple))->usesysid;
	superuser = ((Form_pg_shadow) GETSTRUCT(usertuple))->usesuper;

	/*** Allow if the userid matches the database dba or is a superuser ***/

#ifndef NO_SECURITY
	if (!(superuser || (userid == dba))) {
#ifndef NO_SECURITY
	if (!(superuser || (userid == dba)))
	{
		elog(ERROR, "you are not permitted to comment on database '%s'",
			 database);
	}
#endif
#endif

	/*** Create the comments with the pg_database oid ***/

@ -439,32 +457,35 @@ static void CommentDatabase(char *database, char *comment) {
 *------------------------------------------------------------------
 */

static void CommentRewrite(char *rule, char *comment) {
static void
CommentRewrite(char *rule, char *comment)
{

	HeapTuple	rewritetuple;
	Oid			oid;
	char	   *user, *relation;
	char	   *user,
			   *relation;
	int			aclcheck;

	/*** First, validate user ***/

#ifndef NO_SECURITY
#ifndef NO_SECURITY
	user = GetPgUserName();
	relation = RewriteGetRuleEventRel(rule);
	aclcheck = pg_aclcheck(relation, user, ACL_RU);
	if (aclcheck != ACLCHECK_OK) {
	if (aclcheck != ACLCHECK_OK)
	{
		elog(ERROR, "you are not permitted to comment on rule '%s'",
			 rule);
	}
#endif
#endif

	/*** Next, find the rule's oid ***/

	rewritetuple = SearchSysCacheTuple(RULENAME, PointerGetDatum(rule),
									   0, 0, 0);
	if (!HeapTupleIsValid(rewritetuple)) {
	if (!HeapTupleIsValid(rewritetuple))
		elog(ERROR, "rule '%s' does not exist", rule);
	}

	oid = rewritetuple->t_data->t_oid;

@ -485,7 +506,9 @@ static void CommentRewrite(char *rule, char *comment) {
 *------------------------------------------------------------------
 */

static void CommentType(char *type, char *comment) {
static void
CommentType(char *type, char *comment)
{

	HeapTuple	typetuple;
	Oid			oid;
@ -493,21 +516,21 @@ static void CommentType(char *type, char *comment) {

	/*** First, validate user ***/

#ifndef NO_SECURITY
#ifndef NO_SECURITY
	user = GetPgUserName();
	if (!pg_ownercheck(user, type, TYPENAME)) {
	if (!pg_ownercheck(user, type, TYPENAME))
	{
		elog(ERROR, "you are not permitted to comment on type '%s'",
			 type);
	}
#endif
#endif

	/*** Next, find the type's oid ***/

	typetuple = SearchSysCacheTuple(TYPENAME, PointerGetDatum(type),
									0, 0, 0);
	if (!HeapTupleIsValid(typetuple)) {
	if (!HeapTupleIsValid(typetuple))
		elog(ERROR, "type '%s' does not exist", type);
	}

	oid = typetuple->t_data->t_oid;

@ -527,50 +550,59 @@ static void CommentType(char *type, char *comment) {
 *------------------------------------------------------------------
 */

static void CommentAggregate(char *aggregate, char *argument, char *comment) {
static void
CommentAggregate(char *aggregate, char *argument, char *comment)
{

	HeapTuple	aggtuple;
	Oid			baseoid, oid;
	Oid			baseoid,
				oid;
	bool		defined;
	char	   *user;

	/*** First, attempt to determine the base aggregate oid ***/

	if (argument) {
	if (argument)
	{
		baseoid = TypeGet(argument, &defined);
		if (!OidIsValid(baseoid)) {
		if (!OidIsValid(baseoid))
			elog(ERROR, "aggregate type '%s' does not exist", argument);
		}
	} else {
	else
		baseoid = 0;
	}

	/*** Next, validate the user's attempt to comment ***/

#ifndef NO_SECURITY
#ifndef NO_SECURITY
	user = GetPgUserName();
	if (!pg_aggr_ownercheck(user, aggregate, baseoid)) {
		if (argument) {
	if (!pg_aggr_ownercheck(user, aggregate, baseoid))
	{
		if (argument)
		{
			elog(ERROR, "you are not permitted to comment on aggregate '%s' %s '%s'",
				 aggregate, "with type", argument);
		} else {
		}
		else
		{
			elog(ERROR, "you are not permitted to comment on aggregate '%s'",
				 aggregate);
		}
	}
#endif
#endif

	/*** Now, attempt to find the actual tuple in pg_aggregate ***/

	aggtuple = SearchSysCacheTuple(AGGNAME, PointerGetDatum(aggregate),
								   ObjectIdGetDatum(baseoid), 0, 0);
	if (!HeapTupleIsValid(aggtuple)) {
		if (argument) {
	if (!HeapTupleIsValid(aggtuple))
	{
		if (argument)
		{
			elog(ERROR, "aggregate type '%s' does not exist for aggregate '%s'",
				 argument, aggregate);
		} else {
			elog(ERROR, "aggregate '%s' does not exist", aggregate);
		}
		else
			elog(ERROR, "aggregate '%s' does not exist", aggregate);
	}

	oid = aggtuple->t_data->t_oid;
@ -592,12 +624,17 @@ static void CommentAggregate(char *aggregate, char *argument, char *comment) {
 *------------------------------------------------------------------
 */

static void CommentProc(char *function, List *arguments, char *comment)
static void
CommentProc(char *function, List *arguments, char *comment)
{
	HeapTuple	argtuple, functuple;
	Oid			oid, argoids[FUNC_MAX_ARGS];
	char	   *user, *argument;
	int			i, argcount;
	HeapTuple	argtuple,
				functuple;
	Oid			oid,
				argoids[FUNC_MAX_ARGS];
	char	   *user,
			   *argument;
	int			i,
				argcount;

	/*** First, initialize function's argument list with their type oids ***/

@ -606,13 +643,12 @@ static void CommentProc(char *function, List *arguments, char *comment)
	if (argcount > FUNC_MAX_ARGS)
		elog(ERROR, "functions cannot have more than %d arguments",
			 FUNC_MAX_ARGS);
	for (i = 0; i < argcount; i++) {
	for (i = 0; i < argcount; i++)
	{
		argument = strVal(lfirst(arguments));
		arguments = lnext(arguments);
		if (strcmp(argument, "opaque") == 0)
		{
			argoids[i] = 0;
		}
		else
		{
			argtuple = SearchSysCacheTuple(TYPENAME,
@ -663,47 +699,56 @@ static void CommentProc(char *function, List *arguments, char *comment)
 *------------------------------------------------------------------
 */

static void CommentOperator(char *opername, List *arguments, char *comment) {
static void
CommentOperator(char *opername, List *arguments, char *comment)
{

	Form_pg_operator data;
	HeapTuple	optuple;
	Oid			oid, leftoid = InvalidOid, rightoid = InvalidOid;
	Oid			oid,
				leftoid = InvalidOid,
				rightoid = InvalidOid;
	bool		defined;
	char		oprtype = 0, *user, *lefttype = NULL, *righttype = NULL;
	char		oprtype = 0,
			   *user,
			   *lefttype = NULL,
			   *righttype = NULL;

	/*** Initialize our left and right argument types ***/

	if (lfirst(arguments) != NULL) {
	if (lfirst(arguments) != NULL)
		lefttype = strVal(lfirst(arguments));
	}
	if (lsecond(arguments) != NULL) {
	if (lsecond(arguments) != NULL)
		righttype = strVal(lsecond(arguments));
	}

	/*** Attempt to fetch the left oid, if specified ***/

	if (lefttype != NULL) {
	if (lefttype != NULL)
	{
		leftoid = TypeGet(lefttype, &defined);
		if (!OidIsValid(leftoid)) {
		if (!OidIsValid(leftoid))
			elog(ERROR, "left type '%s' does not exist", lefttype);
		}
	}

	/*** Attempt to fetch the right oid, if specified ***/

	if (righttype != NULL) {
	if (righttype != NULL)
	{
		rightoid = TypeGet(righttype, &defined);
		if (!OidIsValid(rightoid)) {
		if (!OidIsValid(rightoid))
			elog(ERROR, "right type '%s' does not exist", righttype);
		}
	}

	/*** Determine operator type ***/

	if (OidIsValid(leftoid) && (OidIsValid(rightoid))) oprtype = 'b';
	else if (OidIsValid(leftoid)) oprtype = 'l';
	else if (OidIsValid(rightoid)) oprtype = 'r';
	else elog(ERROR, "operator '%s' is of an illegal type'", opername);
	if (OidIsValid(leftoid) && (OidIsValid(rightoid)))
		oprtype = 'b';
	else if (OidIsValid(leftoid))
		oprtype = 'l';
	else if (OidIsValid(rightoid))
		oprtype = 'r';
	else
		elog(ERROR, "operator '%s' is of an illegal type'", opername);

	/*** Attempt to fetch the operator oid ***/

@ -711,29 +756,28 @@ static void CommentOperator(char *opername, List *arguments, char *comment) {
									 ObjectIdGetDatum(leftoid),
									 ObjectIdGetDatum(rightoid),
									 CharGetDatum(oprtype));
	if (!HeapTupleIsValid(optuple)) {
	if (!HeapTupleIsValid(optuple))
		elog(ERROR, "operator '%s' does not exist", opername);
	}

	oid = optuple->t_data->t_oid;

	/*** Valid user's ability to comment on this operator ***/

#ifndef NO_SECURITY
#ifndef NO_SECURITY
	user = GetPgUserName();
	if (!pg_ownercheck(user, (char *) ObjectIdGetDatum(oid), OPEROID)) {
	if (!pg_ownercheck(user, (char *) ObjectIdGetDatum(oid), OPEROID))
	{
		elog(ERROR, "you are not permitted to comment on operator '%s'",
			 opername);
	}
#endif
#endif

	/*** Get the procedure associated with the operator ***/

	data = (Form_pg_operator) GETSTRUCT(optuple);
	oid = regproctooid(data->oprcode);
	if (oid == InvalidOid) {
	if (oid == InvalidOid)
		elog(ERROR, "operator '%s' does not have an underlying function", opername);
	}

	/*** Call CreateComments() to create/drop the comments ***/

@ -752,10 +796,13 @@ static void CommentOperator(char *opername, List *arguments, char *comment) {
 *------------------------------------------------------------------
 */

static void CommentTrigger(char *trigger, char *relname, char *comment) {
static void
CommentTrigger(char *trigger, char *relname, char *comment)
{

	Form_pg_trigger data;
	Relation	pg_trigger, relation;
	Relation	pg_trigger,
				relation;
	HeapTuple	triggertuple;
	HeapScanDesc scan;
	ScanKeyData entry;
@ -764,13 +811,14 @@ static void CommentTrigger(char *trigger, char *relname, char *comment) {

	/*** First, validate the user's action ***/

#ifndef NO_SECURITY
#ifndef NO_SECURITY
	user = GetPgUserName();
	if (!pg_ownercheck(user, relname, RELNAME)) {
	if (!pg_ownercheck(user, relname, RELNAME))
	{
		elog(ERROR, "you are not permitted to comment on trigger '%s' %s '%s'",
			 trigger, "defined for relation", relname);
	}
#endif
#endif

	/*** Now, fetch the trigger oid from pg_trigger ***/

@ -780,9 +828,11 @@ static void CommentTrigger(char *trigger, char *relname, char *comment) {
						   F_OIDEQ, RelationGetRelid(relation));
	scan = heap_beginscan(pg_trigger, 0, SnapshotNow, 1, &entry);
	triggertuple = heap_getnext(scan, 0);
	while (HeapTupleIsValid(triggertuple)) {
	while (HeapTupleIsValid(triggertuple))
	{
		data = (Form_pg_trigger) GETSTRUCT(triggertuple);
		if (namestrcmp(&(data->tgname), trigger) == 0) {
		if (namestrcmp(&(data->tgname), trigger) == 0)
		{
			oid = triggertuple->t_data->t_oid;
			break;
		}
@ -791,7 +841,8 @@ static void CommentTrigger(char *trigger, char *relname, char *comment) {

	/*** If no trigger exists for the relation specified, notify user ***/

	if (oid == InvalidOid) {
	if (oid == InvalidOid)
	{
		elog(ERROR, "trigger '%s' defined for relation '%s' does not exist",
			 trigger, relname);
	}
@ -7,7 +7,7 @@
 *
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.103 2000/03/23 21:38:58 momjian Exp $
 * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.104 2000/04/12 17:14:58 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -77,8 +77,10 @@ static bool fe_eof;
 * encoding, if needed, can be set once at the start of the copy operation.
 */
static StringInfoData attribute_buf;

#ifdef MULTIBYTE
static int	encoding;

#endif


@ -195,6 +197,7 @@ CopyPeekChar(FILE *fp)
	if (!fp)
	{
		int			ch = pq_peekbyte();

		if (ch == EOF)
			fe_eof = true;
		return ch;
@ -280,8 +283,8 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
	 * Open and lock the relation, using the appropriate lock type.
	 *
	 * Note: AccessExclusive is probably overkill for copying to a relation,
	 * but that's what the code grabs on the rel's indices. If this lock is
	 * relaxed then I think the index locks need relaxed also.
	 * but that's what the code grabs on the rel's indices. If this lock
	 * is relaxed then I think the index locks need relaxed also.
	 */
	rel = heap_openr(relname, (from ? AccessExclusiveLock : AccessShareLock));

@ -369,9 +372,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
	}

	if (!pipe)
	{
		FreeFile(fp);
	}
	else if (!from)
	{
		if (!binary)
@ -383,8 +384,9 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,

	/*
	 * Close the relation. If reading, we can release the AccessShareLock
	 * we got; if writing, we should hold the lock until end of transaction
	 * to ensure that updates will be committed before lock is released.
	 * we got; if writing, we should hold the lock until end of
	 * transaction to ensure that updates will be committed before lock is
	 * released.
	 */
	heap_close(rel, (from ? NoLock : AccessShareLock));
}
@ -399,8 +401,10 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null_p

	int32		attr_count,
				i;

#ifdef _DROP_COLUMN_HACK__
	bool	   *valid;

#endif	 /* _DROP_COLUMN_HACK__ */
	Form_pg_attribute *attr;
	FmgrInfo   *out_functions;
@ -765,7 +769,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null

	while (!done)
	{
		if (QueryCancel) {
		if (QueryCancel)
		{
			lineno = 0;
			CancelQuery();
		}
@ -937,7 +942,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim, char *null
					 */
					slot->val = tuple;
					/* SetSlotContents(slot, tuple); */
					if (! ExecQual((List *) indexPred[i], econtext, false))
					if (!ExecQual((List *) indexPred[i], econtext, false))
						continue;
#endif	 /* OMIT_PARTIAL_INDEX */
				}
@ -1189,6 +1194,7 @@ static char *
CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_print)
{
	int			c;

#ifdef MULTIBYTE
	int			mblen;
	unsigned char s[2];
@ -1222,9 +1228,7 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_
			break;
		}
		if (strchr(delim, c))
		{
			break;
		}
		if (c == '\\')
		{
			c = CopyGetChar(fp);
@ -1272,9 +1276,12 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_
						c = val & 0377;
					}
					break;
				/* This is a special hack to parse `\N' as <backslash-N>
				   rather then just 'N' to provide compatibility with
				   the default NULL output. -- pe */

					/*
					 * This is a special hack to parse `\N' as
					 * <backslash-N> rather then just 'N' to provide
					 * compatibility with the default NULL output. -- pe
					 */
				case 'N':
					appendStringInfoCharMacro(&attribute_buf, '\\');
					c = 'N';
@ -1332,7 +1339,7 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim, int *newline, char *null_
	}
#endif

	if (strcmp(attribute_buf.data, null_print)==0)
	if (strcmp(attribute_buf.data, null_print) == 0)
		*isnull = true;

	return attribute_buf.data;
@ -1346,10 +1353,12 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
{
	char	   *string;
	char		c;

#ifdef MULTIBYTE
	char	   *string_start;
	int			mblen;
	int			i;

#endif

#ifdef MULTIBYTE
@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
 * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.56 2000/01/29 16:58:34 petere Exp $
 * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.57 2000/04/12 17:14:58 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@ -145,14 +145,14 @@ DefineRelation(CreateStmt *stmt, char relkind)
	StoreCatalogInheritance(relationId, inheritList);

	/*
	 * Now add any newly specified column default values
	 * and CHECK constraints to the new relation. These are passed
	 * to us in the form of raw parsetrees; we need to transform
	 * them to executable expression trees before they can be added.
	 * The most convenient way to do that is to apply the parser's
	 * transformExpr routine, but transformExpr doesn't work unless
	 * we have a pre-existing relation. So, the transformation has
	 * to be postponed to this final step of CREATE TABLE.
	 * Now add any newly specified column default values and CHECK
	 * constraints to the new relation. These are passed to us in the
	 * form of raw parsetrees; we need to transform them to executable
	 * expression trees before they can be added. The most convenient way
	 * to do that is to apply the parser's transformExpr routine, but
	 * transformExpr doesn't work unless we have a pre-existing relation.
	 * So, the transformation has to be postponed to this final step of
	 * CREATE TABLE.
	 *
	 * First, scan schema to find new column defaults.
	 */
@ -181,21 +181,24 @@ DefineRelation(CreateStmt *stmt, char relkind)
		return;

	/*
	 * We must bump the command counter to make the newly-created
	 * relation tuple visible for opening.
	 * We must bump the command counter to make the newly-created relation
	 * tuple visible for opening.
	 */
	CommandCounterIncrement();

	/*
	 * Open the new relation.
	 */
	rel = heap_openr(relname, AccessExclusiveLock);

	/*
	 * Parse and add the defaults/constraints.
	 */
	AddRelationRawConstraints(rel, rawDefaults, stmt->constraints);

	/*
	 * Clean up. We keep lock on new relation (although it shouldn't
	 * be visible to anyone else anyway, until commit).
	 * Clean up. We keep lock on new relation (although it shouldn't be
	 * visible to anyone else anyway, until commit).
	 */
	heap_close(rel, NoLock);
}
@ -284,6 +287,7 @@ MergeAttributes(List *schema, List *supers, List **supconstr)

		foreach(rest, lnext(entry))
		{

			/*
			 * check for duplicated names within the new relation
			 */
@ -357,6 +361,7 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
					 attributeName);

			if (checkAttrExists(attributeName, attributeType, inhSchema))

				/*
				 * this entry already exists
				 */
@ -642,8 +647,9 @@ checkAttrExists(const char *attributeName, const char *attributeType, List *sche
	{
		ColumnDef  *def = lfirst(s);

		if (strcmp(attributeName, def->colname)==0)
		if (strcmp(attributeName, def->colname) == 0)
		{

			/*
			 * attribute exists. Make sure the types are the same.
			 */
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.52 2000/03/26 18:32:28 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.53 2000/04/12 17:14:58 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -45,10 +45,10 @@
|
||||
|
||||
/* non-export function prototypes */
|
||||
static bool
|
||||
get_user_info(const char *name, int4 *use_sysid, bool *use_super, bool *use_createdb);
|
||||
get_user_info(const char *name, int4 *use_sysid, bool *use_super, bool *use_createdb);
|
||||
|
||||
static bool
|
||||
get_db_info(const char *name, char *dbpath, Oid *dbIdP, int4 *ownerIdP);
|
||||
get_db_info(const char *name, char *dbpath, Oid *dbIdP, int4 *ownerIdP);
|
||||
|
||||
|
||||
|
||||
@ -63,12 +63,13 @@ createdb(const char *dbname, const char *dbpath, int encoding)
|
||||
char *loc;
|
||||
char locbuf[512];
|
||||
int4 user_id;
|
||||
bool use_super, use_createdb;
|
||||
bool use_super,
|
||||
use_createdb;
|
||||
Relation pg_database_rel;
|
||||
HeapTuple tuple;
|
||||
TupleDesc pg_database_dsc;
|
||||
Datum new_record[Natts_pg_database];
|
||||
char new_record_nulls[Natts_pg_database] = { ' ', ' ', ' ', ' ' };
|
||||
char new_record_nulls[Natts_pg_database] = {' ', ' ', ' ', ' '};
|
||||
|
||||
if (!get_user_info(GetPgUserName(), &user_id, &use_super, &use_createdb))
|
||||
elog(ERROR, "current user name is invalid");
|
||||
@ -84,7 +85,7 @@ createdb(const char *dbname, const char *dbpath, int encoding)
|
||||
elog(ERROR, "CREATE DATABASE: may not be called in a transaction block");
|
||||
|
||||
/* Generate directory name for the new database */
|
||||
if (dbpath == NULL || strcmp(dbpath, dbname)==0)
|
||||
if (dbpath == NULL || strcmp(dbpath, dbname) == 0)
|
||||
strcpy(locbuf, dbname);
|
||||
else
|
||||
snprintf(locbuf, sizeof(locbuf), "%s/%s", dbpath, dbname);
|
||||
@ -97,8 +98,10 @@ createdb(const char *dbname, const char *dbpath, int encoding)
|
||||
"This may be due to a character that is not allowed or because the chosen "
|
||||
"path isn't permitted for databases", dbpath);
|
||||
|
||||
/* close virtual file descriptors so the kernel has more available for
|
||||
the system() calls */
|
||||
/*
|
||||
* close virtual file descriptors so the kernel has more available for
|
||||
* the system() calls
|
||||
*/
|
||||
closeAllVfds();
|
||||
|
||||
/*
|
||||
@ -108,10 +111,10 @@ createdb(const char *dbname, const char *dbpath, int encoding)
|
||||
pg_database_dsc = RelationGetDescr(pg_database_rel);
|
||||
|
||||
/* Form tuple */
|
||||
new_record[Anum_pg_database_datname-1] = NameGetDatum(namein(dbname));
|
||||
new_record[Anum_pg_database_datdba-1] = Int32GetDatum(user_id);
|
||||
new_record[Anum_pg_database_encoding-1] = Int32GetDatum(encoding);
|
||||
new_record[Anum_pg_database_datpath-1] = PointerGetDatum(textin(locbuf));
|
||||
new_record[Anum_pg_database_datname - 1] = NameGetDatum(namein(dbname));
|
||||
new_record[Anum_pg_database_datdba - 1] = Int32GetDatum(user_id);
|
||||
new_record[Anum_pg_database_encoding - 1] = Int32GetDatum(encoding);
|
||||
new_record[Anum_pg_database_datpath - 1] = PointerGetDatum(textin(locbuf));
|
||||
|
||||
tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
|
||||
|
||||
@ -124,7 +127,8 @@ createdb(const char *dbname, const char *dbpath, int encoding)
* Update indexes (there aren't any currently)
*/
#ifdef Num_pg_database_indices
if (RelationGetForm(pg_database_rel)->relhasindex) {
if (RelationGetForm(pg_database_rel)->relhasindex)
{
Relation idescs[Num_pg_database_indices];

CatalogOpenIndices(Num_pg_database_indices,
@ -139,14 +143,15 @@ createdb(const char *dbname, const char *dbpath, int encoding)

/* Copy the template database to the new location */

if (mkdir(loc, S_IRWXU) != 0) {
if (mkdir(loc, S_IRWXU) != 0)
elog(ERROR, "CREATE DATABASE: unable to create database directory '%s': %s", loc, strerror(errno));
}

snprintf(buf, sizeof(buf), "cp %s%cbase%ctemplate1%c* '%s'",
DataDir, SEP_CHAR, SEP_CHAR, SEP_CHAR, loc);
if (system(buf) != 0) {
if (system(buf) != 0)
{
int ret;

snprintf(buf, sizeof(buf), "rm -rf '%s'", loc);
ret = system(buf);
if (ret == 0)
@ -165,7 +170,8 @@ createdb(const char *dbname, const char *dbpath, int encoding)
void
dropdb(const char *dbname)
{
int4 user_id, db_owner;
int4 user_id,
db_owner;
bool use_super;
Oid db_id;
char *path,
@ -203,25 +209,28 @@ dropdb(const char *dbname)
"This may be due to a character that is not allowed or because the chosen "
"path isn't permitted for databases", path);

/* close virtual file descriptors so the kernel has more available for
the system() calls */
/*
* close virtual file descriptors so the kernel has more available for
* the system() calls
*/
closeAllVfds();

/*
* Obtain exclusive lock on pg_database. We need this to ensure
* that no new backend starts up in the target database while we
* are deleting it. (Actually, a new backend might still manage to
* start up, because it will read pg_database without any locking
* to discover the database's OID. But it will detect its error
* in ReverifyMyDatabase and shut down before any serious damage
* is done. See postinit.c.)
* Obtain exclusive lock on pg_database. We need this to ensure that
* no new backend starts up in the target database while we are
* deleting it. (Actually, a new backend might still manage to start
* up, because it will read pg_database without any locking to
* discover the database's OID. But it will detect its error in
* ReverifyMyDatabase and shut down before any serious damage is done.
* See postinit.c.)
*/
pgdbrel = heap_openr(DatabaseRelationName, AccessExclusiveLock);

/*
* Check for active backends in the target database.
*/
if (DatabaseHasActiveBackends(db_id)) {
if (DatabaseHasActiveBackends(db_id))
{
heap_close(pgdbrel, AccessExclusiveLock);
elog(ERROR, "DROP DATABASE: Database \"%s\" is being accessed by other users", dbname);
}
@ -238,8 +247,11 @@ dropdb(const char *dbname)
if (!HeapTupleIsValid(tup))
{
heap_close(pgdbrel, AccessExclusiveLock);
/* This error should never come up since the existence of the
database is checked earlier */

/*
* This error should never come up since the existence of the
* database is checked earlier
*/
elog(ERROR, "DROP DATABASE: Database \"%s\" doesn't exist despite earlier reports to the contrary",
dbname);
}
@ -289,7 +301,7 @@ get_db_info(const char *name, char *dbpath, Oid *dbIdP, int4 *ownerIdP)

AssertArg(name);

relation = heap_openr(DatabaseRelationName, AccessExclusiveLock/*???*/);
relation = heap_openr(DatabaseRelationName, AccessExclusiveLock /* ??? */ );

ScanKeyEntryInitialize(&scanKey, 0, Anum_pg_database_datname,
F_NAMEEQ, NameGetDatum(name));
@ -354,7 +366,7 @@ get_db_info(const char *name, char *dbpath, Oid *dbIdP, int4 *ownerIdP)

static bool
get_user_info(const char * name, int4 *use_sysid, bool *use_super, bool *use_createdb)
get_user_info(const char *name, int4 *use_sysid, bool *use_super, bool *use_createdb)
{
HeapTuple utup;

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.39 2000/04/07 13:39:24 thomas Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.40 2000/04/12 17:14:58 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -143,6 +143,7 @@ compute_full_attributes(List *parameters, int32 *byte_pct_p,
*canCache_p = true;
else if (strcasecmp(param->defname, "trusted") == 0)
{

/*
* we don't have untrusted functions any more. The 4.2
* implementation is lousy anyway so I took it out. -ay 10/94
@ -233,12 +234,14 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
*/

bool returnsSet;

/* The function returns a set of values, as opposed to a singleton. */

bool lanisPL = false;

/*
* The following are optional user-supplied attributes of the function.
* The following are optional user-supplied attributes of the
* function.
*/
int32 byte_pct,
perbyte_cpu,
@ -316,8 +319,8 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
interpret_AS_clause(languageName, stmt->as, &prosrc_str, &probin_str);

/*
* And now that we have all the parameters, and know we're
* permitted to do so, go ahead and create the function.
* And now that we have all the parameters, and know we're permitted
* to do so, go ahead and create the function.
*/
ProcedureCreate(stmt->funcname,
returnsSet,
@ -378,7 +381,7 @@ DefineOperator(char *oprName,
if (!strcasecmp(defel->defname, "leftarg"))
{
if ((nodeTag(defel->arg) == T_TypeName)
&& (((TypeName *)defel->arg)->setof))
&& (((TypeName *) defel->arg)->setof))
elog(ERROR, "setof type not implemented for leftarg");

typeName1 = defGetString(defel);
@ -388,7 +391,7 @@ DefineOperator(char *oprName,
else if (!strcasecmp(defel->defname, "rightarg"))
{
if ((nodeTag(defel->arg) == T_TypeName)
&& (((TypeName *)defel->arg)->setof))
&& (((TypeName *) defel->arg)->setof))
elog(ERROR, "setof type not implemented for rightarg");

typeName2 = defGetString(defel);
@ -703,7 +706,7 @@ defGetString(DefElem *def)
if (nodeTag(def->arg) == T_String)
string = strVal(def->arg);
else if (nodeTag(def->arg) == T_TypeName)
string = ((TypeName *)def->arg)->name;
string = ((TypeName *) def->arg)->name;
else
string = NULL;
#if 0

@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.55 2000/03/14 23:06:12 thomas Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.56 2000/04/12 17:14:58 momjian Exp $
*
*/

@ -209,7 +209,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
switch (nodeTag(plan))
{
case T_IndexScan:
if (ScanDirectionIsBackward(((IndexScan *)plan)->indxorderdir))
if (ScanDirectionIsBackward(((IndexScan *) plan)->indxorderdir))
appendStringInfo(str, " Backward");
appendStringInfo(str, " using ");
i = 0;
@ -246,9 +246,9 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
int firstEntry = true;

appendStringInfo(str, " (");
foreach (c, rte->ref->attrs)
foreach(c, rte->ref->attrs)
{
if (! firstEntry)
if (!firstEntry)
{
appendStringInfo(str, ", ");
firstEntry = false;

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.22 2000/02/25 02:58:48 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.23 2000/04/12 17:14:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -608,7 +608,7 @@ RemoveIndex(char *name)
* ...
*/
void
ReindexIndex(const char *name, bool force /* currently unused */)
ReindexIndex(const char *name, bool force /* currently unused */ )
{
HeapTuple tuple;

@ -671,17 +671,24 @@ extern Oid MyDatabaseId;
void
ReindexDatabase(const char *dbname, bool force, bool all)
{
Relation relation, relationRelation;
HeapTuple usertuple, dbtuple, tuple;
Relation relation,
relationRelation;
HeapTuple usertuple,
dbtuple,
tuple;
HeapScanDesc scan;
int4 user_id, db_owner;
int4 user_id,
db_owner;
bool superuser;
Oid db_id;
char *username;
ScanKeyData scankey;
PortalVariableMemory pmem;
MemoryContext old;
int relcnt, relalc, i, oncealc = 200;
int relcnt,
relalc,
i,
oncealc = 200;
Oid *relids = (Oid *) NULL;

AssertArg(dbname);

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.45 2000/01/26 05:56:13 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.46 2000/04/12 17:14:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -156,7 +156,8 @@ SingleOpOperatorRemove(Oid typeOid)
{
key[0].sk_attno = attnums[i];
scan = heap_beginscan(rel, 0, SnapshotNow, 1, key);
while (HeapTupleIsValid(tup = heap_getnext(scan, 0))) {
while (HeapTupleIsValid(tup = heap_getnext(scan, 0)))
{

/*** This is apparently a routine not in use, but remove ***/
/*** any comments anyways ***/

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.41 2000/01/26 05:56:13 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.42 2000/04/12 17:14:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -76,8 +76,8 @@ renameatt(char *relname,
#endif

/*
* Grab an exclusive lock on the target table, which we will NOT release
* until end of transaction.
* Grab an exclusive lock on the target table, which we will NOT
* release until end of transaction.
*/
targetrelation = heap_openr(relname, AccessExclusiveLock);
relid = RelationGetRelid(targetrelation);
@ -160,6 +160,7 @@ renameatt(char *relname,
/* keep system catalog indices current */
{
Relation irelations[Num_pg_attr_indices];

CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, irelations);
CatalogIndexInsert(irelations, Num_pg_attr_indices, attrelation, oldatttup);
CatalogCloseIndices(Num_pg_attr_indices, irelations);
@ -194,8 +195,8 @@ renamerel(const char *oldrelname, const char *newrelname)
newrelname);

/*
* Grab an exclusive lock on the target table, which we will NOT release
* until end of transaction.
* Grab an exclusive lock on the target table, which we will NOT
* release until end of transaction.
*/
targetrelation = heap_openr(oldrelname, AccessExclusiveLock);

@ -211,14 +212,15 @@ renamerel(const char *oldrelname, const char *newrelname)
* they don't exist anyway. So, no warning in that case.
* ----------
*/
if (IsTransactionBlock() && ! targetrelation->rd_myxactonly)
if (IsTransactionBlock() && !targetrelation->rd_myxactonly)
elog(NOTICE, "Caution: RENAME TABLE cannot be rolled back, so don't abort now");

/*
* Flush all blocks of the relation out of the buffer pool. We need this
* because the blocks are marked with the relation's name as well as OID.
* If some backend tries to write a dirty buffer with mdblindwrt after
* we've renamed the physical file, we'll be in big trouble.
* Flush all blocks of the relation out of the buffer pool. We need
* this because the blocks are marked with the relation's name as well
* as OID. If some backend tries to write a dirty buffer with
* mdblindwrt after we've renamed the physical file, we'll be in big
* trouble.
*
* Since we hold the exclusive lock on the relation, we don't have to
* worry about more blocks being read in while we finish the rename.
@ -227,8 +229,8 @@ renamerel(const char *oldrelname, const char *newrelname)
elog(ERROR, "renamerel: unable to flush relation from buffer pool");

/*
* Make sure smgr and lower levels close the relation's files.
* (Next access to rel will reopen them.)
* Make sure smgr and lower levels close the relation's files. (Next
* access to rel will reopen them.)
*
* Note: we rely on shared cache invalidation message to make other
* backends close and re-open the files.
@ -238,14 +240,15 @@ renamerel(const char *oldrelname, const char *newrelname)
/*
* Close rel, but keep exclusive lock!
*
* Note: we don't do anything about updating the relcache entry;
* we assume it will be flushed by shared cache invalidate.
* XXX is this good enough? What if relation is myxactonly?
* Note: we don't do anything about updating the relcache entry; we
* assume it will be flushed by shared cache invalidate. XXX is this
* good enough? What if relation is myxactonly?
*/
heap_close(targetrelation, NoLock);

/*
* Find relation's pg_class tuple, and make sure newrelname isn't in use.
* Find relation's pg_class tuple, and make sure newrelname isn't in
* use.
*/
relrelation = heap_openr(RelationRelationName, RowExclusiveLock);

@ -262,8 +265,8 @@ renamerel(const char *oldrelname, const char *newrelname)
* Perform physical rename of files. If this fails, we haven't yet
* done anything irreversible.
*
* XXX smgr.c ought to provide an interface for this; doing it
* directly is bletcherous.
* XXX smgr.c ought to provide an interface for this; doing it directly
* is bletcherous.
*/
strcpy(oldpath, relpath(oldrelname));
strcpy(newpath, relpath(newrelname));

@ -410,7 +410,9 @@ init_sequence(char *caller, char *name)

if (elm != (SeqTable) NULL)
{
/* We are using a seqtable entry left over from a previous xact;

/*
* We are using a seqtable entry left over from a previous xact;
* must check for relid change.
*/
elm->rel = seqrel;
@ -424,7 +426,9 @@ init_sequence(char *caller, char *name)
}
else
{
/* Time to make a new seqtable entry. These entries live as long

/*
* Time to make a new seqtable entry. These entries live as long
* as the backend does, so we use plain malloc for them.
*/
elm = (SeqTable) malloc(sizeof(SeqTableData));

@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.62 2000/02/29 12:28:24 wieck Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.63 2000/04/12 17:14:59 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -261,10 +261,11 @@ CreateTrigger(CreateTrigStmt *stmt)
|
||||
CatalogCloseIndices(Num_pg_class_indices, ridescs);
|
||||
heap_freetuple(tuple);
|
||||
heap_close(pgrel, RowExclusiveLock);
|
||||
|
||||
/*
|
||||
* We used to try to update the rel's relcache entry here, but that's
|
||||
* fairly pointless since it will happen as a byproduct of the upcoming
|
||||
* CommandCounterIncrement...
|
||||
* fairly pointless since it will happen as a byproduct of the
|
||||
* upcoming CommandCounterIncrement...
|
||||
*/
|
||||
/* Keep lock on target rel until end of xact */
|
||||
heap_close(rel, NoLock);
|
||||
@ -337,10 +338,11 @@ DropTrigger(DropTrigStmt *stmt)
|
||||
CatalogCloseIndices(Num_pg_class_indices, ridescs);
|
||||
heap_freetuple(tuple);
|
||||
heap_close(pgrel, RowExclusiveLock);
|
||||
|
||||
/*
|
||||
* We used to try to update the rel's relcache entry here, but that's
|
||||
* fairly pointless since it will happen as a byproduct of the upcoming
|
||||
* CommandCounterIncrement...
|
||||
* fairly pointless since it will happen as a byproduct of the
|
||||
* upcoming CommandCounterIncrement...
|
||||
*/
|
||||
/* Keep lock on target rel until end of xact */
|
||||
heap_close(rel, NoLock);
|
||||
@ -360,7 +362,8 @@ RelationRemoveTriggers(Relation rel)
|
||||
|
||||
tgscan = heap_beginscan(tgrel, 0, SnapshotNow, 1, &key);
|
||||
|
||||
while (HeapTupleIsValid(tup = heap_getnext(tgscan, 0))) {
|
||||
while (HeapTupleIsValid(tup = heap_getnext(tgscan, 0)))
|
||||
{
|
||||
|
||||
/*** Delete any comments associated with this trigger ***/
|
||||
|
||||
@ -688,9 +691,9 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
|
||||
j;
|
||||
|
||||
/*
|
||||
* We need not examine the "index" data, just the trigger array itself;
|
||||
* if we have the same triggers with the same types, the derived index
|
||||
* data should match.
|
||||
* We need not examine the "index" data, just the trigger array
|
||||
* itself; if we have the same triggers with the same types, the
|
||||
* derived index data should match.
|
||||
*
|
||||
* XXX It seems possible that the same triggers could appear in different
|
||||
* orders in the two trigger arrays; do we need to handle that?
|
||||
@ -1068,7 +1071,7 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
|
||||
* Lookup if we know an individual state for this trigger
|
||||
* ----------
|
||||
*/
|
||||
foreach (sl, deftrig_trigstates)
|
||||
foreach(sl, deftrig_trigstates)
|
||||
{
|
||||
trigstate = (DeferredTriggerStatus) lfirst(sl);
|
||||
if (trigstate->dts_tgoid == tgoid)
|
||||
@ -1286,7 +1289,7 @@ deferredTriggerInvokeEvents(bool immediate_only)
|
||||
* SET CONSTRAINTS ... command finishes and calls EndQuery.
|
||||
* ----------
|
||||
*/
|
||||
foreach (el, deftrig_events)
|
||||
foreach(el, deftrig_events)
|
||||
{
|
||||
eventno++;
|
||||
|
||||
@ -1382,13 +1385,13 @@ DeferredTriggerBeginXact(void)
|
||||
* ----------
|
||||
*/
|
||||
deftrig_cxt = CreateGlobalMemory("DeferredTriggerXact");
|
||||
oldcxt = MemoryContextSwitchTo((MemoryContext)deftrig_cxt);
|
||||
oldcxt = MemoryContextSwitchTo((MemoryContext) deftrig_cxt);
|
||||
|
||||
deftrig_all_isset = deftrig_dfl_all_isset;
|
||||
deftrig_all_isdeferred = deftrig_dfl_all_isdeferred;
|
||||
|
||||
deftrig_trigstates = NIL;
|
||||
foreach (l, deftrig_dfl_trigstates)
|
||||
foreach(l, deftrig_dfl_trigstates)
|
||||
{
|
||||
dflstat = (DeferredTriggerStatus) lfirst(l);
|
||||
stat = (DeferredTriggerStatus)
|
||||
@ -1499,7 +1502,8 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
* Handle SET CONSTRAINTS ALL ...
|
||||
* ----------
|
||||
*/
|
||||
if (stmt->constraints == NIL) {
|
||||
if (stmt->constraints == NIL)
|
||||
{
|
||||
if (!IsTransactionBlock())
|
||||
{
|
||||
/* ----------
|
||||
@ -1533,7 +1537,9 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
MemoryContextSwitchTo(oldcxt);
|
||||
|
||||
return;
|
||||
} else {
|
||||
}
|
||||
else
|
||||
{
|
||||
/* ----------
|
||||
* ... inside of a transaction block
|
||||
* ----------
|
||||
@ -1578,7 +1584,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
if (hasindex)
|
||||
irel = index_openr(TriggerConstrNameIndex);
|
||||
|
||||
foreach (l, stmt->constraints)
|
||||
foreach(l, stmt->constraints)
|
||||
{
|
||||
ScanKeyData skey;
|
||||
HeapTupleData tuple;
|
||||
@ -1594,7 +1600,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
* Check that only named constraints are set explicitly
|
||||
* ----------
|
||||
*/
|
||||
if (strcmp((char *)lfirst(l), "") == 0)
|
||||
if (strcmp((char *) lfirst(l), "") == 0)
|
||||
elog(ERROR, "unnamed constraints cannot be set explicitly");
|
||||
|
||||
/* ----------
|
||||
@ -1605,7 +1611,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
(bits16) 0x0,
|
||||
(AttrNumber) 1,
|
||||
(RegProcedure) F_NAMEEQ,
|
||||
PointerGetDatum((char *)lfirst(l)));
|
||||
PointerGetDatum((char *) lfirst(l)));
|
||||
|
||||
if (hasindex)
|
||||
sd = index_beginscan(irel, false, 1, &skey);
|
||||
@ -1629,9 +1635,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
heap_fetch(tgrel, SnapshotNow, &tuple, &buffer);
|
||||
pfree(indexRes);
|
||||
if (!tuple.t_data)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
htup = &tuple;
|
||||
}
|
||||
else
|
||||
@ -1652,10 +1656,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
|
||||
pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_DEL)
|
||||
elog(ERROR, "Constraint '%s' is not deferrable",
|
||||
(char *)lfirst(l));
|
||||
(char *) lfirst(l));
|
||||
|
||||
constr_oid = htup->t_data->t_oid;
|
||||
loid = lappend(loid, (Node *)constr_oid);
|
||||
loid = lappend(loid, (Node *) constr_oid);
|
||||
found = true;
|
||||
|
||||
if (hasindex)
|
||||
@ -1667,7 +1671,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
* ----------
|
||||
*/
|
||||
if (!found)
|
||||
elog(ERROR, "Constraint '%s' does not exist", (char *)lfirst(l));
|
||||
elog(ERROR, "Constraint '%s' does not exist", (char *) lfirst(l));
|
||||
|
||||
if (hasindex)
|
||||
index_endscan(sd);
|
||||
@ -1688,10 +1692,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
*/
|
||||
oldcxt = MemoryContextSwitchTo((MemoryContext) deftrig_gcxt);
|
||||
|
||||
foreach (l, loid)
|
||||
foreach(l, loid)
|
||||
{
|
||||
found = false;
|
||||
foreach (ls, deftrig_dfl_trigstates)
|
||||
foreach(ls, deftrig_dfl_trigstates)
|
||||
{
|
||||
state = (DeferredTriggerStatus) lfirst(ls);
|
||||
if (state->dts_tgoid == (Oid) lfirst(l))
|
||||
@ -1716,7 +1720,9 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
MemoryContextSwitchTo(oldcxt);
|
||||
|
||||
return;
|
||||
} else {
|
||||
}
|
||||
else
|
||||
{
|
||||
/* ----------
|
||||
* Inside of a transaction block set the trigger
|
||||
* states of individual triggers on transaction level.
|
||||
@ -1724,10 +1730,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
|
||||
*/
|
||||
oldcxt = MemoryContextSwitchTo((MemoryContext) deftrig_cxt);
|
||||
|
||||
foreach (l, loid)
|
||||
foreach(l, loid)
|
||||
{
|
||||
found = false;
|
||||
foreach (ls, deftrig_trigstates)
|
||||
foreach(ls, deftrig_trigstates)
|
||||
{
|
||||
state = (DeferredTriggerStatus) lfirst(ls);
|
||||
if (state->dts_tgoid == (Oid) lfirst(l))
|
||||
@ -2012,5 +2018,3 @@ DeferredTriggerSaveEvent(Relation rel, int event,
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
|
@ -6,7 +6,7 @@
|
||||
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: user.c,v 1.51 2000/03/15 07:02:56 tgl Exp $
|
||||
* $Id: user.c,v 1.52 2000/04/12 17:14:59 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -81,24 +81,33 @@ write_password_file(Relation rel)
|
||||
scan = heap_beginscan(rel, false, SnapshotSelf, 0, NULL);
|
||||
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
|
||||
{
|
||||
Datum datum_n, datum_p, datum_v;
|
||||
bool null_n, null_p, null_v;
|
||||
Datum datum_n,
|
||||
datum_p,
|
||||
datum_v;
|
||||
bool null_n,
|
||||
null_p,
|
||||
null_v;
|
||||
|
||||
datum_n = heap_getattr(tuple, Anum_pg_shadow_usename, dsc, &null_n);
|
||||
if (null_n)
|
||||
continue; /* don't allow empty users */
|
||||
datum_p = heap_getattr(tuple, Anum_pg_shadow_passwd, dsc, &null_p);
|
||||
/* It could be argued that people having a null password
|
||||
shouldn't be allowed to connect, because they need
|
||||
to have a password set up first. If you think assuming
|
||||
an empty password in that case is better, erase the following line. */
|
||||
|
||||
/*
|
||||
* It could be argued that people having a null password shouldn't
|
||||
* be allowed to connect, because they need to have a password set
|
||||
* up first. If you think assuming an empty password in that case
|
||||
* is better, erase the following line.
|
||||
*/
|
||||
if (null_p)
|
||||
continue;
|
||||
datum_v = heap_getattr(tuple, Anum_pg_shadow_valuntil, dsc, &null_v);
|
||||
|
||||
/* These fake entries are not really necessary. To remove them, the parser
|
||||
in backend/libpq/crypt.c would need to be adjusted. Initdb might also
|
||||
need adjustments. */
|
||||
/*
|
||||
* These fake entries are not really necessary. To remove them,
|
||||
* the parser in backend/libpq/crypt.c would need to be adjusted.
|
||||
* Initdb might also need adjustments.
|
||||
*/
|
||||
fprintf(fp,
|
||||
"%s"
|
||||
CRYPT_PWD_FILE_SEPSTR
|
||||
@ -116,8 +125,9 @@ write_password_file(Relation rel)
|
||||
CRYPT_PWD_FILE_SEPSTR
|
||||
"%s\n",
|
||||
nameout(DatumGetName(datum_n)),
|
||||
null_p ? "" : textout((text*)datum_p),
|
||||
null_v ? "\\N" : nabstimeout((AbsoluteTime)datum_v) /* this is how the parser wants it */
|
||||
null_p ? "" : textout((text *) datum_p),
|
||||
null_v ? "\\N" : nabstimeout((AbsoluteTime) datum_v) /* this is how the
|
||||
* parser wants it */
|
||||
);
|
||||
if (ferror(fp))
|
||||
elog(ERROR, "%s: %s", tempname, strerror(errno));
|
||||
@ -127,7 +137,8 @@ write_password_file(Relation rel)
|
||||
FreeFile(fp);
|
||||
|
||||
/*
|
||||
* And rename the temp file to its final name, deleting the old pg_pwd.
|
||||
* And rename the temp file to its final name, deleting the old
|
||||
* pg_pwd.
|
||||
*/
|
||||
rename(tempname, filename);
|
||||
|
||||
@ -150,12 +161,13 @@ HeapTuple
|
||||
update_pg_pwd(void)
|
||||
{
|
||||
Relation rel = heap_openr(ShadowRelationName, AccessExclusiveLock);
|
||||
|
||||
write_password_file(rel);
|
||||
heap_close(rel, AccessExclusiveLock);
|
||||
|
||||
/*
|
||||
* This is a trigger, so clean out the information provided by
|
||||
* the trigger manager.
|
||||
* This is a trigger, so clean out the information provided by the
|
||||
* trigger manager.
|
||||
*/
|
||||
CurrentTriggerData = NULL;
|
||||
return NULL;
|
||||
@ -190,19 +202,20 @@ CreateUser(CreateUserStmt *stmt)
|
||||
if (!superuser())
|
||||
elog(ERROR, "CREATE USER: permission denied");
|
||||
|
||||
/* The reason for the following is this:
|
||||
* If you start a transaction block, create a user, then roll back the
|
||||
* transaction, the pg_pwd won't get rolled back due to a bug in the
|
||||
* Unix file system ( :}). Hence this is in the interest of security.
|
||||
/*
|
||||
* The reason for the following is this: If you start a transaction
|
||||
* block, create a user, then roll back the transaction, the pg_pwd
|
||||
* won't get rolled back due to a bug in the Unix file system ( :}).
|
||||
* Hence this is in the interest of security.
|
||||
*/
|
||||
if (IsTransactionBlock())
|
||||
elog(ERROR, "CREATE USER: may not be called in a transaction block");
|
||||
|
||||
/*
|
||||
* Scan the pg_shadow relation to be certain the user or id doesn't already
|
||||
* exist. Note we secure exclusive lock, because we also need to be
|
||||
* sure of what the next usesysid should be, and we need to protect
|
||||
* our update of the flat password file.
|
||||
* Scan the pg_shadow relation to be certain the user or id doesn't
|
||||
* already exist. Note we secure exclusive lock, because we also need
|
||||
* to be sure of what the next usesysid should be, and we need to
|
||||
* protect our update of the flat password file.
|
||||
*/
|
||||
pg_shadow_rel = heap_openr(ShadowRelationName, AccessExclusiveLock);
|
||||
pg_shadow_dsc = RelationGetDescr(pg_shadow_rel);
|
||||
@ -218,8 +231,9 @@ CreateUser(CreateUserStmt *stmt)
|
||||
|
||||
datum = heap_getattr(tuple, Anum_pg_shadow_usesysid, pg_shadow_dsc, &null);
|
||||
if (havesysid) /* customized id wanted */
|
||||
sysid_exists = datum && !null && ((int)datum == stmt->sysid);
|
||||
else /* pick 1 + max */
|
||||
sysid_exists = datum && !null && ((int) datum == stmt->sysid);
|
||||
else
|
||||
/* pick 1 + max */
|
||||
{
|
||||
if ((int) datum > max_id)
|
||||
max_id = (int) datum;
|
||||
@ -240,32 +254,33 @@ CreateUser(CreateUserStmt *stmt)
|
||||
/*
|
||||
* Build a tuple to insert
|
||||
*/
|
||||
new_record[Anum_pg_shadow_usename-1] = PointerGetDatum(namein(stmt->user)); /* this truncated properly */
|
||||
new_record[Anum_pg_shadow_usesysid-1] = Int32GetDatum(havesysid ? stmt->sysid : max_id + 1);
|
||||
new_record[Anum_pg_shadow_usename - 1] = PointerGetDatum(namein(stmt->user)); /* this truncated
|
||||
* properly */
|
||||
new_record[Anum_pg_shadow_usesysid - 1] = Int32GetDatum(havesysid ? stmt->sysid : max_id + 1);
|
||||
|
||||
AssertState(BoolIsValid(stmt->createdb));
|
||||
new_record[Anum_pg_shadow_usecreatedb-1] = (Datum)(stmt->createdb);
|
||||
new_record[Anum_pg_shadow_usetrace-1] = (Datum)(false);
|
||||
new_record[Anum_pg_shadow_usecreatedb - 1] = (Datum) (stmt->createdb);
|
||||
new_record[Anum_pg_shadow_usetrace - 1] = (Datum) (false);
|
||||
AssertState(BoolIsValid(stmt->createuser));
|
||||
new_record[Anum_pg_shadow_usesuper-1] = (Datum)(stmt->createuser);
|
||||
new_record[Anum_pg_shadow_usesuper - 1] = (Datum) (stmt->createuser);
|
||||
/* superuser gets catupd right by default */
|
||||
new_record[Anum_pg_shadow_usecatupd-1] = (Datum)(stmt->createuser);
|
||||
new_record[Anum_pg_shadow_usecatupd - 1] = (Datum) (stmt->createuser);
|
||||
|
||||
if (stmt->password)
|
||||
new_record[Anum_pg_shadow_passwd-1] = PointerGetDatum(textin(stmt->password));
|
||||
new_record[Anum_pg_shadow_passwd - 1] = PointerGetDatum(textin(stmt->password));
|
||||
if (stmt->validUntil)
|
||||
new_record[Anum_pg_shadow_valuntil-1] = PointerGetDatum(nabstimein(stmt->validUntil));
|
||||
new_record[Anum_pg_shadow_valuntil - 1] = PointerGetDatum(nabstimein(stmt->validUntil));
|
||||
|
||||
new_record_nulls[Anum_pg_shadow_usename-1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usesysid-1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usename - 1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usesysid - 1] = ' ';
|
||||
|
||||
new_record_nulls[Anum_pg_shadow_usecreatedb-1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usetrace-1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usesuper-1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usecatupd-1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usecreatedb - 1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usetrace - 1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usesuper - 1] = ' ';
|
||||
new_record_nulls[Anum_pg_shadow_usecatupd - 1] = ' ';
|
||||
|
||||
new_record_nulls[Anum_pg_shadow_passwd-1] = stmt->password ? ' ' : 'n';
|
||||
new_record_nulls[Anum_pg_shadow_valuntil-1] = stmt->validUntil ? ' ' : 'n';
|
||||
new_record_nulls[Anum_pg_shadow_passwd - 1] = stmt->password ? ' ' : 'n';
|
||||
new_record_nulls[Anum_pg_shadow_valuntil - 1] = stmt->validUntil ? ' ' : 'n';
|
||||
|
||||
tuple = heap_formtuple(pg_shadow_dsc, new_record, new_record_nulls);
|
||||
Assert(tuple);
|
||||
@ -279,7 +294,8 @@ CreateUser(CreateUserStmt *stmt)
|
||||
/*
|
||||
* Update indexes
|
||||
*/
|
||||
if (RelationGetForm(pg_shadow_rel)->relhasindex) {
|
||||
if (RelationGetForm(pg_shadow_rel)->relhasindex)
|
||||
{
|
||||
Relation idescs[Num_pg_shadow_indices];
|
||||
|
||||
CatalogOpenIndices(Num_pg_shadow_indices,
|
||||
@ -297,9 +313,10 @@ CreateUser(CreateUserStmt *stmt)
|
||||
{
|
||||
AlterGroupStmt ags;
|
||||
|
||||
ags.name = strVal(lfirst(item)); /* the group name to add this in */
|
||||
ags.name = strVal(lfirst(item)); /* the group name to add
|
||||
* this in */
|
||||
ags.action = +1;
|
||||
ags.listUsers = lcons((void*)makeInteger(havesysid ? stmt->sysid : max_id + 1), NIL);
|
||||
ags.listUsers = lcons((void *) makeInteger(havesysid ? stmt->sysid : max_id + 1), NIL);
|
||||
AlterGroup(&ags, "CREATE USER");
|
||||
}
|
||||
|
||||
@ -307,6 +324,7 @@ CreateUser(CreateUserStmt *stmt)
|
||||
* Write the updated pg_shadow data to the flat password file.
|
||||
*/
|
||||
write_password_file(pg_shadow_rel);
|
||||
|
||||
/*
|
||||
* Now we can clean up.
|
||||
*/
|
||||
@ -325,7 +343,8 @@ AlterUser(AlterUserStmt *stmt)
|
||||
char new_record_nulls[Natts_pg_shadow];
|
||||
Relation pg_shadow_rel;
|
||||
TupleDesc pg_shadow_dsc;
|
||||
HeapTuple tuple, new_tuple;
|
||||
HeapTuple tuple,
|
||||
new_tuple;
|
||||
bool null;
|
||||
|
||||
if (stmt->password)
|
||||
@ -333,8 +352,8 @@ AlterUser(AlterUserStmt *stmt)
|
||||
|
||||
/* must be superuser or just want to change your own password */
|
||||
if (!superuser() &&
|
||||
!(stmt->createdb==0 && stmt->createuser==0 && !stmt->validUntil
|
||||
&& stmt->password && strcmp(GetPgUserName(), stmt->user)==0))
|
||||
!(stmt->createdb == 0 && stmt->createuser == 0 && !stmt->validUntil
|
||||
&& stmt->password && strcmp(GetPgUserName(), stmt->user) == 0))
|
||||
elog(ERROR, "ALTER USER: permission denied");
|
||||
|
||||
/* see comments in create user */
|
||||
@ -342,9 +361,9 @@ AlterUser(AlterUserStmt *stmt)
|
||||
elog(ERROR, "ALTER USER: may not be called in a transaction block");
|
||||
|
||||
/*
|
||||
* Scan the pg_shadow relation to be certain the user exists.
|
||||
* Note we secure exclusive lock to protect our update of the
|
||||
* flat password file.
|
||||
* Scan the pg_shadow relation to be certain the user exists. Note we
|
||||
* secure exclusive lock to protect our update of the flat password
|
||||
* file.
|
||||
*/
|
||||
pg_shadow_rel = heap_openr(ShadowRelationName, AccessExclusiveLock);
|
||||
pg_shadow_dsc = RelationGetDescr(pg_shadow_rel);
|
||||
@ -361,80 +380,80 @@ AlterUser(AlterUserStmt *stmt)
|
||||
/*
|
||||
* Build a tuple to update, perusing the information just obtained
|
||||
*/
|
||||
new_record[Anum_pg_shadow_usename-1] = PointerGetDatum(namein(stmt->user));
|
||||
new_record_nulls[Anum_pg_shadow_usename-1] = ' ';
|
||||
new_record[Anum_pg_shadow_usename - 1] = PointerGetDatum(namein(stmt->user));
|
||||
new_record_nulls[Anum_pg_shadow_usename - 1] = ' ';
|
||||
|
||||
/* sysid - leave as is */
|
||||
new_record[Anum_pg_shadow_usesysid-1] = heap_getattr(tuple, Anum_pg_shadow_usesysid, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usesysid-1] = null ? 'n' : ' ';
|
||||
new_record[Anum_pg_shadow_usesysid - 1] = heap_getattr(tuple, Anum_pg_shadow_usesysid, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usesysid - 1] = null ? 'n' : ' ';
|
||||
|
||||
/* createdb */
|
||||
if (stmt->createdb == 0)
|
||||
{
|
||||
/* don't change */
|
||||
new_record[Anum_pg_shadow_usecreatedb-1] = heap_getattr(tuple, Anum_pg_shadow_usecreatedb, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usecreatedb-1] = null ? 'n' : ' ';
|
||||
new_record[Anum_pg_shadow_usecreatedb - 1] = heap_getattr(tuple, Anum_pg_shadow_usecreatedb, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usecreatedb - 1] = null ? 'n' : ' ';
|
||||
}
|
||||
else
|
||||
{
|
||||
new_record[Anum_pg_shadow_usecreatedb-1] = (Datum)(stmt->createdb > 0 ? true : false);
|
||||
new_record_nulls[Anum_pg_shadow_usecreatedb-1] = ' ';
|
||||
new_record[Anum_pg_shadow_usecreatedb - 1] = (Datum) (stmt->createdb > 0 ? true : false);
|
||||
new_record_nulls[Anum_pg_shadow_usecreatedb - 1] = ' ';
|
||||
}
|
||||
|
||||
/* trace - leave as is */
|
||||
new_record[Anum_pg_shadow_usetrace-1] = heap_getattr(tuple, Anum_pg_shadow_usetrace, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usetrace-1] = null ? 'n' : ' ';
|
||||
new_record[Anum_pg_shadow_usetrace - 1] = heap_getattr(tuple, Anum_pg_shadow_usetrace, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usetrace - 1] = null ? 'n' : ' ';
|
||||
|
||||
/* createuser (superuser) */
|
||||
if (stmt->createuser == 0)
|
||||
{
|
||||
/* don't change */
|
||||
new_record[Anum_pg_shadow_usesuper-1] = heap_getattr(tuple, Anum_pg_shadow_usesuper, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usesuper-1] = null ? 'n' : ' ';
|
||||
new_record[Anum_pg_shadow_usesuper - 1] = heap_getattr(tuple, Anum_pg_shadow_usesuper, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usesuper - 1] = null ? 'n' : ' ';
|
||||
}
|
||||
else
|
||||
{
|
||||
new_record[Anum_pg_shadow_usesuper-1] = (Datum)(stmt->createuser > 0 ? true : false);
|
||||
new_record_nulls[Anum_pg_shadow_usesuper-1] = ' ';
|
||||
new_record[Anum_pg_shadow_usesuper - 1] = (Datum) (stmt->createuser > 0 ? true : false);
|
||||
new_record_nulls[Anum_pg_shadow_usesuper - 1] = ' ';
|
||||
}
|
||||
|
||||
/* catupd - set to false if someone's superuser priv is being yanked */
|
||||
if (stmt->createuser < 0)
|
||||
{
|
||||
new_record[Anum_pg_shadow_usecatupd-1] = (Datum)(false);
|
||||
new_record_nulls[Anum_pg_shadow_usecatupd-1] = ' ';
|
||||
new_record[Anum_pg_shadow_usecatupd - 1] = (Datum) (false);
|
||||
new_record_nulls[Anum_pg_shadow_usecatupd - 1] = ' ';
|
||||
}
|
||||
else
|
||||
{
|
||||
/* leave alone */
|
||||
new_record[Anum_pg_shadow_usecatupd-1] = heap_getattr(tuple, Anum_pg_shadow_usecatupd, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usecatupd-1] = null ? 'n' : ' ';
|
||||
new_record[Anum_pg_shadow_usecatupd - 1] = heap_getattr(tuple, Anum_pg_shadow_usecatupd, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_usecatupd - 1] = null ? 'n' : ' ';
|
||||
}
|
||||
|
||||
/* password */
|
||||
if (stmt->password)
|
||||
{
|
||||
new_record[Anum_pg_shadow_passwd-1] = PointerGetDatum(textin(stmt->password));
|
||||
new_record_nulls[Anum_pg_shadow_passwd-1] = ' ';
|
||||
new_record[Anum_pg_shadow_passwd - 1] = PointerGetDatum(textin(stmt->password));
|
||||
new_record_nulls[Anum_pg_shadow_passwd - 1] = ' ';
|
||||
}
|
||||
else
|
||||
{
|
||||
/* leave as is */
|
||||
new_record[Anum_pg_shadow_passwd-1] = heap_getattr(tuple, Anum_pg_shadow_passwd, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_passwd-1] = null ? 'n' : ' ';
|
||||
new_record[Anum_pg_shadow_passwd - 1] = heap_getattr(tuple, Anum_pg_shadow_passwd, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_passwd - 1] = null ? 'n' : ' ';
|
||||
}
|
||||
|
||||
/* valid until */
|
||||
if (stmt->validUntil)
|
||||
{
|
||||
new_record[Anum_pg_shadow_valuntil-1] = PointerGetDatum(nabstimein(stmt->validUntil));
|
||||
new_record_nulls[Anum_pg_shadow_valuntil-1] = ' ';
|
||||
new_record[Anum_pg_shadow_valuntil - 1] = PointerGetDatum(nabstimein(stmt->validUntil));
|
||||
new_record_nulls[Anum_pg_shadow_valuntil - 1] = ' ';
|
||||
}
|
||||
else
|
||||
{
|
||||
/* leave as is */
|
||||
new_record[Anum_pg_shadow_valuntil-1] = heap_getattr(tuple, Anum_pg_shadow_valuntil, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_valuntil-1] = null ? 'n' : ' ';
|
||||
new_record[Anum_pg_shadow_valuntil - 1] = heap_getattr(tuple, Anum_pg_shadow_valuntil, pg_shadow_dsc, &null);
|
||||
new_record_nulls[Anum_pg_shadow_valuntil - 1] = null ? 'n' : ' ';
|
||||
}
|
||||
|
||||
new_tuple = heap_formtuple(pg_shadow_dsc, new_record, new_record_nulls);
|
||||
@ -548,9 +567,9 @@ DropUser(DropUserStmt *stmt)
|
||||
heap_close(pg_rel, AccessExclusiveLock);
|
||||
|
||||
/*
|
||||
* Somehow we'd have to check for tables, views, etc. owned by the user
|
||||
* as well, but those could be spread out over all sorts of databases
|
||||
* which we don't have access to (easily).
|
||||
* Somehow we'd have to check for tables, views, etc. owned by the
|
||||
* user as well, but those could be spread out over all sorts of
|
||||
* databases which we don't have access to (easily).
|
||||
*/
|
||||
|
||||
/*
|
||||
@ -572,9 +591,11 @@ DropUser(DropUserStmt *stmt)
|
||||
|
||||
datum = heap_getattr(tmp_tuple, Anum_pg_group_groname, pg_dsc, &null);
|
||||
|
||||
ags.name = nameout(DatumGetName(datum)); /* the group name from which to try to drop the user */
|
||||
ags.name = nameout(DatumGetName(datum)); /* the group name from
|
||||
* which to try to drop
|
||||
* the user */
|
||||
ags.action = -1;
|
||||
ags.listUsers = lcons((void*)makeInteger(usesysid), NIL);
|
||||
ags.listUsers = lcons((void *) makeInteger(usesysid), NIL);
|
||||
AlterGroup(&ags, "DROP USER");
|
||||
}
|
||||
heap_endscan(scan);
|
||||
@ -643,7 +664,8 @@ CreateGroup(CreateGroupStmt *stmt)
|
||||
int max_id = 0;
|
||||
Datum new_record[Natts_pg_group];
|
||||
char new_record_nulls[Natts_pg_group];
|
||||
List *item, *newlist=NULL;
|
||||
List *item,
|
||||
*newlist = NULL;
|
||||
ArrayType *userarray;
|
||||
|
||||
/*
|
||||
@ -653,8 +675,8 @@ CreateGroup(CreateGroupStmt *stmt)
|
||||
elog(ERROR, "CREATE GROUP: permission denied");
|
||||
|
||||
/*
|
||||
* There is not real reason for this, but it makes it consistent
|
||||
* with create user, and it seems like a good idea anyway.
|
||||
* There is not real reason for this, but it makes it consistent with
|
||||
* create user, and it seems like a good idea anyway.
|
||||
*/
|
||||
if (IsTransactionBlock())
|
||||
elog(ERROR, "CREATE GROUP: may not be called in a transaction block");
|
||||
@ -674,8 +696,9 @@ CreateGroup(CreateGroupStmt *stmt)
|
||||
|
||||
datum = heap_getattr(tuple, Anum_pg_group_grosysid, pg_group_dsc, &null);
|
||||
if (stmt->sysid >= 0) /* customized id wanted */
|
||||
sysid_exists = datum && !null && ((int)datum == stmt->sysid);
|
||||
else /* pick 1 + max */
|
||||
sysid_exists = datum && !null && ((int) datum == stmt->sysid);
|
||||
else
|
||||
/* pick 1 + max */
|
||||
{
|
||||
if ((int) datum > max_id)
|
||||
max_id = (int) datum;
|
||||
@ -698,7 +721,7 @@ CreateGroup(CreateGroupStmt *stmt)
|
||||
|
||||
foreach(item, stmt->initUsers)
|
||||
{
|
||||
const char * groupuser = strVal(lfirst(item));
|
||||
const char *groupuser = strVal(lfirst(item));
|
||||
Value *v;
|
||||
|
||||
tuple = SearchSysCacheTuple(SHADOWNAME,
|
||||
@ -723,15 +746,13 @@ CreateGroup(CreateGroupStmt *stmt)
|
||||
userarray = palloc(ARR_OVERHEAD(1) + length(newlist) * sizeof(int32));
|
||||
ARR_SIZE(userarray) = ARR_OVERHEAD(1) + length(newlist) * sizeof(int32);
|
||||
ARR_FLAGS(userarray) = 0x0;
|
||||
ARR_NDIM(userarray) = 1; /* one dimensional array */
|
||||
ARR_NDIM(userarray) = 1;/* one dimensional array */
|
||||
ARR_LBOUND(userarray)[0] = 1; /* axis starts at one */
|
||||
ARR_DIMS(userarray)[0] = length(newlist); /* axis is this long */
|
||||
/* fill the array */
|
||||
i = 0;
|
||||
foreach(item, newlist)
|
||||
{
|
||||
((int*)ARR_DATA_PTR(userarray))[i++] = intVal(lfirst(item));
|
||||
}
|
||||
((int *) ARR_DATA_PTR(userarray))[i++] = intVal(lfirst(item));
|
||||
}
|
||||
else
|
||||
userarray = NULL;
|
||||
@ -739,18 +760,18 @@ CreateGroup(CreateGroupStmt *stmt)
|
||||
/*
|
||||
* Form a tuple to insert
|
||||
*/
|
||||
if (stmt->sysid >=0)
|
||||
if (stmt->sysid >= 0)
|
||||
max_id = stmt->sysid;
|
||||
else
|
||||
max_id++;
|
||||
|
||||
new_record[Anum_pg_group_groname-1] = (Datum)(stmt->name);
|
||||
new_record[Anum_pg_group_grosysid-1] = (Datum)(max_id);
|
||||
new_record[Anum_pg_group_grolist-1] = (Datum)userarray;
|
||||
new_record[Anum_pg_group_groname - 1] = (Datum) (stmt->name);
|
||||
new_record[Anum_pg_group_grosysid - 1] = (Datum) (max_id);
|
||||
new_record[Anum_pg_group_grolist - 1] = (Datum) userarray;
|
||||
|
||||
new_record_nulls[Anum_pg_group_groname-1] = ' ';
|
||||
new_record_nulls[Anum_pg_group_grosysid-1] = ' ';
|
||||
new_record_nulls[Anum_pg_group_grolist-1] = userarray ? ' ' : 'n';
|
||||
new_record_nulls[Anum_pg_group_groname - 1] = ' ';
|
||||
new_record_nulls[Anum_pg_group_grosysid - 1] = ' ';
|
||||
new_record_nulls[Anum_pg_group_grolist - 1] = userarray ? ' ' : 'n';
|
||||
|
||||
tuple = heap_formtuple(pg_group_dsc, new_record, new_record_nulls);
|
||||
|
||||
@ -762,7 +783,8 @@ CreateGroup(CreateGroupStmt *stmt)
|
||||
/*
|
||||
* Update indexes
|
||||
*/
|
||||
if (RelationGetForm(pg_group_rel)->relhasindex) {
|
||||
if (RelationGetForm(pg_group_rel)->relhasindex)
|
||||
{
|
||||
Relation idescs[Num_pg_group_indices];
|
||||
|
||||
CatalogOpenIndices(Num_pg_group_indices,
|
||||
@ -781,7 +803,7 @@ CreateGroup(CreateGroupStmt *stmt)
|
||||
* ALTER GROUP
|
||||
*/
|
||||
void
|
||||
AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
AlterGroup(AlterGroupStmt *stmt, const char *tag)
|
||||
{
|
||||
Relation pg_group_rel;
|
||||
TupleDesc pg_group_dsc;
|
||||
@ -794,8 +816,8 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
elog(ERROR, "%s: permission denied", tag);
|
||||
|
||||
/*
|
||||
* There is not real reason for this, but it makes it consistent
|
||||
* with alter user, and it seems like a good idea anyway.
|
||||
* There is not real reason for this, but it makes it consistent with
|
||||
* alter user, and it seems like a good idea anyway.
|
||||
*/
|
||||
if (IsTransactionBlock())
|
||||
elog(ERROR, "%s: may not be called in a transaction block", tag);
|
||||
@ -805,9 +827,8 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
pg_group_dsc = RelationGetDescr(pg_group_rel);
|
||||
|
||||
/*
|
||||
* Verify that group exists.
|
||||
* If we find a tuple, will take that the rest of the way and make our
|
||||
* modifications on it.
|
||||
* Verify that group exists. If we find a tuple, will take that the
|
||||
* rest of the way and make our modifications on it.
|
||||
*/
|
||||
if (!HeapTupleIsValid(group_tuple = SearchSysCacheTupleCopy(GRONAME, PointerGetDatum(stmt->name), 0, 0, 0)))
|
||||
{
|
||||
@ -816,31 +837,37 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
}
|
||||
|
||||
AssertState(stmt->action == +1 || stmt->action == -1);
|
||||
|
||||
/*
|
||||
* Now decide what to do.
|
||||
*/
|
||||
if (stmt->action == +1) /* add users, might also be invoked by create user */
|
||||
if (stmt->action == +1) /* add users, might also be invoked by
|
||||
* create user */
|
||||
{
|
||||
Datum new_record[Natts_pg_group];
|
||||
char new_record_nulls[Natts_pg_group] = { ' ', ' ', ' '};
|
||||
ArrayType *newarray, *oldarray;
|
||||
List * newlist = NULL, *item;
|
||||
char new_record_nulls[Natts_pg_group] = {' ', ' ', ' '};
|
||||
ArrayType *newarray,
|
||||
*oldarray;
|
||||
List *newlist = NULL,
|
||||
*item;
|
||||
HeapTuple tuple;
|
||||
bool null = false;
|
||||
Datum datum = heap_getattr(group_tuple, Anum_pg_group_grolist, pg_group_dsc, &null);
|
||||
int i;
|
||||
|
||||
oldarray = (ArrayType*)datum;
|
||||
oldarray = (ArrayType *) datum;
|
||||
Assert(null || ARR_NDIM(oldarray) == 1);
|
||||
/* first add the old array to the hitherto empty list */
|
||||
if (!null)
|
||||
for (i = ARR_LBOUND(oldarray)[0]; i < ARR_LBOUND(oldarray)[0] + ARR_DIMS(oldarray)[0]; i++)
|
||||
{
|
||||
int index, arrval;
|
||||
int index,
|
||||
arrval;
|
||||
Value *v;
|
||||
bool valueNull;
|
||||
|
||||
index = i;
|
||||
arrval = DatumGetInt32(array_ref(oldarray, 1, &index, true/*by value*/,
|
||||
arrval = DatumGetInt32(array_ref(oldarray, 1, &index, true /* by value */ ,
|
||||
sizeof(int), 0, &valueNull));
|
||||
v = makeInteger(arrval);
|
||||
/* filter out duplicates */
|
||||
@ -849,14 +876,14 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
}
|
||||
|
||||
/*
|
||||
* now convert the to be added usernames to sysids and add them
|
||||
* to the list
|
||||
* now convert the to be added usernames to sysids and add them to
|
||||
* the list
|
||||
*/
|
||||
foreach(item, stmt->listUsers)
|
||||
{
|
||||
Value *v;
|
||||
|
||||
if (strcmp(tag, "ALTER GROUP")==0)
|
||||
if (strcmp(tag, "ALTER GROUP") == 0)
|
||||
{
|
||||
/* Get the uid of the proposed user to add. */
|
||||
tuple = SearchSysCacheTuple(SHADOWNAME,
|
||||
@ -869,10 +896,13 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
}
|
||||
v = makeInteger(((Form_pg_shadow) GETSTRUCT(tuple))->usesysid);
|
||||
}
|
||||
else if (strcmp(tag, "CREATE USER")==0)
|
||||
else if (strcmp(tag, "CREATE USER") == 0)
|
||||
{
|
||||
/* in this case we already know the uid and it wouldn't
|
||||
be in the cache anyway yet */
|
||||
|
||||
/*
|
||||
* in this case we already know the uid and it wouldn't be
|
||||
* in the cache anyway yet
|
||||
*/
|
||||
v = lfirst(item);
|
||||
}
|
||||
else
|
||||
@ -884,8 +914,11 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
if (!member(v, newlist))
|
||||
newlist = lcons(v, newlist);
|
||||
else
|
||||
/* we silently assume here that this error will only come up
|
||||
in a ALTER GROUP statement */
|
||||
|
||||
/*
|
||||
* we silently assume here that this error will only come
|
||||
* up in a ALTER GROUP statement
|
||||
*/
|
||||
elog(NOTICE, "%s: user \"%s\" is already in group \"%s\"", tag, strVal(lfirst(item)), stmt->name);
|
||||
}
|
||||
|
||||
@ -898,22 +931,21 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
/* fill the array */
|
||||
i = 0;
|
||||
foreach(item, newlist)
|
||||
{
|
||||
((int*)ARR_DATA_PTR(newarray))[i++] = intVal(lfirst(item));
|
||||
}
|
||||
((int *) ARR_DATA_PTR(newarray))[i++] = intVal(lfirst(item));
|
||||
|
||||
/*
|
||||
* Form a tuple with the new array and write it back.
|
||||
*/
|
||||
new_record[Anum_pg_group_groname-1] = (Datum)(stmt->name);
|
||||
new_record[Anum_pg_group_grosysid-1] = heap_getattr(group_tuple, Anum_pg_group_grosysid, pg_group_dsc, &null);
|
||||
new_record[Anum_pg_group_grolist-1] = PointerGetDatum(newarray);
|
||||
new_record[Anum_pg_group_groname - 1] = (Datum) (stmt->name);
|
||||
new_record[Anum_pg_group_grosysid - 1] = heap_getattr(group_tuple, Anum_pg_group_grosysid, pg_group_dsc, &null);
|
||||
new_record[Anum_pg_group_grolist - 1] = PointerGetDatum(newarray);
|
||||
|
||||
tuple = heap_formtuple(pg_group_dsc, new_record, new_record_nulls);
|
||||
heap_update(pg_group_rel, &group_tuple->t_self, tuple, NULL);
|
||||
|
||||
/* Update indexes */
|
||||
if (RelationGetForm(pg_group_rel)->relhasindex) {
|
||||
if (RelationGetForm(pg_group_rel)->relhasindex)
|
||||
{
|
||||
Relation idescs[Num_pg_group_indices];
|
||||
|
||||
CatalogOpenIndices(Num_pg_group_indices,
|
||||
@ -924,11 +956,11 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
}
|
||||
} /* endif alter group add user */
|
||||
|
||||
else if (stmt->action == -1) /*drop users from group */
|
||||
else if (stmt->action == -1)/* drop users from group */
|
||||
{
|
||||
Datum datum;
|
||||
bool null;
|
||||
bool is_dropuser = strcmp(tag, "DROP USER")==0;
|
||||
bool is_dropuser = strcmp(tag, "DROP USER") == 0;
|
||||
|
||||
datum = heap_getattr(group_tuple, Anum_pg_group_grolist, pg_group_dsc, &null);
|
||||
if (null)
|
||||
@ -940,21 +972,25 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
{
|
||||
HeapTuple tuple;
|
||||
Datum new_record[Natts_pg_group];
|
||||
char new_record_nulls[Natts_pg_group] = { ' ', ' ', ' '};
|
||||
ArrayType *oldarray, *newarray;
|
||||
List * newlist = NULL, *item;
|
||||
char new_record_nulls[Natts_pg_group] = {' ', ' ', ' '};
|
||||
ArrayType *oldarray,
|
||||
*newarray;
|
||||
List *newlist = NULL,
|
||||
*item;
|
||||
int i;
|
||||
|
||||
oldarray = (ArrayType*)datum;
|
||||
oldarray = (ArrayType *) datum;
|
||||
Assert(ARR_NDIM(oldarray) == 1);
|
||||
/* first add the old array to the hitherto empty list */
|
||||
for (i = ARR_LBOUND(oldarray)[0]; i < ARR_LBOUND(oldarray)[0] + ARR_DIMS(oldarray)[0]; i++)
|
||||
{
|
||||
int index, arrval;
|
||||
int index,
|
||||
arrval;
|
||||
Value *v;
|
||||
bool valueNull;
|
||||
|
||||
index = i;
|
||||
arrval = DatumGetInt32(array_ref(oldarray, 1, &index, true/*by value*/,
|
||||
arrval = DatumGetInt32(array_ref(oldarray, 1, &index, true /* by value */ ,
|
||||
sizeof(int), 0, &valueNull));
|
||||
v = makeInteger(arrval);
|
||||
/* filter out duplicates */
|
||||
@ -963,12 +999,13 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
}
|
||||
|
||||
/*
|
||||
* now convert the to be dropped usernames to sysids and remove
|
||||
* them from the list
|
||||
* now convert the to be dropped usernames to sysids and
|
||||
* remove them from the list
|
||||
*/
|
||||
foreach(item, stmt->listUsers)
|
||||
{
|
||||
Value *v;
|
||||
|
||||
if (!is_dropuser)
|
||||
{
|
||||
/* Get the uid of the proposed user to drop. */
|
||||
@ -1002,22 +1039,21 @@ AlterGroup(AlterGroupStmt *stmt, const char * tag)
|
||||
/* fill the array */
|
||||
i = 0;
|
||||
foreach(item, newlist)
|
||||
{
|
||||
((int*)ARR_DATA_PTR(newarray))[i++] = intVal(lfirst(item));
|
||||
}
|
||||
((int *) ARR_DATA_PTR(newarray))[i++] = intVal(lfirst(item));
|
||||
|
||||
/*
|
||||
* Insert the new tuple with the updated user list
|
||||
*/
|
||||
new_record[Anum_pg_group_groname-1] = (Datum)(stmt->name);
|
||||
new_record[Anum_pg_group_grosysid-1] = heap_getattr(group_tuple, Anum_pg_group_grosysid, pg_group_dsc, &null);
|
||||
new_record[Anum_pg_group_grolist-1] = PointerGetDatum(newarray);
|
||||
new_record[Anum_pg_group_groname - 1] = (Datum) (stmt->name);
|
||||
new_record[Anum_pg_group_grosysid - 1] = heap_getattr(group_tuple, Anum_pg_group_grosysid, pg_group_dsc, &null);
|
||||
new_record[Anum_pg_group_grolist - 1] = PointerGetDatum(newarray);
|
||||
|
||||
tuple = heap_formtuple(pg_group_dsc, new_record, new_record_nulls);
|
||||
heap_update(pg_group_rel, &group_tuple->t_self, tuple, NULL);
|
||||
|
||||
/* Update indexes */
|
||||
if (RelationGetForm(pg_group_rel)->relhasindex) {
|
||||
if (RelationGetForm(pg_group_rel)->relhasindex)
|
||||
{
|
||||
Relation idescs[Num_pg_group_indices];
|
||||
|
||||
CatalogOpenIndices(Num_pg_group_indices,
|
||||
@ -1056,8 +1092,8 @@ DropGroup(DropGroupStmt *stmt)
|
||||
elog(ERROR, "DROP GROUP: permission denied");
|
||||
|
||||
/*
|
||||
* There is not real reason for this, but it makes it consistent
|
||||
* with drop user, and it seems like a good idea anyway.
|
||||
* There is not real reason for this, but it makes it consistent with
|
||||
* drop user, and it seems like a good idea anyway.
|
||||
*/
|
||||
if (IsTransactionBlock())
|
||||
elog(ERROR, "DROP GROUP: may not be called in a transaction block");
|
||||
@ -1075,7 +1111,7 @@ DropGroup(DropGroupStmt *stmt)
|
||||
bool null;
|
||||
|
||||
datum = heap_getattr(tuple, Anum_pg_group_groname, pg_group_dsc, &null);
|
||||
if (datum && !null && strcmp((char*)datum, stmt->name)==0)
|
||||
if (datum && !null && strcmp((char *) datum, stmt->name) == 0)
|
||||
{
|
||||
gro_exists = true;
|
||||
heap_delete(pg_group_rel, &tuple->t_self, NULL);
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.146 2000/04/06 18:12:07 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.147 2000/04/12 17:14:59 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -104,13 +104,15 @@ static char *vc_show_rusage(struct rusage * ru0);
|
||||
* This routines handle a special cross-transaction portal.
|
||||
* However it is automatically closed in case of abort.
|
||||
*/
|
||||
void CommonSpecialPortalOpen(void)
|
||||
void
|
||||
CommonSpecialPortalOpen(void)
|
||||
{
|
||||
char *pname;
|
||||
|
||||
|
||||
if (CommonSpecialPortalInUse)
|
||||
elog(ERROR, "CommonSpecialPortal is in use");
|
||||
|
||||
/*
|
||||
* Create a portal for safe memory across transactions. We need to
|
||||
* palloc the name space for it because our hash function expects the
|
||||
@ -130,7 +132,8 @@ void CommonSpecialPortalOpen(void)
|
||||
CommonSpecialPortalInUse = true;
|
||||
}
|
||||
|
||||
void CommonSpecialPortalClose(void)
|
||||
void
|
||||
CommonSpecialPortalClose(void)
|
||||
{
|
||||
/* Clear flag first, to avoid recursion if PortalDrop elog's */
|
||||
CommonSpecialPortalInUse = false;
|
||||
@ -141,12 +144,14 @@ void CommonSpecialPortalClose(void)
|
||||
PortalDrop(&vc_portal);
|
||||
}
|
||||
|
||||
PortalVariableMemory CommonSpecialPortalGetMemory(void)
|
||||
PortalVariableMemory
|
||||
CommonSpecialPortalGetMemory(void)
|
||||
{
|
||||
return PortalGetVariableMemory(vc_portal);
|
||||
}
|
||||
|
||||
bool CommonSpecialPortalIsOpen(void)
|
||||
bool
|
||||
CommonSpecialPortalIsOpen(void)
|
||||
{
|
||||
return CommonSpecialPortalInUse;
|
||||
}
|
||||
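The CommonSpecialPortal routines reindented above form a small open/use/close protocol around a portal whose memory survives transaction commits. A minimal caller-side sketch follows; example_cross_xact_alloc is a hypothetical function, and the MemoryContextSwitchTo cast on the returned PortalVariableMemory is an assumption about typical usage, not something this commit shows.

/* sketch only; assumes the usual backend includes */
static void
example_cross_xact_alloc(void)
{
    PortalVariableMemory portalmem;
    MemoryContext oldcxt;
    char       *saved;

    if (CommonSpecialPortalIsOpen())    /* only one user at a time */
        elog(ERROR, "cross-transaction portal already in use");

    CommonSpecialPortalOpen();          /* sets CommonSpecialPortalInUse */
    portalmem = CommonSpecialPortalGetMemory();

    /* allocate something that must outlive the current transaction */
    oldcxt = MemoryContextSwitchTo((MemoryContext) portalmem);     /* assumed cast */
    saved = pstrdup("survives CommitTransactionCommand()");
    MemoryContextSwitchTo(oldcxt);

    /* ... commit and restart transactions, keep using 'saved' ... */

    CommonSpecialPortalClose();         /* drops the portal and its memory */
}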
@ -208,9 +213,9 @@ vacuum(char *vacrel, bool verbose, bool analyze, List *va_spec)
|
||||
* Start up the vacuum cleaner.
|
||||
*
|
||||
* NOTE: since this commits the current transaction, the memory holding
|
||||
* any passed-in parameters gets freed here. We must have already copied
|
||||
* pass-by-reference parameters to safe storage. Don't make me fix this
|
||||
* again!
|
||||
* any passed-in parameters gets freed here. We must have already
|
||||
* copied pass-by-reference parameters to safe storage. Don't make me
|
||||
* fix this again!
|
||||
*/
|
||||
vc_init();
|
||||
|
||||
@ -316,6 +321,7 @@ vc_getrels(NameData *VacRelP)
|
||||
|
||||
if (NameStr(*VacRelP))
|
||||
{
|
||||
|
||||
/*
|
||||
* we could use the cache here, but it is clearer to use scankeys
|
||||
* for both vacuum cases, bjm 2000/01/19
|
||||
@ -1456,8 +1462,8 @@ vc_repair_frag(VRelStats *vacrelstats, Relation onerel,
|
||||
* we stop shrinking here. I could try to find
|
||||
* real parent row but want not to do it because
|
||||
* of real solution will be implemented anyway,
|
||||
* latter, and we are too close to 6.5 release.
|
||||
* - vadim 06/11/99
|
||||
* latter, and we are too close to 6.5 release. -
|
||||
* vadim 06/11/99
|
||||
*/
|
||||
if (Ptp.t_data->t_xmax != tp.t_data->t_xmin)
|
||||
{
|
||||
@ -1539,6 +1545,7 @@ vc_repair_frag(VRelStats *vacrelstats, Relation onerel,
|
||||
* to get t_infomask of inserted heap tuple !!!
|
||||
*/
|
||||
ToPage = BufferGetPage(cur_buffer);
|
||||
|
||||
/*
|
||||
* If this page was not used before - clean it.
|
||||
*
|
||||
@ -1546,13 +1553,15 @@ vc_repair_frag(VRelStats *vacrelstats, Relation onerel,
|
||||
* vc_vacpage, because we have already incremented the
|
||||
* vpd's vpd_offsets_used field to account for the
|
||||
* tuple(s) we expect to move onto the page. Therefore
|
||||
* vc_vacpage's check for vpd_offsets_used == 0 is wrong.
|
||||
* But since that's a good debugging check for all other
|
||||
* callers, we work around it here rather than remove it.
|
||||
* vc_vacpage's check for vpd_offsets_used == 0 is
|
||||
* wrong. But since that's a good debugging check for
|
||||
* all other callers, we work around it here rather
|
||||
* than remove it.
|
||||
*/
|
||||
if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
|
||||
{
|
||||
int sv_offsets_used = destvpd->vpd_offsets_used;
|
||||
|
||||
destvpd->vpd_offsets_used = 0;
|
||||
vc_vacpage(ToPage, destvpd);
|
||||
destvpd->vpd_offsets_used = sv_offsets_used;
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.33 2000/04/07 13:39:24 thomas Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.34 2000/04/12 17:15:00 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -505,7 +505,7 @@ static bool
|
||||
show_effective_cache_size()
|
||||
{
|
||||
elog(NOTICE, "EFFECTIVE_CACHE_SIZE is %g (%dK pages)",
|
||||
effective_cache_size, BLCKSZ/1024);
|
||||
effective_cache_size, BLCKSZ / 1024);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
@ -777,8 +777,9 @@ set_default_datestyle(void)
|
||||
{
|
||||
char *DBDate;
|
||||
|
||||
/* Initialize from compile-time defaults in init/globals.c.
|
||||
* NB: this is a necessary step; consider PGDATESTYLE="DEFAULT".
|
||||
/*
|
||||
* Initialize from compile-time defaults in init/globals.c. NB: this
|
||||
* is a necessary step; consider PGDATESTYLE="DEFAULT".
|
||||
*/
|
||||
DefaultDateStyle = DateStyle;
|
||||
DefaultEuroDates = EuroDates;
|
||||
@ -788,9 +789,11 @@ set_default_datestyle(void)
|
||||
if (DBDate == NULL)
|
||||
return;
|
||||
|
||||
/* Make a modifiable copy --- overwriting the env var doesn't seem
|
||||
/*
|
||||
* Make a modifiable copy --- overwriting the env var doesn't seem
|
||||
* like a good idea, even though we currently won't look at it again.
|
||||
* Note that we cannot use palloc at this early stage of initialization.
|
||||
* Note that we cannot use palloc at this early stage of
|
||||
* initialization.
|
||||
*/
|
||||
DBDate = strdup(DBDate);
|
||||
|
||||
@ -1041,9 +1044,8 @@ reset_XactIsoLevel()
|
||||
static bool
|
||||
parse_pg_options(char *value)
|
||||
{
|
||||
if (!superuser()) {
|
||||
if (!superuser())
|
||||
elog(ERROR, "Only users with superuser privilege can set pg_options");
|
||||
}
|
||||
if (value == NULL)
|
||||
read_pg_options(0);
|
||||
else
|
||||
@ -1061,9 +1063,8 @@ show_pg_options(void)
|
||||
static bool
|
||||
reset_pg_options(void)
|
||||
{
|
||||
if (!superuser()) {
|
||||
if (!superuser())
|
||||
elog(ERROR, "Only users with superuser privilege can set pg_options");
|
||||
}
|
||||
read_pg_options(0);
|
||||
return (TRUE);
|
||||
}
@ -6,7 +6,7 @@
|
||||
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: execAmi.c,v 1.45 2000/01/26 05:56:21 momjian Exp $
|
||||
* $Id: execAmi.c,v 1.46 2000/04/12 17:15:07 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -236,8 +236,9 @@ ExecCloseR(Plan *node)
|
||||
|
||||
/*
|
||||
* endscan released AccessShareLock acquired by beginscan. If we are
|
||||
* holding any stronger locks on the rel, they should be held till end of
|
||||
* xact. Therefore, we need only close the rel and not release locks.
|
||||
* holding any stronger locks on the rel, they should be held till end
|
||||
* of xact. Therefore, we need only close the rel and not release
|
||||
* locks.
|
||||
*/
|
||||
if (relation != NULL)
|
||||
heap_close(relation, NoLock);
@ -27,7 +27,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.112 2000/04/07 07:24:47 vadim Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.113 2000/04/12 17:15:08 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -82,6 +82,7 @@ static void ExecCheckRTPerms(List *rangeTable, CmdType operation,
|
||||
int resultRelation, bool resultIsScanned);
|
||||
static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation,
|
||||
bool isResultRelation, bool resultIsScanned);
|
||||
|
||||
/* end of local decls */
|
||||
|
||||
|
||||
@ -491,10 +492,12 @@ ExecCheckPlanPerms(Plan *plan, CmdType operation,
|
||||
|
||||
if (app->inheritrelid > 0)
|
||||
{
|
||||
|
||||
/*
|
||||
* Append implements expansion of inheritance; all members
|
||||
* of inheritrtable list will be plugged into same RTE slot.
|
||||
* Therefore, they are either all result relations or none.
|
||||
* Append implements expansion of inheritance; all
|
||||
* members of inheritrtable list will be plugged into
|
||||
* same RTE slot. Therefore, they are either all
|
||||
* result relations or none.
|
||||
*/
|
||||
List *rtable;
|
||||
|
||||
@ -576,10 +579,11 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation,
|
||||
|
||||
if (rte->skipAcl)
|
||||
{
|
||||
|
||||
/*
|
||||
* This happens if the access to this table is due to a view
|
||||
* query rewriting - the rewrite handler already checked the
|
||||
* permissions against the view owner, so we just skip this entry.
|
||||
* This happens if the access to this table is due to a view query
|
||||
* rewriting - the rewrite handler already checked the permissions
|
||||
* against the view owner, so we just skip this entry.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
@ -625,9 +629,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation,
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
aclcheck_result = CHECK(ACL_RD);
|
||||
}
|
||||
|
||||
if (aclcheck_result != ACLCHECK_OK)
|
||||
elog(ERROR, "%s: %s",
|
||||
@ -734,8 +736,9 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
|
||||
/*
|
||||
* If there are indices on the result relation, open them and save
|
||||
* descriptors in the result relation info, so that we can add new
|
||||
* index entries for the tuples we add/update. We need not do this
|
||||
* for a DELETE, however, since deletion doesn't affect indexes.
|
||||
* index entries for the tuples we add/update. We need not do
|
||||
* this for a DELETE, however, since deletion doesn't affect
|
||||
* indexes.
|
||||
*/
|
||||
if (resultRelationDesc->rd_rel->relhasindex &&
|
||||
operation != CMD_DELETE)
|
||||
@ -805,10 +808,11 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
|
||||
targetList = plan->targetlist;
|
||||
|
||||
/*
|
||||
* Now that we have the target list, initialize the junk filter if needed.
|
||||
* SELECT and INSERT queries need a filter if there are any junk attrs
|
||||
* in the tlist. UPDATE and DELETE always need one, since there's always
|
||||
* a junk 'ctid' attribute present --- no need to look first.
|
||||
* Now that we have the target list, initialize the junk filter if
|
||||
* needed. SELECT and INSERT queries need a filter if there are any
|
||||
* junk attrs in the tlist. UPDATE and DELETE always need one, since
|
||||
* there's always a junk 'ctid' attribute present --- no need to look
|
||||
* first.
|
||||
*/
|
||||
{
|
||||
bool junk_filter_needed = false;
|
||||
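The junk-filter rule rewrapped in the comment above reduces to a simple decision, sketched below; the helper itself is hypothetical, and the field names (TargetEntry, resdom, resjunk) are assumed to match the tree of this era.

/* sketch only; assumes the usual backend includes */
static bool
junk_filter_needed_for(CmdType operation, List *targetList)
{
    List       *t;

    switch (operation)
    {
        case CMD_SELECT:
        case CMD_INSERT:
            /* needed only if some tlist entry is junk */
            foreach(t, targetList)
            {
                TargetEntry *tle = (TargetEntry *) lfirst(t);

                if (tle->resdom->resjunk)
                    return true;
            }
            return false;

        case CMD_UPDATE:
        case CMD_DELETE:
            /* always: there is always a junk "ctid" attribute */
            return true;

        default:
            return false;
    }
}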
@ -948,8 +952,8 @@ EndPlan(Plan *plan, EState *estate)
|
||||
}
|
||||
|
||||
/*
|
||||
* close the result relations if necessary,
|
||||
* but hold locks on them until xact commit
|
||||
* close the result relations if necessary, but hold locks on them
|
||||
* until xact commit
|
||||
*/
|
||||
if (resultRelationInfo != NULL)
|
||||
{
|
||||
@ -1708,10 +1712,10 @@ ExecRelCheck(Relation rel, HeapTuple tuple, EState *estate)
|
||||
|
||||
/*
|
||||
* NOTE: SQL92 specifies that a NULL result from a constraint
|
||||
* expression is not to be treated as a failure. Therefore,
|
||||
* tell ExecQual to return TRUE for NULL.
|
||||
* expression is not to be treated as a failure. Therefore, tell
|
||||
* ExecQual to return TRUE for NULL.
|
||||
*/
|
||||
if (! ExecQual(qual, econtext, true))
|
||||
if (!ExecQual(qual, econtext, true))
|
||||
return check[i].ccname;
|
||||
}
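The SQL92 rule noted above (a NULL CHECK result is not a failure) is carried entirely by the third ExecQual argument. A condensed sketch of the surrounding constraint loop; the parameter shape is illustrative, not the exact ExecRelCheck signature.

/* sketch only; assumes the usual backend includes */
static char *
first_failed_constraint(int ncheck, char **ccnames, List **quals,
                        ExprContext *econtext)
{
    int         i;

    for (i = 0; i < ncheck; i++)
    {
        /* resultForNull = true: a NULL constraint result counts as a pass */
        if (!ExecQual(quals[i], econtext, true))
            return ccnames[i];  /* name of the first violated constraint */
    }
    return NULL;                /* every CHECK passed (or evaluated to NULL) */
}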
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.68 2000/02/20 21:32:04 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.69 2000/04/12 17:15:08 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -108,12 +108,14 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Null refexpr indicates we are doing an INSERT into an array column.
|
||||
* For now, we just take the refassgnexpr (which the parser will have
|
||||
* ensured is an array value) and return it as-is, ignoring any
|
||||
* subscripts that may have been supplied in the INSERT column list.
|
||||
* This is a kluge, but it's not real clear what the semantics ought
|
||||
* to be...
|
||||
|
||||
/*
|
||||
* Null refexpr indicates we are doing an INSERT into an array
|
||||
* column. For now, we just take the refassgnexpr (which the
|
||||
* parser will have ensured is an array value) and return it
|
||||
* as-is, ignoring any subscripts that may have been supplied in
|
||||
* the INSERT column list. This is a kluge, but it's not real
|
||||
* clear what the semantics ought to be...
|
||||
*/
|
||||
array_scanner = NULL;
|
||||
}
|
||||
@ -153,9 +155,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
|
||||
lIndex = lower.indx;
|
||||
}
|
||||
else
|
||||
{
|
||||
lIndex = NULL;
|
||||
}
|
||||
|
||||
if (arrayRef->refassgnexpr != NULL)
|
||||
{
|
||||
@ -163,6 +163,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
|
||||
econtext,
|
||||
isNull,
|
||||
&dummy);
|
||||
|
||||
if (*isNull)
|
||||
return (Datum) NULL;
|
||||
|
||||
@ -633,7 +634,7 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
|
||||
*/
|
||||
argV[i] = ExecEvalExpr((Node *) lfirst(arg),
|
||||
econtext,
|
||||
& nullVect[i],
|
||||
&nullVect[i],
|
||||
argIsDone);
|
||||
|
||||
if (!(*argIsDone))
|
||||
@ -779,9 +780,9 @@ ExecMakeFunctionResult(Node *node,
|
||||
result = postquel_function(funcNode, (char **) argV,
|
||||
isNull, isDone);
|
||||
|
||||
if (! *isDone)
|
||||
if (!*isDone)
|
||||
break; /* got a result from current argument */
|
||||
if (! fcache->hasSetArg)
|
||||
if (!fcache->hasSetArg)
|
||||
break; /* input not a set, so done */
|
||||
|
||||
/* OK, get the next argument... */
|
||||
@ -789,7 +790,11 @@ ExecMakeFunctionResult(Node *node,
|
||||
|
||||
if (argDone)
|
||||
{
|
||||
/* End of arguments, so reset the setArg flag and say "Done" */
|
||||
|
||||
/*
|
||||
* End of arguments, so reset the setArg flag and say
|
||||
* "Done"
|
||||
*/
|
||||
fcache->setArg = (char *) NULL;
|
||||
fcache->hasSetArg = false;
|
||||
*isDone = true;
|
||||
@ -797,7 +802,8 @@ ExecMakeFunctionResult(Node *node,
|
||||
break;
|
||||
}
|
||||
|
||||
/* If we reach here, loop around to run the function on the
|
||||
/*
|
||||
* If we reach here, loop around to run the function on the
|
||||
* new argument.
|
||||
*/
|
||||
}
|
||||
@ -1003,20 +1009,22 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
|
||||
AnyNull = false;
|
||||
|
||||
/*
|
||||
* If any of the clauses is TRUE, the OR result is TRUE regardless
|
||||
* of the states of the rest of the clauses, so we can stop evaluating
|
||||
* If any of the clauses is TRUE, the OR result is TRUE regardless of
|
||||
* the states of the rest of the clauses, so we can stop evaluating
|
||||
* and return TRUE immediately. If none are TRUE and one or more is
|
||||
* NULL, we return NULL; otherwise we return FALSE. This makes sense
|
||||
* when you interpret NULL as "don't know": if we have a TRUE then the
|
||||
* OR is TRUE even if we aren't sure about some of the other inputs.
|
||||
* If all the known inputs are FALSE, but we have one or more "don't
|
||||
* knows", then we have to report that we "don't know" what the OR's
|
||||
* result should be --- perhaps one of the "don't knows" would have been
|
||||
* TRUE if we'd known its value. Only when all the inputs are known
|
||||
* to be FALSE can we state confidently that the OR's result is FALSE.
|
||||
* result should be --- perhaps one of the "don't knows" would have
|
||||
* been TRUE if we'd known its value. Only when all the inputs are
|
||||
* known to be FALSE can we state confidently that the OR's result is
|
||||
* FALSE.
|
||||
*/
|
||||
foreach(clause, clauses)
|
||||
{
|
||||
|
||||
/*
|
||||
* We don't iterate over sets in the quals, so pass in an isDone
|
||||
* flag, but ignore it.
|
||||
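The OR rule spelled out in the rewrapped comment above amounts to the following stand-alone sketch of three-valued OR over already-evaluated clause results (hypothetical helper; the real ExecEvalOr evaluates each clause lazily with ExecEvalExpr and short-circuits the same way).

/* sketch only; assumes the usual backend includes */
static Datum
three_valued_or(bool *values, bool *nulls, int nclauses, bool *isNull)
{
    bool        AnyNull = false;
    int         i;

    for (i = 0; i < nclauses; i++)
    {
        if (nulls[i])
        {
            AnyNull = true;     /* a "don't know" input */
            continue;
        }
        if (values[i])
        {
            *isNull = false;    /* one known TRUE decides the OR */
            return (Datum) true;
        }
    }

    /* no TRUE seen: NULL if any input was unknown, otherwise FALSE */
    *isNull = AnyNull;
    return (Datum) false;
}

ExecEvalAnd, a little further down, is the mirror image: one known FALSE decides the AND, and a leftover NULL makes the result NULL rather than TRUE.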
@ -1025,6 +1033,7 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
|
||||
econtext,
|
||||
isNull,
|
||||
&isDone);
|
||||
|
||||
/*
|
||||
* if we have a non-null true result, then return it.
|
||||
*/
|
||||
@ -1065,6 +1074,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
|
||||
*/
|
||||
foreach(clause, clauses)
|
||||
{
|
||||
|
||||
/*
|
||||
* We don't iterate over sets in the quals, so pass in an isDone
|
||||
* flag, but ignore it.
|
||||
@ -1073,6 +1083,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
|
||||
econtext,
|
||||
isNull,
|
||||
&isDone);
|
||||
|
||||
/*
|
||||
* if we have a non-null false result, then return it.
|
||||
*/
|
||||
@ -1084,7 +1095,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
|
||||
|
||||
/* AnyNull is true if at least one clause evaluated to NULL */
|
||||
*isNull = AnyNull;
|
||||
return (Datum) (! AnyNull);
|
||||
return (Datum) (!AnyNull);
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------
|
||||
@ -1129,7 +1140,7 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
|
||||
* case statement is satisfied. A NULL result from the test is
|
||||
* not considered true.
|
||||
*/
|
||||
if (DatumGetInt32(clause_value) != 0 && ! *isNull)
|
||||
if (DatumGetInt32(clause_value) != 0 && !*isNull)
|
||||
{
|
||||
return ExecEvalExpr(wclause->result,
|
||||
econtext,
|
||||
@ -1353,14 +1364,15 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
|
||||
|
||||
/*
|
||||
* If there is a null clause, consider the qualification to fail.
|
||||
* XXX is this still correct for constraints? It probably shouldn't
|
||||
* happen at all ...
|
||||
* XXX is this still correct for constraints? It probably
|
||||
* shouldn't happen at all ...
|
||||
*/
|
||||
if (clause == NULL)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* pass isDone, but ignore it. We don't iterate over multiple returns
|
||||
* in the qualifications.
|
||||
* pass isDone, but ignore it. We don't iterate over multiple
|
||||
* returns in the qualifications.
|
||||
*/
|
||||
expr_value = ExecEvalExpr(clause, econtext, &isNull, &isDone);
|
||||
|
||||
@ -1429,7 +1441,8 @@ ExecTargetList(List *targetlist,
|
||||
HeapTuple newTuple;
|
||||
bool isNull;
|
||||
bool haveDoneIters;
|
||||
static struct tupleDesc NullTupleDesc; /* we assume this inits to zeroes */
|
||||
static struct tupleDesc NullTupleDesc; /* we assume this inits to
|
||||
* zeroes */
|
||||
|
||||
/*
|
||||
* debugging stuff
|
||||
@ -1512,7 +1525,8 @@ ExecTargetList(List *targetlist,
|
||||
if (itemIsDone[resind])
|
||||
haveDoneIters = true;
|
||||
else
|
||||
*isDone = false; /* we have undone Iters in the list */
|
||||
*isDone = false; /* we have undone Iters in the
|
||||
* list */
|
||||
}
|
||||
}
|
||||
else
|
||||
@ -1571,7 +1585,9 @@ ExecTargetList(List *targetlist,
|
||||
{
|
||||
if (*isDone)
|
||||
{
|
||||
/* all Iters are done, so return a null indicating tlist set
|
||||
|
||||
/*
|
||||
* all Iters are done, so return a null indicating tlist set
|
||||
* expansion is complete.
|
||||
*/
|
||||
newTuple = NULL;
|
||||
@ -1579,21 +1595,24 @@ ExecTargetList(List *targetlist,
|
||||
}
|
||||
else
|
||||
{
|
||||
/* We have some done and some undone Iters. Restart the done ones
|
||||
* so that we can deliver a tuple (if possible).
|
||||
|
||||
/*
|
||||
* We have some done and some undone Iters. Restart the done
|
||||
* ones so that we can deliver a tuple (if possible).
|
||||
*
|
||||
* XXX this code is a crock, because it only works for Iters at
|
||||
* the top level of tlist expressions, and doesn't even work right
|
||||
* for them: you should get all possible combinations of Iter
|
||||
* results, but you won't unless the numbers of values returned by
|
||||
* each are relatively prime. Should have a mechanism more like
|
||||
* aggregate functions, where we make a list of all Iters
|
||||
* contained in the tlist and cycle through their values in a
|
||||
* methodical fashion. To do someday; can't get excited about
|
||||
* fixing a Berkeley feature that's not in SQL92. (The only
|
||||
* reason we're doing this much is that we have to be sure all
|
||||
* the Iters are run to completion, or their subplan executors
|
||||
* will have unreleased resources, e.g. pinned buffers...)
|
||||
* the top level of tlist expressions, and doesn't even work
|
||||
* right for them: you should get all possible combinations of
|
||||
* Iter results, but you won't unless the numbers of values
|
||||
* returned by each are relatively prime. Should have a
|
||||
* mechanism more like aggregate functions, where we make a
|
||||
* list of all Iters contained in the tlist and cycle through
|
||||
* their values in a methodical fashion. To do someday; can't
|
||||
* get excited about fixing a Berkeley feature that's not in
|
||||
* SQL92. (The only reason we're doing this much is that we
|
||||
* have to be sure all the Iters are run to completion, or
|
||||
* their subplan executors will have unreleased resources,
|
||||
* e.g. pinned buffers...)
|
||||
*/
|
||||
foreach(tl, targetlist)
|
||||
{
|
||||
@ -1605,7 +1624,7 @@ ExecTargetList(List *targetlist,
|
||||
resdom = tle->resdom;
|
||||
resind = resdom->resno - 1;
|
||||
|
||||
if (IsA(expr, Iter) && itemIsDone[resind])
|
||||
if (IsA(expr, Iter) &&itemIsDone[resind])
|
||||
{
|
||||
constvalue = (Datum) ExecEvalExpr(expr,
|
||||
econtext,
|
||||
@ -1613,8 +1632,10 @@ ExecTargetList(List *targetlist,
|
||||
&itemIsDone[resind]);
|
||||
if (itemIsDone[resind])
|
||||
{
|
||||
/* Oh dear, this Iter is returning an empty set.
|
||||
* Guess we can't make a tuple after all.
|
||||
|
||||
/*
|
||||
* Oh dear, this Iter is returning an empty
|
||||
* set. Guess we can't make a tuple after all.
|
||||
*/
|
||||
*isDone = true;
|
||||
newTuple = NULL;
|
||||
@ -1639,6 +1660,7 @@ ExecTargetList(List *targetlist,
|
||||
newTuple = (HeapTuple) heap_formtuple(targettype, values, null_head);
|
||||
|
||||
exit:
|
||||
|
||||
/*
|
||||
* free the status arrays if we palloc'd them
|
||||
*/
@ -15,7 +15,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.36 2000/01/27 18:11:27 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.37 2000/04/12 17:15:08 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -262,7 +262,7 @@ TupleTableSlot * /* return: the slot allocated in the tuple
|
||||
ExecAllocTableSlot(TupleTable table)
|
||||
{
|
||||
int slotnum; /* new slot number */
|
||||
TupleTableSlot* slot;
|
||||
TupleTableSlot *slot;
|
||||
|
||||
/* ----------------
|
||||
* sanity checks
|
||||
@ -385,7 +385,8 @@ ExecStoreTuple(HeapTuple tuple,
|
||||
slot->ttc_buffer = buffer;
|
||||
slot->ttc_shouldFree = shouldFree;
|
||||
|
||||
/* If tuple is on a disk page, keep the page pinned as long as we hold
|
||||
/*
|
||||
* If tuple is on a disk page, keep the page pinned as long as we hold
|
||||
* a pointer into it.
|
||||
*/
|
||||
if (BufferIsValid(buffer))
|
||||
@ -426,7 +427,7 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
|
||||
|
||||
slot->val = (HeapTuple) NULL;
|
||||
|
||||
slot->ttc_shouldFree = true; /* probably useless code... */
|
||||
slot->ttc_shouldFree = true;/* probably useless code... */
|
||||
|
||||
/* ----------------
|
||||
* Drop the pin on the referenced buffer, if there is one.
|
||||
@ -776,6 +777,7 @@ NodeGetResultTupleSlot(Plan *node)
|
||||
case T_TidScan:
|
||||
{
|
||||
CommonScanState *scanstate = ((IndexScan *) node)->scan.scanstate;
|
||||
|
||||
slot = scanstate->cstate.cs_ResultTupleSlot;
|
||||
}
|
||||
break;
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.54 2000/02/18 09:29:57 inoue Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.55 2000/04/12 17:15:08 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -923,8 +923,8 @@ ExecOpenIndices(Oid resultRelationOid,
|
||||
|
||||
/*
|
||||
* Hack for not btree and hash indices: they use relation
|
||||
* level exclusive locking on update (i.e. - they are
|
||||
* not ready for MVCC) and so we have to exclusively lock
|
||||
* level exclusive locking on update (i.e. - they are not
|
||||
* ready for MVCC) and so we have to exclusively lock
|
||||
* indices here to prevent deadlocks if we will scan them
|
||||
* - index_beginscan places AccessShareLock, indices
|
||||
* update methods don't use locks at all. We release this
|
||||
@ -1186,7 +1186,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
|
||||
econtext->ecxt_scantuple = slot;
|
||||
|
||||
/* Skip this index-update if the predicate isn't satisfied */
|
||||
if (! ExecQual((List *) predicate, econtext, false))
|
||||
if (!ExecQual((List *) predicate, econtext, false))
|
||||
continue;
|
||||
}
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.32 2000/04/04 21:44:39 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.33 2000/04/12 17:15:09 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -150,6 +150,7 @@ init_execution_state(FunctionCachePtr fcache,
|
||||
static TupleDesc
|
||||
postquel_start(execution_state *es)
|
||||
{
|
||||
|
||||
/*
|
||||
* Do nothing for utility commands. (create, destroy...) DZ -
|
||||
* 30-8-1996
|
||||
@ -166,9 +167,9 @@ postquel_getnext(execution_state *es)
|
||||
|
||||
if (es->qd->operation == CMD_UTILITY)
|
||||
{
|
||||
|
||||
/*
|
||||
* Process a utility command. (create, destroy...) DZ -
|
||||
* 30-8-1996
|
||||
* Process a utility command. (create, destroy...) DZ - 30-8-1996
|
||||
*/
|
||||
ProcessUtility(es->qd->parsetree->utilityStmt, es->qd->dest);
|
||||
if (!LAST_POSTQUEL_COMMAND(es))
|
||||
@ -184,6 +185,7 @@ postquel_getnext(execution_state *es)
|
||||
static void
|
||||
postquel_end(execution_state *es)
|
||||
{
|
||||
|
||||
/*
|
||||
* Do nothing for utility commands. (create, destroy...) DZ -
|
||||
* 30-8-1996
@ -32,7 +32,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.62 2000/01/26 05:56:22 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.63 2000/04/12 17:15:09 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -56,6 +56,7 @@
|
||||
*/
|
||||
typedef struct AggStatePerAggData
|
||||
{
|
||||
|
||||
/*
|
||||
* These values are set up during ExecInitAgg() and do not change
|
||||
* thereafter:
|
||||
@ -68,6 +69,7 @@ typedef struct AggStatePerAggData
|
||||
Oid xfn1_oid;
|
||||
Oid xfn2_oid;
|
||||
Oid finalfn_oid;
|
||||
|
||||
/*
|
||||
* fmgr lookup data for transfer functions --- only valid when
|
||||
* corresponding oid is not InvalidOid
|
||||
@ -75,18 +77,21 @@ typedef struct AggStatePerAggData
|
||||
FmgrInfo xfn1;
|
||||
FmgrInfo xfn2;
|
||||
FmgrInfo finalfn;
|
||||
|
||||
/*
|
||||
* Type of input data and Oid of sort operator to use for it;
|
||||
* only set/used when aggregate has DISTINCT flag. (These are not
|
||||
* used directly by nodeAgg, but must be passed to the Tuplesort object.)
|
||||
* Type of input data and Oid of sort operator to use for it; only
|
||||
* set/used when aggregate has DISTINCT flag. (These are not used
|
||||
* directly by nodeAgg, but must be passed to the Tuplesort object.)
|
||||
*/
|
||||
Oid inputType;
|
||||
Oid sortOperator;
|
||||
|
||||
/*
|
||||
* fmgr lookup data for input type's equality operator --- only set/used
|
||||
* when aggregate has DISTINCT flag.
|
||||
* fmgr lookup data for input type's equality operator --- only
|
||||
* set/used when aggregate has DISTINCT flag.
|
||||
*/
|
||||
FmgrInfo equalfn;
|
||||
|
||||
/*
|
||||
* initial values from pg_aggregate entry
|
||||
*/
|
||||
@ -94,6 +99,7 @@ typedef struct AggStatePerAggData
|
||||
Datum initValue2; /* for transtype2 */
|
||||
bool initValue1IsNull,
|
||||
initValue2IsNull;
|
||||
|
||||
/*
|
||||
* We need the len and byval info for the agg's input and transition
|
||||
* data types in order to know how to copy/delete values.
|
||||
@ -106,14 +112,14 @@ typedef struct AggStatePerAggData
|
||||
transtype2ByVal;
|
||||
|
||||
/*
|
||||
* These values are working state that is initialized at the start
|
||||
* of an input tuple group and updated for each input tuple.
|
||||
* These values are working state that is initialized at the start of
|
||||
* an input tuple group and updated for each input tuple.
|
||||
*
|
||||
* For a simple (non DISTINCT) aggregate, we just feed the input values
|
||||
* straight to the transition functions. If it's DISTINCT, we pass the
|
||||
* input values into a Tuplesort object; then at completion of the input
|
||||
* tuple group, we scan the sorted values, eliminate duplicates, and run
|
||||
* the transition functions on the rest.
|
||||
* straight to the transition functions. If it's DISTINCT, we pass
|
||||
* the input values into a Tuplesort object; then at completion of the
|
||||
* input tuple group, we scan the sorted values, eliminate duplicates,
|
||||
* and run the transition functions on the rest.
|
||||
*/
|
||||
|
||||
Tuplesortstate *sortstate; /* sort object, if a DISTINCT agg */
|
||||
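The DISTINCT strategy described above (feed the group's input values into a Tuplesort, then run the transition functions only on non-duplicates at group end) looks roughly like this; fetch_next_sorted_value stands in for the actual tuplesort read call and is hypothetical, and cleanup of pass-by-ref values is omitted.

/* sketch only; assumes the usual backend includes */
static bool fetch_next_sorted_value(Tuplesortstate *state, Datum *val);    /* hypothetical */

static void
distinct_agg_finish_sketch(AggStatePerAgg peraggstate)
{
    Datum       oldVal = (Datum) 0;
    bool        haveOldVal = false;
    Datum       newVal;

    while (fetch_next_sorted_value(peraggstate->sortstate, &newVal))
    {
        if (haveOldVal)
        {
            Datum       equal;

            /* call the input type's equality proc, looked up into equalfn */
            equal = (Datum) (*fmgr_faddr(&peraggstate->equalfn)) (oldVal, newVal);
            if (DatumGetInt32(equal) != 0)
                continue;       /* duplicate of the previous value: skip */
        }
        advance_transition_functions(peraggstate, newVal, false);
        oldVal = newVal;
        haveOldVal = true;
    }
}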
@ -123,19 +129,21 @@ typedef struct AggStatePerAggData
|
||||
bool value1IsNull,
|
||||
value2IsNull;
|
||||
bool noInitValue; /* true if value1 not set yet */
|
||||
|
||||
/*
|
||||
* Note: right now, noInitValue always has the same value as value1IsNull.
|
||||
* But we should keep them separate because once the fmgr interface is
|
||||
* fixed, we'll need to distinguish a null returned by transfn1 from
|
||||
* a null we haven't yet replaced with an input value.
|
||||
* Note: right now, noInitValue always has the same value as
|
||||
* value1IsNull. But we should keep them separate because once the
|
||||
* fmgr interface is fixed, we'll need to distinguish a null returned
|
||||
* by transfn1 from a null we haven't yet replaced with an input
|
||||
* value.
|
||||
*/
|
||||
} AggStatePerAggData;
|
||||
|
||||
|
||||
static void initialize_aggregate (AggStatePerAgg peraggstate);
|
||||
static void advance_transition_functions (AggStatePerAgg peraggstate,
|
||||
static void initialize_aggregate(AggStatePerAgg peraggstate);
|
||||
static void advance_transition_functions(AggStatePerAgg peraggstate,
|
||||
Datum newVal, bool isNull);
|
||||
static void finalize_aggregate (AggStatePerAgg peraggstate,
|
||||
static void finalize_aggregate(AggStatePerAgg peraggstate,
|
||||
Datum *resultVal, bool *resultIsNull);
|
||||
static Datum copyDatum(Datum val, int typLen, bool typByVal);
|
||||
|
||||
@ -144,7 +152,7 @@ static Datum copyDatum(Datum val, int typLen, bool typByVal);
|
||||
* Initialize one aggregate for a new set of input values.
|
||||
*/
|
||||
static void
|
||||
initialize_aggregate (AggStatePerAgg peraggstate)
|
||||
initialize_aggregate(AggStatePerAgg peraggstate)
|
||||
{
|
||||
Aggref *aggref = peraggstate->aggref;
|
||||
|
||||
@ -153,8 +161,10 @@ initialize_aggregate (AggStatePerAgg peraggstate)
|
||||
*/
|
||||
if (aggref->aggdistinct)
|
||||
{
|
||||
/* In case of rescan, maybe there could be an uncompleted
|
||||
* sort operation? Clean it up if so.
|
||||
|
||||
/*
|
||||
* In case of rescan, maybe there could be an uncompleted sort
|
||||
* operation? Clean it up if so.
|
||||
*/
|
||||
if (peraggstate->sortstate)
|
||||
tuplesort_end(peraggstate->sortstate);
|
||||
@ -169,7 +179,7 @@ initialize_aggregate (AggStatePerAgg peraggstate)
|
||||
* (Re)set value1 and value2 to their initial values.
|
||||
*/
|
||||
if (OidIsValid(peraggstate->xfn1_oid) &&
|
||||
! peraggstate->initValue1IsNull)
|
||||
!peraggstate->initValue1IsNull)
|
||||
peraggstate->value1 = copyDatum(peraggstate->initValue1,
|
||||
peraggstate->transtype1Len,
|
||||
peraggstate->transtype1ByVal);
|
||||
@ -178,7 +188,7 @@ initialize_aggregate (AggStatePerAgg peraggstate)
|
||||
peraggstate->value1IsNull = peraggstate->initValue1IsNull;
|
||||
|
||||
if (OidIsValid(peraggstate->xfn2_oid) &&
|
||||
! peraggstate->initValue2IsNull)
|
||||
!peraggstate->initValue2IsNull)
|
||||
peraggstate->value2 = copyDatum(peraggstate->initValue2,
|
||||
peraggstate->transtype2Len,
|
||||
peraggstate->transtype2ByVal);
|
||||
@ -205,7 +215,7 @@ initialize_aggregate (AggStatePerAgg peraggstate)
|
||||
* out before reaching here.
|
||||
*/
|
||||
static void
|
||||
advance_transition_functions (AggStatePerAgg peraggstate,
|
||||
advance_transition_functions(AggStatePerAgg peraggstate,
|
||||
Datum newVal, bool isNull)
|
||||
{
|
||||
Datum args[2];
|
||||
@ -214,6 +224,7 @@ advance_transition_functions (AggStatePerAgg peraggstate,
|
||||
{
|
||||
if (peraggstate->noInitValue)
|
||||
{
|
||||
|
||||
/*
|
||||
* value1 has not been initialized. This is the first non-NULL
|
||||
* input value. We use it as the initial value for value1.
|
||||
@ -238,7 +249,7 @@ advance_transition_functions (AggStatePerAgg peraggstate,
|
||||
newVal = (Datum) fmgr_c(&peraggstate->xfn1,
|
||||
(FmgrValues *) args,
|
||||
&isNull);
|
||||
if (! peraggstate->transtype1ByVal)
|
||||
if (!peraggstate->transtype1ByVal)
|
||||
pfree(peraggstate->value1);
|
||||
peraggstate->value1 = newVal;
|
||||
}
|
||||
@ -252,7 +263,7 @@ advance_transition_functions (AggStatePerAgg peraggstate,
|
||||
newVal = (Datum) fmgr_c(&peraggstate->xfn2,
|
||||
(FmgrValues *) args,
|
||||
&isNull);
|
||||
if (! peraggstate->transtype2ByVal)
|
||||
if (!peraggstate->transtype2ByVal)
|
||||
pfree(peraggstate->value2);
|
||||
peraggstate->value2 = newVal;
|
||||
}
|
||||
@ -262,7 +273,7 @@ advance_transition_functions (AggStatePerAgg peraggstate,
|
||||
* Compute the final value of one aggregate.
|
||||
*/
|
||||
static void
|
||||
finalize_aggregate (AggStatePerAgg peraggstate,
|
||||
finalize_aggregate(AggStatePerAgg peraggstate,
|
||||
Datum *resultVal, bool *resultIsNull)
|
||||
{
|
||||
Aggref *aggref = peraggstate->aggref;
|
||||
@ -270,9 +281,10 @@ finalize_aggregate (AggStatePerAgg peraggstate,
|
||||
|
||||
/*
|
||||
* If it's a DISTINCT aggregate, all we've done so far is to stuff the
|
||||
* input values into the sort object. Complete the sort, then run
|
||||
* the transition functions on the non-duplicate values. Note that
|
||||
* DISTINCT always suppresses nulls, per SQL spec, regardless of usenulls.
|
||||
* input values into the sort object. Complete the sort, then run the
|
||||
* transition functions on the non-duplicate values. Note that
|
||||
* DISTINCT always suppresses nulls, per SQL spec, regardless of
|
||||
* usenulls.
|
||||
*/
|
||||
if (aggref->aggdistinct)
|
||||
{
|
||||
@ -295,35 +307,35 @@ finalize_aggregate (AggStatePerAgg peraggstate,
|
||||
newVal);
|
||||
if (DatumGetInt32(equal) != 0)
|
||||
{
|
||||
if (! peraggstate->inputtypeByVal)
|
||||
if (!peraggstate->inputtypeByVal)
|
||||
pfree(DatumGetPointer(newVal));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
advance_transition_functions(peraggstate, newVal, false);
|
||||
if (haveOldVal && ! peraggstate->inputtypeByVal)
|
||||
if (haveOldVal && !peraggstate->inputtypeByVal)
|
||||
pfree(DatumGetPointer(oldVal));
|
||||
oldVal = newVal;
|
||||
haveOldVal = true;
|
||||
}
|
||||
if (haveOldVal && ! peraggstate->inputtypeByVal)
|
||||
if (haveOldVal && !peraggstate->inputtypeByVal)
|
||||
pfree(DatumGetPointer(oldVal));
|
||||
tuplesort_end(peraggstate->sortstate);
|
||||
peraggstate->sortstate = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now apply the agg's finalfn, or substitute the appropriate transition
|
||||
* value if there is no finalfn.
|
||||
* Now apply the agg's finalfn, or substitute the appropriate
|
||||
* transition value if there is no finalfn.
|
||||
*
|
||||
* XXX For now, only apply finalfn if we got at least one
|
||||
* non-null input value. This prevents zero divide in AVG().
|
||||
* If we had cleaner handling of null inputs/results in functions,
|
||||
* we could probably take out this hack and define the result
|
||||
* for no inputs as whatever finalfn returns for null input.
|
||||
* XXX For now, only apply finalfn if we got at least one non-null input
|
||||
* value. This prevents zero divide in AVG(). If we had cleaner
|
||||
* handling of null inputs/results in functions, we could probably
|
||||
* take out this hack and define the result for no inputs as whatever
|
||||
* finalfn returns for null input.
|
||||
*/
|
||||
if (OidIsValid(peraggstate->finalfn_oid) &&
|
||||
! peraggstate->noInitValue)
|
||||
!peraggstate->noInitValue)
|
||||
{
|
||||
if (peraggstate->finalfn.fn_nargs > 1)
|
||||
{
|
||||
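The XXX note above exists because a final function such as AVG's divides one transition value by the other, so skipping it when no non-null input arrived is what keeps the count from being zero at division time. A toy integer version, purely illustrative (the real AVG final functions live in the numeric/float code, not here):

/* sketch only; assumes the usual backend includes */
static Datum
avg_finalfn_sketch(Datum sum, Datum count, bool *isNull)
{
    int32       n = DatumGetInt32(count);

    if (n == 0)
    {
        *isNull = true;         /* no accepted inputs: NULL, not 0/0 */
        return (Datum) 0;
    }
    *isNull = false;
    return Int32GetDatum(DatumGetInt32(sum) / n);
}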
@ -361,17 +373,17 @@ finalize_aggregate (AggStatePerAgg peraggstate,
|
||||
elog(ERROR, "ExecAgg: no valid transition functions??");
|
||||
|
||||
/*
|
||||
* Release any per-group working storage, unless we're passing
|
||||
* it back as the result of the aggregate.
|
||||
* Release any per-group working storage, unless we're passing it back
|
||||
* as the result of the aggregate.
|
||||
*/
|
||||
if (OidIsValid(peraggstate->xfn1_oid) &&
|
||||
! peraggstate->value1IsNull &&
|
||||
! peraggstate->transtype1ByVal)
|
||||
!peraggstate->value1IsNull &&
|
||||
!peraggstate->transtype1ByVal)
|
||||
pfree(peraggstate->value1);
|
||||
|
||||
if (OidIsValid(peraggstate->xfn2_oid) &&
|
||||
! peraggstate->value2IsNull &&
|
||||
! peraggstate->transtype2ByVal)
|
||||
!peraggstate->value2IsNull &&
|
||||
!peraggstate->transtype2ByVal)
|
||||
pfree(peraggstate->value2);
|
||||
}
|
||||
|
||||
@ -479,37 +491,37 @@ ExecAgg(Agg *node)
|
||||
|
||||
/*
|
||||
* Keep a copy of the first input tuple for the projection.
|
||||
* (We only need one since only the GROUP BY columns in it
|
||||
* can be referenced, and these will be the same for all
|
||||
* tuples aggregated over.)
|
||||
* (We only need one since only the GROUP BY columns in it can
|
||||
* be referenced, and these will be the same for all tuples
|
||||
* aggregated over.)
|
||||
*/
|
||||
if (!inputTuple)
|
||||
inputTuple = heap_copytuple(outerslot->val);
|
||||
}
|
||||
|
||||
/*
|
||||
* Done scanning input tuple group.
|
||||
* Finalize each aggregate calculation.
|
||||
* Done scanning input tuple group. Finalize each aggregate
|
||||
* calculation.
|
||||
*/
|
||||
for (aggno = 0; aggno < aggstate->numaggs; aggno++)
|
||||
{
|
||||
AggStatePerAgg peraggstate = &peragg[aggno];
|
||||
|
||||
finalize_aggregate(peraggstate,
|
||||
& aggvalues[aggno], & aggnulls[aggno]);
|
||||
&aggvalues[aggno], &aggnulls[aggno]);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the outerPlan is a Group node, we will reach here after each
|
||||
* group. We are not done unless the Group node is done (a little
|
||||
* ugliness here while we reach into the Group's state to find out).
|
||||
* Furthermore, when grouping we return nothing at all unless we
|
||||
* had some input tuple(s). By the nature of Group, there are
|
||||
* no empty groups, so if we get here with no input the whole scan
|
||||
* is empty.
|
||||
* ugliness here while we reach into the Group's state to find
|
||||
* out). Furthermore, when grouping we return nothing at all
|
||||
* unless we had some input tuple(s). By the nature of Group,
|
||||
* there are no empty groups, so if we get here with no input the
|
||||
* whole scan is empty.
|
||||
*
|
||||
* If the outerPlan isn't a Group, we are done when we get here,
|
||||
* and we will emit a (single) tuple even if there were no input
|
||||
* If the outerPlan isn't a Group, we are done when we get here, and
|
||||
* we will emit a (single) tuple even if there were no input
|
||||
* tuples.
|
||||
*/
|
||||
if (IsA(outerPlan, Group))
|
||||
@ -523,17 +535,18 @@ ExecAgg(Agg *node)
|
||||
else
|
||||
{
|
||||
aggstate->agg_done = true;
|
||||
|
||||
/*
|
||||
* If inputtuple==NULL (ie, the outerPlan didn't return anything),
|
||||
* create a dummy all-nulls input tuple for use by execProject.
|
||||
* 99.44% of the time this is a waste of cycles, because
|
||||
* ordinarily the projected output tuple's targetlist cannot
|
||||
* contain any direct (non-aggregated) references to input
|
||||
* columns, so the dummy tuple will not be referenced. However
|
||||
* there are special cases where this isn't so --- in particular
|
||||
* an UPDATE involving an aggregate will have a targetlist
|
||||
* reference to ctid. We need to return a null for ctid in that
|
||||
* situation, not coredump.
|
||||
* If inputtuple==NULL (ie, the outerPlan didn't return
|
||||
* anything), create a dummy all-nulls input tuple for use by
|
||||
* execProject. 99.44% of the time this is a waste of cycles,
|
||||
* because ordinarily the projected output tuple's targetlist
|
||||
* cannot contain any direct (non-aggregated) references to
|
||||
* input columns, so the dummy tuple will not be referenced.
|
||||
* However there are special cases where this isn't so --- in
|
||||
* particular an UPDATE involving an aggregate will have a
|
||||
* targetlist reference to ctid. We need to return a null for
|
||||
* ctid in that situation, not coredump.
|
||||
*
|
||||
* The values returned for the aggregates will be the initial
|
||||
* values of the transition functions.
|
||||
@ -550,7 +563,7 @@ ExecAgg(Agg *node)
|
||||
/* watch out for null input tuples, though... */
|
||||
if (tupType && tupValue)
|
||||
{
|
||||
null_array = (char *) palloc(sizeof(char)*tupType->natts);
|
||||
null_array = (char *) palloc(sizeof(char) * tupType->natts);
|
||||
for (attnum = 0; attnum < tupType->natts; attnum++)
|
||||
null_array[attnum] = 'n';
|
||||
inputTuple = heap_formtuple(tupType, tupValue, null_array);
|
||||
@ -576,12 +589,12 @@ ExecAgg(Agg *node)
|
||||
resultSlot = ExecProject(projInfo, &isDone);
|
||||
|
||||
/*
|
||||
* If the completed tuple does not match the qualifications,
|
||||
* it is ignored and we loop back to try to process another group.
|
||||
* If the completed tuple does not match the qualifications, it is
|
||||
* ignored and we loop back to try to process another group.
|
||||
* Otherwise, return the tuple.
|
||||
*/
|
||||
}
|
||||
while (! ExecQual(node->plan.qual, econtext, false));
|
||||
while (!ExecQual(node->plan.qual, econtext, false));
|
||||
|
||||
return resultSlot;
|
||||
}
|
||||
@ -620,21 +633,23 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
|
||||
* find aggregates in targetlist and quals
|
||||
*
|
||||
* Note: pull_agg_clauses also checks that no aggs contain other agg
|
||||
* calls in their arguments. This would make no sense under SQL semantics
|
||||
* anyway (and it's forbidden by the spec). Because that is true, we
|
||||
* don't need to worry about evaluating the aggs in any particular order.
|
||||
* calls in their arguments. This would make no sense under SQL
|
||||
* semantics anyway (and it's forbidden by the spec). Because that is
|
||||
* true, we don't need to worry about evaluating the aggs in any
|
||||
* particular order.
|
||||
*/
|
||||
aggstate->aggs = nconc(pull_agg_clause((Node *) node->plan.targetlist),
|
||||
pull_agg_clause((Node *) node->plan.qual));
|
||||
aggstate->numaggs = numaggs = length(aggstate->aggs);
|
||||
if (numaggs <= 0)
|
||||
{
|
||||
|
||||
/*
|
||||
* This used to be treated as an error, but we can't do that anymore
|
||||
* because constant-expression simplification could optimize away
|
||||
* all of the Aggrefs in the targetlist and qual. So, just make a
|
||||
* debug note, and force numaggs positive so that palloc()s below
|
||||
* don't choke.
|
||||
* This used to be treated as an error, but we can't do that
|
||||
* anymore because constant-expression simplification could
|
||||
* optimize away all of the Aggrefs in the targetlist and qual.
|
||||
* So, just make a debug note, and force numaggs positive so that
|
||||
* palloc()s below don't choke.
|
||||
*/
|
||||
elog(DEBUG, "ExecInitAgg: could not find any aggregate functions");
|
||||
numaggs = 1;
|
||||
@ -655,8 +670,8 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
|
||||
ExecInitResultTupleSlot(estate, &aggstate->csstate.cstate);
|
||||
|
||||
/*
|
||||
* Set up aggregate-result storage in the expr context,
|
||||
* and also allocate my private per-agg working storage
|
||||
* Set up aggregate-result storage in the expr context, and also
|
||||
* allocate my private per-agg working storage
|
||||
*/
|
||||
econtext = aggstate->csstate.cstate.cs_ExprContext;
|
||||
econtext->ecxt_aggvalues = (Datum *) palloc(sizeof(Datum) * numaggs);
|
||||
@ -762,9 +777,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
|
||||
}
|
||||
|
||||
if (OidIsValid(finalfn_oid))
|
||||
{
|
||||
fmgr_info(finalfn_oid, &peraggstate->finalfn);
|
||||
}
|
||||
|
||||
if (aggref->aggdistinct)
|
||||
{
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.29 2000/01/26 05:56:22 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.30 2000/04/12 17:15:09 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -304,6 +304,7 @@ ExecInitAppend(Append *node, EState *estate, Plan *parent)
|
||||
{
|
||||
JunkFilter *j = ExecInitJunkFilter(initNode->targetlist,
|
||||
ExecGetTupType(initNode));
|
||||
|
||||
junkList = lappend(junkList, j);
|
||||
}
@ -15,7 +15,7 @@
|
||||
* locate group boundaries.
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.33 2000/01/27 18:11:27 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.34 2000/04/12 17:15:09 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -97,8 +97,9 @@ ExecGroupEveryTuple(Group *node)
|
||||
{
|
||||
grpstate->grp_useFirstTuple = FALSE;
|
||||
|
||||
/* note we rely on subplan to hold ownership of the tuple
|
||||
* for as long as we need it; we don't copy it.
|
||||
/*
|
||||
* note we rely on subplan to hold ownership of the tuple for as
|
||||
* long as we need it; we don't copy it.
|
||||
*/
|
||||
ExecStoreTuple(grpstate->grp_firstTuple,
|
||||
grpstate->csstate.css_ScanTupleSlot,
|
||||
@ -122,17 +123,20 @@ ExecGroupEveryTuple(Group *node)
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
/*
|
||||
* Compare with first tuple and see if this tuple is of the
|
||||
* same group.
|
||||
*/
|
||||
if (! execTuplesMatch(firsttuple, outerTuple,
|
||||
if (!execTuplesMatch(firsttuple, outerTuple,
|
||||
tupdesc,
|
||||
node->numCols, node->grpColIdx,
|
||||
grpstate->eqfunctions))
|
||||
{
|
||||
|
||||
/*
|
||||
* No; save the tuple to return it next time, and return NULL
|
||||
* No; save the tuple to return it next time, and return
|
||||
* NULL
|
||||
*/
|
||||
grpstate->grp_useFirstTuple = TRUE;
|
||||
heap_freetuple(firsttuple);
|
||||
@ -142,8 +146,9 @@ ExecGroupEveryTuple(Group *node)
|
||||
}
|
||||
}
|
||||
|
||||
/* note we rely on subplan to hold ownership of the tuple
|
||||
* for as long as we need it; we don't copy it.
|
||||
/*
|
||||
* note we rely on subplan to hold ownership of the tuple for as
|
||||
* long as we need it; we don't copy it.
|
||||
*/
|
||||
ExecStoreTuple(outerTuple,
|
||||
grpstate->csstate.css_ScanTupleSlot,
|
||||
@ -227,10 +232,10 @@ ExecGroupOneTuple(Group *node)
|
||||
outerTuple = outerslot->val;
|
||||
|
||||
/*
|
||||
* Compare with first tuple and see if this tuple is of the
|
||||
* same group.
|
||||
* Compare with first tuple and see if this tuple is of the same
|
||||
* group.
|
||||
*/
|
||||
if (! execTuplesMatch(firsttuple, outerTuple,
|
||||
if (!execTuplesMatch(firsttuple, outerTuple,
|
||||
tupdesc,
|
||||
node->numCols, node->grpColIdx,
|
||||
grpstate->eqfunctions))
|
||||
@ -244,8 +249,9 @@ ExecGroupOneTuple(Group *node)
|
||||
*/
|
||||
projInfo = grpstate->csstate.cstate.cs_ProjInfo;
|
||||
|
||||
/* note we rely on subplan to hold ownership of the tuple
|
||||
* for as long as we need it; we don't copy it.
|
||||
/*
|
||||
* note we rely on subplan to hold ownership of the tuple for as long
|
||||
* as we need it; we don't copy it.
|
||||
*/
|
||||
ExecStoreTuple(firsttuple,
|
||||
grpstate->csstate.css_ScanTupleSlot,
|
||||
@ -418,7 +424,7 @@ execTuplesMatch(HeapTuple tuple1,
|
||||
* start comparing at the last field (least significant sort key).
|
||||
* That's the most likely to be different...
|
||||
*/
|
||||
for (i = numCols; --i >= 0; )
|
||||
for (i = numCols; --i >= 0;)
|
||||
{
|
||||
AttrNumber att = matchColIdx[i];
|
||||
Datum attr1,
|
||||
@ -445,7 +451,7 @@ execTuplesMatch(HeapTuple tuple1,
|
||||
|
||||
/* Apply the type-specific equality function */
|
||||
|
||||
equal = (Datum) (*fmgr_faddr(& eqfunctions[i])) (attr1, attr2);
|
||||
equal = (Datum) (*fmgr_faddr(&eqfunctions[i])) (attr1, attr2);
|
||||
|
||||
if (DatumGetInt32(equal) == 0)
|
||||
return FALSE;
|
||||
@ -481,7 +487,7 @@ execTuplesMatchPrepare(TupleDesc tupdesc,
|
||||
typeidTypeName(typid));
|
||||
}
|
||||
pgopform = (Form_pg_operator) GETSTRUCT(eq_operator);
|
||||
fmgr_info(pgopform->oprcode, & eqfunctions[i]);
|
||||
fmgr_info(pgopform->oprcode, &eqfunctions[i]);
|
||||
}
|
||||
|
||||
return eqfunctions;
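For reference, the column-matching loop reindented in the hunks above compares the grouping columns starting with the last (least significant) key, since that is the one most likely to differ. A condensed sketch, with the usual grouping NULL rule made explicit (two NULLs match, NULL versus non-NULL does not):

/* sketch only; assumes the usual backend includes */
static bool
tuples_match_sketch(HeapTuple tuple1, HeapTuple tuple2, TupleDesc tupdesc,
                    int numCols, AttrNumber *matchColIdx,
                    FmgrInfo *eqfunctions)
{
    int         i;

    for (i = numCols; --i >= 0;)
    {
        AttrNumber  att = matchColIdx[i];
        bool        isNull1,
                    isNull2;
        Datum       attr1 = heap_getattr(tuple1, att, tupdesc, &isNull1);
        Datum       attr2 = heap_getattr(tuple2, att, tupdesc, &isNull2);

        if (isNull1 != isNull2)
            return FALSE;       /* one NULL, one not: different groups */
        if (isNull1)
            continue;           /* both NULL: treat as equal, next column */

        /* apply the type-specific equality function prepared earlier */
        if (DatumGetInt32((Datum) (*fmgr_faddr(&eqfunctions[i])) (attr1, attr2)) == 0)
            return FALSE;
    }
    return TRUE;
}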
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.48 2000/04/07 00:30:41 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.49 2000/04/12 17:15:09 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -195,8 +195,8 @@ IndexNext(IndexScan *node)
|
||||
List *qual;
|
||||
|
||||
/*
|
||||
* store the scanned tuple in the scan tuple slot of
|
||||
* the scan state. Eventually we will only do this and not
|
||||
* store the scanned tuple in the scan tuple slot of the
|
||||
* scan state. Eventually we will only do this and not
|
||||
* return a tuple. Note: we pass 'false' because tuples
|
||||
* returned by amgetnext are pointers onto disk pages and
|
||||
* must not be pfree()'d.
|
||||
@ -208,16 +208,17 @@ IndexNext(IndexScan *node)
|
||||
|
||||
/*
|
||||
* At this point we have an extra pin on the buffer,
|
||||
* because ExecStoreTuple incremented the pin count.
|
||||
* Drop our local pin.
|
||||
* because ExecStoreTuple incremented the pin count. Drop
|
||||
* our local pin.
|
||||
*/
|
||||
ReleaseBuffer(buffer);
|
||||
|
||||
/*
|
||||
* We must check to see if the current tuple was already
|
||||
* matched by an earlier index, so we don't double-report it.
|
||||
* We do this by passing the tuple through ExecQual and
|
||||
* checking for failure with all previous qualifications.
|
||||
* matched by an earlier index, so we don't double-report
|
||||
* it. We do this by passing the tuple through ExecQual
|
||||
* and checking for failure with all previous
|
||||
* qualifications.
|
||||
*/
|
||||
scanstate->cstate.cs_ExprContext->ecxt_scantuple = slot;
|
||||
qual = node->indxqualorig;
|
||||
@ -234,7 +235,7 @@ IndexNext(IndexScan *node)
|
||||
qual = lnext(qual);
|
||||
}
|
||||
if (!prev_matches)
|
||||
return slot; /* OK to return tuple */
|
||||
return slot;/* OK to return tuple */
|
||||
/* Duplicate tuple, so drop it and loop back for another */
|
||||
ExecClearTuple(slot);
|
||||
}
|
||||
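The double-report check described above (a tuple found by the current index is dropped if an earlier index's original qual also accepts it) can be read as the following predicate; variable names follow IndexNext, while the helper and its loop bound are illustrative. The caller must already have pointed econtext->ecxt_scantuple at the candidate tuple's slot.

/* sketch only; assumes the usual backend includes */
static bool
matched_by_earlier_index(IndexScan *node, ExprContext *econtext, int indexPtr)
{
    List       *qual = node->indxqualorig;
    int         i;

    for (i = 0; i < indexPtr; i++)
    {
        /* resultForNull = false: a NULL qual result is not a match */
        if (ExecQual((List *) lfirst(qual), econtext, false))
            return true;        /* an earlier index already returned this tuple */
        qual = lnext(qual);
    }
    return false;
}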
@ -380,6 +381,7 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
|
||||
scanexpr = (run_keys[j] == RIGHT_OP) ?
|
||||
(Node *) get_rightop(clause) :
|
||||
(Node *) get_leftop(clause);
|
||||
|
||||
/*
|
||||
* pass in isDone but ignore it. We don't iterate in
|
||||
* quals
|
||||
@ -750,7 +752,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
|
||||
clause = nth(j, qual);
|
||||
|
||||
op = (Oper *) clause->oper;
|
||||
if (!IsA(clause, Expr) || !IsA(op, Oper))
|
||||
if (!IsA(clause, Expr) ||!IsA(op, Oper))
|
||||
elog(ERROR, "ExecInitIndexScan: indxqual not an opclause!");
|
||||
|
||||
opid = op->opid;
|
||||
@ -801,7 +803,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
|
||||
|
||||
Assert(leftop != NULL);
|
||||
|
||||
if (IsA(leftop, Var) && var_is_rel((Var *) leftop))
|
||||
if (IsA(leftop, Var) &&var_is_rel((Var *) leftop))
|
||||
{
|
||||
/* ----------------
|
||||
* if the leftop is a "rel-var", then it means
|
||||
@ -884,7 +886,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
|
||||
|
||||
Assert(rightop != NULL);
|
||||
|
||||
if (IsA(rightop, Var) && var_is_rel((Var *) rightop))
|
||||
if (IsA(rightop, Var) &&var_is_rel((Var *) rightop))
|
||||
{
|
||||
/* ----------------
|
||||
* here we make sure only one op identifies the
|
||||
@ -1049,10 +1051,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
|
||||
&currentRelation, /* return: rel desc */
(Pointer *) &currentScanDesc); /* return: scan desc */
|
||||
|
||||
if (!RelationGetForm(currentRelation)->relhasindex)
|
||||
{
|
||||
if (!RelationGetForm(currentRelation)->relhasindex)
|
||||
elog(ERROR, "indexes of the relation %u was inactivated", reloid);
|
||||
}
|
||||
scanstate->css_currentRelation = currentRelation;
|
||||
scanstate->css_currentScanDesc = currentScanDesc;
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSort.c,v 1.25 2000/01/26 05:56:23 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSort.c,v 1.26 2000/04/12 17:15:09 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -126,7 +126,7 @@ ExecSort(Sort *node)
|
||||
* ----------------
|
||||
*/
|
||||
|
||||
if (! sortstate->sort_Done)
|
||||
if (!sortstate->sort_Done)
|
||||
{
|
||||
Plan *outerNode;
|
||||
TupleDesc tupDesc;
|
||||
@ -156,7 +156,7 @@ ExecSort(Sort *node)
|
||||
sortkeys = (ScanKey) sortstate->sort_Keys;
|
||||
|
||||
tuplesortstate = tuplesort_begin_heap(tupDesc, keycount, sortkeys,
|
||||
true /* randomAccess */);
|
||||
true /* randomAccess */ );
|
||||
|
||||
sortstate->tuplesortstate = (void *) tuplesortstate;
|
||||
|
||||
@ -371,7 +371,7 @@ ExecSortMarkPos(Sort *node)
|
||||
* if we haven't sorted yet, just return
|
||||
* ----------------
|
||||
*/
|
||||
if (! sortstate->sort_Done)
|
||||
if (!sortstate->sort_Done)
|
||||
return;
|
||||
|
||||
tuplesort_markpos((Tuplesortstate *) sortstate->tuplesortstate);
|
||||
@ -392,7 +392,7 @@ ExecSortRestrPos(Sort *node)
|
||||
* if we haven't sorted yet, just return.
|
||||
* ----------------
|
||||
*/
|
||||
if (! sortstate->sort_Done)
|
||||
if (!sortstate->sort_Done)
|
||||
return;
|
||||
|
||||
/* ----------------
|
||||
@ -412,14 +412,14 @@ ExecReScanSort(Sort *node, ExprContext *exprCtxt, Plan *parent)
|
||||
* not NULL then it will be re-scanned by ExecProcNode, else - no
|
||||
* reason to re-scan it at all.
|
||||
*/
|
||||
if (! sortstate->sort_Done)
|
||||
if (!sortstate->sort_Done)
|
||||
return;
|
||||
|
||||
ExecClearTuple(sortstate->csstate.cstate.cs_ResultTupleSlot);
|
||||
|
||||
/*
|
||||
* If subnode is to be rescanned then we forget previous sort
|
||||
* results; we have to re-read the subplan and re-sort.
|
||||
* If subnode is to be rescanned then we forget previous sort results;
|
||||
* we have to re-read the subplan and re-sort.
|
||||
*
|
||||
* Otherwise we can just rewind and rescan the sorted output.
|
||||
*/
|
||||
@ -430,7 +430,5 @@ ExecReScanSort(Sort *node, ExprContext *exprCtxt, Plan *parent)
|
||||
sortstate->tuplesortstate = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
tuplesort_rescan((Tuplesortstate *) sortstate->tuplesortstate);
|
||||
}
|
||||
}
@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.24 2000/03/23 07:32:58 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.25 2000/04/12 17:15:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -67,8 +67,8 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
ExecReScan(plan, (ExprContext *) NULL, plan);

/*
* For all sublink types except EXPR_SUBLINK, the result is boolean
* as are the results of the combining operators. We combine results
* For all sublink types except EXPR_SUBLINK, the result is boolean as
* are the results of the combining operators. We combine results
* within a tuple (if there are multiple columns) using OR semantics
* if "useor" is true, AND semantics if not. We then combine results
* across tuples (if the subplan produces more than one) using OR
@ -106,13 +106,14 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
if (found)
elog(ERROR, "More than one tuple returned by a subselect used as an expression.");
found = true;

/*
* We need to copy the subplan's tuple in case the result is of
* pass-by-ref type --- our return value will point into this
* copied tuple! Can't use the subplan's instance of the tuple
* since it won't still be valid after next ExecProcNode() call.
* node->curTuple keeps track of the copied tuple for eventual
* freeing.
* We need to copy the subplan's tuple in case the result is
* of pass-by-ref type --- our return value will point into
* this copied tuple! Can't use the subplan's instance of the
* tuple since it won't still be valid after next
* ExecProcNode() call. node->curTuple keeps track of the
* copied tuple for eventual freeing.
*/
tup = heap_copytuple(tup);
if (node->curTuple)
@ -129,7 +130,8 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)

found = true;

/* For ALL, ANY, and MULTIEXPR sublinks, iterate over combining
/*
* For ALL, ANY, and MULTIEXPR sublinks, iterate over combining
* operators for columns of tuple.
*/
foreach(lst, sublink->oper)
@ -140,14 +142,14 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
bool expnull;

/*
* The righthand side of the expression should be either a Const
* or a function call or RelabelType node taking a Const as arg
* (these nodes represent run-time type coercions inserted by
* the parser to get to the input type needed by the operator).
* Find the Const node and insert the actual righthand-side value
* into it.
* The righthand side of the expression should be either a
* Const or a function call or RelabelType node taking a Const
* as arg (these nodes represent run-time type coercions
* inserted by the parser to get to the input type needed by
* the operator). Find the Const node and insert the actual
* righthand-side value into it.
*/
if (! IsA(con, Const))
if (!IsA(con, Const))
{
switch (con->type)
{
@ -161,16 +163,18 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
/* will fail below */
break;
}
if (! IsA(con, Const))
if (!IsA(con, Const))
elog(ERROR, "ExecSubPlan: failed to find placeholder for subplan result");
}
con->constvalue = heap_getattr(tup, col, tdesc,
&(con->constisnull));

/*
* Now we can eval the combining operator for this column.
*/
expresult = ExecEvalExpr((Node *) expr, econtext, &expnull,
(bool *) NULL);

/*
* Combine the result into the row result as appropriate.
*/
@ -240,7 +244,9 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)

if (!found)
{
/* deal with empty subplan result. result/isNull were previously

/*
* deal with empty subplan result. result/isNull were previously
* initialized correctly for all sublink types except EXPR and
* MULTIEXPR; for those, return NULL.
*/
@ -354,9 +360,9 @@ ExecSetParamPlan(SubPlan *node)

/*
* We need to copy the subplan's tuple in case any of the params
* are pass-by-ref type --- the pointers stored in the param structs
* will point at this copied tuple! node->curTuple keeps track
* of the copied tuple for eventual freeing.
* are pass-by-ref type --- the pointers stored in the param
* structs will point at this copied tuple! node->curTuple keeps
* track of the copied tuple for eventual freeing.
*/
tup = heap_copytuple(tup);
if (node->curTuple)
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.5 2000/04/07 00:30:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.6 2000/04/12 17:15:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -43,10 +43,10 @@ TidListCreate(List *evalList, ExprContext *econtext, ItemPointer *tidList)
bool isNull;
int numTids = 0;

foreach (lst, evalList)
foreach(lst, evalList)
{
itemptr = (ItemPointer)ExecEvalExpr(lfirst(lst), econtext,
&isNull, (bool *)0);
itemptr = (ItemPointer) ExecEvalExpr(lfirst(lst), econtext,
&isNull, (bool *) 0);
if (itemptr && ItemPointerIsValid(itemptr))
{
tidList[numTids] = itemptr;
@ -80,7 +80,8 @@ TidNext(TidScan *node)

bool bBackward;
int tidNumber;
ItemPointer *tidList, itemptr;
ItemPointer *tidList,
itemptr;

/* ----------------
* extract necessary information from tid scan node
@ -170,22 +171,22 @@ TidNext(TidScan *node)
* ----------------
*/
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
slot,/* slot to store in */
buffer, /* buffer associated with tuple */
false); /* don't pfree */

/*
* At this point we have an extra pin on the buffer,
* because ExecStoreTuple incremented the pin count.
* Drop our local pin.
* At this point we have an extra pin on the buffer, because
* ExecStoreTuple incremented the pin count. Drop our local
* pin.
*/
ReleaseBuffer(buffer);

/*
* We must check to see if the current tuple would have
* been matched by an earlier tid, so we don't double
* report it. We do this by passing the tuple through
* ExecQual and look for failure with all previous
* qualifications.
* We must check to see if the current tuple would have been
* matched by an earlier tid, so we don't double report it. We
* do this by passing the tuple through ExecQual and look for
* failure with all previous qualifications.
*/
for (prev_tid = 0; prev_tid < tidstate->tss_TidPtr;
prev_tid++)
@ -473,7 +474,7 @@ ExecInitTidScan(TidScan *node, EState *estate, Plan *parent)
* get the tid node information
* ----------------
*/
tidList = (ItemPointer *)palloc(length(node->tideval) * sizeof(ItemPointer));
tidList = (ItemPointer *) palloc(length(node->tideval) * sizeof(ItemPointer));
numTids = 0;
if (!node->needRescan)
numTids = TidListCreate(node->tideval, scanstate->cstate.cs_ExprContext, tidList);
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.27 2000/01/27 18:11:27 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.28 2000/04/12 17:15:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -55,7 +55,7 @@ ExecUnique(Unique *node)
uniquestate = node->uniquestate;
outerPlan = outerPlan((Plan *) node);
resultTupleSlot = uniquestate->cstate.cs_ResultTupleSlot;
tupDesc = ExecGetResultType(& uniquestate->cstate);
tupDesc = ExecGetResultType(&uniquestate->cstate);

/* ----------------
* now loop, returning only non-duplicate tuples.
@ -86,7 +86,7 @@ ExecUnique(Unique *node)
* another new tuple from the subplan.
* ----------------
*/
if (! execTuplesMatch(slot->val, uniquestate->priorTuple,
if (!execTuplesMatch(slot->val, uniquestate->priorTuple,
tupDesc,
node->numCols, node->uniqColIdx,
uniquestate->eqfunctions))
@ -151,14 +151,14 @@ ExecInitUnique(Unique *node, EState *estate, Plan *parent)
* they never call ExecQual or ExecTargetList.
* ----------------
*/
ExecAssignNodeBaseInfo(estate, & uniquestate->cstate, parent);
ExecAssignNodeBaseInfo(estate, &uniquestate->cstate, parent);

#define UNIQUE_NSLOTS 1
/* ------------
* Tuple table initialization
* ------------
*/
ExecInitResultTupleSlot(estate, & uniquestate->cstate);
ExecInitResultTupleSlot(estate, &uniquestate->cstate);

/* ----------------
* then initialize outer plan
@ -172,14 +172,14 @@ ExecInitUnique(Unique *node, EState *estate, Plan *parent)
* projection info for this node appropriately
* ----------------
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, & uniquestate->cstate);
ExecAssignResultTypeFromOuterPlan((Plan *) node, &uniquestate->cstate);
uniquestate->cstate.cs_ProjInfo = NULL;

/*
* Precompute fmgr lookup data for inner loop
*/
uniquestate->eqfunctions =
execTuplesMatchPrepare(ExecGetResultType(& uniquestate->cstate),
execTuplesMatchPrepare(ExecGetResultType(&uniquestate->cstate),
node->numCols,
node->uniqColIdx);
@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/lib/dllist.c,v 1.16 2000/01/26 05:56:26 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/lib/dllist.c,v 1.17 2000/04/12 17:15:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -128,14 +128,16 @@ DLRemove(Dlelem *e)

if (e->dle_prev)
e->dle_prev->dle_next = e->dle_next;
else /* must be the head element */
else
/* must be the head element */
{
Assert(e == l->dll_head);
l->dll_head = e->dle_next;
}
if (e->dle_next)
e->dle_next->dle_prev = e->dle_prev;
else /* must be the tail element */
else
/* must be the tail element */
{
Assert(e == l->dll_tail);
l->dll_tail = e->dle_prev;
@ -236,7 +238,8 @@ DLMoveToFront(Dlelem *e)

if (e->dle_next)
e->dle_next->dle_prev = e->dle_prev;
else /* must be the tail element */
else
/* must be the tail element */
{
Assert(e == l->dll_tail);
l->dll_tail = e->dle_prev;
@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: stringinfo.c,v 1.24 2000/01/26 05:56:26 momjian Exp $
* $Id: stringinfo.c,v 1.25 2000/04/12 17:15:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -122,12 +122,13 @@ appendStringInfo(StringInfo str, const char *fmt,...)
nprinted = vsnprintf(str->data + str->len, avail,
fmt, args);
va_end(args);

/*
* Note: some versions of vsnprintf return the number of chars
* actually stored, but at least one returns -1 on failure.
* Be conservative about believing whether the print worked.
* actually stored, but at least one returns -1 on failure. Be
* conservative about believing whether the print worked.
*/
if (nprinted >= 0 && nprinted < avail-1)
if (nprinted >= 0 && nprinted < avail - 1)
{
/* Success. Note nprinted does not include trailing null. */
str->len += nprinted;
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.43 2000/01/26 05:56:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.44 2000/04/12 17:15:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -472,6 +472,7 @@ be_recvauth(Port *port)

AuthRequest areq = AUTH_REQ_OK;
PacketDoneProc auth_handler = NULL;

switch (port->auth_method)
{
case uaReject:
@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: be-dumpdata.c,v 1.32 2000/01/26 05:56:28 momjian Exp $
* $Id: be-dumpdata.c,v 1.33 2000/04/12 17:15:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.43 2000/01/26 05:56:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.44 2000/04/12 17:15:14 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@ -259,9 +259,9 @@ lo_tell(int fd)
}

/*
* We assume we do not need to switch contexts for inv_tell.
* That is true for now, but is probably more than this module
* ought to assume...
* We assume we do not need to switch contexts for inv_tell. That is
* true for now, but is probably more than this module ought to
* assume...
*/
return inv_tell(cookies[fd]);
}
@ -269,10 +269,11 @@ lo_tell(int fd)
int
lo_unlink(Oid lobjId)
{

/*
* inv_drop does not need a context switch, indeed it doesn't
* touch any LO-specific data structures at all. (Again, that's
* probably more than this module ought to be assuming.)
* inv_drop does not need a context switch, indeed it doesn't touch
* any LO-specific data structures at all. (Again, that's probably
* more than this module ought to be assuming.)
*
* XXX there ought to be some code to clean up any open LOs that
* reference the specified relation... as is, they remain "open".
@ -417,9 +418,9 @@ lo_export(Oid lobjId, text *filename)
/*
* open the file to be written to
*
* Note: we reduce backend's normal 077 umask to the slightly
* friendlier 022. This code used to drop it all the way to 0,
* but creating world-writable export files doesn't seem wise.
* Note: we reduce backend's normal 077 umask to the slightly friendlier
* 022. This code used to drop it all the way to 0, but creating
* world-writable export files doesn't seem wise.
*/
nbytes = VARSIZE(filename) - VARHDRSZ + 1;
if (nbytes > FNAME_BUFSIZE)
@ -470,8 +471,9 @@ lo_commit(bool isCommit)

currentContext = MemoryContextSwitchTo((MemoryContext) fscxt);

/* Clean out still-open index scans (not necessary if aborting)
* and clear cookies array so that LO fds are no longer good.
/*
* Clean out still-open index scans (not necessary if aborting) and
* clear cookies array so that LO fds are no longer good.
*/
for (i = 0; i < MAX_LOBJ_FDS; i++)
{
@ -5,7 +5,7 @@
* wherein you authenticate a user by seeing what IP address the system
* says he comes from and possibly using ident).
*
* $Id: hba.c,v 1.50 2000/03/17 02:36:08 tgl Exp $
* $Id: hba.c,v 1.51 2000/04/12 17:15:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -217,11 +217,12 @@ process_hba_record(FILE *file, hbaPort *port, bool *matches_p, bool *error_p)
{
struct in_addr file_ip_addr,
mask;
bool discard = 0; /* Discard this entry */
bool discard = 0;/* Discard this entry */

#ifdef USE_SSL
/* If SSL, then check that we are on SSL */
if (strcmp(buf, "hostssl") == 0) {
if (strcmp(buf, "hostssl") == 0)
{
if (!port->ssl)
discard = 1;

@ -232,7 +233,7 @@ process_hba_record(FILE *file, hbaPort *port, bool *matches_p, bool *error_p)
}
#else
/* If not SSL, we don't support this */
if (strcmp(buf,"hostssl") == 0)
if (strcmp(buf, "hostssl") == 0)
goto syntax;
#endif
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/portalbuf.c,v 1.23 2000/03/17 02:36:08 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/portalbuf.c,v 1.24 2000/04/12 17:15:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -29,7 +29,7 @@
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: pqcomm.c,v 1.87 2000/01/26 05:56:29 momjian Exp $
* $Id: pqcomm.c,v 1.88 2000/04/12 17:15:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -561,9 +561,7 @@ pq_getstring(StringInfo s)

/* Read until we get the terminating '\0' */
while ((c = pq_getbyte()) != EOF && c != '\0')
{
appendStringInfoChar(s, c);
}

if (c == EOF)
return EOF;
@ -614,6 +612,7 @@ pq_flush(void)
while (bufptr < bufend)
{
int r;

#ifdef USE_SSL
if (MyProcPort->ssl)
r = SSL_write(MyProcPort->ssl, bufptr, bufend - bufptr);
@ -16,7 +16,7 @@
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: pqformat.c,v 1.12 2000/01/26 05:56:29 momjian Exp $
* $Id: pqformat.c,v 1.13 2000/04/12 17:15:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -156,6 +156,7 @@ void
pq_sendstring(StringInfo buf, const char *str)
{
int slen = strlen(str);

#ifdef MULTIBYTE
char *p;

@ -237,6 +238,7 @@ int
pq_puttextmessage(char msgtype, const char *str)
{
int slen = strlen(str);

#ifdef MULTIBYTE
char *p;

@ -244,6 +246,7 @@ pq_puttextmessage(char msgtype, const char *str)
if (p != str) /* actual conversion has been done? */
{
int result = pq_putmessage(msgtype, p, strlen(p) + 1);

pfree(p);
return result;
}
@ -308,8 +311,10 @@ int
pq_getstr(StringInfo s)
{
int result;

#ifdef MULTIBYTE
char *p;

#endif

result = pq_getstring(s);
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/pqpacket.c,v 1.25 2000/03/19 22:10:07 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/Attic/pqpacket.c,v 1.26 2000/04/12 17:15:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.112 2000/04/08 00:21:15 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.113 2000/04/12 17:15:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1108,6 +1108,7 @@ _copyIndexOptInfo(IndexOptInfo *from)
static void
CopyPathFields(Path *from, Path *newnode)
{

/*
* Modify the next line, since it causes the copying to cycle (i.e.
* the parent points right back here! -- JMH, 7/7/92. Old version:
@ -1189,6 +1190,7 @@ _copyTidPath(TidPath *from)

return newnode;
}

/* ----------------
* CopyJoinPathFields
*
@ -1497,8 +1499,8 @@ _copyQuery(Query *from)

/*
* We do not copy the planner internal fields: base_rel_list,
* join_rel_list, equi_key_list, query_pathkeys.
* Not entirely clear if this is right?
* join_rel_list, equi_key_list, query_pathkeys. Not entirely clear if
* this is right?
*/

return newnode;
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.65 2000/03/22 22:08:32 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.66 2000/04/12 17:15:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -81,9 +81,11 @@ _equalFjoin(Fjoin *a, Fjoin *b)
static bool
_equalExpr(Expr *a, Expr *b)
{
/* We do not examine typeOid, since the optimizer often doesn't
* bother to set it in created nodes, and it is logically a
* derivative of the oper field anyway.

/*
* We do not examine typeOid, since the optimizer often doesn't bother
* to set it in created nodes, and it is logically a derivative of the
* oper field anyway.
*/
if (a->opType != b->opType)
return false;
@ -134,7 +136,9 @@ _equalOper(Oper *a, Oper *b)
return false;
if (a->opresulttype != b->opresulttype)
return false;
/* We do not examine opid, opsize, or op_fcache, since these are

/*
* We do not examine opid, opsize, or op_fcache, since these are
* logically derived from opno, and they may not be set yet depending
* on how far along the node is in the parse/plan pipeline.
*
@ -156,10 +160,11 @@ _equalConst(Const *a, Const *b)
if (a->constbyval != b->constbyval)
return false;
/* XXX What about constisset and constiscast? */

/*
* We treat all NULL constants of the same type as equal.
* Someday this might need to change? But datumIsEqual
* doesn't work on nulls, so...
* We treat all NULL constants of the same type as equal. Someday this
* might need to change? But datumIsEqual doesn't work on nulls,
* so...
*/
if (a->constisnull)
return true;
@ -320,7 +325,9 @@ _equalArrayRef(ArrayRef *a, ArrayRef *b)
static bool
_equalRelOptInfo(RelOptInfo *a, RelOptInfo *b)
{
/* We treat RelOptInfos as equal if they refer to the same base rels

/*
* We treat RelOptInfos as equal if they refer to the same base rels
* joined in the same order. Is this sufficient?
*/
return equali(a->relids, b->relids);
@ -329,8 +336,10 @@ _equalRelOptInfo(RelOptInfo *a, RelOptInfo *b)
static bool
_equalIndexOptInfo(IndexOptInfo *a, IndexOptInfo *b)
{
/* We treat IndexOptInfos as equal if they refer to the same index.
* Is this sufficient?

/*
* We treat IndexOptInfos as equal if they refer to the same index. Is
* this sufficient?
*/
if (a->indexoid != b->indexoid)
return false;
@ -354,7 +363,9 @@ _equalPath(Path *a, Path *b)
return false;
if (!equal(a->parent, b->parent))
return false;
/* do not check path costs, since they may not be set yet, and being

/*
* do not check path costs, since they may not be set yet, and being
* float values there are roundoff error issues anyway...
*/
if (!equal(a->pathkeys, b->pathkeys))
@ -375,8 +386,10 @@ _equalIndexPath(IndexPath *a, IndexPath *b)
return false;
if (!equali(a->joinrelids, b->joinrelids))
return false;
/* Skip 'rows' because of possibility of floating-point roundoff error.
* It should be derivable from the other fields anyway.

/*
* Skip 'rows' because of possibility of floating-point roundoff
* error. It should be derivable from the other fields anyway.
*/
return true;
}
@ -448,6 +461,7 @@ _equalHashPath(HashPath *a, HashPath *b)
static bool
_equalIndexScan(IndexScan *a, IndexScan *b)
{

/*
* if(a->scan.plan.cost != b->scan.plan.cost) return(false);
*/
@ -642,9 +656,9 @@ _equalQuery(Query *a, Query *b)

/*
* We do not check the internal-to-the-planner fields: base_rel_list,
* join_rel_list, equi_key_list, query_pathkeys.
* They might not be set yet, and in any case they should be derivable
* from the other fields.
* join_rel_list, equi_key_list, query_pathkeys. They might not be set
* yet, and in any case they should be derivable from the other
* fields.
*/
return true;
}
@ -882,7 +896,8 @@ equal(void *a, void *b)
List *lb = (List *) b;
List *l;

/* Try to reject by length check before we grovel through
/*
* Try to reject by length check before we grovel through
* all the elements...
*/
if (length(la) != length(lb))
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/Attic/freefuncs.c,v 1.39 2000/03/14 23:06:28 thomas Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/Attic/freefuncs.c,v 1.40 2000/04/12 17:15:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -746,7 +746,9 @@ _freeRelOptInfo(RelOptInfo *node)

freeObject(node->targetlist);
freeObject(node->pathlist);
/* XXX is this right? cheapest-path fields will typically be pointers

/*
* XXX is this right? cheapest-path fields will typically be pointers
* into pathlist, not separate structs...
*/
freeObject(node->cheapest_startup_path);
@ -870,7 +872,9 @@ FreeJoinPathFields(JoinPath *node)
{
freeObject(node->outerjoinpath);
freeObject(node->innerjoinpath);
/* XXX probably wrong, since ordinarily a JoinPath would share its

/*
* XXX probably wrong, since ordinarily a JoinPath would share its
* restrictinfo list with other paths made for the same join?
*/
freeObject(node->joinrestrictinfo);
@ -970,7 +974,9 @@ _freeRestrictInfo(RestrictInfo *node)
* ----------------
*/
freeObject(node->clause);
/* this is certainly wrong? IndexOptInfos don't belong to

/*
* this is certainly wrong? IndexOptInfos don't belong to
* RestrictInfo...
*/
freeObject(node->subclauseindices);
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.30 2000/02/21 18:47:00 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.31 2000/04/12 17:15:16 momjian Exp $
*
* NOTES
* XXX a few of the following functions are duplicated to handle
@ -314,7 +314,7 @@ LispUnion(List *l1, List *l2)

foreach(i, l2)
{
if (! member(lfirst(i), retval))
if (!member(lfirst(i), retval))
retval = lappend(retval, lfirst(i));
}
return retval;
@ -328,7 +328,7 @@ LispUnioni(List *l1, List *l2)

foreach(i, l2)
{
if (! intMember(lfirsti(i), retval))
if (!intMember(lfirsti(i), retval))
retval = lappendi(retval, lfirsti(i));
}
return retval;
@ -494,7 +494,7 @@ set_difference(List *l1, List *l2)

foreach(i, l1)
{
if (! member(lfirst(i), l2))
if (!member(lfirst(i), l2))
result = lappend(result, lfirst(i));
}
return result;
@ -516,7 +516,7 @@ set_differencei(List *l1, List *l2)

foreach(i, l1)
{
if (! intMember(lfirsti(i), l2))
if (!intMember(lfirsti(i), l2))
result = lappendi(result, lfirsti(i));
}
return result;
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/makefuncs.c,v 1.20 2000/02/15 03:37:09 thomas Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/makefuncs.c,v 1.21 2000/04/12 17:15:16 momjian Exp $
*
* NOTES
* Creator functions in POSTGRES 4.2 are generated automatically. Most of
@ -62,11 +62,13 @@ makeVar(Index varno,
var->vartype = vartype;
var->vartypmod = vartypmod;
var->varlevelsup = varlevelsup;

/*
* Since few if any routines ever create Var nodes with varnoold/varoattno
* different from varno/varattno, we don't provide separate arguments
* for them, but just initialize them to the given varno/varattno.
* This reduces code clutter and chance of error for most callers.
* Since few if any routines ever create Var nodes with
* varnoold/varoattno different from varno/varattno, we don't provide
* separate arguments for them, but just initialize them to the given
* varno/varattno. This reduces code clutter and chance of error for
* most callers.
*/
var->varnoold = varno;
var->varoattno = varattno;
@ -107,7 +109,9 @@ makeResdom(AttrNumber resno,
resdom->restype = restype;
resdom->restypmod = restypmod;
resdom->resname = resname;
/* For historical reasons, ressortgroupref defaults to 0 while

/*
* For historical reasons, ressortgroupref defaults to 0 while
* reskey/reskeyop are passed in explicitly. This is pretty silly.
*/
resdom->ressortgroupref = 0;
@ -159,8 +163,3 @@ makeAttr(char *relname, char *attname)

return a;
}
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/nodes.c,v 1.12 2000/01/26 05:56:31 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/nodes.c,v 1.13 2000/04/12 17:15:16 momjian Exp $
*
* HISTORY
* Andrew Yu Oct 20, 1994 file creation
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.113 2000/03/24 02:58:25 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.114 2000/04/12 17:15:16 momjian Exp $
*
* NOTES
* Every (plan) node in POSTGRES has an associated "out" routine which
@ -60,10 +60,11 @@ _outToken(StringInfo str, char *s)
appendStringInfo(str, "<>");
return;
}

/*
* Look for characters or patterns that are treated specially by
* read.c (either in lsptok() or in nodeRead()), and therefore need
* a protective backslash.
* read.c (either in lsptok() or in nodeRead()), and therefore need a
* protective backslash.
*/
/* These characters only need to be quoted at the start of the string */
if (*s == '<' ||
@ -1286,8 +1287,10 @@ _outValue(StringInfo str, Value *value)
appendStringInfo(str, " %ld ", value->val.ival);
break;
case T_Float:
/* We assume the value is a valid numeric literal
* and so does not need quoting.

/*
* We assume the value is a valid numeric literal and so does
* not need quoting.
*/
appendStringInfo(str, " %s ", value->val.str);
break;
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.37 2000/02/15 20:49:12 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.38 2000/04/12 17:15:16 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/nodes/read.c,v 1.21 2000/02/21 18:47:00 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/nodes/read.c,v 1.22 2000/04/12 17:15:16 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@ -160,7 +160,7 @@ lsptok(char *string, int *length)
char *
debackslash(char *token, int length)
{
char *result = palloc(length+1);
char *result = palloc(length + 1);
char *ptr = result;

while (length > 0)
@ -208,22 +208,23 @@ nodeTokenType(char *token, int length)
if ((numlen > 0 && isdigit(*numptr)) ||
(numlen > 1 && *numptr == '.' && isdigit(numptr[1])))
{

/*
* Yes. Figure out whether it is integral or float;
* this requires both a syntax check and a range check.
* strtol() can do both for us.
* We know the token will end at a character that strtol will
* Yes. Figure out whether it is integral or float; this requires
* both a syntax check and a range check. strtol() can do both for
* us. We know the token will end at a character that strtol will
* stop at, so we do not need to modify the string.
*/
errno = 0;
(void) strtol(token, &endptr, 10);
if (endptr != token+length || errno == ERANGE)
if (endptr != token + length || errno == ERANGE)
return T_Float;
return T_Integer;
}

/*
* these three cases do not need length checks, since lsptok()
* will always treat them as single-byte tokens
* these three cases do not need length checks, since lsptok() will
* always treat them as single-byte tokens
*/
else if (*token == '(')
retval = LEFT_PAREN;
@ -233,7 +234,7 @@ nodeTokenType(char *token, int length)
retval = PLAN_SYM;
else if (*token == '@' && length == 1)
retval = AT_SYMBOL;
else if (*token == '\"' && length > 1 && token[length-1] == '\"')
else if (*token == '\"' && length > 1 && token[length - 1] == '\"')
retval = T_String;
else
retval = ATOM_TOKEN;
@ -305,6 +306,7 @@ nodeRead(bool read_car_only)
{
/* must be "<>" */
this_value = NULL;

/*
* It might be NULL but it is an atom!
*/
@ -321,7 +323,11 @@ nodeRead(bool read_car_only)
}
break;
case T_Integer:
/* we know that the token terminates on a char atol will stop at */

/*
* we know that the token terminates on a char atol will stop
* at
*/
this_value = (Node *) makeInteger(atol(token));
make_dotted_pair_cell = true;
break;
@ -337,7 +343,7 @@ nodeRead(bool read_car_only)
break;
case T_String:
/* need to remove leading and trailing quotes, and backslashes */
this_value = (Node *) makeString(debackslash(token+1, tok_len-2));
this_value = (Node *) makeString(debackslash(token + 1, tok_len - 2));
make_dotted_pair_cell = true;
break;
default:
Some files were not shown because too many files have changed in this diff.