
pgindent run. Make it all clean.

Bruce Momjian 2001-03-22 04:01:46 +00:00
parent 6cf8707b82
commit 9e1552607a
555 changed files with 32514 additions and 28110 deletions
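
This commit is a pure pgindent reformatting pass: no behavior changes, only whitespace, brace placement, and declaration layout. As a rough illustration (a hypothetical snippet modeled on the cube parse-buffer code in the first hunk, not code taken from any file in this commit), the layout pgindent settles on looks like this:

#include <stdio.h>
#include <string.h>

/* pgindent attaches the "*" to the name: "char *buf", not "char * buf" */
static char *parse_buf;
static unsigned int parse_pos;

/*
 * The return type moves onto its own line above the function name, and
 * parameter lists lose their inner padding: "(void)", not "( void )".
 */
static int
read_byte(void)
{
    /* one declaration per line; keywords are followed by a space: "if (" */
    unsigned int len;
    int c;

    len = strlen(parse_buf);
    c = parse_buf[parse_pos];
    if (parse_pos < len)
        parse_pos++;
    return c;
}

int
main(void)
{
    parse_buf = "abc";
    /* opening braces of control blocks sit on their own line */
    while (parse_pos < 3)
    {
        printf("%c\n", read_byte());
    }
    return 0;
}

Every hunk below is some combination of these mechanical rewrites, applied file by file.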

View File

@@ -4,76 +4,81 @@
 #include "utils/elog.h"
-static char * PARSE_BUFFER;
-static char * PARSE_BUFFER_PTR;
+static char *PARSE_BUFFER;
+static char *PARSE_BUFFER_PTR;
 static unsigned int PARSE_BUFFER_SIZE;
 static unsigned int SCANNER_POS;
-void set_parse_buffer( char* s );
-void reset_parse_buffer( void );
-int read_parse_buffer( void );
-char * parse_buffer( void );
-char * parse_buffer_ptr( void );
-unsigned int parse_buffer_curr_char( void );
-unsigned int parse_buffer_size( void );
-unsigned int parse_buffer_pos( void );
+void set_parse_buffer(char *s);
+void reset_parse_buffer(void);
+int read_parse_buffer(void);
+char *parse_buffer(void);
+char *parse_buffer_ptr(void);
+unsigned int parse_buffer_curr_char(void);
+unsigned int parse_buffer_size(void);
+unsigned int parse_buffer_pos(void);
 extern void cube_flush_scanner_buffer(void);    /* defined in cubescan.l */
-void set_parse_buffer( char* s )
+void
+set_parse_buffer(char *s)
 {
     PARSE_BUFFER = s;
     PARSE_BUFFER_SIZE = strlen(s);
-    if ( PARSE_BUFFER_SIZE == 0 ) {
+    if (PARSE_BUFFER_SIZE == 0)
         elog(ERROR, "cube_in: can't parse an empty string");
-    }
     PARSE_BUFFER_PTR = PARSE_BUFFER;
     SCANNER_POS = 0;
 }
-void reset_parse_buffer( void )
+void
+reset_parse_buffer(void)
 {
     PARSE_BUFFER_PTR = PARSE_BUFFER;
     SCANNER_POS = 0;
     cube_flush_scanner_buffer();
 }
-int read_parse_buffer( void )
+int
+read_parse_buffer(void)
 {
     int c;
     /*
-    c = *PARSE_BUFFER_PTR++;
-    SCANNER_POS++;
+     * c = *PARSE_BUFFER_PTR++; SCANNER_POS++;
      */
     c = PARSE_BUFFER[SCANNER_POS];
-    if(SCANNER_POS < PARSE_BUFFER_SIZE)
+    if (SCANNER_POS < PARSE_BUFFER_SIZE)
         SCANNER_POS++;
     return c;
 }
-char * parse_buffer( void )
+char *
+parse_buffer(void)
 {
     return PARSE_BUFFER;
 }
-unsigned int parse_buffer_curr_char( void )
+unsigned int
+parse_buffer_curr_char(void)
 {
     return PARSE_BUFFER[SCANNER_POS];
 }
-char * parse_buffer_ptr( void )
+char *
+parse_buffer_ptr(void)
 {
     return PARSE_BUFFER_PTR;
 }
-unsigned int parse_buffer_pos( void )
+unsigned int
+parse_buffer_pos(void)
 {
     return SCANNER_POS;
 }
-unsigned int parse_buffer_size( void )
+unsigned int
+parse_buffer_size(void)
 {
     return PARSE_BUFFER_SIZE;
 }

View File

@@ -1,8 +1,8 @@
-extern void set_parse_buffer( char* s );
-extern void reset_parse_buffer( void );
-extern int read_parse_buffer( void );
-extern char * parse_buffer( void );
-extern char * parse_buffer_ptr( void );
-extern unsigned int parse_buffer_curr_char( void );
-extern unsigned int parse_buffer_pos( void );
-extern unsigned int parse_buffer_size( void );
+extern void set_parse_buffer(char *s);
+extern void reset_parse_buffer(void);
+extern int read_parse_buffer(void);
+extern char *parse_buffer(void);
+extern char *parse_buffer_ptr(void);
+extern unsigned int parse_buffer_curr_char(void);
+extern unsigned int parse_buffer_pos(void);
+extern unsigned int parse_buffer_size(void);

File diff suppressed because it is too large

View File

@@ -1,4 +1,5 @@
-typedef struct NDBOX {
+typedef struct NDBOX
+{
     unsigned int size;      /* required to be a Postgres varlena type */
     unsigned int dim;
     float x[1];

View File

@@ -1,7 +1,7 @@
 /*
  * PostgreSQL type definitions for managed LargeObjects.
  *
- * $Header: /cvsroot/pgsql/contrib/lo/lo.c,v 1.7 2001/02/10 02:31:25 tgl Exp $
+ * $Header: /cvsroot/pgsql/contrib/lo/lo.c,v 1.8 2001/03/22 03:59:09 momjian Exp $
  *
  */

View File

@@ -13,7 +13,8 @@
 #include "libpq-fe.h"
 /* these are the opts structures for command line params */
-struct options {
+struct options
+{
     int getdatabase;
     int gettable;
     int getoid;
@@ -38,7 +39,7 @@ struct options {
 /* function prototypes */
 void get_opts(int, char **, struct options *);
 PGconn *sql_conn(char *, struct options *);
-void sql_exec_error (int);
+void sql_exec_error(int);
 int sql_exec(PGconn *, char *, int);
 void sql_exec_dumpdb(PGconn *);
 void sql_exec_dumptable(PGconn *, int);
@@ -46,7 +47,8 @@ void sql_exec_searchtable(PGconn *, char *);
 void sql_exec_searchoid(PGconn *, int);
 /* fuction to parse command line options and check for some usage errors. */
-void get_opts(int argc, char **argv, struct options *my_opts)
+void
+get_opts(int argc, char **argv, struct options * my_opts)
 {
     char c;
@@ -63,9 +65,9 @@ void get_opts(int argc, char **argv, struct options *my_opts)
     my_opts->remotepass = 0;
     /* get opts */
-    while( (c = getopt(argc, argv, "H:p:U:P:d:t:o:xh?")) != EOF)
+    while ((c = getopt(argc, argv, "H:p:U:P:d:t:o:xh?")) != EOF)
     {
-        switch(c)
+        switch (c)
         {
             /* specify the database */
             case 'd':
@@ -76,13 +78,13 @@ void get_opts(int argc, char **argv, struct options *my_opts)
             /* specify the table name */
             case 't':
                 /* make sure we set the database first */
-                if(!my_opts->getdatabase)
+                if (!my_opts->getdatabase)
                 {
                     fprintf(stderr, "Sorry, but you must specify a database to dump from.\n");
                     exit(1);
                 }
                 /* make sure we don't try to do a -o also */
-                if(my_opts->getoid)
+                if (my_opts->getoid)
                 {
                     fprintf(stderr, "Sorry, you can only specify either oid or table\n");
                     exit(1);
@@ -96,13 +98,13 @@ void get_opts(int argc, char **argv, struct options *my_opts)
             /* specify the oid int */
             case 'o':
                 /* make sure we set the database first */
-                if(!my_opts->getdatabase)
+                if (!my_opts->getdatabase)
                 {
                     fprintf(stderr, "Sorry, but you must specify a database to dump from.\n");
                     exit(1);
                 }
                 /* make sure we don't try to do a -t also */
-                if(my_opts->gettable)
+                if (my_opts->gettable)
                 {
                     fprintf(stderr, "Sorry, you can only specify either oid or table\n");
                     exit(1);
@@ -148,11 +150,11 @@ void get_opts(int argc, char **argv, struct options *my_opts)
             case 'h':
                 fprintf(stderr, "\n\
 Usage: pg_oid2name [-d database [-x] ] [-t table | -o oid] \n\
-        dafault action        display all databases
+        dafault action        display all databases\n\
         -d database           database to oid2name\n\
         -x                    display system tables\n\
         -t table | -o oid     search for table name (-t) or\n\
-                              oid (-o) in -d database
+                              oid (-o) in -d database\n\
         -H host               connect to remote host\n\
         -p port               host port to connect to\n\
         -U username           username to connect with\n\
@@ -165,11 +167,15 @@ Usage: pg_oid2name [-d database [-x] ] [-t table | -o oid] \n\
 }
 /* establish connection with database. */
-PGconn *sql_conn(char *dbName, struct options *my_opts)
+PGconn *
+sql_conn(char *dbName, struct options * my_opts)
 {
-    char *pghost, *pgport;
-    char *pgoptions, *pgtty;
-    char *pguser, *pgpass;
+    char *pghost,
+         *pgport;
+    char *pgoptions,
+         *pgtty;
+    char *pguser,
+         *pgpass;
     PGconn *conn;
@@ -184,27 +190,27 @@ PGconn *sql_conn(char *dbName, struct options *my_opts)
     pgpass = NULL;
     /* override the NULLs with the user params if passed */
-    if(my_opts->remotehost)
+    if (my_opts->remotehost)
     {
-        pghost = (char *) malloc (128);
+        pghost = (char *) malloc(128);
         sscanf(my_opts->_hostname, "%s", pghost);
     }
-    if(my_opts->remoteport)
+    if (my_opts->remoteport)
     {
-        pgport = (char *) malloc (6);
+        pgport = (char *) malloc(6);
         sscanf(my_opts->_port, "%s", pgport);
     }
-    if(my_opts->remoteuser)
+    if (my_opts->remoteuser)
     {
-        pguser = (char *) malloc (128);
+        pguser = (char *) malloc(128);
         sscanf(my_opts->_username, "%s", pguser);
     }
-    if(my_opts->remotepass)
+    if (my_opts->remotepass)
     {
-        pgpass = (char *) malloc (128);
+        pgpass = (char *) malloc(128);
         sscanf(my_opts->_password, "%s", pgpass);
     }
@@ -228,10 +234,11 @@ PGconn *sql_conn(char *dbName, struct options *my_opts)
 }
 /* If the sql_ command has an error, this function looks up the error number and prints it out. */
-void sql_exec_error (int error_number)
+void
+sql_exec_error(int error_number)
 {
     fprintf(stderr, "Error number %i.\n", error_number);
-    switch(error_number)
+    switch (error_number)
     {
         case 3:
             fprintf(stderr, "Error: PGRES_COPY_OUT\n");
@@ -256,13 +263,15 @@ void sql_exec_error (int error_number)
 }
 /* actual code to make call to the database and print the output data */
-int sql_exec(PGconn *conn, char *todo, int match)
+int
+sql_exec(PGconn *conn, char *todo, int match)
 {
     PGresult *res;
     int numbfields;
     int error_number;
-    int i, len;
+    int i,
+        len;
     /* make the call */
     res = PQexec(conn, todo);
@@ -284,15 +293,15 @@ int sql_exec(PGconn *conn, char *todo, int match)
     numbfields = PQntuples(res);
     /* if we only expect 1 and there mode than, return -2 */
-    if(match == 1 && numbfields > 1)
+    if (match == 1 && numbfields > 1)
         return -2;
     /* return -1 if there aren't any returns */
-    if(match == 1 && numbfields < 1)
+    if (match == 1 && numbfields < 1)
         return -1;
     /* for each row, dump the information */
-    for(i = 0; i < numbfields; i++)
+    for (i = 0; i < numbfields; i++)
     {
         len = strlen(PQgetvalue(res, i, 0));
@@ -306,11 +315,12 @@ int sql_exec(PGconn *conn, char *todo, int match)
 }
 /* dump all databases know by the system table */
-void sql_exec_dumpdb(PGconn *conn)
+void
+sql_exec_dumpdb(PGconn *conn)
 {
     char *todo;
-    todo = (char *) malloc (1024);
+    todo = (char *) malloc(1024);
     /* get the oid and database name from the system pg_database table */
     sprintf(todo, "select oid,datname from pg_database");
@@ -320,14 +330,15 @@ void sql_exec_dumpdb(PGconn *conn)
 /* display all tables in whatever db we are connected to. don't display the
    system tables by default */
-void sql_exec_dumptable(PGconn *conn, int systables)
+void
+sql_exec_dumptable(PGconn *conn, int systables)
 {
     char *todo;
-    todo = (char *) malloc (1024);
+    todo = (char *) malloc(1024);
     /* don't exclude the systables if this is set */
-    if(systables == 1)
+    if (systables == 1)
         sprintf(todo, "select relfilenode,relname from pg_class order by relname");
     else
         sprintf(todo, "select relfilenode,relname from pg_class where relname not like 'pg_%%' order by relname");
@@ -337,12 +348,13 @@ void sql_exec_dumptable(PGconn *conn, int systables)
 /* display the oid for a given tablename for whatever db we are connected
    to. do we want to allow %bar% in the search? Not now. */
-void sql_exec_searchtable(PGconn *conn, char *tablename)
+void
+sql_exec_searchtable(PGconn *conn, char *tablename)
 {
     int returnvalue;
     char *todo;
-    todo = (char *) malloc (1024);
+    todo = (char *) malloc(1024);
     /* get the oid and tablename where the name matches tablename */
     sprintf(todo, "select relfilenode,relname from pg_class where relname = '%s'", tablename);
@@ -350,52 +362,46 @@ void sql_exec_searchtable(PGconn *conn, char *tablename)
     returnvalue = sql_exec(conn, todo, 1);
     /* deal with the return errors */
-    if(returnvalue == -1)
-    {
+    if (returnvalue == -1)
         printf("No tables with that name found\n");
-    }
-    if(returnvalue == -2)
-    {
+    if (returnvalue == -2)
         printf("VERY scary: more than one table with that name found!!\n");
-    }
 }
 /* same as above */
-void sql_exec_searchoid(PGconn *conn, int oid)
+void
+sql_exec_searchoid(PGconn *conn, int oid)
 {
     int returnvalue;
     char *todo;
-    todo = (char *) malloc (1024);
+    todo = (char *) malloc(1024);
     sprintf(todo, "select relfilenode,relname from pg_class where oid = %i", oid);
     returnvalue = sql_exec(conn, todo, 1);
-    if(returnvalue == -1)
-    {
+    if (returnvalue == -1)
         printf("No tables with that oid found\n");
-    }
-    if(returnvalue == -2)
-    {
+    if (returnvalue == -2)
         printf("VERY scary: more than one table with that oid found!!\n");
-    }
 }
-int main(int argc, char **argv)
+int
+main(int argc, char **argv)
 {
     struct options *my_opts;
     PGconn *pgconn;
-    my_opts = (struct options *) malloc (sizeof(struct options));
+    my_opts = (struct options *) malloc(sizeof(struct options));
     /* parse the opts */
     get_opts(argc, argv, my_opts);
     /* display all the tables in the database */
-    if(my_opts->getdatabase & my_opts->gettable)
+    if (my_opts->getdatabase & my_opts->gettable)
     {
         printf("Oid of table %s from database \"%s\":\n", my_opts->_tbname, my_opts->_dbname);
         printf("_______________________________\n");
@@ -408,7 +414,7 @@ int main(int argc, char **argv)
     }
     /* search for the tablename of the given OID */
-    if(my_opts->getdatabase & my_opts->getoid)
+    if (my_opts->getdatabase & my_opts->getoid)
     {
         printf("Tablename of oid %i from database \"%s\":\n", my_opts->_oid, my_opts->_dbname);
         printf("---------------------------------\n");
@@ -421,7 +427,7 @@ int main(int argc, char **argv)
     }
     /* search for the oid for the given tablename */
-    if(my_opts->getdatabase)
+    if (my_opts->getdatabase)
     {
         printf("All tables from database \"%s\":\n", my_opts->_dbname);
         printf("---------------------------------\n");

View File

@@ -6,7 +6,7 @@
  * copyright (c) Oliver Elphick <olly@lfix.co.uk>, 2001;
  * licence: BSD
  *
- * $Header: /cvsroot/pgsql/contrib/pg_controldata/Attic/pg_controldata.c,v 1.2 2001/03/13 01:17:40 tgl Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_controldata/Attic/pg_controldata.c,v 1.3 2001/03/22 03:59:09 momjian Exp $
  */
 #include "postgres.h"
@@ -51,8 +51,9 @@ main()
     char ckpttime_str[32];
     DataDir = getenv("PGDATA");
-    if ( DataDir == NULL ) {
-        fprintf(stderr,"PGDATA is not defined\n");
+    if (DataDir == NULL)
+    {
+        fprintf(stderr, "PGDATA is not defined\n");
         exit(1);
     }
@@ -74,7 +75,7 @@ main()
     /* Check the CRC. */
     INIT_CRC64(crc);
     COMP_CRC64(crc,
-               (char*) &ControlFile + sizeof(crc64),
+               (char *) &ControlFile + sizeof(crc64),
                sizeof(ControlFileData) - sizeof(crc64));
     FIN_CRC64(crc);

View File

@@ -1,7 +1,7 @@
 /* -------------------------------------------------------------------------
  * pg_dumplo
  *
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.5 2001/01/24 19:42:44 momjian Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.6 2001/03/22 03:59:10 momjian Exp $
  *
  * Karel Zak 1999-2000
  * -------------------------------------------------------------------------
@@ -26,7 +26,7 @@ extern int errno;
 void
-load_lolist( LODumpMaster *pgLO )
+load_lolist(LODumpMaster * pgLO)
 {
     LOlist *ll;
     int i;
@@ -52,25 +52,29 @@ load_lolist( LODumpMaster *pgLO )
                  " AND c.relkind = 'r' "
                  " AND c.relname NOT LIKE 'pg_%'");
-    if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK) {
+    if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
+    {
         fprintf(stderr, "%s: Failed to get LO OIDs:\n%s", progname,
                 PQerrorMessage(pgLO->conn));
         exit(RE_ERROR);
     }
-    if ((n = PQntuples(pgLO->res)) == 0) {
+    if ((n = PQntuples(pgLO->res)) == 0)
+    {
         fprintf(stderr, "%s: No OID columns in the database.\n", progname);
         exit(RE_ERROR);
     }
     pgLO->lolist = (LOlist *) malloc((n + 1) * sizeof(LOlist));
-    if (!pgLO->lolist) {
+    if (!pgLO->lolist)
+    {
         fprintf(stderr, "%s: can't allocate memory\n", progname);
         exit(RE_ERROR);
     }
-    for (i = 0, ll = pgLO->lolist; i < n; i++, ll++) {
+    for (i = 0, ll = pgLO->lolist; i < n; i++, ll++)
+    {
         ll->lo_table = strdup(PQgetvalue(pgLO->res, i, 0));
         ll->lo_attr = strdup(PQgetvalue(pgLO->res, i, 1));
     }
@@ -80,15 +84,17 @@ load_lolist( LODumpMaster *pgLO )
 }
 void
-pglo_export(LODumpMaster *pgLO)
+pglo_export(LODumpMaster * pgLO)
 {
     LOlist *ll;
     int tuples;
     char path[BUFSIZ],
          Qbuff[QUERY_BUFSIZ];
-    if (pgLO->action != ACTION_SHOW) {
+    if (pgLO->action != ACTION_SHOW)
+    {
         time_t t;
         time(&t);
         fprintf(pgLO->index, "#\n# This is the PostgreSQL large object dump index\n#\n");
         fprintf(pgLO->index, "#\tDate: %s", ctime(&t));
@@ -100,7 +106,8 @@ pglo_export(LODumpMaster *pgLO)
     pgLO->counter = 0;
-    for(ll=pgLO->lolist; ll->lo_table != NULL; ll++) {
+    for (ll = pgLO->lolist; ll->lo_table != NULL; ll++)
+    {
         /* ----------
          * Query: find the LOs referenced by this column
@@ -113,15 +120,19 @@ pglo_export(LODumpMaster *pgLO)
         pgLO->res = PQexec(pgLO->conn, Qbuff);
-        if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK) {
+        if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
+        {
             fprintf(stderr, "%s: Failed to get LO OIDs:\n%s", progname,
                     PQerrorMessage(pgLO->conn));
         }
-        else if ((tuples = PQntuples(pgLO->res)) == 0) {
+        else if ((tuples = PQntuples(pgLO->res)) == 0)
+        {
             if (!pgLO->quiet && pgLO->action == ACTION_EXPORT_ATTR)
                 printf("%s: no large objects in \"%s\".\"%s\"\n",
                        progname, ll->lo_table, ll->lo_attr);
-        } else {
+        }
+        else
+        {
             int t;
             char *val;
@@ -130,13 +141,16 @@ pglo_export(LODumpMaster *pgLO)
              * Create DIR/FILE
              * ----------
              */
-            if (pgLO->action != ACTION_SHOW) {
+            if (pgLO->action != ACTION_SHOW)
+            {
                 sprintf(path, "%s/%s/%s", pgLO->space, pgLO->db,
                         ll->lo_table);
-                if (mkdir(path, DIR_UMASK) == -1) {
-                    if (errno != EEXIST) {
+                if (mkdir(path, DIR_UMASK) == -1)
+                {
+                    if (errno != EEXIST)
+                    {
                         perror(path);
                         exit(RE_ERROR);
                     }
@@ -145,8 +159,10 @@ pglo_export(LODumpMaster *pgLO)
                 sprintf(path, "%s/%s/%s/%s", pgLO->space, pgLO->db,
                         ll->lo_table, ll->lo_attr);
-                if (mkdir(path, DIR_UMASK) == -1) {
-                    if (errno != EEXIST) {
+                if (mkdir(path, DIR_UMASK) == -1)
+                {
+                    if (errno != EEXIST)
+                    {
                         perror(path);
                         exit(RE_ERROR);
                     }
@@ -159,14 +175,16 @@ pglo_export(LODumpMaster *pgLO)
             pgLO->counter += tuples;
-            for(t=0; t<tuples; t++) {
+            for (t = 0; t < tuples; t++)
+            {
                 Oid lo;
                 val = PQgetvalue(pgLO->res, t, 0);
                 lo = atooid(val);
-                if (pgLO->action == ACTION_SHOW) {
+                if (pgLO->action == ACTION_SHOW)
+                {
                     printf("%s.%s: %u\n", ll->lo_table, ll->lo_attr, lo);
                     continue;
                 }

View File

@@ -1,7 +1,7 @@
 /* -------------------------------------------------------------------------
  * pg_dumplo
  *
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.3 2001/01/24 19:42:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
  *
  * Karel Zak 1999-2000
  * -------------------------------------------------------------------------
@@ -25,20 +25,23 @@
 extern int errno;
 void
-pglo_import(LODumpMaster *pgLO)
+pglo_import(LODumpMaster * pgLO)
 {
     LOlist loa;
     Oid new_oid;
-    char tab[MAX_TABLE_NAME], attr[MAX_ATTR_NAME],
-         path[BUFSIZ], lo_path[BUFSIZ],
+    char tab[MAX_TABLE_NAME],
+         attr[MAX_ATTR_NAME],
+         path[BUFSIZ],
+         lo_path[BUFSIZ],
          Qbuff[QUERY_BUFSIZ];
-    while(fgets(Qbuff, QUERY_BUFSIZ, pgLO->index)) {
+    while (fgets(Qbuff, QUERY_BUFSIZ, pgLO->index))
+    {
         if (*Qbuff == '#')
             continue;
-        if (! pgLO->remove && ! pgLO->quiet)
+        if (!pgLO->remove && !pgLO->quiet)
             printf(Qbuff);
         sscanf(Qbuff, "%u\t%s\t%s\t%s\n", &loa.lo_oid, tab, attr, path);
@@ -51,7 +54,8 @@ pglo_import(LODumpMaster *pgLO)
          * Import LO
          * ----------
          */
-        if ((new_oid = lo_import(pgLO->conn, lo_path)) == 0) {
+        if ((new_oid = lo_import(pgLO->conn, lo_path)) == 0)
+        {
            fprintf(stderr, "%s: %s\n", progname, PQerrorMessage(pgLO->conn));
@@ -60,7 +64,8 @@ pglo_import(LODumpMaster *pgLO)
             exit(RE_ERROR);
         }
-        if (pgLO->remove) {
+        if (pgLO->remove)
+        {
             notice(pgLO, FALSE);
             if (lo_unlink(pgLO->conn, loa.lo_oid) < 0)
                 fprintf(stderr, "%s: can't remove LO %u:\n%s",
@@ -81,12 +86,13 @@ pglo_import(LODumpMaster *pgLO)
         sprintf(Qbuff, "UPDATE \"%s\" SET \"%s\"=%u WHERE \"%s\"=%u",
                 loa.lo_table, loa.lo_attr, new_oid, loa.lo_attr, loa.lo_oid);
-        /*fprintf(stderr, Qbuff);*/
+        /* fprintf(stderr, Qbuff); */
        pgLO->res = PQexec(pgLO->conn, Qbuff);
-        if (PQresultStatus(pgLO->res) != PGRES_COMMAND_OK) {
-            fprintf(stderr, "%s: %s\n",progname, PQerrorMessage(pgLO->conn));
+        if (PQresultStatus(pgLO->res) != PGRES_COMMAND_OK)
+        {
+            fprintf(stderr, "%s: %s\n", progname, PQerrorMessage(pgLO->conn));
             PQclear(pgLO->res);
             PQexec(pgLO->conn, "ROLLBACK");
             fprintf(stderr, "\n%s: ROLLBACK\n", progname);
@@ -94,4 +100,4 @@ pglo_import(LODumpMaster *pgLO)
         }
         PQclear(pgLO->res);
     }
 }

View File

@@ -1,7 +1,7 @@
 /* -------------------------------------------------------------------------
  * pg_dumplo
  *
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.6 2001/02/10 02:31:25 tgl Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.7 2001/03/22 03:59:10 momjian Exp $
  *
  * Karel Zak 1999-2000
  * -------------------------------------------------------------------------
@@ -24,9 +24,9 @@
 #include "pg_dumplo.h"
 #ifdef HAVE_GETOPT_LONG
 #include <getopt.h>
 #define no_argument 0
 #define required_argument 1
 #endif
 extern int errno;
@@ -35,7 +35,7 @@ char *progname = NULL;
 int main(int argc, char **argv);
 static void usage(void);
-static void parse_lolist (LODumpMaster *pgLO);
+static void parse_lolist(LODumpMaster * pgLO);
 /*-----
@@ -45,7 +45,8 @@ static void parse_lolist (LODumpMaster *pgLO);
 int
 main(int argc, char **argv)
 {
-    LODumpMaster _pgLO, *pgLO = &_pgLO;
+    LODumpMaster _pgLO,
+                *pgLO = &_pgLO;
     char *pwd = NULL;
     pgLO->argv = argv;
@@ -68,33 +69,37 @@ main(int argc, char **argv)
      * Parse ARGV
      * ----------
      */
-    if (argc > 1) {
+    if (argc > 1)
+    {
         int arg;
         extern int optind;
 #ifdef HAVE_GETOPT_LONG
-        int l_index=0;
+        int l_index = 0;
         static struct option l_opt[] = {
-            { "help", no_argument, 0, 'h' },
-            { "user", required_argument, 0, 'u' },
-            { "pwd", required_argument, 0, 'p' },
-            { "db", required_argument, 0, 'd' },
-            { "host", required_argument, 0, 'h' },
-            { "space", required_argument, 0, 's' },
-            { "import", no_argument, 0, 'i' },
-            { "export", no_argument, 0, 'e' },
-            { "remove", no_argument, 0, 'r' },
-            { "quiet", no_argument, 0, 'q' },
-            { "all", no_argument, 0, 'a' },
-            { "show", no_argument, 0, 'w' },
-            { NULL, 0, 0, 0 }
+            {"help", no_argument, 0, 'h'},
+            {"user", required_argument, 0, 'u'},
+            {"pwd", required_argument, 0, 'p'},
+            {"db", required_argument, 0, 'd'},
+            {"host", required_argument, 0, 'h'},
+            {"space", required_argument, 0, 's'},
+            {"import", no_argument, 0, 'i'},
+            {"export", no_argument, 0, 'e'},
+            {"remove", no_argument, 0, 'r'},
+            {"quiet", no_argument, 0, 'q'},
+            {"all", no_argument, 0, 'a'},
+            {"show", no_argument, 0, 'w'},
+            {NULL, 0, 0, 0}
         };
-        while((arg = getopt_long(argc, argv, "?aehu:p:qd:l:t:irs:w", l_opt, &l_index)) != -1) {
+        while ((arg = getopt_long(argc, argv, "?aehu:p:qd:l:t:irs:w", l_opt, &l_index)) != -1)
+        {
 #else
-        while((arg = getopt(argc, argv, "?aehu:p:qd:l:t:irs:w")) != -1) {
+        while ((arg = getopt(argc, argv, "?aehu:p:qd:l:t:irs:w")) != -1)
+        {
 #endif
-            switch(arg) {
+            switch (arg)
+            {
                 case '?':
                 case 'h':
                     usage();
@@ -119,8 +124,8 @@ main(int argc, char **argv)
                     break;
                 case 'l':
                     pgLO->action = ACTION_EXPORT_ATTR;
-                    pgLO->lolist_start = optind-1;
-                    parse_lolist (pgLO);
+                    pgLO->lolist_start = optind - 1;
+                    parse_lolist(pgLO);
                     break;
                 case 'e':
                 case 'a':
@@ -141,7 +146,9 @@ main(int argc, char **argv)
                     exit(RE_ERROR);
             }
         }
-    } else {
+    }
+    else
+    {
         usage();
         exit(RE_ERROR);
     }
@@ -150,14 +157,17 @@ main(int argc, char **argv)
      * Check space
      * ----------
      */
-    if (! pgLO->space && ! pgLO->action == ACTION_SHOW) {
-        if (!(pgLO->space = getenv("PWD"))) {
+    if (!pgLO->space && !pgLO->action == ACTION_SHOW)
+    {
+        if (!(pgLO->space = getenv("PWD")))
+        {
             fprintf(stderr, "%s: not set space for dump-tree (option '-s' or $PWD).\n", progname);
             exit(RE_ERROR);
         }
     }
-    if (!pgLO->action) {
+    if (!pgLO->action)
+    {
         fprintf(stderr, "%s: What do you want - export or import?\n", progname);
         exit(RE_ERROR);
     }
@@ -169,7 +179,8 @@ main(int argc, char **argv)
     pgLO->conn = PQsetdbLogin(pgLO->host, NULL, NULL, NULL, pgLO->db,
                               pgLO->user, pwd);
-    if (PQstatus(pgLO->conn) == CONNECTION_BAD) {
+    if (PQstatus(pgLO->conn) == CONNECTION_BAD)
+    {
         fprintf(stderr, "%s (connection): %s\n", progname, PQerrorMessage(pgLO->conn));
         exit(RE_ERROR);
     }
@@ -187,7 +198,8 @@ main(int argc, char **argv)
     PQexec(pgLO->conn, "BEGIN");
-    switch(pgLO->action) {
+    switch (pgLO->action)
+    {
         case ACTION_SHOW:
         case ACTION_EXPORT_ALL:
@@ -196,7 +208,8 @@ main(int argc, char **argv)
         case ACTION_EXPORT_ATTR:
             pglo_export(pgLO);
-            if (!pgLO->quiet) {
+            if (!pgLO->quiet)
+            {
                 if (pgLO->action == ACTION_SHOW)
                     printf("\nDatabase '%s' contains %d large objects.\n\n", pgLO->db, pgLO->counter);
                 else
@@ -221,27 +234,30 @@ main(int argc, char **argv)
 }
 static void
-parse_lolist (LODumpMaster *pgLO)
+parse_lolist(LODumpMaster * pgLO)
 {
     LOlist *ll;
     char **d,
         *loc,
-        buff[MAX_TABLE_NAME + MAX_ATTR_NAME +1];
+        buff[MAX_TABLE_NAME + MAX_ATTR_NAME + 1];
     pgLO->lolist = (LOlist *) malloc(pgLO->argc * sizeof(LOlist));
-    if (! pgLO->lolist) {
+    if (!pgLO->lolist)
+    {
         fprintf(stderr, "%s: can't allocate memory\n", progname);
         exit(RE_ERROR);
     }
-    for( d=pgLO->argv + pgLO->lolist_start, ll=pgLO->lolist;
+    for (d = pgLO->argv + pgLO->lolist_start, ll = pgLO->lolist;
         *d != NULL;
-        d++, ll++) {
+        d++, ll++)
+    {
         strncpy(buff, *d, MAX_TABLE_NAME + MAX_ATTR_NAME);
-        if ((loc = strchr(buff, '.')) == NULL) {
+        if ((loc = strchr(buff, '.')) == NULL)
+        {
             fprintf(stderr, "%s: '%s' is bad 'table.attr'\n", progname, buff);
             exit(RE_ERROR);
         }
@@ -304,5 +320,5 @@ usage()
            " * option '-i' without option '-r' make new large obj in DB\n"
            "   not rewrite old, the '-i' UPDATE oid numbers in table.attr only!\n"
            " * if is not set option -s, the pg_dumplo use $PWD\n"
-    );  /* puts()*/
+    );  /* puts() */
 }

View File

@@ -1,7 +1,7 @@
 /* -------------------------------------------------------------------------
  * pg_dumplo
  *
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/pg_dumplo.h,v 1.3 2001/01/24 19:42:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/pg_dumplo.h,v 1.4 2001/03/22 03:59:10 momjian Exp $
  *
  * Karel Zak 1999-2000
  * -------------------------------------------------------------------------
@@ -36,13 +36,15 @@
  * LO struct
  * ----------
  */
-typedef struct {
+typedef struct
+{
     char *lo_table,
          *lo_attr;
     Oid lo_oid;
 } LOlist;
-typedef struct {
+typedef struct
+{
     int action;
     LOlist *lolist;
     char **argv,
@@ -60,7 +62,8 @@ typedef struct {
     PGconn *conn;
 } LODumpMaster;
-typedef enum {
+typedef enum
+{
     ACTION_NONE,
     ACTION_SHOW,
     ACTION_EXPORT_ATTR,
@@ -70,10 +73,10 @@ typedef enum {
 extern char *progname;
-extern void notice (LODumpMaster *pgLO, int set);
-extern void index_file (LODumpMaster *pgLO);
-extern void load_lolist (LODumpMaster *pgLO);
-extern void pglo_export (LODumpMaster *pgLO);
-extern void pglo_import (LODumpMaster *pgLO);
+extern void notice(LODumpMaster * pgLO, int set);
+extern void index_file(LODumpMaster * pgLO);
+extern void load_lolist(LODumpMaster * pgLO);
+extern void pglo_export(LODumpMaster * pgLO);
+extern void pglo_import(LODumpMaster * pgLO);
 #endif   /* PG_DUMPLO_H */

View File

@@ -1,7 +1,7 @@
 /* -------------------------------------------------------------------------
  * pg_dumplo
  *
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/utils.c,v 1.3 2001/01/24 19:42:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/utils.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
  *
  * Karel Zak 1999-2000
  * -------------------------------------------------------------------------
@@ -24,12 +24,12 @@
 extern int errno;
-static void Dummy_NoticeProcessor(void * arg, const char * message);
-static void Default_NoticeProcessor(void * arg, const char * message);
+static void Dummy_NoticeProcessor(void *arg, const char *message);
+static void Default_NoticeProcessor(void *arg, const char *message);
 void
-index_file(LODumpMaster *pgLO)
+index_file(LODumpMaster * pgLO)
 {
     char path[BUFSIZ];
@@ -39,10 +39,13 @@ index_file(LODumpMaster *pgLO)
     sprintf(path, "%s/%s", pgLO->space, pgLO->db);
     if (pgLO->action == ACTION_EXPORT_ATTR ||
-        pgLO->action == ACTION_EXPORT_ALL) {
-        if (mkdir(path, DIR_UMASK) == -1) {
-            if (errno != EEXIST) {
+        pgLO->action == ACTION_EXPORT_ALL)
+    {
+        if (mkdir(path, DIR_UMASK) == -1)
+        {
+            if (errno != EEXIST)
+            {
                 perror(path);
                 exit(RE_ERROR);
             }
@@ -50,16 +53,20 @@ index_file(LODumpMaster *pgLO)
         sprintf(path, "%s/lo_dump.index", path);
-        if ((pgLO->index = fopen(path, "w")) == NULL) {
+        if ((pgLO->index = fopen(path, "w")) == NULL)
+        {
             perror(path);
             exit(RE_ERROR);
         }
-    } else if (pgLO->action != ACTION_NONE ) {
+    }
+    else if (pgLO->action != ACTION_NONE)
+    {
         sprintf(path, "%s/lo_dump.index", path);
-        if ((pgLO->index = fopen(path, "r")) == NULL) {
+        if ((pgLO->index = fopen(path, "r")) == NULL)
+        {
             perror(path);
             exit(RE_ERROR);
         }
@@ -67,20 +74,24 @@ index_file(LODumpMaster *pgLO)
 }
 static
-void Dummy_NoticeProcessor(void * arg, const char * message)
+void
+Dummy_NoticeProcessor(void *arg, const char *message)
 {
     ;
 }
 static
-void Default_NoticeProcessor(void * arg, const char * message)
+void
+Default_NoticeProcessor(void *arg, const char *message)
 {
     fprintf(stderr, "%s", message);
 }
 void
-notice(LODumpMaster *pgLO, int set)
+notice(LODumpMaster * pgLO, int set)
 {
-    if (set)PQsetNoticeProcessor(pgLO->conn, Default_NoticeProcessor, NULL);
-    else PQsetNoticeProcessor(pgLO->conn, Dummy_NoticeProcessor, NULL);
+    if (set)
+        PQsetNoticeProcessor(pgLO->conn, Default_NoticeProcessor, NULL);
+    else
+        PQsetNoticeProcessor(pgLO->conn, Dummy_NoticeProcessor, NULL);
 }

View File

@@ -13,24 +13,48 @@
 #include <syslog.h>
 #include <string.h>
-struct {
+struct
+{
     const char *tag;
     int size;
     int priority;
-} tags[] = {
-    { "", 0, LOG_NOTICE },
-    { "emerg:", sizeof("emerg"), LOG_EMERG },
-    { "alert:", sizeof("alert"), LOG_ALERT },
-    { "crit:", sizeof("crit"), LOG_CRIT },
-    { "err:", sizeof("err"), LOG_ERR },
-    { "error:", sizeof("error"), LOG_ERR },
-    { "warning:", sizeof("warning"), LOG_WARNING },
-    { "notice:", sizeof("notice"), LOG_NOTICE },
-    { "info:", sizeof("info"), LOG_INFO },
-    { "debug:", sizeof("debug"), LOG_DEBUG }
+} tags[] =
+{
+    {
+        "", 0, LOG_NOTICE
+    },
+    {
+        "emerg:", sizeof("emerg"), LOG_EMERG
+    },
+    {
+        "alert:", sizeof("alert"), LOG_ALERT
+    },
+    {
+        "crit:", sizeof("crit"), LOG_CRIT
+    },
+    {
+        "err:", sizeof("err"), LOG_ERR
+    },
+    {
+        "error:", sizeof("error"), LOG_ERR
+    },
+    {
+        "warning:", sizeof("warning"), LOG_WARNING
+    },
+    {
+        "notice:", sizeof("notice"), LOG_NOTICE
+    },
+    {
+        "info:", sizeof("info"), LOG_INFO
+    },
+    {
+        "debug:", sizeof("debug"), LOG_DEBUG
+    }
 };
-int main()
+int
+main()
 {
     char buf[301];
     int c;
@@ -40,29 +64,29 @@ int main()
 #ifndef DEBUG
     openlog("postgresql", LOG_CONS, LOG_LOCAL1);
 #endif
-    while ( (c = getchar()) != EOF) {
-        if (c == '\r') {
+    while ((c = getchar()) != EOF)
+    {
+        if (c == '\r')
             continue;
-        }
-        if (c == '\n') {
-            int level = sizeof(tags)/sizeof(*tags);
+        if (c == '\n')
+        {
+            int level = sizeof(tags) / sizeof(*tags);
             char *bol;
-            if (colon == 0 || (size_t)(colon - buf) > sizeof("warning")) {
+            if (colon == 0 || (size_t) (colon - buf) > sizeof("warning"))
                 level = 1;
-            }
             *pos = 0;
-            while (--level) {
+            while (--level)
+            {
                 if (pos - buf >= tags[level].size
-                    && strncmp(buf, tags[level].tag, tags[level].size) == 0) {
+                    && strncmp(buf, tags[level].tag, tags[level].size) == 0)
                     break;
-                }
             }
             bol = buf + tags[level].size;
-            if (bol > buf && *bol == ' ') {
+            if (bol > buf && *bol == ' ')
                 ++bol;
-            }
-            if (pos - bol > 0) {
+            if (pos - bol > 0)
+            {
 #ifndef DEBUG
                 syslog(tags[level].priority, "%s", bol);
 #else
@@ -70,16 +94,13 @@ int main()
 #endif
             }
             pos = buf;
-            colon = (char const *)0;
+            colon = (char const *) 0;
             continue;
         }
-        if (c == ':' && !colon) {
+        if (c == ':' && !colon)
             colon = pos;
-        }
-        if ((size_t)(pos - buf) < sizeof(buf)-1) {
+        if ((size_t) (pos - buf) < sizeof(buf) - 1)
             *pos++ = c;
-        }
-    }
+    }
     return 0;
 }

View File

@@ -23,7 +23,7 @@
  * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Header: /cvsroot/pgsql/contrib/pg_resetxlog/Attic/pg_resetxlog.c,v 1.2 2001/03/16 05:08:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_resetxlog/Attic/pg_resetxlog.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -110,7 +110,8 @@ static char XLogDir[MAXPGPATH];
 static char ControlFilePath[MAXPGPATH];
 static ControlFileData ControlFile;     /* pg_control values */
-static uint32 newXlogId, newXlogSeg;    /* ID/Segment of new XLOG segment */
+static uint32 newXlogId,
+              newXlogSeg;               /* ID/Segment of new XLOG segment */
 static bool guessed = false;            /* T if we had to guess at any values */
@@ -146,10 +147,11 @@ ReadControlFile(void)
     if ((fd = open(ControlFilePath, O_RDONLY)) < 0)
     {
         /*
-         * If pg_control is not there at all, or we can't read it,
-         * the odds are we've been handed a bad DataDir path, so give up.
-         * User can do "touch pg_control" to force us to proceed.
+         * If pg_control is not there at all, or we can't read it, the
+         * odds are we've been handed a bad DataDir path, so give up. User
+         * can do "touch pg_control" to force us to proceed.
         */
         perror("Failed to open $PGDATA/global/pg_control for reading");
         if (errno == ENOENT)
@@ -193,6 +195,7 @@ ReadControlFile(void)
         guessed = true;
         return true;
     }
     /*
      * Maybe it's a 7.1beta pg_control.
      */
@@ -222,49 +225,49 @@ typedef struct crc64V0
 } crc64V0;
 static uint32 crc_tableV0[] = {
     0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
     0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
     0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
     0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
     0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
     0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
     0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
     0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
     0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
     0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
     0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
     0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
     0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
     0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
     0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
     0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
     0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
     0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
     0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
     0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
     0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
     0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
     0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
     0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
     0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
     0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
     0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
     0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
     0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
     0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
     0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
     0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
     0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
     0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
     0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
     0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
     0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
     0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
     0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
     0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
     0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
     0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
     0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
 };
 #define INIT_CRC64V0(crc) ((crc).crc1 = 0xffffffff, (crc).crc2 = 0xffffffff)
@@ -356,7 +359,7 @@ typedef struct XLogPageHeaderDataV0
 typedef XLogPageHeaderDataV0 *XLogPageHeaderV0;
-static bool RecordIsValidV0(XLogRecordV0 *record);
+static bool RecordIsValidV0(XLogRecordV0 * record);
 static XLogRecordV0 *ReadRecordV0(XLogRecPtr *RecPtr, char *buffer);
 static bool ValidXLOGHeaderV0(XLogPageHeaderV0 hdr);
@@ -409,6 +412,7 @@ CheckControlVersion0(char *buffer, int len)
                        (char *) malloc(_INTL_MAXLOGRECSZ));
     if (record == NULL)
     {
         /*
          * We have to guess at the checkpoint contents.
          */
@@ -435,26 +439,26 @@ CheckControlVersion0(char *buffer, int len)
  * We assume all of the record has been read into memory at *record.
  */
 static bool
-RecordIsValidV0(XLogRecordV0 *record)
+RecordIsValidV0(XLogRecordV0 * record)
 {
     crc64V0 crc;
     uint32 len = record->xl_len;
     /*
      * NB: this code is not right for V0 records containing backup blocks,
-     * but for now it's only going to be applied to checkpoint records,
-     * so I'm not going to worry about it...
+     * but for now it's only going to be applied to checkpoint records, so
+     * I'm not going to worry about it...
      */
     INIT_CRC64V0(crc);
     COMP_CRC64V0(crc, XLogRecGetData(record), len);
-    COMP_CRC64V0(crc, (char*) record + sizeof(crc64V0),
+    COMP_CRC64V0(crc, (char *) record + sizeof(crc64V0),
                  SizeOfXLogRecordV0 - sizeof(crc64V0));
     FIN_CRC64V0(crc);
     if (!EQ_CRC64V0(record->xl_crc, crc))
         return false;
-    return(true);
+    return (true);
 }
 /*
@@ -489,7 +493,7 @@ ReadRecordV0(XLogRecPtr *RecPtr, char *buffer)
         readFile = XLogFileOpen(readId, readSeg);
         if (readFile < 0)
             goto next_record_is_invalid;
-        readOff = (uint32) (-1);    /* force read to occur below */
+        readOff = (uint32) (-1);/* force read to occur below */
     }
     targetPageOff = ((RecPtr->xrecoff % XLogSegSize) / BLCKSZ) * BLCKSZ;
@@ -510,10 +514,13 @@ ReadRecordV0(XLogRecPtr *RecPtr, char *buffer)
     if (record->xl_len == 0)
         goto next_record_is_invalid;
     /*
-     * Compute total length of record including any appended backup blocks.
+     * Compute total length of record including any appended backup
+     * blocks.
     */
     total_len = SizeOfXLogRecordV0 + record->xl_len;
     /*
      * Make sure it will fit in buffer (currently, it is mechanically
      * impossible for this test to fail, but it seems like a good idea
@@ -557,7 +564,7 @@ ReadRecordV0(XLogRecPtr *RecPtr, char *buffer)
             len = BLCKSZ - SizeOfXLogPHDV0 - SizeOfXLogContRecordV0;
             if (contrecord->xl_len > len)
             {
-                memcpy(buffer, (char *)contrecord + SizeOfXLogContRecordV0, len);
+                memcpy(buffer, (char *) contrecord + SizeOfXLogContRecordV0, len);
                 gotlen += len;
                 buffer += len;
                 continue;
@@ -610,6 +617,7 @@ GuessControlValues(void)
 {
 #ifdef USE_LOCALE
     char *localeptr;
 #endif
     /*
@@ -710,8 +718,8 @@ RewriteControlFile(void)
     char buffer[BLCKSZ];        /* need not be aligned */
     /*
-     * Adjust fields as needed to force an empty XLOG starting at the
-     * next available segment.
+     * Adjust fields as needed to force an empty XLOG starting at the next
+     * available segment.
     */
     newXlogId = ControlFile.logId;
     newXlogSeg = ControlFile.logSeg;
@@ -735,16 +743,16 @@ RewriteControlFile(void)
     /* Contents are protected with a CRC */
     INIT_CRC64(ControlFile.crc);
     COMP_CRC64(ControlFile.crc,
-               (char*) &ControlFile + sizeof(crc64),
+               (char *) &ControlFile + sizeof(crc64),
                sizeof(ControlFileData) - sizeof(crc64));
     FIN_CRC64(ControlFile.crc);
     /*
-     * We write out BLCKSZ bytes into pg_control, zero-padding the
-     * excess over sizeof(ControlFileData). This reduces the odds
-     * of premature-EOF errors when reading pg_control. We'll still
-     * fail when we check the contents of the file, but hopefully with
-     * a more specific error than "couldn't read pg_control".
+     * We write out BLCKSZ bytes into pg_control, zero-padding the excess
+     * over sizeof(ControlFileData). This reduces the odds of
+     * premature-EOF errors when reading pg_control. We'll still fail
+     * when we check the contents of the file, but hopefully with a more
+     * specific error than "couldn't read pg_control".
     */
     if (sizeof(ControlFileData) > BLCKSZ)
     {
@@ -858,7 +866,7 @@ WriteEmptyXLOG(void)
     INIT_CRC64(crc);
     COMP_CRC64(crc, &ControlFile.checkPointCopy, sizeof(CheckPoint));
-    COMP_CRC64(crc, (char*) record + sizeof(crc64),
+    COMP_CRC64(crc, (char *) record + sizeof(crc64),
                SizeOfXLogRecord - sizeof(crc64));
     FIN_CRC64(crc);
     record->xl_crc = crc;
@@ -914,7 +922,7 @@ usage(void)
 int
-main(int argc, char ** argv)
+main(int argc, char **argv)
 {
     int argn;
     bool force = false;
@@ -934,7 +942,7 @@ main(int argc, char ** argv)
         usage();
     }
-    if (argn != argc-1)         /* one required non-switch argument */
+    if (argn != argc - 1)       /* one required non-switch argument */
         usage();
     DataDir = argv[argn++];
@@ -946,7 +954,8 @@ main(int argc, char ** argv)
     /*
      * Check for a postmaster lock file --- if there is one, refuse to
-     * proceed, on grounds we might be interfering with a live installation.
+     * proceed, on grounds we might be interfering with a live
+     * installation.
     */
     snprintf(path, MAXPGPATH, "%s%cpostmaster.pid", DataDir, SEP_CHAR);
@@ -973,8 +982,8 @@ main(int argc, char ** argv)
     GuessControlValues();
     /*
-     * If we had to guess anything, and -f was not given, just print
-     * the guessed values and exit. Also print if -n is given.
+     * If we had to guess anything, and -f was not given, just print the
+     * guessed values and exit. Also print if -n is given.
     */
     if ((guessed && !force) || noupdate)
     {

View File

@@ -26,7 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: encode.c,v 1.3 2001/02/10 02:31:25 tgl Exp $
+ * $Id: encode.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
  */
 #include "postgres.h"
@@ -43,9 +43,9 @@
 #endif
 static pg_coding *
-find_coding(pg_coding *hbuf, text *name, int silent);
+find_coding(pg_coding * hbuf, text *name, int silent);
 static pg_coding *
-pg_find_coding(pg_coding *res, char *name);
+pg_find_coding(pg_coding * res, char *name);
 /* SQL function: encode(bytea, text) returns text */
@@ -56,8 +56,11 @@ encode(PG_FUNCTION_ARGS)
 {
     text *arg;
     text *name;
-    uint len, rlen, rlen0;
-    pg_coding *c, cbuf;
+    uint len,
+         rlen,
+         rlen0;
+    pg_coding *c,
+              cbuf;
     text *res;
     if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
@@ -71,7 +74,7 @@ encode(PG_FUNCTION_ARGS)
     rlen0 = c->encode_len(len);
-    res = (text *)palloc(rlen0 + VARHDRSZ);
+    res = (text *) palloc(rlen0 + VARHDRSZ);
     rlen = c->encode(VARDATA(arg), len, VARDATA(res));
     VARATT_SIZEP(res) = rlen + VARHDRSZ;
@@ -93,8 +96,11 @@ decode(PG_FUNCTION_ARGS)
 {
     text *arg;
     text *name;
-    uint len, rlen, rlen0;
-    pg_coding *c, cbuf;
+    uint len,
+         rlen,
+         rlen0;
+    pg_coding *c,
+              cbuf;
     text *res;
     if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
@@ -108,7 +114,7 @@ decode(PG_FUNCTION_ARGS)
     rlen0 = c->decode_len(len);
-    res = (text *)palloc(rlen0 + VARHDRSZ);
+    res = (text *) palloc(rlen0 + VARHDRSZ);
     rlen = c->decode(VARDATA(arg), len, VARDATA(res));
     VARATT_SIZEP(res) = rlen + VARHDRSZ;
@@ -123,14 +129,15 @@ decode(PG_FUNCTION_ARGS)
 }
 static pg_coding *
-find_coding(pg_coding *dst, text *name, int silent)
+find_coding(pg_coding * dst, text *name, int silent)
 {
     pg_coding *p;
     char buf[NAMEDATALEN];
     uint len;
     len = VARSIZE(name) - VARHDRSZ;
-    if (len >= NAMEDATALEN) {
+    if (len >= NAMEDATALEN)
+    {
         if (silent)
             return NULL;
         elog(ERROR, "Encoding type does not exist (name too long)");
@@ -152,12 +159,14 @@ uint
 hex_encode(uint8 *src, uint len, uint8 *dst)
 {
     uint8 *end = src + len;
-    while (src < end) {
+    while (src < end)
+    {
         *dst++ = hextbl[(*src >> 4) & 0xF];
         *dst++ = hextbl[*src & 0xF];
         src++;
     }
-    return len*2;
+    return len * 2;
 }
 /* probably should use lookup table */
@@ -181,12 +190,19 @@ get_hex(char c)
 uint
 hex_decode(uint8 *src, uint len, uint8 *dst)
 {
-    uint8 *s, *srcend, v1, v2, *p = dst;
+    uint8 *s,
+          *srcend,
+          v1,
+          v2,
+          *p = dst;
     srcend = src + len;
-    s = src; p = dst;
-    while (s < srcend) {
-        if (*s == ' ' || *s == '\n' || *s == '\t' || *s == '\r') {
+    s = src;
+    p = dst;
+    while (s < srcend)
+    {
+        if (*s == ' ' || *s == '\n' || *s == '\t' || *s == '\r')
+        {
            s++;
            continue;
        }
@@ -202,24 +218,30 @@ hex_decode(uint8 *src, uint len, uint8 *dst)
 static unsigned char _base64[] =
     "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
uint uint
b64_encode(uint8 *src, uint len, uint8 *dst) b64_encode(uint8 *src, uint len, uint8 *dst)
{ {
uint8 *s, *p, *end = src + len, *lend = dst + 76; uint8 *s,
*p,
*end = src + len,
*lend = dst + 76;
int pos = 2; int pos = 2;
unsigned long buf = 0; unsigned long buf = 0;
s = src; p = dst; s = src;
p = dst;
while (s < end) { while (s < end)
{
buf |= *s << (pos << 3); buf |= *s << (pos << 3);
pos--; pos--;
s++; s++;
/* write it out */ /* write it out */
if (pos < 0) { if (pos < 0)
{
*p++ = _base64[(buf >> 18) & 0x3f]; *p++ = _base64[(buf >> 18) & 0x3f];
*p++ = _base64[(buf >> 12) & 0x3f]; *p++ = _base64[(buf >> 12) & 0x3f];
*p++ = _base64[(buf >> 6) & 0x3f]; *p++ = _base64[(buf >> 6) & 0x3f];
@ -228,12 +250,14 @@ b64_encode(uint8 *src, uint len, uint8 *dst)
pos = 2; pos = 2;
buf = 0; buf = 0;
} }
if (p >= lend) { if (p >= lend)
{
*p++ = '\n'; *p++ = '\n';
lend = p + 76; lend = p + 76;
} }
} }
if (pos != 2) { if (pos != 2)
{
*p++ = _base64[(buf >> 18) & 0x3f]; *p++ = _base64[(buf >> 18) & 0x3f];
*p++ = _base64[(buf >> 12) & 0x3f]; *p++ = _base64[(buf >> 12) & 0x3f];
*p++ = (pos == 0) ? _base64[(buf >> 6) & 0x3f] : '='; *p++ = (pos == 0) ? _base64[(buf >> 6) & 0x3f] : '=';
@ -247,14 +271,17 @@ b64_encode(uint8 *src, uint len, uint8 *dst)
uint uint
b64_decode(uint8 *src, uint len, uint8 *dst) b64_decode(uint8 *src, uint len, uint8 *dst)
{ {
char *srcend = src + len, *s = src; char *srcend = src + len,
*s = src;
uint8 *p = dst; uint8 *p = dst;
char c; char c;
uint b = 0; uint b = 0;
unsigned long buf = 0; unsigned long buf = 0;
int pos = 0, end = 0; int pos = 0,
end = 0;
while (s < srcend) { while (s < srcend)
{
c = *s++; c = *s++;
if (c >= 'A' && c <= 'Z') if (c >= 'A' && c <= 'Z')
b = c - 'A'; b = c - 'A';
@ -266,16 +293,21 @@ b64_decode(uint8 *src, uint len, uint8 *dst)
b = 62; b = 62;
else if (c == '/') else if (c == '/')
b = 63; b = 63;
else if (c == '=') { else if (c == '=')
{
/* end sequence */ /* end sequence */
if (!end) { if (!end)
if (pos == 2) end = 1; {
else if (pos == 3) end = 2; if (pos == 2)
end = 1;
else if (pos == 3)
end = 2;
else else
elog(ERROR, "base64: unexpected '='"); elog(ERROR, "base64: unexpected '='");
} }
b = 0; b = 0;
} else if (c == ' ' || c == '\t' || c == '\n' || c == '\r') }
else if (c == ' ' || c == '\t' || c == '\n' || c == '\r')
continue; continue;
else else
elog(ERROR, "base64: Invalid symbol"); elog(ERROR, "base64: Invalid symbol");
@ -283,7 +315,8 @@ b64_decode(uint8 *src, uint len, uint8 *dst)
/* add it to buffer */ /* add it to buffer */
buf = (buf << 6) + b; buf = (buf << 6) + b;
pos++; pos++;
if (pos == 4) { if (pos == 4)
{
*p++ = (buf >> 16) & 255; *p++ = (buf >> 16) & 255;
if (end == 0 || end > 1) if (end == 0 || end > 1)
*p++ = (buf >> 8) & 255; *p++ = (buf >> 8) & 255;
@ -326,21 +359,22 @@ b64_dec_len(uint srclen)
} }
static pg_coding static pg_coding
encoding_list [] = { encoding_list[] = {
{ "hex", hex_enc_len, hex_dec_len, hex_encode, hex_decode}, {"hex", hex_enc_len, hex_dec_len, hex_encode, hex_decode},
{ "base64", b64_enc_len, b64_dec_len, b64_encode, b64_decode}, {"base64", b64_enc_len, b64_dec_len, b64_encode, b64_decode},
{ NULL, NULL, NULL, NULL, NULL} {NULL, NULL, NULL, NULL, NULL}
}; };
static pg_coding * static pg_coding *
pg_find_coding(pg_coding *res, char *name) pg_find_coding(pg_coding * res, char *name)
{ {
pg_coding *p; pg_coding *p;
for (p = encoding_list; p->name; p++) {
for (p = encoding_list; p->name; p++)
{
if (!strcasecmp(p->name, name)) if (!strcasecmp(p->name, name))
return p; return p;
} }
return NULL; return NULL;
} }
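
For orientation while reading the reindented encode.c: the hex coding is just a nibble lookup each way. A self-contained sketch of the same idea, without the contrib module's elog() error handling or its pg_coding plumbing, looks like this (illustrative only):

#include <stdio.h>
#include <string.h>

static const char hextbl[] = "0123456789abcdef";

/* Two output characters per input byte, high nibble first. */
static size_t
hex_encode(const unsigned char *src, size_t len, char *dst)
{
    for (size_t i = 0; i < len; i++)
    {
        *dst++ = hextbl[(src[i] >> 4) & 0xF];
        *dst++ = hextbl[src[i] & 0xF];
    }
    return len * 2;
}

static int
get_hex(char c)
{
    if (c >= '0' && c <= '9')
        return c - '0';
    if (c >= 'a' && c <= 'f')
        return c - 'a' + 10;
    if (c >= 'A' && c <= 'F')
        return c - 'A' + 10;
    return -1;                   /* the contrib code raises an error here */
}

static size_t
hex_decode(const char *src, size_t len, unsigned char *dst)
{
    size_t      n = 0;

    for (size_t i = 0; i + 1 < len; i += 2)
        dst[n++] = (unsigned char) ((get_hex(src[i]) << 4) | get_hex(src[i + 1]));
    return n;
}

int
main(void)
{
    const char *msg = "pgcrypto";
    char        hex[64];
    unsigned char back[32];
    size_t      hlen = hex_encode((const unsigned char *) msg, strlen(msg), hex);
    size_t      blen;

    printf("%.*s\n", (int) hlen, hex);
    blen = hex_decode(hex, hlen, back);
    printf("%.*s\n", (int) blen, (char *) back);
    return 0;
}
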

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
* *
* $Id: encode.h,v 1.1 2001/01/24 03:46:16 momjian Exp $ * $Id: encode.h,v 1.2 2001/03/22 03:59:10 momjian Exp $
*/ */
#ifndef __PG_ENCODE_H #ifndef __PG_ENCODE_H
@ -37,12 +37,13 @@ Datum encode(PG_FUNCTION_ARGS);
Datum decode(PG_FUNCTION_ARGS); Datum decode(PG_FUNCTION_ARGS);
typedef struct _pg_coding pg_coding; typedef struct _pg_coding pg_coding;
struct _pg_coding { struct _pg_coding
{
char *name; char *name;
uint (*encode_len)(uint dlen); uint (*encode_len) (uint dlen);
uint (*decode_len)(uint dlen); uint (*decode_len) (uint dlen);
uint (*encode)(uint8 *data, uint dlen, uint8 *res); uint (*encode) (uint8 *data, uint dlen, uint8 *res);
uint (*decode)(uint8 *data, uint dlen, uint8 *res); uint (*decode) (uint8 *data, uint dlen, uint8 *res);
}; };
/* They are for outside usage in C code, if needed */ /* They are for outside usage in C code, if needed */
@ -57,4 +58,3 @@ uint b64_enc_len(uint srclen);
uint b64_dec_len(uint srclen); uint b64_dec_len(uint srclen);
#endif /* __PG_ENCODE_H */ #endif /* __PG_ENCODE_H */

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
* *
* $Id: internal.c,v 1.2 2001/02/10 02:31:25 tgl Exp $ * $Id: internal.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/ */
#include "postgres.h" #include "postgres.h"
@ -49,29 +49,30 @@
#endif #endif
static uint static uint
pg_md5_len(pg_digest *h); pg_md5_len(pg_digest * h);
static uint8 * static uint8 *
pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf); pg_md5_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static uint static uint
pg_sha1_len(pg_digest *h); pg_sha1_len(pg_digest * h);
static uint8 * static uint8 *
pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf); pg_sha1_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static pg_digest static pg_digest
int_digest_list [] = { int_digest_list[] = {
{ "md5", pg_md5_len, pg_md5_digest, {0}}, {"md5", pg_md5_len, pg_md5_digest, {0}},
{ "sha1", pg_sha1_len, pg_sha1_digest, {0}}, {"sha1", pg_sha1_len, pg_sha1_digest, {0}},
{ NULL, NULL, NULL, {0}} {NULL, NULL, NULL, {0}}
}; };
static uint static uint
pg_md5_len(pg_digest *h) { pg_md5_len(pg_digest * h)
{
return MD5_DIGEST_LENGTH; return MD5_DIGEST_LENGTH;
} }
static uint8 * static uint8 *
pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf) pg_md5_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{ {
MD5_CTX ctx; MD5_CTX ctx;
@ -83,12 +84,13 @@ pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
} }
static uint static uint
pg_sha1_len(pg_digest *h) { pg_sha1_len(pg_digest * h)
{
return SHA1_DIGEST_LENGTH; return SHA1_DIGEST_LENGTH;
} }
static uint8 * static uint8 *
pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf) pg_sha1_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{ {
SHA1_CTX ctx; SHA1_CTX ctx;
@ -101,7 +103,7 @@ pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
pg_digest * pg_digest *
pg_find_digest(pg_digest *h, char *name) pg_find_digest(pg_digest * h, char *name)
{ {
pg_digest *p; pg_digest *p;
@ -110,5 +112,3 @@ pg_find_digest(pg_digest *h, char *name)
return p; return p;
return NULL; return NULL;
} }
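
internal.c's int_digest_list plus pg_find_digest is a plain name-to-function-pointer dispatch table, the same device encode.c uses for its codings. A stripped-down sketch of that pattern, with hypothetical entry names and only a length callback, might be:

#include <stdio.h>
#include <strings.h>            /* strcasecmp */

typedef struct digest_entry
{
    const char *name;
    unsigned    (*length) (void);
} digest_entry;

static unsigned md5_len(void)  { return 16; }
static unsigned sha1_len(void) { return 20; }

/* NULL-terminated table, scanned linearly by name. */
static const digest_entry digest_list[] = {
    {"md5", md5_len},
    {"sha1", sha1_len},
    {NULL, NULL}
};

static const digest_entry *
find_digest(const char *name)
{
    for (const digest_entry *p = digest_list; p->name; p++)
        if (strcasecmp(p->name, name) == 0)
            return p;
    return NULL;
}

int
main(void)
{
    const digest_entry *d = find_digest("SHA1");

    printf("%s digest length: %u bytes\n",
           d ? d->name : "unknown", d ? d->length() : 0);
    return 0;
}
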

View File

@ -31,7 +31,7 @@
* It is possible that this works with other SHA1/MD5 * It is possible that this works with other SHA1/MD5
* implementations too. * implementations too.
* *
* $Id: krb.c,v 1.3 2001/02/20 15:34:14 momjian Exp $ * $Id: krb.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
*/ */
#include "postgres.h" #include "postgres.h"
@ -54,29 +54,30 @@
#endif #endif
static uint static uint
pg_md5_len(pg_digest *h); pg_md5_len(pg_digest * h);
static uint8 * static uint8 *
pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf); pg_md5_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static uint static uint
pg_sha1_len(pg_digest *h); pg_sha1_len(pg_digest * h);
static uint8 * static uint8 *
pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf); pg_sha1_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static pg_digest static pg_digest
int_digest_list [] = { int_digest_list[] = {
{ "md5", pg_md5_len, pg_md5_digest, {0}}, {"md5", pg_md5_len, pg_md5_digest, {0}},
{ "sha1", pg_sha1_len, pg_sha1_digest, {0}}, {"sha1", pg_sha1_len, pg_sha1_digest, {0}},
{ NULL, NULL, NULL, {0}} {NULL, NULL, NULL, {0}}
}; };
static uint static uint
pg_md5_len(pg_digest *h) { pg_md5_len(pg_digest * h)
{
return MD5_DIGEST_LENGTH; return MD5_DIGEST_LENGTH;
} }
static uint8 * static uint8 *
pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf) pg_md5_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{ {
MD5_CTX ctx; MD5_CTX ctx;
@ -88,12 +89,13 @@ pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
} }
static uint static uint
pg_sha1_len(pg_digest *h) { pg_sha1_len(pg_digest * h)
{
return SHA1_DIGEST_LENGTH; return SHA1_DIGEST_LENGTH;
} }
static uint8 * static uint8 *
pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf) pg_sha1_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{ {
SHA1_CTX ctx; SHA1_CTX ctx;
@ -106,7 +108,7 @@ pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
pg_digest * pg_digest *
pg_find_digest(pg_digest *h, char *name) pg_find_digest(pg_digest * h, char *name)
{ {
pg_digest *p; pg_digest *p;
@ -115,5 +117,3 @@ pg_find_digest(pg_digest *h, char *name)
return p; return p;
return NULL; return NULL;
} }

View File

@ -1,4 +1,4 @@
/* $Id: md5.c,v 1.4 2001/02/10 02:31:25 tgl Exp $ */ /* $Id: md5.c,v 1.5 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: md5.c,v 1.3 2000/02/22 14:01:17 itojun Exp $ */ /* $KAME: md5.c,v 1.3 2000/02/22 14:01:17 itojun Exp $ */
/* /*
@ -125,10 +125,11 @@ static const uint8 md5_paddat[MD5_BUFLEN] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
}; };
static void md5_calc (uint8 *, md5_ctxt *); static void md5_calc(uint8 *, md5_ctxt *);
void md5_init(ctxt) void
md5_ctxt *ctxt; md5_init(ctxt)
md5_ctxt *ctxt;
{ {
ctxt->md5_n = 0; ctxt->md5_n = 0;
ctxt->md5_i = 0; ctxt->md5_i = 0;
@ -139,52 +140,60 @@ void md5_init(ctxt)
bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf)); bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
} }
void md5_loop(ctxt, input, len) void
md5_ctxt *ctxt; md5_loop(ctxt, input, len)
uint8 *input; md5_ctxt *ctxt;
unsigned int len; /* number of bytes */ uint8 *input;
unsigned int len; /* number of bytes */
{ {
unsigned int gap, i; unsigned int gap,
i;
ctxt->md5_n += len * 8; /* byte to bit */ ctxt->md5_n += len * 8; /* byte to bit */
gap = MD5_BUFLEN - ctxt->md5_i; gap = MD5_BUFLEN - ctxt->md5_i;
if (len >= gap) { if (len >= gap)
bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i), {
bcopy((void *) input, (void *) (ctxt->md5_buf + ctxt->md5_i),
gap); gap);
md5_calc(ctxt->md5_buf, ctxt); md5_calc(ctxt->md5_buf, ctxt);
for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) { for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN)
md5_calc((uint8 *)(input + i), ctxt); md5_calc((uint8 *) (input + i), ctxt);
}
ctxt->md5_i = len - i; ctxt->md5_i = len - i;
bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i); bcopy((void *) (input + i), (void *) ctxt->md5_buf, ctxt->md5_i);
} else { }
bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i), else
{
bcopy((void *) input, (void *) (ctxt->md5_buf + ctxt->md5_i),
len); len);
ctxt->md5_i += len; ctxt->md5_i += len;
} }
} }
void md5_pad(ctxt) void
md5_ctxt *ctxt; md5_pad(ctxt)
md5_ctxt *ctxt;
{ {
unsigned int gap; unsigned int gap;
/* Don't count up padding. Keep md5_n. */ /* Don't count up padding. Keep md5_n. */
gap = MD5_BUFLEN - ctxt->md5_i; gap = MD5_BUFLEN - ctxt->md5_i;
if (gap > 8) { if (gap > 8)
bcopy((void *)md5_paddat, {
(void *)(ctxt->md5_buf + ctxt->md5_i), bcopy((void *) md5_paddat,
(void *) (ctxt->md5_buf + ctxt->md5_i),
gap - sizeof(ctxt->md5_n)); gap - sizeof(ctxt->md5_n));
} else { }
else
{
/* including gap == 8 */ /* including gap == 8 */
bcopy((void *)md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i), bcopy((void *) md5_paddat, (void *) (ctxt->md5_buf + ctxt->md5_i),
gap); gap);
md5_calc(ctxt->md5_buf, ctxt); md5_calc(ctxt->md5_buf, ctxt);
bcopy((void *)(md5_paddat + gap), bcopy((void *) (md5_paddat + gap),
(void *)ctxt->md5_buf, (void *) ctxt->md5_buf,
MD5_BUFLEN - sizeof(ctxt->md5_n)); MD5_BUFLEN - sizeof(ctxt->md5_n));
} }
@ -206,98 +215,192 @@ void md5_pad(ctxt)
md5_calc(ctxt->md5_buf, ctxt); md5_calc(ctxt->md5_buf, ctxt);
} }
void md5_result(digest, ctxt) void
uint8 *digest; md5_result(digest, ctxt)
md5_ctxt *ctxt; uint8 *digest;
md5_ctxt *ctxt;
{ {
/* 4 byte words */ /* 4 byte words */
#if BYTE_ORDER == LITTLE_ENDIAN #if BYTE_ORDER == LITTLE_ENDIAN
bcopy(&ctxt->md5_st8[0], digest, 16); bcopy(&ctxt->md5_st8[0], digest, 16);
#endif #endif
#if BYTE_ORDER == BIG_ENDIAN #if BYTE_ORDER == BIG_ENDIAN
digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2]; digest[0] = ctxt->md5_st8[3];
digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0]; digest[1] = ctxt->md5_st8[2];
digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6]; digest[2] = ctxt->md5_st8[1];
digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4]; digest[3] = ctxt->md5_st8[0];
digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10]; digest[4] = ctxt->md5_st8[7];
digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8]; digest[5] = ctxt->md5_st8[6];
digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14]; digest[6] = ctxt->md5_st8[5];
digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12]; digest[7] = ctxt->md5_st8[4];
digest[8] = ctxt->md5_st8[11];
digest[9] = ctxt->md5_st8[10];
digest[10] = ctxt->md5_st8[9];
digest[11] = ctxt->md5_st8[8];
digest[12] = ctxt->md5_st8[15];
digest[13] = ctxt->md5_st8[14];
digest[14] = ctxt->md5_st8[13];
digest[15] = ctxt->md5_st8[12];
#endif #endif
} }
#if BYTE_ORDER == BIG_ENDIAN #if BYTE_ORDER == BIG_ENDIAN
uint32 X[16]; uint32 X[16];
#endif #endif
static void md5_calc(b64, ctxt) static void
uint8 *b64; md5_calc(b64, ctxt)
md5_ctxt *ctxt; uint8 *b64;
md5_ctxt *ctxt;
{ {
uint32 A = ctxt->md5_sta; uint32 A = ctxt->md5_sta;
uint32 B = ctxt->md5_stb; uint32 B = ctxt->md5_stb;
uint32 C = ctxt->md5_stc; uint32 C = ctxt->md5_stc;
uint32 D = ctxt->md5_std; uint32 D = ctxt->md5_std;
#if BYTE_ORDER == LITTLE_ENDIAN #if BYTE_ORDER == LITTLE_ENDIAN
uint32 *X = (uint32 *)b64; uint32 *X = (uint32 *) b64;
#endif #endif
#if BYTE_ORDER == BIG_ENDIAN #if BYTE_ORDER == BIG_ENDIAN
/* 4 byte words */ /* 4 byte words */
/* what a brute force but fast! */ /* what a brute force but fast! */
uint8 *y = (uint8 *)X; uint8 *y = (uint8 *) X;
y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4]; y[0] = b64[3];
y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8]; y[1] = b64[2];
y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12]; y[2] = b64[1];
y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16]; y[3] = b64[0];
y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20]; y[4] = b64[7];
y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24]; y[5] = b64[6];
y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28]; y[6] = b64[5];
y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32]; y[7] = b64[4];
y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36]; y[8] = b64[11];
y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40]; y[9] = b64[10];
y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44]; y[10] = b64[9];
y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48]; y[11] = b64[8];
y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52]; y[12] = b64[15];
y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56]; y[13] = b64[14];
y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60]; y[14] = b64[13];
y[15] = b64[12];
y[16] = b64[19];
y[17] = b64[18];
y[18] = b64[17];
y[19] = b64[16];
y[20] = b64[23];
y[21] = b64[22];
y[22] = b64[21];
y[23] = b64[20];
y[24] = b64[27];
y[25] = b64[26];
y[26] = b64[25];
y[27] = b64[24];
y[28] = b64[31];
y[29] = b64[30];
y[30] = b64[29];
y[31] = b64[28];
y[32] = b64[35];
y[33] = b64[34];
y[34] = b64[33];
y[35] = b64[32];
y[36] = b64[39];
y[37] = b64[38];
y[38] = b64[37];
y[39] = b64[36];
y[40] = b64[43];
y[41] = b64[42];
y[42] = b64[41];
y[43] = b64[40];
y[44] = b64[47];
y[45] = b64[46];
y[46] = b64[45];
y[47] = b64[44];
y[48] = b64[51];
y[49] = b64[50];
y[50] = b64[49];
y[51] = b64[48];
y[52] = b64[55];
y[53] = b64[54];
y[54] = b64[53];
y[55] = b64[52];
y[56] = b64[59];
y[57] = b64[58];
y[58] = b64[57];
y[59] = b64[56];
y[60] = b64[63];
y[61] = b64[62];
y[62] = b64[61];
y[63] = b64[60];
#endif #endif
ROUND1(A, B, C, D, 0, Sa, 1); ROUND1(D, A, B, C, 1, Sb, 2); ROUND1(A, B, C, D, 0, Sa, 1);
ROUND1(C, D, A, B, 2, Sc, 3); ROUND1(B, C, D, A, 3, Sd, 4); ROUND1(D, A, B, C, 1, Sb, 2);
ROUND1(A, B, C, D, 4, Sa, 5); ROUND1(D, A, B, C, 5, Sb, 6); ROUND1(C, D, A, B, 2, Sc, 3);
ROUND1(C, D, A, B, 6, Sc, 7); ROUND1(B, C, D, A, 7, Sd, 8); ROUND1(B, C, D, A, 3, Sd, 4);
ROUND1(A, B, C, D, 8, Sa, 9); ROUND1(D, A, B, C, 9, Sb, 10); ROUND1(A, B, C, D, 4, Sa, 5);
ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12); ROUND1(D, A, B, C, 5, Sb, 6);
ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14); ROUND1(C, D, A, B, 6, Sc, 7);
ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16); ROUND1(B, C, D, A, 7, Sd, 8);
ROUND1(A, B, C, D, 8, Sa, 9);
ROUND1(D, A, B, C, 9, Sb, 10);
ROUND1(C, D, A, B, 10, Sc, 11);
ROUND1(B, C, D, A, 11, Sd, 12);
ROUND1(A, B, C, D, 12, Sa, 13);
ROUND1(D, A, B, C, 13, Sb, 14);
ROUND1(C, D, A, B, 14, Sc, 15);
ROUND1(B, C, D, A, 15, Sd, 16);
ROUND2(A, B, C, D, 1, Se, 17); ROUND2(D, A, B, C, 6, Sf, 18); ROUND2(A, B, C, D, 1, Se, 17);
ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A, 0, Sh, 20); ROUND2(D, A, B, C, 6, Sf, 18);
ROUND2(A, B, C, D, 5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22); ROUND2(C, D, A, B, 11, Sg, 19);
ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A, 4, Sh, 24); ROUND2(B, C, D, A, 0, Sh, 20);
ROUND2(A, B, C, D, 9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26); ROUND2(A, B, C, D, 5, Se, 21);
ROUND2(C, D, A, B, 3, Sg, 27); ROUND2(B, C, D, A, 8, Sh, 28); ROUND2(D, A, B, C, 10, Sf, 22);
ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C, 2, Sf, 30); ROUND2(C, D, A, B, 15, Sg, 23);
ROUND2(C, D, A, B, 7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32); ROUND2(B, C, D, A, 4, Sh, 24);
ROUND2(A, B, C, D, 9, Se, 25);
ROUND2(D, A, B, C, 14, Sf, 26);
ROUND2(C, D, A, B, 3, Sg, 27);
ROUND2(B, C, D, A, 8, Sh, 28);
ROUND2(A, B, C, D, 13, Se, 29);
ROUND2(D, A, B, C, 2, Sf, 30);
ROUND2(C, D, A, B, 7, Sg, 31);
ROUND2(B, C, D, A, 12, Sh, 32);
ROUND3(A, B, C, D, 5, Si, 33); ROUND3(D, A, B, C, 8, Sj, 34); ROUND3(A, B, C, D, 5, Si, 33);
ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36); ROUND3(D, A, B, C, 8, Sj, 34);
ROUND3(A, B, C, D, 1, Si, 37); ROUND3(D, A, B, C, 4, Sj, 38); ROUND3(C, D, A, B, 11, Sk, 35);
ROUND3(C, D, A, B, 7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40); ROUND3(B, C, D, A, 14, Sl, 36);
ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C, 0, Sj, 42); ROUND3(A, B, C, D, 1, Si, 37);
ROUND3(C, D, A, B, 3, Sk, 43); ROUND3(B, C, D, A, 6, Sl, 44); ROUND3(D, A, B, C, 4, Sj, 38);
ROUND3(A, B, C, D, 9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46); ROUND3(C, D, A, B, 7, Sk, 39);
ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A, 2, Sl, 48); ROUND3(B, C, D, A, 10, Sl, 40);
ROUND3(A, B, C, D, 13, Si, 41);
ROUND3(D, A, B, C, 0, Sj, 42);
ROUND3(C, D, A, B, 3, Sk, 43);
ROUND3(B, C, D, A, 6, Sl, 44);
ROUND3(A, B, C, D, 9, Si, 45);
ROUND3(D, A, B, C, 12, Sj, 46);
ROUND3(C, D, A, B, 15, Sk, 47);
ROUND3(B, C, D, A, 2, Sl, 48);
ROUND4(A, B, C, D, 0, Sm, 49); ROUND4(D, A, B, C, 7, Sn, 50); ROUND4(A, B, C, D, 0, Sm, 49);
ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A, 5, Sp, 52); ROUND4(D, A, B, C, 7, Sn, 50);
ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C, 3, Sn, 54); ROUND4(C, D, A, B, 14, So, 51);
ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A, 1, Sp, 56); ROUND4(B, C, D, A, 5, Sp, 52);
ROUND4(A, B, C, D, 8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58); ROUND4(A, B, C, D, 12, Sm, 53);
ROUND4(C, D, A, B, 6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60); ROUND4(D, A, B, C, 3, Sn, 54);
ROUND4(A, B, C, D, 4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62); ROUND4(C, D, A, B, 10, So, 55);
ROUND4(C, D, A, B, 2, So, 63); ROUND4(B, C, D, A, 9, Sp, 64); ROUND4(B, C, D, A, 1, Sp, 56);
ROUND4(A, B, C, D, 8, Sm, 57);
ROUND4(D, A, B, C, 15, Sn, 58);
ROUND4(C, D, A, B, 6, So, 59);
ROUND4(B, C, D, A, 13, Sp, 60);
ROUND4(A, B, C, D, 4, Sm, 61);
ROUND4(D, A, B, C, 11, Sn, 62);
ROUND4(C, D, A, B, 2, So, 63);
ROUND4(B, C, D, A, 9, Sp, 64);
ctxt->md5_sta += A; ctxt->md5_sta += A;
ctxt->md5_stb += B; ctxt->md5_stb += B;
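
Most of the churn in md5.c above is pgindent splitting the big-endian byte-swap runs into one assignment per line; functionally those runs just serialize the four 32-bit state words in little-endian order, as MD5 requires. The same effect can be written with explicit shifts that work on either byte order; the sketch below uses MD5's standard initial state as sample data and is not the KAME code:

#include <stdint.h>
#include <stdio.h>

/* Store one 32-bit word as four little-endian bytes regardless of host
 * byte order; equivalent to the digest[0] = md5_st8[3] ... runs. */
static void
store_le32(uint8_t *out, uint32_t w)
{
    out[0] = (uint8_t) (w & 0xff);
    out[1] = (uint8_t) ((w >> 8) & 0xff);
    out[2] = (uint8_t) ((w >> 16) & 0xff);
    out[3] = (uint8_t) ((w >> 24) & 0xff);
}

int
main(void)
{
    uint32_t    state[4] = {0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476};
    uint8_t     digest[16];

    for (int i = 0; i < 4; i++)
        store_le32(digest + 4 * i, state[i]);
    for (int i = 0; i < 16; i++)
        printf("%02x", digest[i]);
    putchar('\n');
    return 0;
}
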

View File

@ -1,4 +1,4 @@
/* $Id: md5.h,v 1.3 2001/01/09 16:07:13 momjian Exp $ */ /* $Id: md5.h,v 1.4 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: md5.h,v 1.3 2000/02/22 14:01:18 itojun Exp $ */ /* $KAME: md5.h,v 1.3 2000/02/22 14:01:18 itojun Exp $ */
/* /*
@ -35,8 +35,10 @@
#define MD5_BUFLEN 64 #define MD5_BUFLEN 64
typedef struct { typedef struct
union { {
union
{
uint32 md5_state32[4]; uint32 md5_state32[4];
uint8 md5_state8[16]; uint8 md5_state8[16];
} md5_st; } md5_st;
@ -47,7 +49,8 @@ typedef struct {
#define md5_std md5_st.md5_state32[3] #define md5_std md5_st.md5_state32[3]
#define md5_st8 md5_st.md5_state8 #define md5_st8 md5_st.md5_state8
union { union
{
uint64 md5_count64; uint64 md5_count64;
uint8 md5_count8[8]; uint8 md5_count8[8];
} md5_count; } md5_count;
@ -58,10 +61,10 @@ typedef struct {
uint8 md5_buf[MD5_BUFLEN]; uint8 md5_buf[MD5_BUFLEN];
} md5_ctxt; } md5_ctxt;
extern void md5_init (md5_ctxt *); extern void md5_init(md5_ctxt *);
extern void md5_loop (md5_ctxt *, uint8 *, unsigned int); extern void md5_loop(md5_ctxt *, uint8 *, unsigned int);
extern void md5_pad (md5_ctxt *); extern void md5_pad(md5_ctxt *);
extern void md5_result (uint8 *, md5_ctxt *); extern void md5_result(uint8 *, md5_ctxt *);
/* compatibility */ /* compatibility */
#define MD5_CTX md5_ctxt #define MD5_CTX md5_ctxt
@ -73,4 +76,4 @@ do { \
md5_result((x), (y)); \ md5_result((x), (y)); \
} while (0) } while (0)
#endif /* ! _NETINET6_MD5_H_*/ #endif /* ! _NETINET6_MD5_H_ */

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
* *
* $Id: mhash.c,v 1.2 2001/02/10 02:31:26 tgl Exp $ * $Id: mhash.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/ */
#include "postgres.h" #include "postgres.h"
@ -36,22 +36,23 @@
#include <mhash.h> #include <mhash.h>
static uint static uint
pg_mhash_len(pg_digest *hash); pg_mhash_len(pg_digest * hash);
static uint8 * static uint8 *pg_mhash_digest(pg_digest * hash, uint8 *src,
pg_mhash_digest(pg_digest *hash, uint8 *src,
uint len, uint8 *buf); uint len, uint8 *buf);
static uint static uint
pg_mhash_len(pg_digest *h) { pg_mhash_len(pg_digest * h)
{
return mhash_get_block_size(h->misc.code); return mhash_get_block_size(h->misc.code);
} }
static uint8 * static uint8 *
pg_mhash_digest(pg_digest *h, uint8 *src, uint len, uint8 *dst) pg_mhash_digest(pg_digest * h, uint8 *src, uint len, uint8 *dst)
{ {
uint8 *res; uint8 *res;
MHASH mh = mhash_init(h->misc.code); MHASH mh = mhash_init(h->misc.code);
mhash(mh, src, len); mhash(mh, src, len);
res = mhash_end(mh); res = mhash_end(mh);
@ -62,19 +63,23 @@ pg_mhash_digest(pg_digest *h, uint8 *src, uint len, uint8 *dst)
} }
pg_digest * pg_digest *
pg_find_digest(pg_digest *h, char *name) pg_find_digest(pg_digest * h, char *name)
{ {
size_t hnum, i, b; size_t hnum,
i,
b;
char *mname; char *mname;
hnum = mhash_count(); hnum = mhash_count();
for (i = 0; i <= hnum; i++) { for (i = 0; i <= hnum; i++)
{
mname = mhash_get_hash_name(i); mname = mhash_get_hash_name(i);
if (mname == NULL) if (mname == NULL)
continue; continue;
b = strcasecmp(name, mname); b = strcasecmp(name, mname);
free(mname); free(mname);
if (!b) { if (!b)
{
h->name = mhash_get_hash_name(i); h->name = mhash_get_hash_name(i);
h->length = pg_mhash_len; h->length = pg_mhash_len;
h->digest = pg_mhash_digest; h->digest = pg_mhash_digest;
@ -84,4 +89,3 @@ pg_find_digest(pg_digest *h, char *name)
} }
return NULL; return NULL;
} }

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
* *
* $Id: openssl.c,v 1.2 2001/02/10 02:31:26 tgl Exp $ * $Id: openssl.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/ */
#include "postgres.h" #include "postgres.h"
@ -36,19 +36,20 @@
#include <evp.h> #include <evp.h>
static uint static uint
pg_ossl_len(pg_digest *h); pg_ossl_len(pg_digest * h);
static uint8 * static uint8 *
pg_ossl_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf); pg_ossl_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static uint static uint
pg_ossl_len(pg_digest *h) { pg_ossl_len(pg_digest * h)
return EVP_MD_size((EVP_MD*)h->misc.ptr); {
return EVP_MD_size((EVP_MD *) h->misc.ptr);
} }
static uint8 * static uint8 *
pg_ossl_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf) pg_ossl_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{ {
EVP_MD *md = (EVP_MD*)h->misc.ptr; EVP_MD *md = (EVP_MD *) h->misc.ptr;
EVP_MD_CTX ctx; EVP_MD_CTX ctx;
EVP_DigestInit(&ctx, md); EVP_DigestInit(&ctx, md);
@ -61,11 +62,12 @@ pg_ossl_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
static int pg_openssl_initialized = 0; static int pg_openssl_initialized = 0;
pg_digest * pg_digest *
pg_find_digest(pg_digest *h, char *name) pg_find_digest(pg_digest * h, char *name)
{ {
const EVP_MD *md; const EVP_MD *md;
if (!pg_openssl_initialized) { if (!pg_openssl_initialized)
{
OpenSSL_add_all_digests(); OpenSSL_add_all_digests();
pg_openssl_initialized = 1; pg_openssl_initialized = 1;
} }
@ -77,9 +79,7 @@ pg_find_digest(pg_digest *h, char *name)
h->name = name; h->name = name;
h->length = pg_ossl_len; h->length = pg_ossl_len;
h->digest = pg_ossl_digest; h->digest = pg_ossl_digest;
h->misc.ptr = (void*)md; h->misc.ptr = (void *) md;
return h; return h;
} }
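
openssl.c resolves a digest through OpenSSL's EVP layer and sizes it with EVP_MD_size. As a point of comparison only, a stand-alone EVP hashing sketch written against the current OpenSSL API (EVP_MD_CTX_new and friends, which postdate the stack-allocated EVP_MD_CTX this 2001-era file uses) could look like:

/* build with: cc sketch.c -lcrypto */
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

int
main(void)
{
    const EVP_MD *md = EVP_get_digestbyname("sha1");
    EVP_MD_CTX *ctx;
    unsigned char out[EVP_MAX_MD_SIZE];
    unsigned int outlen = 0;
    const char *msg = "pgcrypto";

    if (md == NULL)
        return 1;

    ctx = EVP_MD_CTX_new();
    EVP_DigestInit_ex(ctx, md, NULL);
    EVP_DigestUpdate(ctx, msg, strlen(msg));
    EVP_DigestFinal_ex(ctx, out, &outlen);
    EVP_MD_CTX_free(ctx);

    for (unsigned int i = 0; i < outlen; i++)
        printf("%02x", out[i]);
    putchar('\n');
    return 0;
}
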

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
* *
* $Id: pgcrypto.c,v 1.6 2001/02/10 02:31:26 tgl Exp $ * $Id: pgcrypto.c,v 1.7 2001/03/22 03:59:10 momjian Exp $
*/ */
#include "postgres.h" #include "postgres.h"
@ -49,7 +49,7 @@ Datum digest_exists(PG_FUNCTION_ARGS);
/* private stuff */ /* private stuff */
static pg_digest * static pg_digest *
find_digest(pg_digest *hbuf, text *name, int silent); find_digest(pg_digest * hbuf, text *name, int silent);
/* SQL function: hash(text, text) returns text */ /* SQL function: hash(text, text) returns text */
@ -60,8 +60,10 @@ digest(PG_FUNCTION_ARGS)
{ {
text *arg; text *arg;
text *name; text *name;
uint len, hlen; uint len,
pg_digest *h, _hbuf; hlen;
pg_digest *h,
_hbuf;
text *res; text *res;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
@ -72,7 +74,7 @@ digest(PG_FUNCTION_ARGS)
hlen = h->length(h); hlen = h->length(h);
res = (text *)palloc(hlen + VARHDRSZ); res = (text *) palloc(hlen + VARHDRSZ);
VARATT_SIZEP(res) = hlen + VARHDRSZ; VARATT_SIZEP(res) = hlen + VARHDRSZ;
arg = PG_GETARG_TEXT_P(0); arg = PG_GETARG_TEXT_P(0);
@ -93,7 +95,8 @@ Datum
digest_exists(PG_FUNCTION_ARGS) digest_exists(PG_FUNCTION_ARGS)
{ {
text *name; text *name;
pg_digest _hbuf, *res; pg_digest _hbuf,
*res;
if (PG_ARGISNULL(0)) if (PG_ARGISNULL(0))
PG_RETURN_NULL(); PG_RETURN_NULL();
@ -110,14 +113,15 @@ digest_exists(PG_FUNCTION_ARGS)
} }
static pg_digest * static pg_digest *
find_digest(pg_digest *hbuf, text *name, int silent) find_digest(pg_digest * hbuf, text *name, int silent)
{ {
pg_digest *p; pg_digest *p;
char buf[NAMEDATALEN]; char buf[NAMEDATALEN];
uint len; uint len;
len = VARSIZE(name) - VARHDRSZ; len = VARSIZE(name) - VARHDRSZ;
if (len >= NAMEDATALEN) { if (len >= NAMEDATALEN)
{
if (silent) if (silent)
return NULL; return NULL;
elog(ERROR, "Hash type does not exist (name too long)"); elog(ERROR, "Hash type does not exist (name too long)");
@ -132,4 +136,3 @@ find_digest(pg_digest *hbuf, text *name, int silent)
elog(ERROR, "Hash type does not exist: '%s'", buf); elog(ERROR, "Hash type does not exist: '%s'", buf);
return p; return p;
} }

View File

@ -26,25 +26,27 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
* *
* $Id: pgcrypto.h,v 1.2 2001/01/09 16:07:13 momjian Exp $ * $Id: pgcrypto.h,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/ */
#ifndef _PG_CRYPTO_H #ifndef _PG_CRYPTO_H
#define _PG_CRYPTO_H #define _PG_CRYPTO_H
typedef struct _pg_digest pg_digest; typedef struct _pg_digest pg_digest;
struct _pg_digest { struct _pg_digest
{
char *name; char *name;
uint (*length)(pg_digest *h); uint (*length) (pg_digest * h);
uint8 *(*digest)(pg_digest *h, uint8 *data, uint8 *(*digest) (pg_digest * h, uint8 *data,
uint dlen, uint8 *buf); uint dlen, uint8 *buf);
/* private */ /* private */
union { union
{
uint code; uint code;
const void *ptr; const void *ptr;
} misc; } misc;
}; };
extern pg_digest *pg_find_digest(pg_digest *hbuf, char *name); extern pg_digest *pg_find_digest(pg_digest * hbuf, char *name);
#endif #endif

View File

@ -1,4 +1,4 @@
/* $Id: sha1.c,v 1.4 2001/02/10 02:31:26 tgl Exp $ */ /* $Id: sha1.c,v 1.5 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: sha1.c,v 1.3 2000/02/22 14:01:18 itojun Exp $ */ /* $KAME: sha1.c,v 1.3 2000/02/22 14:01:18 itojun Exp $ */
/* /*
@ -41,15 +41,16 @@
/* sanity check */ /* sanity check */
#if BYTE_ORDER != BIG_ENDIAN #if BYTE_ORDER != BIG_ENDIAN
# if BYTE_ORDER != LITTLE_ENDIAN #if BYTE_ORDER != LITTLE_ENDIAN
# define unsupported 1 #define unsupported 1
# endif #endif
#endif #endif
#ifndef unsupported #ifndef unsupported
/* constant table */ /* constant table */
static uint32 _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 }; static uint32 _K[] = {0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6};
#define K(t) _K[(t) / 20] #define K(t) _K[(t) / 20]
#define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d))) #define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d)))
@ -81,80 +82,141 @@ static uint32 _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
sha1_step(ctxt); \ sha1_step(ctxt); \
} }
static void sha1_step (struct sha1_ctxt *); static void sha1_step(struct sha1_ctxt *);
static void static void
sha1_step(ctxt) sha1_step(ctxt)
struct sha1_ctxt *ctxt; struct sha1_ctxt *ctxt;
{ {
uint32 a, b, c, d, e; uint32 a,
size_t t, s; b,
c,
d,
e;
size_t t,
s;
uint32 tmp; uint32 tmp;
#if BYTE_ORDER == LITTLE_ENDIAN #if BYTE_ORDER == LITTLE_ENDIAN
struct sha1_ctxt tctxt; struct sha1_ctxt tctxt;
bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64); bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2]; ctxt->m.b8[0] = tctxt.m.b8[3];
ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0]; ctxt->m.b8[1] = tctxt.m.b8[2];
ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6]; ctxt->m.b8[2] = tctxt.m.b8[1];
ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4]; ctxt->m.b8[3] = tctxt.m.b8[0];
ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10]; ctxt->m.b8[4] = tctxt.m.b8[7];
ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8]; ctxt->m.b8[5] = tctxt.m.b8[6];
ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14]; ctxt->m.b8[6] = tctxt.m.b8[5];
ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12]; ctxt->m.b8[7] = tctxt.m.b8[4];
ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18]; ctxt->m.b8[8] = tctxt.m.b8[11];
ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16]; ctxt->m.b8[9] = tctxt.m.b8[10];
ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22]; ctxt->m.b8[10] = tctxt.m.b8[9];
ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20]; ctxt->m.b8[11] = tctxt.m.b8[8];
ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26]; ctxt->m.b8[12] = tctxt.m.b8[15];
ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24]; ctxt->m.b8[13] = tctxt.m.b8[14];
ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30]; ctxt->m.b8[14] = tctxt.m.b8[13];
ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28]; ctxt->m.b8[15] = tctxt.m.b8[12];
ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34]; ctxt->m.b8[16] = tctxt.m.b8[19];
ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32]; ctxt->m.b8[17] = tctxt.m.b8[18];
ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38]; ctxt->m.b8[18] = tctxt.m.b8[17];
ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36]; ctxt->m.b8[19] = tctxt.m.b8[16];
ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42]; ctxt->m.b8[20] = tctxt.m.b8[23];
ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40]; ctxt->m.b8[21] = tctxt.m.b8[22];
ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46]; ctxt->m.b8[22] = tctxt.m.b8[21];
ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44]; ctxt->m.b8[23] = tctxt.m.b8[20];
ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50]; ctxt->m.b8[24] = tctxt.m.b8[27];
ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48]; ctxt->m.b8[25] = tctxt.m.b8[26];
ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54]; ctxt->m.b8[26] = tctxt.m.b8[25];
ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52]; ctxt->m.b8[27] = tctxt.m.b8[24];
ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58]; ctxt->m.b8[28] = tctxt.m.b8[31];
ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56]; ctxt->m.b8[29] = tctxt.m.b8[30];
ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62]; ctxt->m.b8[30] = tctxt.m.b8[29];
ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60]; ctxt->m.b8[31] = tctxt.m.b8[28];
ctxt->m.b8[32] = tctxt.m.b8[35];
ctxt->m.b8[33] = tctxt.m.b8[34];
ctxt->m.b8[34] = tctxt.m.b8[33];
ctxt->m.b8[35] = tctxt.m.b8[32];
ctxt->m.b8[36] = tctxt.m.b8[39];
ctxt->m.b8[37] = tctxt.m.b8[38];
ctxt->m.b8[38] = tctxt.m.b8[37];
ctxt->m.b8[39] = tctxt.m.b8[36];
ctxt->m.b8[40] = tctxt.m.b8[43];
ctxt->m.b8[41] = tctxt.m.b8[42];
ctxt->m.b8[42] = tctxt.m.b8[41];
ctxt->m.b8[43] = tctxt.m.b8[40];
ctxt->m.b8[44] = tctxt.m.b8[47];
ctxt->m.b8[45] = tctxt.m.b8[46];
ctxt->m.b8[46] = tctxt.m.b8[45];
ctxt->m.b8[47] = tctxt.m.b8[44];
ctxt->m.b8[48] = tctxt.m.b8[51];
ctxt->m.b8[49] = tctxt.m.b8[50];
ctxt->m.b8[50] = tctxt.m.b8[49];
ctxt->m.b8[51] = tctxt.m.b8[48];
ctxt->m.b8[52] = tctxt.m.b8[55];
ctxt->m.b8[53] = tctxt.m.b8[54];
ctxt->m.b8[54] = tctxt.m.b8[53];
ctxt->m.b8[55] = tctxt.m.b8[52];
ctxt->m.b8[56] = tctxt.m.b8[59];
ctxt->m.b8[57] = tctxt.m.b8[58];
ctxt->m.b8[58] = tctxt.m.b8[57];
ctxt->m.b8[59] = tctxt.m.b8[56];
ctxt->m.b8[60] = tctxt.m.b8[63];
ctxt->m.b8[61] = tctxt.m.b8[62];
ctxt->m.b8[62] = tctxt.m.b8[61];
ctxt->m.b8[63] = tctxt.m.b8[60];
#endif #endif
a = H(0); b = H(1); c = H(2); d = H(3); e = H(4); a = H(0);
b = H(1);
c = H(2);
d = H(3);
e = H(4);
for (t = 0; t < 20; t++) { for (t = 0; t < 20; t++)
{
s = t & 0x0f; s = t & 0x0f;
if (t >= 16) { if (t >= 16)
W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s)); W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
}
tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t); tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp; e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
} }
for (t = 20; t < 40; t++) { for (t = 20; t < 40; t++)
{
s = t & 0x0f; s = t & 0x0f;
W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s)); W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t); tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp; e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
} }
for (t = 40; t < 60; t++) { for (t = 40; t < 60; t++)
{
s = t & 0x0f; s = t & 0x0f;
W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s)); W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t); tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp; e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
} }
for (t = 60; t < 80; t++) { for (t = 60; t < 80; t++)
{
s = t & 0x0f; s = t & 0x0f;
W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s)); W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t); tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp; e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
} }
H(0) = H(0) + a; H(0) = H(0) + a;
@ -170,7 +232,7 @@ sha1_step(ctxt)
void void
sha1_init(ctxt) sha1_init(ctxt)
struct sha1_ctxt *ctxt; struct sha1_ctxt *ctxt;
{ {
bzero(ctxt, sizeof(struct sha1_ctxt)); bzero(ctxt, sizeof(struct sha1_ctxt));
H(0) = 0x67452301; H(0) = 0x67452301;
@ -182,16 +244,17 @@ sha1_init(ctxt)
void void
sha1_pad(ctxt) sha1_pad(ctxt)
struct sha1_ctxt *ctxt; struct sha1_ctxt *ctxt;
{ {
size_t padlen; /*pad length in bytes*/ size_t padlen; /* pad length in bytes */
size_t padstart; size_t padstart;
PUTPAD(0x80); PUTPAD(0x80);
padstart = COUNT % 64; padstart = COUNT % 64;
padlen = 64 - padstart; padlen = 64 - padstart;
if (padlen < 8) { if (padlen < 8)
{
bzero(&ctxt->m.b8[padstart], padlen); bzero(&ctxt->m.b8[padstart], padlen);
COUNT += padlen; COUNT += padlen;
COUNT %= 64; COUNT %= 64;
@ -203,23 +266,31 @@ sha1_pad(ctxt)
COUNT += (padlen - 8); COUNT += (padlen - 8);
COUNT %= 64; COUNT %= 64;
#if BYTE_ORDER == BIG_ENDIAN #if BYTE_ORDER == BIG_ENDIAN
PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[1]);
PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[2]);
PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[3]);
PUTPAD(ctxt->c.b8[4]);
PUTPAD(ctxt->c.b8[5]);
PUTPAD(ctxt->c.b8[6]);
PUTPAD(ctxt->c.b8[7]);
#else #else
PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[6]);
PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[5]);
PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[4]);
PUTPAD(ctxt->c.b8[3]);
PUTPAD(ctxt->c.b8[2]);
PUTPAD(ctxt->c.b8[1]);
PUTPAD(ctxt->c.b8[0]);
#endif #endif
} }
void void
sha1_loop(ctxt, input0, len) sha1_loop(ctxt, input0, len)
struct sha1_ctxt *ctxt; struct sha1_ctxt *ctxt;
const caddr_t input0; const caddr_t input0;
size_t len; size_t len;
{ {
const uint8 *input; const uint8 *input;
size_t gaplen; size_t gaplen;
@ -227,10 +298,11 @@ sha1_loop(ctxt, input0, len)
size_t off; size_t off;
size_t copysiz; size_t copysiz;
input = (const uint8 *)input0; input = (const uint8 *) input0;
off = 0; off = 0;
while (off < len) { while (off < len)
{
gapstart = COUNT % 64; gapstart = COUNT % 64;
gaplen = 64 - gapstart; gaplen = 64 - gapstart;
@ -247,27 +319,37 @@ sha1_loop(ctxt, input0, len)
void void
sha1_result(ctxt, digest0) sha1_result(ctxt, digest0)
struct sha1_ctxt *ctxt; struct sha1_ctxt *ctxt;
caddr_t digest0; caddr_t digest0;
{ {
uint8 *digest; uint8 *digest;
digest = (uint8 *)digest0; digest = (uint8 *) digest0;
sha1_pad(ctxt); sha1_pad(ctxt);
#if BYTE_ORDER == BIG_ENDIAN #if BYTE_ORDER == BIG_ENDIAN
bcopy(&ctxt->h.b8[0], digest, 20); bcopy(&ctxt->h.b8[0], digest, 20);
#else #else
digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2]; digest[0] = ctxt->h.b8[3];
digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0]; digest[1] = ctxt->h.b8[2];
digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6]; digest[2] = ctxt->h.b8[1];
digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4]; digest[3] = ctxt->h.b8[0];
digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10]; digest[4] = ctxt->h.b8[7];
digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8]; digest[5] = ctxt->h.b8[6];
digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14]; digest[6] = ctxt->h.b8[5];
digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12]; digest[7] = ctxt->h.b8[4];
digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18]; digest[8] = ctxt->h.b8[11];
digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16]; digest[9] = ctxt->h.b8[10];
digest[10] = ctxt->h.b8[9];
digest[11] = ctxt->h.b8[8];
digest[12] = ctxt->h.b8[15];
digest[13] = ctxt->h.b8[14];
digest[14] = ctxt->h.b8[13];
digest[15] = ctxt->h.b8[12];
digest[16] = ctxt->h.b8[19];
digest[17] = ctxt->h.b8[18];
digest[18] = ctxt->h.b8[17];
digest[19] = ctxt->h.b8[16];
#endif #endif
} }
#endif /*unsupported*/ #endif /* unsupported */
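
The reindented round loops in sha1.c all hinge on two pieces: the rotate-left macro S(n, x) and the 16-word circular message schedule W(s), updated in place for rounds 16 through 79. A minimal sketch of just that recurrence, run over dummy data rather than a real message block:

#include <stdint.h>
#include <stdio.h>

/* Rotate-left, i.e. the S(n, x) macro. */
static uint32_t
rotl32(uint32_t x, unsigned n)
{
    return (x << n) | (x >> (32 - n));
}

int
main(void)
{
    /* 16-word circular schedule; dummy contents standing in for one
     * 64-byte message block. */
    uint32_t    W[16] = {0};

    W[0] = 0x80000000u;

    /* W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s)) */
    for (unsigned t = 16; t < 80; t++)
    {
        unsigned    s = t & 0x0f;

        W[s] = rotl32(W[(s + 13) & 0x0f] ^ W[(s + 8) & 0x0f] ^
                      W[(s + 2) & 0x0f] ^ W[s], 1);
    }
    printf("W[15] after expansion: %08x\n", (unsigned) W[15]);
    return 0;
}
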

View File

@ -1,4 +1,4 @@
/* $Id: sha1.h,v 1.3 2001/01/09 16:07:13 momjian Exp $ */ /* $Id: sha1.h,v 1.4 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: sha1.h,v 1.4 2000/02/22 14:01:18 itojun Exp $ */ /* $KAME: sha1.h,v 1.4 2000/02/22 14:01:18 itojun Exp $ */
/* /*
@ -38,33 +38,38 @@
#ifndef _NETINET6_SHA1_H_ #ifndef _NETINET6_SHA1_H_
#define _NETINET6_SHA1_H_ #define _NETINET6_SHA1_H_
struct sha1_ctxt { struct sha1_ctxt
union { {
union
{
uint8 b8[20]; uint8 b8[20];
uint32 b32[5]; uint32 b32[5];
} h; } h;
union { union
{
uint8 b8[8]; uint8 b8[8];
uint64 b64[1]; uint64 b64[1];
} c; } c;
union { union
{
uint8 b8[64]; uint8 b8[64];
uint32 b32[16]; uint32 b32[16];
} m; } m;
uint8 count; uint8 count;
}; };
extern void sha1_init (struct sha1_ctxt *); extern void sha1_init(struct sha1_ctxt *);
extern void sha1_pad (struct sha1_ctxt *); extern void sha1_pad(struct sha1_ctxt *);
extern void sha1_loop (struct sha1_ctxt *, const caddr_t, size_t); extern void sha1_loop(struct sha1_ctxt *, const caddr_t, size_t);
extern void sha1_result (struct sha1_ctxt *, caddr_t); extern void sha1_result(struct sha1_ctxt *, caddr_t);
/* compatibilty with other SHA1 source codes */ /* compatibilty with other SHA1 source codes */
typedef struct sha1_ctxt SHA1_CTX; typedef struct sha1_ctxt SHA1_CTX;
#define SHA1Init(x) sha1_init((x)) #define SHA1Init(x) sha1_init((x))
#define SHA1Update(x, y, z) sha1_loop((x), (y), (z)) #define SHA1Update(x, y, z) sha1_loop((x), (y), (z))
#define SHA1Final(x, y) sha1_result((y), (x)) #define SHA1Final(x, y) sha1_result((y), (x))
#define SHA1_RESULTLEN (160/8) #define SHA1_RESULTLEN (160/8)
#endif /*_NETINET6_SHA1_H_*/ #endif /* _NETINET6_SHA1_H_ */

View File

@ -19,15 +19,17 @@ PG_FUNCTION_INFO_V1(_rserv_debug_);
Datum _rserv_log_(PG_FUNCTION_ARGS); Datum _rserv_log_(PG_FUNCTION_ARGS);
Datum _rserv_sync_(PG_FUNCTION_ARGS); Datum _rserv_sync_(PG_FUNCTION_ARGS);
Datum _rserv_debug_(PG_FUNCTION_ARGS); Datum _rserv_debug_(PG_FUNCTION_ARGS);
#else #else
HeapTuple _rserv_log_(void); HeapTuple _rserv_log_(void);
int32 _rserv_sync_(int32); int32 _rserv_sync_(int32);
int32 _rserv_debug_(int32); int32 _rserv_debug_(int32);
#endif #endif
static int debug = 0; static int debug = 0;
static char* OutputValue(char *key, char *buf, int size); static char *OutputValue(char *key, char *buf, int size);
#ifdef PG_FUNCTION_INFO_V1 #ifdef PG_FUNCTION_INFO_V1
Datum Datum
@ -203,6 +205,7 @@ _rserv_sync_(int32 server)
{ {
#ifdef PG_FUNCTION_INFO_V1 #ifdef PG_FUNCTION_INFO_V1
int32 server = PG_GETARG_INT32(0); int32 server = PG_GETARG_INT32(0);
#endif #endif
char sql[8192]; char sql[8192];
char buf[8192]; char buf[8192];
@ -248,6 +251,7 @@ _rserv_debug_(int32 newval)
{ {
#ifdef PG_FUNCTION_INFO_V1 #ifdef PG_FUNCTION_INFO_V1
int32 newval = PG_GETARG_INT32(0); int32 newval = PG_GETARG_INT32(0);
#endif #endif
int32 oldval = debug; int32 oldval = debug;
@ -258,7 +262,7 @@ _rserv_debug_(int32 newval)
#define ExtendBy 1024 #define ExtendBy 1024
static char* static char *
OutputValue(char *key, char *buf, int size) OutputValue(char *key, char *buf, int size)
{ {
int i = 0; int i = 0;
@ -267,25 +271,31 @@ OutputValue(char *key, char *buf, int size)
int slen = 0; int slen = 0;
size--; size--;
for ( ; ; ) for (;;)
{ {
switch (*key) switch (*key)
{ {
case '\\': subst ="\\\\"; case '\\':
subst = "\\\\";
slen = 2; slen = 2;
break; break;
case ' ': subst = "\\011"; case ' ':
subst = "\\011";
slen = 4; slen = 4;
break; break;
case '\n': subst = "\\012"; case '\n':
subst = "\\012";
slen = 4; slen = 4;
break; break;
case '\'': subst = "\\047"; case '\'':
subst = "\\047";
slen = 4; slen = 4;
break; break;
case '\0': out[i] = 0; case '\0':
return(out); out[i] = 0;
default: slen = 1; return (out);
default:
slen = 1;
break; break;
} }
@ -293,13 +303,13 @@ OutputValue(char *key, char *buf, int size)
{ {
if (out == buf) if (out == buf)
{ {
out = (char*) palloc(size + ExtendBy); out = (char *) palloc(size + ExtendBy);
strncpy(out, buf, i); strncpy(out, buf, i);
size += ExtendBy; size += ExtendBy;
} }
else else
{ {
out = (char*) repalloc(out, size + ExtendBy); out = (char *) repalloc(out, size + ExtendBy);
size += ExtendBy; size += ExtendBy;
} }
} }
@ -314,6 +324,6 @@ OutputValue(char *key, char *buf, int size)
key++; key++;
} }
return(out); return (out);
} }
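
OutputValue above escapes one key value while growing its output buffer in ExtendBy-sized steps, switching from the caller's buffer to an allocated one the first time the escaped form no longer fits. A stand-alone adaptation of that growth pattern is sketched below; it uses malloc/realloc where the backend uses palloc/repalloc, maps tab rather than space to \011, and is illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EXTEND_BY 1024

static char *
escape_value(const char *key, char *buf, int size)
{
    char       *out = buf;
    int         i = 0;

    size--;                      /* reserve room for the terminating NUL */
    for (;;)
    {
        const char *subst = NULL;
        int         slen;

        switch (*key)
        {
            case '\\':
                subst = "\\\\";
                slen = 2;
                break;
            case '\t':
                subst = "\\011";
                slen = 4;
                break;
            case '\n':
                subst = "\\012";
                slen = 4;
                break;
            case '\'':
                subst = "\\047";
                slen = 4;
                break;
            case '\0':
                out[i] = '\0';
                return out;
            default:
                slen = 1;
                break;
        }

        if (i + slen > size)     /* grow before writing */
        {
            if (out == buf)
            {
                out = malloc(size + EXTEND_BY + 1);
                memcpy(out, buf, i);
            }
            else
                out = realloc(out, size + EXTEND_BY + 1);
            size += EXTEND_BY;
        }

        if (subst)
        {
            memcpy(out + i, subst, slen);
            i += slen;
        }
        else
            out[i++] = *key;
        key++;
    }
}

int
main(void)
{
    char        buf[8];
    char       *s = escape_value("a'b\\c", buf, sizeof(buf));

    puts(s);
    if (s != buf)
        free(s);
    return 0;
}
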

View File

@ -4,76 +4,81 @@
#include "utils/elog.h" #include "utils/elog.h"
static char * PARSE_BUFFER; static char *PARSE_BUFFER;
static char * PARSE_BUFFER_PTR; static char *PARSE_BUFFER_PTR;
static unsigned int PARSE_BUFFER_SIZE; static unsigned int PARSE_BUFFER_SIZE;
static unsigned int SCANNER_POS; static unsigned int SCANNER_POS;
void set_parse_buffer( char* s ); void set_parse_buffer(char *s);
void reset_parse_buffer( void ); void reset_parse_buffer(void);
int read_parse_buffer( void ); int read_parse_buffer(void);
char * parse_buffer( void ); char *parse_buffer(void);
char * parse_buffer_ptr( void ); char *parse_buffer_ptr(void);
unsigned int parse_buffer_curr_char( void ); unsigned int parse_buffer_curr_char(void);
unsigned int parse_buffer_size( void ); unsigned int parse_buffer_size(void);
unsigned int parse_buffer_pos( void ); unsigned int parse_buffer_pos(void);
extern void seg_flush_scanner_buffer(void); /* defined in segscan.l */ extern void seg_flush_scanner_buffer(void); /* defined in segscan.l */
void set_parse_buffer( char* s ) void
set_parse_buffer(char *s)
{ {
PARSE_BUFFER = s; PARSE_BUFFER = s;
PARSE_BUFFER_SIZE = strlen(s); PARSE_BUFFER_SIZE = strlen(s);
if ( PARSE_BUFFER_SIZE == 0 ) { if (PARSE_BUFFER_SIZE == 0)
elog(ERROR, "seg_in: can't parse an empty string"); elog(ERROR, "seg_in: can't parse an empty string");
}
PARSE_BUFFER_PTR = PARSE_BUFFER; PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0; SCANNER_POS = 0;
} }
void reset_parse_buffer( void ) void
reset_parse_buffer(void)
{ {
PARSE_BUFFER_PTR = PARSE_BUFFER; PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0; SCANNER_POS = 0;
seg_flush_scanner_buffer(); seg_flush_scanner_buffer();
} }
int read_parse_buffer( void ) int
read_parse_buffer(void)
{ {
int c; int c;
/* /*
c = *PARSE_BUFFER_PTR++; * c = *PARSE_BUFFER_PTR++; SCANNER_POS++;
SCANNER_POS++;
*/ */
c = PARSE_BUFFER[SCANNER_POS]; c = PARSE_BUFFER[SCANNER_POS];
if(SCANNER_POS < PARSE_BUFFER_SIZE) if (SCANNER_POS < PARSE_BUFFER_SIZE)
SCANNER_POS++; SCANNER_POS++;
return c; return c;
} }
char * parse_buffer( void ) char *
parse_buffer(void)
{ {
return PARSE_BUFFER; return PARSE_BUFFER;
} }
unsigned int parse_buffer_curr_char( void ) unsigned int
parse_buffer_curr_char(void)
{ {
return PARSE_BUFFER[SCANNER_POS]; return PARSE_BUFFER[SCANNER_POS];
} }
char * parse_buffer_ptr( void ) char *
parse_buffer_ptr(void)
{ {
return PARSE_BUFFER_PTR; return PARSE_BUFFER_PTR;
} }
unsigned int parse_buffer_pos( void ) unsigned int
parse_buffer_pos(void)
{ {
return SCANNER_POS; return SCANNER_POS;
} }
unsigned int parse_buffer_size( void ) unsigned int
parse_buffer_size(void)
{ {
return PARSE_BUFFER_SIZE; return PARSE_BUFFER_SIZE;
} }
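
buffer.c here (like its cube counterpart) exists only to feed the flex scanner one character at a time from a saved string: a global buffer, a cursor, and accessors. A tiny stand-alone version of that pattern, with hypothetical names and none of the elog/flex wiring, behaves the same way:

#include <stdio.h>
#include <string.h>

static const char *parse_buf;
static unsigned int parse_pos;
static unsigned int parse_size;

static void
set_buf(const char *s)
{
    parse_buf = s;
    parse_size = (unsigned int) strlen(s);
    parse_pos = 0;
}

/* Return the current character and advance, like read_parse_buffer();
 * once the end is reached it keeps returning the terminating '\0'. */
static int
read_buf(void)
{
    int         c = parse_buf[parse_pos];

    if (parse_pos < parse_size)
        parse_pos++;
    return c;
}

int
main(void)
{
    int         c;

    set_buf("1.0 .. 2.5");
    while ((c = read_buf()) != '\0')
        putchar(c);
    putchar('\n');
    return 0;
}
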

View File

@ -1,8 +1,8 @@
extern void set_parse_buffer( char* s ); extern void set_parse_buffer(char *s);
extern void reset_parse_buffer( void ); extern void reset_parse_buffer(void);
extern int read_parse_buffer( void ); extern int read_parse_buffer(void);
extern char * parse_buffer( void ); extern char *parse_buffer(void);
extern char * parse_buffer_ptr( void ); extern char *parse_buffer_ptr(void);
extern unsigned int parse_buffer_curr_char( void ); extern unsigned int parse_buffer_curr_char(void);
extern unsigned int parse_buffer_pos( void ); extern unsigned int parse_buffer_pos(void);
extern unsigned int parse_buffer_size( void ); extern unsigned int parse_buffer_size(void);

File diff suppressed because it is too large

View File

@ -1,4 +1,5 @@
typedef struct SEG { typedef struct SEG
{
float lower; float lower;
float upper; float upper;
char l_sigd; char l_sigd;

View File

@ -1,4 +1,4 @@
/* $Header: /cvsroot/pgsql/contrib/soundex/Attic/soundex.c,v 1.10 2001/02/10 02:31:26 tgl Exp $ */ /* $Header: /cvsroot/pgsql/contrib/soundex/Attic/soundex.c,v 1.11 2001/03/22 03:59:10 momjian Exp $ */
#include "postgres.h" #include "postgres.h"
#include <ctype.h> #include <ctype.h>
@ -42,6 +42,7 @@ text_soundex(PG_FUNCTION_ARGS)
/* ABCDEFGHIJKLMNOPQRSTUVWXYZ */ /* ABCDEFGHIJKLMNOPQRSTUVWXYZ */
static const char *soundex_table = "01230120022455012623010202"; static const char *soundex_table = "01230120022455012623010202";
#define soundex_code(letter) soundex_table[toupper((unsigned char) (letter)) - 'A'] #define soundex_code(letter) soundex_table[toupper((unsigned char) (letter)) - 'A']
@ -98,7 +99,7 @@ soundex(const char *instr, char *outstr)
#ifdef SOUNDEX_TEST #ifdef SOUNDEX_TEST
int int
main (int argc, char *argv[]) main(int argc, char *argv[])
{ {
if (argc < 2) if (argc < 2)
{ {
@ -114,4 +115,5 @@ main (int argc, char *argv[])
return 0; return 0;
} }
} }
#endif /* SOUNDEX_TEST */ #endif /* SOUNDEX_TEST */

View File

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.8 2001/01/24 19:42:45 momjian Exp $ * $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.9 2001/03/22 03:59:11 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -76,6 +76,7 @@ vacuumlo(char *database, int verbose)
return -1; return -1;
} }
PQclear(res); PQclear(res);
/* /*
* Vacuum the temp table so that planner will generate decent plans * Vacuum the temp table so that planner will generate decent plans
* for the DELETEs below. * for the DELETEs below.
@ -96,13 +97,13 @@ vacuumlo(char *database, int verbose)
/* /*
* Now find any candidate tables who have columns of type oid. * Now find any candidate tables who have columns of type oid.
* *
* NOTE: the temp table formed above is ignored, because its real * NOTE: the temp table formed above is ignored, because its real table
* table name will be pg_something. Also, pg_largeobject will be * name will be pg_something. Also, pg_largeobject will be ignored.
* ignored. If either of these were scanned, obviously we'd end up * If either of these were scanned, obviously we'd end up with nothing
* with nothing to delete... * to delete...
* *
* NOTE: the system oid column is ignored, as it has attnum < 1. * NOTE: the system oid column is ignored, as it has attnum < 1. This
* This shouldn't matter for correctness, but it saves time. * shouldn't matter for correctness, but it saves time.
*/ */
buf[0] = '\0'; buf[0] = '\0';
strcat(buf, "SELECT c.relname, a.attname "); strcat(buf, "SELECT c.relname, a.attname ");
@ -135,9 +136,9 @@ vacuumlo(char *database, int verbose)
fprintf(stdout, "Checking %s in %s\n", field, table); fprintf(stdout, "Checking %s in %s\n", field, table);
/* /*
* We use a DELETE with implicit join for efficiency. This * We use a DELETE with implicit join for efficiency. This is a
* is a Postgres-ism and not portable to other DBMSs, but * Postgres-ism and not portable to other DBMSs, but then this
* then this whole program is a Postgres-ism. * whole program is a Postgres-ism.
*/ */
sprintf(buf, "DELETE FROM vacuum_l WHERE lo = \"%s\".\"%s\" ", sprintf(buf, "DELETE FROM vacuum_l WHERE lo = \"%s\".\"%s\" ",
table, field); table, field);
@ -159,8 +160,8 @@ vacuumlo(char *database, int verbose)
/* /*
* Run the actual deletes in a single transaction. Note that this * Run the actual deletes in a single transaction. Note that this
* would be a bad idea in pre-7.1 Postgres releases (since rolling * would be a bad idea in pre-7.1 Postgres releases (since rolling
* back a table delete used to cause problems), but it should * back a table delete used to cause problems), but it should be safe
* be safe now. * now.
*/ */
res = PQexec(conn, "begin"); res = PQexec(conn, "begin");
PQclear(res); PQclear(res);
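
The vacuumlo hunks keep the program's comment-level story intact: collect large-object OIDs into a temp table, DELETE from it every OID still referenced by some oid column, and run those DELETEs inside a single transaction. A minimal libpq sketch of that begin/DELETE/commit shape follows; the connection string, table name and column name are placeholders rather than anything taken from this commit.

/* build against libpq: cc sketch.c -lpq */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=mydb");
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    res = PQexec(conn, "begin");
    PQclear(res);

    /* one DELETE per (table, oid column) pair found in the catalogs */
    res = PQexec(conn, "DELETE FROM vacuum_l WHERE lo = \"mytable\".\"mycol\"");
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
        fprintf(stderr, "DELETE failed: %s", PQerrorMessage(conn));
    PQclear(res);

    res = PQexec(conn, "commit");
    PQclear(res);

    PQfinish(conn);
    return 0;
}
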

View File

@ -9,7 +9,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.69 2001/01/24 19:42:46 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.70 2001/03/22 03:59:11 momjian Exp $
* *
* NOTES * NOTES
* The old interface functions have been converted to macros * The old interface functions have been converted to macros
@ -306,8 +306,8 @@ nocachegetattr(HeapTuple tuple,
int j; int j;
/* /*
* In for(), we test <= and not < because we want to see * In for(), we test <= and not < because we want to see if we
* if we can go past it in initializing offsets. * can go past it in initializing offsets.
*/ */
for (j = 0; j <= attnum; j++) for (j = 0; j <= attnum; j++)
{ {
@ -321,9 +321,9 @@ nocachegetattr(HeapTuple tuple,
} }
/* /*
* If slow is false, and we got here, we know that we have a tuple with * If slow is false, and we got here, we know that we have a tuple
* no nulls or varlenas before the target attribute. If possible, we * with no nulls or varlenas before the target attribute. If possible,
* also want to initialize the remainder of the attribute cached * we also want to initialize the remainder of the attribute cached
* offset values. * offset values.
*/ */
if (!slow) if (!slow)


@ -9,7 +9,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.52 2001/02/22 21:48:48 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.53 2001/03/22 03:59:11 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -45,9 +45,11 @@ index_formtuple(TupleDesc tupleDescriptor,
bool hasnull = false; bool hasnull = false;
uint16 tupmask = 0; uint16 tupmask = 0;
int numberOfAttributes = tupleDescriptor->natts; int numberOfAttributes = tupleDescriptor->natts;
#ifdef TOAST_INDEX_HACK #ifdef TOAST_INDEX_HACK
Datum untoasted_value[INDEX_MAX_KEYS]; Datum untoasted_value[INDEX_MAX_KEYS];
bool untoasted_free[INDEX_MAX_KEYS]; bool untoasted_free[INDEX_MAX_KEYS];
#endif #endif
if (numberOfAttributes > INDEX_MAX_KEYS) if (numberOfAttributes > INDEX_MAX_KEYS)
@ -79,8 +81,8 @@ index_formtuple(TupleDesc tupleDescriptor,
} }
/* /*
* If value is above size target, and is of a compressible datatype, * If value is above size target, and is of a compressible
* try to compress it in-line. * datatype, try to compress it in-line.
*/ */
if (VARATT_SIZE(untoasted_value[i]) > TOAST_INDEX_TARGET && if (VARATT_SIZE(untoasted_value[i]) > TOAST_INDEX_TARGET &&
!VARATT_IS_EXTENDED(untoasted_value[i]) && !VARATT_IS_EXTENDED(untoasted_value[i]) &&
@ -146,8 +148,8 @@ index_formtuple(TupleDesc tupleDescriptor,
/* /*
* We do this because DataFill wants to initialize a "tupmask" which * We do this because DataFill wants to initialize a "tupmask" which
* is used for HeapTuples, but we want an indextuple infomask. The * is used for HeapTuples, but we want an indextuple infomask. The
* only relevant info is the "has variable attributes" field. * only relevant info is the "has variable attributes" field. We have
* We have already set the hasnull bit above. * already set the hasnull bit above.
*/ */
if (tupmask & HEAP_HASVARLENA) if (tupmask & HEAP_HASVARLENA)
@ -315,9 +317,9 @@ nocache_index_getattr(IndexTuple tup,
} }
/* /*
* If slow is false, and we got here, we know that we have a tuple with * If slow is false, and we got here, we know that we have a tuple
* no nulls or varlenas before the target attribute. If possible, we * with no nulls or varlenas before the target attribute. If possible,
* also want to initialize the remainder of the attribute cached * we also want to initialize the remainder of the attribute cached
* offset values. * offset values.
*/ */
if (!slow) if (!slow)
@ -391,10 +393,8 @@ nocache_index_getattr(IndexTuple tup,
usecache = false; usecache = false;
} }
else else
{
off += att[i]->attlen; off += att[i]->attlen;
} }
}
off = att_align(off, att[attnum]->attlen, att[attnum]->attalign); off = att_align(off, att[attnum]->attlen, att[attnum]->attalign);


@ -9,7 +9,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.57 2001/01/24 19:42:47 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.58 2001/03/22 03:59:11 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -51,7 +51,7 @@ getTypeOutputInfo(Oid type, Oid *typOutput, Oid *typElem,
*typOutput = pt->typoutput; *typOutput = pt->typoutput;
*typElem = pt->typelem; *typElem = pt->typelem;
*typIsVarlena = (! pt->typbyval) && (pt->typlen == -1); *typIsVarlena = (!pt->typbyval) && (pt->typlen == -1);
ReleaseSysCache(typeTuple); ReleaseSysCache(typeTuple);
return OidIsValid(*typOutput); return OidIsValid(*typOutput);
} }
@ -200,9 +200,10 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue; continue;
if (OidIsValid(thisState->typoutput)) if (OidIsValid(thisState->typoutput))
{ {
/* /*
* If we have a toasted datum, forcibly detoast it here to avoid * If we have a toasted datum, forcibly detoast it here to
* memory leakage inside the type's output routine. * avoid memory leakage inside the type's output routine.
*/ */
if (thisState->typisvarlena) if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr)); attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@ -308,9 +309,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (getTypeOutputInfo(typeinfo->attrs[i]->atttypid, if (getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena)) &typoutput, &typelem, &typisvarlena))
{ {
/* /*
* If we have a toasted datum, forcibly detoast it here to avoid * If we have a toasted datum, forcibly detoast it here to
* memory leakage inside the type's output routine. * avoid memory leakage inside the type's output routine.
*/ */
if (typisvarlena) if (typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr)); attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@ -405,6 +407,7 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
/* send # of bytes, and opaque data */ /* send # of bytes, and opaque data */
if (thisState->typisvarlena) if (thisState->typisvarlena)
{ {
/* /*
* If we have a toasted datum, must detoast before sending. * If we have a toasted datum, must detoast before sending.
*/ */
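
The comment above repeats the pattern used in printtup(), debugtup() and printtup_internal(): detoast a varlena datum before handing it to the type's output (or send) routine, then free the detoasted copy if one was made. Condensed into a hypothetical helper (a sketch under that assumption, not the committed code):

#include "postgres.h"
#include "fmgr.h"

/*
 * Sketch only: convert one attribute to its text form, forcibly
 * detoasting varlena values first so the output function cannot leak a
 * large detoasted copy.  Caller must pfree() the result.
 */
static char *
attr_to_cstring(Datum origattr, bool typisvarlena,
                Oid typoutput, Oid typelem, int32 typmod)
{
    Datum       attr = origattr;
    char       *outputstr;

    if (typisvarlena)
        attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));

    outputstr = DatumGetCString(OidFunctionCall3(typoutput,
                                                 attr,
                                                 ObjectIdGetDatum(typelem),
                                                 Int32GetDatum(typmod)));

    /* free the detoasted copy, if any was made */
    if (DatumGetPointer(attr) != DatumGetPointer(origattr))
        pfree(DatumGetPointer(attr));

    return outputstr;
}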


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.71 2001/01/24 19:42:47 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.72 2001/03/22 03:59:11 momjian Exp $
* *
* NOTES * NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be * some of the executor utility code such as "ExecTypeFromTL" should be
@ -242,9 +242,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/* /*
* We do not need to check every single field here, and in fact * We do not need to check every single field here, and in fact
* some fields such as attdispersion probably shouldn't be * some fields such as attdispersion probably shouldn't be
* compared. We can also disregard attnum (it was used to * compared. We can also disregard attnum (it was used to place
* place the row in the attrs array) and everything derived * the row in the attrs array) and everything derived from the
* from the column datatype. * column datatype.
*/ */
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0) if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
return false; return false;
@ -276,8 +276,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/* /*
* We can't assume that the items are always read from the * We can't assume that the items are always read from the
* system catalogs in the same order; so use the adnum field to * system catalogs in the same order; so use the adnum field
* identify the matching item to compare. * to identify the matching item to compare.
*/ */
for (j = 0; j < n; defval2++, j++) for (j = 0; j < n; defval2++, j++)
{ {
@ -298,9 +298,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
ConstrCheck *check2 = constr2->check; ConstrCheck *check2 = constr2->check;
/* /*
* Similarly, don't assume that the checks are always read * Similarly, don't assume that the checks are always read in
* in the same order; match them up by name and contents. * the same order; match them up by name and contents. (The
* (The name *should* be unique, but...) * name *should* be unique, but...)
*/ */
for (j = 0; j < n; check2++, j++) for (j = 0; j < n; check2++, j++)
{ {
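
The two comments above (match defaults by adnum, match checks by name and contents) amount to a nested-loop lookup rather than a positional comparison. A rough sketch of the adnum case, with a hypothetical helper name (not the committed equalTupleDescs() code):

#include "postgres.h"
#include "access/tupdesc.h"

/*
 * Sketch only: compare the default-value entries of two TupleConstr
 * structs by adnum instead of by array position, since the catalogs may
 * return them in any order.
 */
static bool
defaults_match(TupleConstr *constr1, TupleConstr *constr2, int n)
{
    int         i,
                j;

    for (i = 0; i < n; i++)
    {
        AttrDefault *def1 = &constr1->defval[i];
        AttrDefault *def2 = NULL;

        for (j = 0; j < n; j++)
        {
            if (constr2->defval[j].adnum == def1->adnum)
            {
                def2 = &constr2->defval[j];
                break;
            }
        }
        if (def2 == NULL || strcmp(def1->adbin, def2->adbin) != 0)
            return false;
    }
    return true;
}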


@ -6,7 +6,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.71 2001/03/07 21:20:26 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.72 2001/03/22 03:59:12 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -34,31 +34,31 @@ static void gistdoinsert(Relation r,
IndexTuple itup, IndexTuple itup,
InsertIndexResult *res, InsertIndexResult *res,
GISTSTATE *GISTstate); GISTSTATE *GISTstate);
static int gistlayerinsert( Relation r, BlockNumber blkno, static int gistlayerinsert(Relation r, BlockNumber blkno,
IndexTuple **itup, IndexTuple **itup,
int *len, int *len,
InsertIndexResult *res, InsertIndexResult *res,
GISTSTATE *giststate ); GISTSTATE *giststate);
static OffsetNumber gistwritebuffer( Relation r, static OffsetNumber gistwritebuffer(Relation r,
Page page, Page page,
IndexTuple *itup, IndexTuple *itup,
int len, int len,
OffsetNumber off, OffsetNumber off,
GISTSTATE *giststate ); GISTSTATE *giststate);
static int gistnospace( Page page, static int gistnospace(Page page,
IndexTuple *itvec, int len ); IndexTuple *itvec, int len);
static IndexTuple * gistreadbuffer( Relation r, static IndexTuple *gistreadbuffer(Relation r,
Buffer buffer, int *len ); Buffer buffer, int *len);
static IndexTuple * gistjoinvector( static IndexTuple *gistjoinvector(
IndexTuple *itvec, int *len, IndexTuple *itvec, int *len,
IndexTuple *additvec, int addlen ); IndexTuple *additvec, int addlen);
static IndexTuple gistunion( Relation r, IndexTuple *itvec, static IndexTuple gistunion(Relation r, IndexTuple *itvec,
int len, GISTSTATE *giststate ); int len, GISTSTATE *giststate);
static IndexTuple gistgetadjusted( Relation r, static IndexTuple gistgetadjusted(Relation r,
IndexTuple oldtup, IndexTuple oldtup,
IndexTuple addtup, IndexTuple addtup,
GISTSTATE *giststate ); GISTSTATE *giststate);
static IndexTuple * gistSplit(Relation r, static IndexTuple *gistSplit(Relation r,
Buffer buffer, Buffer buffer,
IndexTuple *itup, IndexTuple *itup,
int *len, int *len,
@ -80,6 +80,7 @@ static void gistcentryinit(GISTSTATE *giststate,
#undef GISTDEBUG #undef GISTDEBUG
#ifdef GISTDEBUG #ifdef GISTDEBUG
static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff); static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff);
#endif #endif
/* /*
@ -92,8 +93,10 @@ gistbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1); Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2); IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3); Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED #ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4); IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif #endif
HeapScanDesc hscan; HeapScanDesc hscan;
HeapTuple htup; HeapTuple htup;
@ -105,9 +108,11 @@ gistbuild(PG_FUNCTION_ARGS)
int nhtups, int nhtups,
nitups; nitups;
Node *pred = indexInfo->ii_Predicate; Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable; TupleTable tupleTable;
TupleTableSlot *slot; TupleTableSlot *slot;
#endif #endif
ExprContext *econtext; ExprContext *econtext;
GISTSTATE giststate; GISTSTATE giststate;
@ -181,6 +186,7 @@ gistbuild(PG_FUNCTION_ARGS)
nhtups++; nhtups++;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
/* /*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip * If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index * this tuple if it was already in the existing partial index
@ -262,9 +268,7 @@ gistbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL) if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true); ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */ #endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext); FreeExprContext(econtext);
@ -297,7 +301,7 @@ gistbuild(PG_FUNCTION_ARGS)
} }
#ifdef GISTDEBUG #ifdef GISTDEBUG
gist_dumptree(index, 0, GISTP_ROOT, 0); gist_dumptree(index, 0, GISTP_ROOT, 0);
#endif #endif
PG_RETURN_VOID(); PG_RETURN_VOID();
@ -316,8 +320,10 @@ gistinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1); Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2); char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3); ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED #ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4); Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif #endif
InsertIndexResult res; InsertIndexResult res;
IndexTuple itup; IndexTuple itup;
@ -406,32 +412,36 @@ gistPageAddItem(GISTSTATE *giststate,
} }
static void static void
gistdoinsert( Relation r, gistdoinsert(Relation r,
IndexTuple itup, IndexTuple itup,
InsertIndexResult *res, InsertIndexResult *res,
GISTSTATE *giststate ) { GISTSTATE *giststate)
{
IndexTuple *instup; IndexTuple *instup;
int i,ret,len = 1; int i,
ret,
len = 1;
instup = ( IndexTuple* ) palloc( sizeof(IndexTuple) ); instup = (IndexTuple *) palloc(sizeof(IndexTuple));
instup[0] = ( IndexTuple ) palloc( IndexTupleSize( itup ) ); instup[0] = (IndexTuple) palloc(IndexTupleSize(itup));
memcpy( instup[0], itup, IndexTupleSize( itup ) ); memcpy(instup[0], itup, IndexTupleSize(itup));
ret = gistlayerinsert(r, GISTP_ROOT, &instup, &len, res, giststate); ret = gistlayerinsert(r, GISTP_ROOT, &instup, &len, res, giststate);
if ( ret & SPLITED ) if (ret & SPLITED)
gistnewroot( giststate, r, instup, len ); gistnewroot(giststate, r, instup, len);
for(i=0;i<len;i++) for (i = 0; i < len; i++)
pfree( instup[i] ); pfree(instup[i]);
pfree( instup ); pfree(instup);
} }
static int static int
gistlayerinsert( Relation r, BlockNumber blkno, gistlayerinsert(Relation r, BlockNumber blkno,
IndexTuple **itup, /* in - out, has compressed entry */ IndexTuple **itup, /* in - out, has compressed entry */
int *len , /* in - out */ int *len, /* in - out */
InsertIndexResult *res, /* out */ InsertIndexResult *res, /* out */
GISTSTATE *giststate ) { GISTSTATE *giststate)
{
Buffer buffer; Buffer buffer;
Page page; Page page;
OffsetNumber child; OffsetNumber child;
@ -442,7 +452,8 @@ gistlayerinsert( Relation r, BlockNumber blkno,
page = (Page) BufferGetPage(buffer); page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page); opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
if (!(opaque->flags & F_LEAF)) { if (!(opaque->flags & F_LEAF))
{
/* internal page, so we must walk down the tree */ /* internal page, so we must walk down the tree */
/* len IS equal to 1 */ /* len IS equal to 1 */
ItemId iid; ItemId iid;
@ -450,36 +461,38 @@ gistlayerinsert( Relation r, BlockNumber blkno,
ItemPointerData oldtid; ItemPointerData oldtid;
IndexTuple oldtup; IndexTuple oldtup;
child = gistchoose( r, page, *(*itup), giststate ); child = gistchoose(r, page, *(*itup), giststate);
iid = PageGetItemId(page, child); iid = PageGetItemId(page, child);
oldtup = (IndexTuple) PageGetItem(page, iid); oldtup = (IndexTuple) PageGetItem(page, iid);
nblkno = ItemPointerGetBlockNumber(&(oldtup->t_tid)); nblkno = ItemPointerGetBlockNumber(&(oldtup->t_tid));
/* /*
* After this call: * After this call: 1. if the child page was split, then itup
* 1. if the child page was split, then itup contains * contains keys for each page 2. if the child page wasn't split,
* keys for each page * then itup contains additional data for adjustment of the current key
* 2. if the child page wasn't split, then itup contains
* additional data for adjustment of the current key
*/ */
ret = gistlayerinsert( r, nblkno, itup, len, res, giststate ); ret = gistlayerinsert(r, nblkno, itup, len, res, giststate);
/* nothing inserted in child */ /* nothing inserted in child */
if ( ! (ret & INSERTED) ) { if (!(ret & INSERTED))
{
ReleaseBuffer(buffer); ReleaseBuffer(buffer);
return 0x00; return 0x00;
} }
/* child was not split */ /* child was not split */
if ( ! (ret & SPLITED) ) { if (!(ret & SPLITED))
IndexTuple newtup = gistgetadjusted( r, oldtup, (*itup)[0], giststate ); {
if ( ! newtup ) { IndexTuple newtup = gistgetadjusted(r, oldtup, (*itup)[0], giststate);
if (!newtup)
{
/* no need to update the key */ /* no need to update the key */
ReleaseBuffer(buffer); ReleaseBuffer(buffer);
return 0x00; return 0x00;
} }
pfree( (*itup)[0] ); /* !!! */ pfree((*itup)[0]); /* !!! */
(*itup)[0] = newtup; (*itup)[0] = newtup;
} }
@ -492,43 +505,54 @@ gistlayerinsert( Relation r, BlockNumber blkno,
ret = INSERTED; ret = INSERTED;
if ( gistnospace(page, (*itup), *len) ) { if (gistnospace(page, (*itup), *len))
{
/* no space for insertion */ /* no space for insertion */
IndexTuple *itvec; IndexTuple *itvec;
int tlen; int tlen;
ret |= SPLITED; ret |= SPLITED;
itvec = gistreadbuffer( r, buffer, &tlen ); itvec = gistreadbuffer(r, buffer, &tlen);
itvec = gistjoinvector( itvec, &tlen, (*itup), *len ); itvec = gistjoinvector(itvec, &tlen, (*itup), *len);
pfree( (*itup) ); pfree((*itup));
(*itup) = gistSplit( r, buffer, itvec, &tlen, giststate, (*itup) = gistSplit(r, buffer, itvec, &tlen, giststate,
(opaque->flags & F_LEAF) ? res : NULL ); /*res only for inserting in leaf*/ (opaque->flags & F_LEAF) ? res : NULL); /* res only for
ReleaseBuffer( buffer ); * inserting in leaf */
pfree( itvec ); ReleaseBuffer(buffer);
pfree(itvec);
*len = tlen; /* now tlen >= 2 */ *len = tlen; /* now tlen >= 2 */
} else { }
else
{
/* enough space */ /* enough space */
OffsetNumber off, l; OffsetNumber off,
l;
off = ( PageIsEmpty(page) ) ? off = (PageIsEmpty(page)) ?
FirstOffsetNumber FirstOffsetNumber
: :
OffsetNumberNext(PageGetMaxOffsetNumber(page)); OffsetNumberNext(PageGetMaxOffsetNumber(page));
l = gistwritebuffer( r, page, (*itup), *len, off, giststate ); l = gistwritebuffer(r, page, (*itup), *len, off, giststate);
WriteBuffer(buffer); WriteBuffer(buffer);
/* set res if inserting into a leaf page; in /*
this case, len is always 1 */ * set res if inserting into a leaf page; in this case, len is always 1
if ( res && (opaque->flags & F_LEAF) ) */
if (res && (opaque->flags & F_LEAF))
ItemPointerSet(&((*res)->pointerData), blkno, l); ItemPointerSet(&((*res)->pointerData), blkno, l);
if ( *len > 1 ) { /* previous insert ret & SPLITED != 0 */ if (*len > 1)
{ /* previous insert ret & SPLITED != 0 */
int i; int i;
/* child was split, so we must form union
* for insertion in parent */ /*
* child was split, so we must form union for insertion in
* parent
*/
IndexTuple newtup = gistunion(r, (*itup), *len, giststate); IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
for(i=0; i<*len; i++)
pfree( (*itup)[i] ); for (i = 0; i < *len; i++)
pfree((*itup)[i]);
(*itup)[0] = newtup; (*itup)[0] = newtup;
*len = 1; *len = 1;
} }
@ -541,18 +565,20 @@ gistlayerinsert( Relation r, BlockNumber blkno,
* Write the itup vector to the page; does not check for free space * Write the itup vector to the page; does not check for free space
*/ */
static OffsetNumber static OffsetNumber
gistwritebuffer( Relation r, Page page, IndexTuple *itup, gistwritebuffer(Relation r, Page page, IndexTuple *itup,
int len, OffsetNumber off, GISTSTATE *giststate) { int len, OffsetNumber off, GISTSTATE *giststate)
{
OffsetNumber l = InvalidOffsetNumber; OffsetNumber l = InvalidOffsetNumber;
int i; int i;
GISTENTRY tmpdentry; GISTENTRY tmpdentry;
IndexTuple newtup; IndexTuple newtup;
for(i=0; i<len; i++) { for (i = 0; i < len; i++)
{
l = gistPageAddItem(giststate, r, page, l = gistPageAddItem(giststate, r, page,
(Item) itup[i], IndexTupleSize(itup[i]), (Item) itup[i], IndexTupleSize(itup[i]),
off, LP_USED, &tmpdentry, &newtup); off, LP_USED, &tmpdentry, &newtup);
off = OffsetNumberNext( off ); off = OffsetNumberNext(off);
if (tmpdentry.pred != (((char *) itup[i]) + sizeof(IndexTupleData)) && tmpdentry.pred) if (tmpdentry.pred != (((char *) itup[i]) + sizeof(IndexTupleData)) && tmpdentry.pred)
pfree(tmpdentry.pred); pfree(tmpdentry.pred);
if (itup[i] != newtup) if (itup[i] != newtup)
@ -565,11 +591,13 @@ gistwritebuffer( Relation r, Page page, IndexTuple *itup,
* Check space for itup vector on page * Check space for itup vector on page
*/ */
static int static int
gistnospace( Page page, IndexTuple *itvec, int len ) { gistnospace(Page page, IndexTuple *itvec, int len)
{
int size = 0; int size = 0;
int i; int i;
for(i=0; i<len; i++)
size += IndexTupleSize( itvec[i] )+4; /* ??? */ for (i = 0; i < len; i++)
size += IndexTupleSize(itvec[i]) + 4; /* ??? */
return (PageGetFreeSpace(page) < size); return (PageGetFreeSpace(page) < size);
} }
@ -578,16 +606,18 @@ gistnospace( Page page, IndexTuple *itvec, int len ) {
* Read buffer into itup vector * Read buffer into itup vector
*/ */
static IndexTuple * static IndexTuple *
gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) { gistreadbuffer(Relation r, Buffer buffer, int *len /* out */ )
OffsetNumber i, maxoff; {
OffsetNumber i,
maxoff;
IndexTuple *itvec; IndexTuple *itvec;
Page p = (Page) BufferGetPage(buffer); Page p = (Page) BufferGetPage(buffer);
*len=0; *len = 0;
maxoff = PageGetMaxOffsetNumber(p); maxoff = PageGetMaxOffsetNumber(p);
itvec = palloc( sizeof(IndexTuple) * maxoff ); itvec = palloc(sizeof(IndexTuple) * maxoff);
for(i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
itvec[ (*len)++ ] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i)); itvec[(*len)++] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
return itvec; return itvec;
} }
@ -596,9 +626,10 @@ gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
* join two vectors into one * join two vectors into one
*/ */
static IndexTuple * static IndexTuple *
gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen ) { gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
itvec = (IndexTuple*) repalloc( (void*)itvec, sizeof(IndexTuple) * ( (*len) + addlen ) ); {
memmove( &itvec[*len], additvec, sizeof(IndexTuple) * addlen ); itvec = (IndexTuple *) repalloc((void *) itvec, sizeof(IndexTuple) * ((*len) + addlen));
memmove(&itvec[*len], additvec, sizeof(IndexTuple) * addlen);
*len += addlen; *len += addlen;
return itvec; return itvec;
} }
@ -607,10 +638,12 @@ gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen )
* return union of itup vector * return union of itup vector
*/ */
static IndexTuple static IndexTuple
gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) { gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
{
bytea *evec; bytea *evec;
char *datum; char *datum;
int datumsize, i; int datumsize,
i;
GISTENTRY centry; GISTENTRY centry;
char isnull; char isnull;
IndexTuple newtup; IndexTuple newtup;
@ -618,33 +651,33 @@ gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
evec = (bytea *) palloc(len * sizeof(GISTENTRY) + VARHDRSZ); evec = (bytea *) palloc(len * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = len * sizeof(GISTENTRY) + VARHDRSZ; VARATT_SIZEP(evec) = len * sizeof(GISTENTRY) + VARHDRSZ;
for ( i = 0 ; i< len ; i++ ) for (i = 0; i < len; i++)
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[i], gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[i],
(char*) itvec[i] + sizeof(IndexTupleData), (char *) itvec[i] + sizeof(IndexTupleData),
(Relation)NULL, (Page)NULL, (OffsetNumber)NULL, (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
IndexTupleSize((IndexTuple)itvec[i]) - sizeof(IndexTupleData), FALSE); IndexTupleSize((IndexTuple) itvec[i]) - sizeof(IndexTupleData), FALSE);
datum = (char *) datum = (char *)
DatumGetPointer(FunctionCall2(&giststate->unionFn, DatumGetPointer(FunctionCall2(&giststate->unionFn,
PointerGetDatum(evec), PointerGetDatum(evec),
PointerGetDatum(&datumsize))); PointerGetDatum(&datumsize)));
for ( i = 0 ; i< len ; i++ ) for (i = 0; i < len; i++)
if ( ((GISTENTRY *) VARDATA(evec))[i].pred && if (((GISTENTRY *) VARDATA(evec))[i].pred &&
((GISTENTRY *) VARDATA(evec))[i].pred != ((GISTENTRY *) VARDATA(evec))[i].pred !=
((char*)( itvec[i] )+ sizeof(IndexTupleData)) ) ((char *) (itvec[i]) + sizeof(IndexTupleData)))
pfree( ((GISTENTRY *) VARDATA(evec))[i].pred ); pfree(((GISTENTRY *) VARDATA(evec))[i].pred);
pfree( evec ); pfree(evec);
gistcentryinit(giststate, &centry, datum, gistcentryinit(giststate, &centry, datum,
(Relation)NULL, (Page)NULL, (OffsetNumber)NULL, (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
datumsize, FALSE); datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n'; isnull = (centry.pred) ? ' ' : 'n';
newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull ); newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
if (centry.pred != datum) if (centry.pred != datum)
pfree( datum ); pfree(datum);
return newtup; return newtup;
} }
@ -653,28 +686,31 @@ gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
* Forms union of oldtup and addtup, if union == oldtup then return NULL * Forms union of oldtup and addtup, if union == oldtup then return NULL
*/ */
static IndexTuple static IndexTuple
gistgetadjusted( Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate ) { gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
{
bytea *evec; bytea *evec;
char *datum; char *datum;
int datumsize; int datumsize;
bool result; bool result;
char isnull; char isnull;
GISTENTRY centry, *ev0p, *ev1p; GISTENTRY centry,
*ev0p,
*ev1p;
IndexTuple newtup = NULL; IndexTuple newtup = NULL;
evec = (bytea *) palloc(2 * sizeof(GISTENTRY) + VARHDRSZ); evec = (bytea *) palloc(2 * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ; VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[0], gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[0],
(char*) oldtup + sizeof(IndexTupleData), (Relation) NULL, (char *) oldtup + sizeof(IndexTupleData), (Relation) NULL,
(Page) NULL, (OffsetNumber) 0, (Page) NULL, (OffsetNumber) 0,
IndexTupleSize((IndexTuple)oldtup) - sizeof(IndexTupleData), FALSE); IndexTupleSize((IndexTuple) oldtup) - sizeof(IndexTupleData), FALSE);
ev0p = &((GISTENTRY *) VARDATA(evec))[0]; ev0p = &((GISTENTRY *) VARDATA(evec))[0];
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[1], gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[1],
(char*) addtup + sizeof(IndexTupleData), (Relation) NULL, (char *) addtup + sizeof(IndexTupleData), (Relation) NULL,
(Page) NULL, (OffsetNumber) 0, (Page) NULL, (OffsetNumber) 0,
IndexTupleSize((IndexTuple)addtup) - sizeof(IndexTupleData), FALSE); IndexTupleSize((IndexTuple) addtup) - sizeof(IndexTupleData), FALSE);
ev1p = &((GISTENTRY *) VARDATA(evec))[1]; ev1p = &((GISTENTRY *) VARDATA(evec))[1];
datum = (char *) datum = (char *)
@ -682,36 +718,40 @@ gistgetadjusted( Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *gi
PointerGetDatum(evec), PointerGetDatum(evec),
PointerGetDatum(&datumsize))); PointerGetDatum(&datumsize)));
if ( ! ( ev0p->pred && ev1p->pred ) ) { if (!(ev0p->pred && ev1p->pred))
result = ( ev0p->pred == NULL && ev1p->pred == NULL ); result = (ev0p->pred == NULL && ev1p->pred == NULL);
} else { else
{
FunctionCall3(&giststate->equalFn, FunctionCall3(&giststate->equalFn,
PointerGetDatum(ev0p->pred), PointerGetDatum(ev0p->pred),
PointerGetDatum(datum), PointerGetDatum(datum),
PointerGetDatum(&result)); PointerGetDatum(&result));
} }
if ( result ) { if (result)
{
/* no need to update the key */ /* no need to update the key */
pfree( datum ); pfree(datum);
} else { }
else
{
gistcentryinit(giststate, &centry, datum, ev0p->rel, ev0p->page, gistcentryinit(giststate, &centry, datum, ev0p->rel, ev0p->page,
ev0p->offset, datumsize, FALSE); ev0p->offset, datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n'; isnull = (centry.pred) ? ' ' : 'n';
newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull ); newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
newtup->t_tid = oldtup->t_tid; newtup->t_tid = oldtup->t_tid;
if (centry.pred != datum) if (centry.pred != datum)
pfree( datum ); pfree(datum);
} }
if ( ev0p->pred && if (ev0p->pred &&
ev0p->pred != (char*) oldtup + sizeof(IndexTupleData) ) ev0p->pred != (char *) oldtup + sizeof(IndexTupleData))
pfree( ev0p->pred ); pfree(ev0p->pred);
if ( ev1p->pred && if (ev1p->pred &&
ev1p->pred != (char*) addtup + sizeof(IndexTupleData) ) ev1p->pred != (char *) addtup + sizeof(IndexTupleData))
pfree( ev1p->pred ); pfree(ev1p->pred);
pfree( evec ); pfree(evec);
return newtup; return newtup;
} }
@ -728,19 +768,27 @@ gistSplit(Relation r,
InsertIndexResult *res) InsertIndexResult *res)
{ {
Page p; Page p;
Buffer leftbuf, rightbuf; Buffer leftbuf,
Page left, right; rightbuf;
OffsetNumber *spl_left, *spl_right; Page left,
IndexTuple *lvectup, *rvectup, *newtup; right;
int leftoff, rightoff; OffsetNumber *spl_left,
BlockNumber lbknum, rbknum; *spl_right;
IndexTuple *lvectup,
*rvectup,
*newtup;
int leftoff,
rightoff;
BlockNumber lbknum,
rbknum;
GISTPageOpaque opaque; GISTPageOpaque opaque;
char isnull; char isnull;
GIST_SPLITVEC v; GIST_SPLITVEC v;
bytea *entryvec; bytea *entryvec;
bool *decompvec; bool *decompvec;
GISTENTRY tmpentry; GISTENTRY tmpentry;
int i, nlen; int i,
nlen;
p = (Page) BufferGetPage(buffer); p = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(p); opaque = (GISTPageOpaque) PageGetSpecialPointer(p);
@ -773,17 +821,17 @@ gistSplit(Relation r,
right = (Page) BufferGetPage(rightbuf); right = (Page) BufferGetPage(rightbuf);
/* generate the item array */ /* generate the item array */
entryvec = (bytea *) palloc(VARHDRSZ + (*len+1) * sizeof(GISTENTRY)); entryvec = (bytea *) palloc(VARHDRSZ + (*len + 1) * sizeof(GISTENTRY));
decompvec = (bool *) palloc(VARHDRSZ + (*len+1) * sizeof(bool)); decompvec = (bool *) palloc(VARHDRSZ + (*len + 1) * sizeof(bool));
VARATT_SIZEP(entryvec) = (*len+1) * sizeof(GISTENTRY) + VARHDRSZ; VARATT_SIZEP(entryvec) = (*len + 1) * sizeof(GISTENTRY) + VARHDRSZ;
for (i = 1; i <= *len; i++) for (i = 1; i <= *len; i++)
{ {
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(entryvec))[i], gistdentryinit(giststate, &((GISTENTRY *) VARDATA(entryvec))[i],
(((char *) itup[i-1]) + sizeof(IndexTupleData)), (((char *) itup[i - 1]) + sizeof(IndexTupleData)),
r, p, i, r, p, i,
IndexTupleSize(itup[i-1]) - sizeof(IndexTupleData), FALSE); IndexTupleSize(itup[i - 1]) - sizeof(IndexTupleData), FALSE);
if ((char *) (((GISTENTRY *) VARDATA(entryvec))[i].pred) if ((char *) (((GISTENTRY *) VARDATA(entryvec))[i].pred)
== (((char *) itup[i-1]) + sizeof(IndexTupleData))) == (((char *) itup[i - 1]) + sizeof(IndexTupleData)))
decompvec[i] = FALSE; decompvec[i] = FALSE;
else else
decompvec[i] = TRUE; decompvec[i] = TRUE;
@ -801,35 +849,43 @@ gistSplit(Relation r,
pfree(entryvec); pfree(entryvec);
pfree(decompvec); pfree(decompvec);
spl_left = v.spl_left; spl_right = v.spl_right; spl_left = v.spl_left;
spl_right = v.spl_right;
/* form left and right vector */ /* form left and right vector */
lvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nleft ); lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nleft);
rvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nright ); rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nright);
leftoff = rightoff = 0; leftoff = rightoff = 0;
for( i=1; i <= *len; i++ ) { for (i = 1; i <= *len; i++)
if (i == *(spl_left) || ( i==*len && *(spl_left) != FirstOffsetNumber ) ) { {
lvectup[ leftoff++ ] = itup[ i-1 ]; if (i == *(spl_left) || (i == *len && *(spl_left) != FirstOffsetNumber))
{
lvectup[leftoff++] = itup[i - 1];
spl_left++; spl_left++;
} else { }
rvectup[ rightoff++ ] = itup[ i-1 ]; else
{
rvectup[rightoff++] = itup[i - 1];
spl_right++; spl_right++;
} }
} }
/* write on disk (may need another split) */ /* write on disk (may need another split) */
if ( gistnospace(right, rvectup, v.spl_nright) ) { if (gistnospace(right, rvectup, v.spl_nright))
{
nlen = v.spl_nright; nlen = v.spl_nright;
newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate, newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
( res && rvectup[ nlen-1 ] == itup[ *len - 1 ] ) ? res : NULL ); (res && rvectup[nlen - 1] == itup[*len - 1]) ? res : NULL);
ReleaseBuffer( rightbuf ); ReleaseBuffer(rightbuf);
} else { }
else
{
OffsetNumber l; OffsetNumber l;
l = gistwritebuffer( r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate ); l = gistwritebuffer(r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate);
WriteBuffer(rightbuf); WriteBuffer(rightbuf);
if ( res ) if (res)
ItemPointerSet(&((*res)->pointerData), rbknum, l); ItemPointerSet(&((*res)->pointerData), rbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_rdatum, (Relation) NULL, gistcentryinit(giststate, &tmpentry, v.spl_rdatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0, (Page) NULL, (OffsetNumber) 0,
@ -839,32 +895,35 @@ gistSplit(Relation r,
v.spl_rdatum = tmpentry.pred; v.spl_rdatum = tmpentry.pred;
nlen = 1; nlen = 1;
newtup = (IndexTuple*) palloc( sizeof(IndexTuple) * 1); newtup = (IndexTuple *) palloc(sizeof(IndexTuple) * 1);
isnull = ( v.spl_rdatum ) ? ' ' : 'n'; isnull = (v.spl_rdatum) ? ' ' : 'n';
newtup[0] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_rdatum), &isnull); newtup[0] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_rdatum), &isnull);
ItemPointerSet(&(newtup[0]->t_tid), rbknum, 1); ItemPointerSet(&(newtup[0]->t_tid), rbknum, 1);
} }
if ( gistnospace(left, lvectup, v.spl_nleft) ) { if (gistnospace(left, lvectup, v.spl_nleft))
{
int llen = v.spl_nleft; int llen = v.spl_nleft;
IndexTuple *lntup; IndexTuple *lntup;
lntup = gistSplit(r, leftbuf, lvectup, &llen, giststate, lntup = gistSplit(r, leftbuf, lvectup, &llen, giststate,
( res && lvectup[ llen-1 ] == itup[ *len - 1 ] ) ? res : NULL ); (res && lvectup[llen - 1] == itup[*len - 1]) ? res : NULL);
ReleaseBuffer( leftbuf ); ReleaseBuffer(leftbuf);
newtup = gistjoinvector( newtup, &nlen, lntup, llen ); newtup = gistjoinvector(newtup, &nlen, lntup, llen);
pfree( lntup ); pfree(lntup);
} else { }
else
{
OffsetNumber l; OffsetNumber l;
l = gistwritebuffer( r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate ); l = gistwritebuffer(r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate);
if ( BufferGetBlockNumber(buffer) != GISTP_ROOT) if (BufferGetBlockNumber(buffer) != GISTP_ROOT)
PageRestoreTempPage(left, p); PageRestoreTempPage(left, p);
WriteBuffer(leftbuf); WriteBuffer(leftbuf);
if ( res ) if (res)
ItemPointerSet(&((*res)->pointerData), lbknum, l); ItemPointerSet(&((*res)->pointerData), lbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_ldatum, (Relation) NULL, gistcentryinit(giststate, &tmpentry, v.spl_ldatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0, (Page) NULL, (OffsetNumber) 0,
@ -874,10 +933,10 @@ gistSplit(Relation r,
v.spl_ldatum = tmpentry.pred; v.spl_ldatum = tmpentry.pred;
nlen += 1; nlen += 1;
newtup = (IndexTuple*) repalloc( (void*)newtup, sizeof(IndexTuple) * nlen); newtup = (IndexTuple *) repalloc((void *) newtup, sizeof(IndexTuple) * nlen);
isnull = ( v.spl_ldatum ) ? ' ' : 'n'; isnull = (v.spl_ldatum) ? ' ' : 'n';
newtup[nlen-1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull); newtup[nlen - 1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull);
ItemPointerSet(&(newtup[nlen-1]->t_tid), lbknum, 1); ItemPointerSet(&(newtup[nlen - 1]->t_tid), lbknum, 1);
} }
@ -885,10 +944,10 @@ gistSplit(Relation r,
gistadjscans(r, GISTOP_SPLIT, BufferGetBlockNumber(buffer), FirstOffsetNumber); gistadjscans(r, GISTOP_SPLIT, BufferGetBlockNumber(buffer), FirstOffsetNumber);
/* !!! pfree */ /* !!! pfree */
pfree( rvectup ); pfree(rvectup);
pfree( lvectup ); pfree(lvectup);
pfree( v.spl_left ); pfree(v.spl_left);
pfree( v.spl_right ); pfree(v.spl_right);
*len = nlen; *len = nlen;
return newtup; return newtup;
@ -904,7 +963,7 @@ gistnewroot(GISTSTATE *giststate, Relation r, IndexTuple *itup, int len)
GISTInitBuffer(b, 0); GISTInitBuffer(b, 0);
p = BufferGetPage(b); p = BufferGetPage(b);
gistwritebuffer( r, p, itup, len, FirstOffsetNumber, giststate ); gistwritebuffer(r, p, itup, len, FirstOffsetNumber, giststate);
WriteBuffer(b); WriteBuffer(b);
} }
@ -1101,7 +1160,7 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
char *datum = (((char *) t) + sizeof(IndexTupleData)); char *datum = (((char *) t) + sizeof(IndexTupleData));
/* if new entry fits in index tuple, copy it in */ /* if new entry fits in index tuple, copy it in */
if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0 ) if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0)
{ {
memcpy(datum, entry.pred, entry.bytes); memcpy(datum, entry.pred, entry.bytes);
/* clear out old size */ /* clear out old size */
@ -1118,7 +1177,7 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
IndexTuple newtup; IndexTuple newtup;
char isnull; char isnull;
isnull = ( entry.pred ) ? ' ' : 'n'; isnull = (entry.pred) ? ' ' : 'n';
newtup = (IndexTuple) index_formtuple(tupDesc, newtup = (IndexTuple) index_formtuple(tupDesc,
(Datum *) &(entry.pred), (Datum *) &(entry.pred),
&isnull); &isnull);
@ -1182,37 +1241,39 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
GISTPageOpaque opaque; GISTPageOpaque opaque;
IndexTuple which; IndexTuple which;
ItemId iid; ItemId iid;
OffsetNumber i,maxoff; OffsetNumber i,
maxoff;
BlockNumber cblk; BlockNumber cblk;
char *pred; char *pred;
pred = (char*) palloc( sizeof(char)*level+1 ); pred = (char *) palloc(sizeof(char) * level + 1);
MemSet(pred, '\t', level); MemSet(pred, '\t', level);
pred[level]='\0'; pred[level] = '\0';
buffer = ReadBuffer(r, blk); buffer = ReadBuffer(r, blk);
page = (Page) BufferGetPage(buffer); page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page); opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
maxoff = PageGetMaxOffsetNumber( page ); maxoff = PageGetMaxOffsetNumber(page);
elog(NOTICE,"%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, ( opaque->flags & F_LEAF ) ? "LEAF" : "INTE", (int)blk, (int)maxoff, PageGetFreeSpace(page)); elog(NOTICE, "%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, (opaque->flags & F_LEAF) ? "LEAF" : "INTE", (int) blk, (int) maxoff, PageGetFreeSpace(page));
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
iid = PageGetItemId(page, i); iid = PageGetItemId(page, i);
which = (IndexTuple) PageGetItem(page, iid); which = (IndexTuple) PageGetItem(page, iid);
cblk = ItemPointerGetBlockNumber(&(which->t_tid)); cblk = ItemPointerGetBlockNumber(&(which->t_tid));
#ifdef PRINTTUPLE #ifdef PRINTTUPLE
elog(NOTICE,"%s Tuple. blk: %d size: %d", pred, (int)cblk, IndexTupleSize( which ) ); elog(NOTICE, "%s Tuple. blk: %d size: %d", pred, (int) cblk, IndexTupleSize(which));
#endif #endif
if ( ! ( opaque->flags & F_LEAF ) ) { if (!(opaque->flags & F_LEAF))
gist_dumptree( r, level+1, cblk, i ); gist_dumptree(r, level + 1, cblk, i);
}
} }
ReleaseBuffer(buffer); ReleaseBuffer(buffer);
pfree(pred); pfree(pred);
} }
#endif /* defined GISTDEBUG */ #endif /* defined GISTDEBUG */
void void
@ -1228,7 +1289,6 @@ gist_undo(XLogRecPtr lsn, XLogRecord *record)
} }
void void
gist_desc(char *buf, uint8 xl_info, char* rec) gist_desc(char *buf, uint8 xl_info, char *rec)
{ {
} }


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.49 2001/02/22 21:48:49 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.50 2001/03/22 03:59:12 momjian Exp $
* *
* NOTES * NOTES
* This file contains only the public interface routines. * This file contains only the public interface routines.
@ -45,8 +45,10 @@ hashbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1); Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2); IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3); Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED #ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4); IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif #endif
HeapScanDesc hscan; HeapScanDesc hscan;
HeapTuple htup; HeapTuple htup;
@ -59,9 +61,11 @@ hashbuild(PG_FUNCTION_ARGS)
nitups; nitups;
HashItem hitem; HashItem hitem;
Node *pred = indexInfo->ii_Predicate; Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable; TupleTable tupleTable;
TupleTableSlot *slot; TupleTableSlot *slot;
#endif #endif
ExprContext *econtext; ExprContext *econtext;
InsertIndexResult res = NULL; InsertIndexResult res = NULL;
@ -117,6 +121,7 @@ hashbuild(PG_FUNCTION_ARGS)
nhtups++; nhtups++;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
/* /*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip * If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index * this tuple if it was already in the existing partial index
@ -191,9 +196,7 @@ hashbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL) if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true); ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */ #endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext); FreeExprContext(econtext);
@ -245,8 +248,10 @@ hashinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1); Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2); char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3); ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED #ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4); Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif #endif
InsertIndexResult res; InsertIndexResult res;
HashItem hitem; HashItem hitem;
@ -327,8 +332,10 @@ Datum
hashrescan(PG_FUNCTION_ARGS) hashrescan(PG_FUNCTION_ARGS)
{ {
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */ #ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
bool fromEnd = PG_GETARG_BOOL(1); bool fromEnd = PG_GETARG_BOOL(1);
#endif #endif
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2); ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr; ItemPointer iptr;
@ -493,6 +500,6 @@ hash_undo(XLogRecPtr lsn, XLogRecord *record)
} }
void void
hash_desc(char *buf, uint8 xl_info, char* rec) hash_desc(char *buf, uint8 xl_info, char *rec)
{ {
} }


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.29 2001/01/24 19:42:47 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.30 2001/03/22 03:59:13 momjian Exp $
* *
* NOTES * NOTES
* These functions are stored in pg_amproc. For each operator class * These functions are stored in pg_amproc. For each operator class
@ -25,32 +25,32 @@
Datum Datum
hashchar(PG_FUNCTION_ARGS) hashchar(PG_FUNCTION_ARGS)
{ {
PG_RETURN_UINT32(~ ((uint32) PG_GETARG_CHAR(0))); PG_RETURN_UINT32(~((uint32) PG_GETARG_CHAR(0)));
} }
Datum Datum
hashint2(PG_FUNCTION_ARGS) hashint2(PG_FUNCTION_ARGS)
{ {
PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT16(0))); PG_RETURN_UINT32(~((uint32) PG_GETARG_INT16(0)));
} }
Datum Datum
hashint4(PG_FUNCTION_ARGS) hashint4(PG_FUNCTION_ARGS)
{ {
PG_RETURN_UINT32(~ PG_GETARG_UINT32(0)); PG_RETURN_UINT32(~PG_GETARG_UINT32(0));
} }
Datum Datum
hashint8(PG_FUNCTION_ARGS) hashint8(PG_FUNCTION_ARGS)
{ {
/* we just use the low 32 bits... */ /* we just use the low 32 bits... */
PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT64(0))); PG_RETURN_UINT32(~((uint32) PG_GETARG_INT64(0)));
} }
Datum Datum
hashoid(PG_FUNCTION_ARGS) hashoid(PG_FUNCTION_ARGS)
{ {
PG_RETURN_UINT32(~ ((uint32) PG_GETARG_OID(0))); PG_RETURN_UINT32(~((uint32) PG_GETARG_OID(0)));
} }
Datum Datum
@ -93,7 +93,7 @@ hashint2vector(PG_FUNCTION_ARGS)
Datum Datum
hashname(PG_FUNCTION_ARGS) hashname(PG_FUNCTION_ARGS)
{ {
char *key = NameStr(* PG_GETARG_NAME(0)); char *key = NameStr(*PG_GETARG_NAME(0));
return hash_any((char *) key, NAMEDATALEN); return hash_any((char *) key, NAMEDATALEN);
} }


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.110 2001/01/24 19:42:47 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.111 2001/03/22 03:59:13 momjian Exp $
* *
* *
* INTERFACE ROUTINES * INTERFACE ROUTINES
@ -564,7 +564,8 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
) )
); );
} }
#endif /* defined(DISABLE_COMPLEX_MACRO)*/
#endif /* defined(DISABLE_COMPLEX_MACRO) */
/* ---------------------------------------------------------------- /* ----------------------------------------------------------------
@ -791,8 +792,8 @@ heap_beginscan(Relation relation,
scan->rs_nkeys = (short) nkeys; scan->rs_nkeys = (short) nkeys;
/* /*
* we do this here instead of in initscan() because heap_rescan * we do this here instead of in initscan() because heap_rescan also
* also calls initscan() and we don't want to allocate memory again * calls initscan() and we don't want to allocate memory again
*/ */
if (nkeys) if (nkeys)
scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys); scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
@ -1374,7 +1375,7 @@ heap_insert(Relation relation, HeapTuple tup)
xlrec.target.node = relation->rd_node; xlrec.target.node = relation->rd_node;
xlrec.target.tid = tup->t_self; xlrec.target.tid = tup->t_self;
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapInsert; rdata[0].len = SizeOfHeapInsert;
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);
@ -1383,12 +1384,12 @@ heap_insert(Relation relation, HeapTuple tup)
xlhdr.t_hoff = tup->t_data->t_hoff; xlhdr.t_hoff = tup->t_data->t_hoff;
xlhdr.mask = tup->t_data->t_infomask; xlhdr.mask = tup->t_data->t_infomask;
rdata[1].buffer = buffer; rdata[1].buffer = buffer;
rdata[1].data = (char*)&xlhdr; rdata[1].data = (char *) &xlhdr;
rdata[1].len = SizeOfHeapHeader; rdata[1].len = SizeOfHeapHeader;
rdata[1].next = &(rdata[2]); rdata[1].next = &(rdata[2]);
rdata[2].buffer = buffer; rdata[2].buffer = buffer;
rdata[2].data = (char*) tup->t_data + offsetof(HeapTupleHeaderData, t_bits); rdata[2].data = (char *) tup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[2].len = tup->t_len - offsetof(HeapTupleHeaderData, t_bits); rdata[2].len = tup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[2].next = NULL; rdata[2].next = NULL;
@ -1411,10 +1412,10 @@ heap_insert(Relation relation, HeapTuple tup)
WriteBuffer(buffer); WriteBuffer(buffer);
/* /*
* If tuple is cacheable, mark it for rollback from the caches * If tuple is cacheable, mark it for rollback from the caches in case
* in case we abort. Note it is OK to do this after WriteBuffer * we abort. Note it is OK to do this after WriteBuffer releases the
* releases the buffer, because the "tup" data structure is all * buffer, because the "tup" data structure is all in local memory,
* in local memory, not in the shared buffer. * not in the shared buffer.
*/ */
RelationMark4RollbackHeapTuple(relation, tup); RelationMark4RollbackHeapTuple(relation, tup);
@ -1520,7 +1521,7 @@ l1:
xlrec.target.node = relation->rd_node; xlrec.target.node = relation->rd_node;
xlrec.target.tid = tp.t_self; xlrec.target.tid = tp.t_self;
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapDelete; rdata[0].len = SizeOfHeapDelete;
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);
@ -1551,9 +1552,10 @@ l1:
#endif #endif
/* /*
* Mark tuple for invalidation from system caches at next command boundary. * Mark tuple for invalidation from system caches at next command
* We have to do this before WriteBuffer because we need to look at the * boundary. We have to do this before WriteBuffer because we need to
* contents of the tuple, so we need to hold our refcount on the buffer. * look at the contents of the tuple, so we need to hold our refcount
* on the buffer.
*/ */
RelationInvalidateHeapTuple(relation, &tp); RelationInvalidateHeapTuple(relation, &tp);
@ -1636,6 +1638,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp); oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
oldtup.t_len = ItemIdGetLength(lp); oldtup.t_len = ItemIdGetLength(lp);
oldtup.t_self = *otid; oldtup.t_self = *otid;
/* /*
* Note: beyond this point, use oldtup not otid to refer to old tuple. * Note: beyond this point, use oldtup not otid to refer to old tuple.
* otid may very well point at newtup->t_self, which we will overwrite * otid may very well point at newtup->t_self, which we will overwrite
@ -1701,18 +1704,19 @@ l2:
/* /*
* If the toaster needs to be activated, OR if the new tuple will not * If the toaster needs to be activated, OR if the new tuple will not
* fit on the same page as the old, then we need to release the context * fit on the same page as the old, then we need to release the
* lock (but not the pin!) on the old tuple's buffer while we are off * context lock (but not the pin!) on the old tuple's buffer while we
* doing TOAST and/or table-file-extension work. We must mark the old * are off doing TOAST and/or table-file-extension work. We must mark
* tuple to show that it's already being updated, else other processes * the old tuple to show that it's already being updated, else other
* may try to update it themselves. To avoid second XLOG log record, * processes may try to update it themselves. To avoid second XLOG log
* we use xact mgr hook to unlock old tuple without reading log if xact * record, we use xact mgr hook to unlock old tuple without reading
* will abort before update is logged. In the event of a crash prior to logging, * log if xact will abort before update is logged. In the event of
* TQUAL routines will see HEAP_XMAX_UNLOGGED flag... * a crash prior to logging, TQUAL routines will see HEAP_XMAX_UNLOGGED
* flag...
* *
* NOTE: this trick is useless currently but saved for future * NOTE: this trick is useless currently but saved for future when we'll
* when we'll implement UNDO and will re-use transaction IDs * implement UNDO and will re-use transaction IDs after postmaster
* after postmaster startup. * startup.
* *
* We need to invoke the toaster if there are already any toasted values * We need to invoke the toaster if there are already any toasted values
* present, or if the new tuple is over-threshold. * present, or if the new tuple is over-threshold.
@ -1726,7 +1730,7 @@ l2:
{ {
_locked_tuple_.node = relation->rd_node; _locked_tuple_.node = relation->rd_node;
_locked_tuple_.tid = oldtup.t_self; _locked_tuple_.tid = oldtup.t_self;
XactPushRollback(_heap_unlock_tuple, (void*) &_locked_tuple_); XactPushRollback(_heap_unlock_tuple, (void *) &_locked_tuple_);
TransactionIdStore(GetCurrentTransactionId(), TransactionIdStore(GetCurrentTransactionId(),
&(oldtup.t_data->t_xmax)); &(oldtup.t_data->t_xmax));
@ -1814,10 +1818,10 @@ l2:
WriteBuffer(buffer); WriteBuffer(buffer);
/* /*
* If new tuple is cacheable, mark it for rollback from the caches * If new tuple is cacheable, mark it for rollback from the caches in
* in case we abort. Note it is OK to do this after WriteBuffer * case we abort. Note it is OK to do this after WriteBuffer releases
* releases the buffer, because the "newtup" data structure is all * the buffer, because the "newtup" data structure is all in local
* in local memory, not in the shared buffer. * memory, not in the shared buffer.
*/ */
RelationMark4RollbackHeapTuple(relation, newtup); RelationMark4RollbackHeapTuple(relation, newtup);
@ -2136,7 +2140,7 @@ log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
xlrec.node = reln->rd_node; xlrec.node = reln->rd_node;
xlrec.block = BufferGetBlockNumber(buffer); xlrec.block = BufferGetBlockNumber(buffer);
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapClean; rdata[0].len = SizeOfHeapClean;
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);
@ -2157,7 +2161,7 @@ log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CLEAN, rdata); recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CLEAN, rdata);
return(recptr); return (recptr);
} }
static XLogRecPtr static XLogRecPtr
@ -2166,7 +2170,7 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
{ {
char tbuf[MAXALIGN(sizeof(xl_heap_header)) + 2 * sizeof(TransactionId)]; char tbuf[MAXALIGN(sizeof(xl_heap_header)) + 2 * sizeof(TransactionId)];
xl_heap_update xlrec; xl_heap_update xlrec;
xl_heap_header *xlhdr = (xl_heap_header*) tbuf; xl_heap_header *xlhdr = (xl_heap_header *) tbuf;
int hsize = SizeOfHeapHeader; int hsize = SizeOfHeapHeader;
XLogRecPtr recptr; XLogRecPtr recptr;
XLogRecData rdata[4]; XLogRecData rdata[4];
@ -2177,7 +2181,7 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
xlrec.target.tid = from; xlrec.target.tid = from;
xlrec.newtid = newtup->t_self; xlrec.newtid = newtup->t_self;
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapUpdate; rdata[0].len = SizeOfHeapUpdate;
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);
@ -2205,12 +2209,12 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
hsize += (2 * sizeof(TransactionId)); hsize += (2 * sizeof(TransactionId));
} }
rdata[2].buffer = newbuf; rdata[2].buffer = newbuf;
rdata[2].data = (char*)xlhdr; rdata[2].data = (char *) xlhdr;
rdata[2].len = hsize; rdata[2].len = hsize;
rdata[2].next = &(rdata[3]); rdata[2].next = &(rdata[3]);
rdata[3].buffer = newbuf; rdata[3].buffer = newbuf;
rdata[3].data = (char*) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits); rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits); rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[3].next = NULL; rdata[3].next = NULL;
@ -2224,20 +2228,20 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
recptr = XLogInsert(RM_HEAP_ID, info, rdata); recptr = XLogInsert(RM_HEAP_ID, info, rdata);
return(recptr); return (recptr);
} }
XLogRecPtr XLogRecPtr
log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from, log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
Buffer newbuf, HeapTuple newtup) Buffer newbuf, HeapTuple newtup)
{ {
return(log_heap_update(reln, oldbuf, from, newbuf, newtup, true)); return (log_heap_update(reln, oldbuf, from, newbuf, newtup, true));
} }
static void static void
heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record) heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
{ {
xl_heap_clean *xlrec = (xl_heap_clean*) XLogRecGetData(record); xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
Relation reln; Relation reln;
Buffer buffer; Buffer buffer;
Page page; Page page;
@ -2267,14 +2271,14 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfHeapClean) if (record->xl_len > SizeOfHeapClean)
{ {
char unbuf[BLCKSZ]; char unbuf[BLCKSZ];
OffsetNumber *unused = (OffsetNumber*)unbuf; OffsetNumber *unused = (OffsetNumber *) unbuf;
char *unend; char *unend;
ItemId lp; ItemId lp;
memcpy(unbuf, (char*)xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean); memcpy(unbuf, (char *) xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean);
unend = unbuf + (record->xl_len - SizeOfHeapClean); unend = unbuf + (record->xl_len - SizeOfHeapClean);
while((char*)unused < unend) while ((char *) unused < unend)
{ {
lp = ((PageHeader) page)->pd_linp + *unused; lp = ((PageHeader) page)->pd_linp + *unused;
lp->lp_flags &= ~LP_USED; lp->lp_flags &= ~LP_USED;
@ -2289,7 +2293,7 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void static void
heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record) heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
{ {
xl_heap_delete *xlrec = (xl_heap_delete*) XLogRecGetData(record); xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node); Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
Buffer buffer; Buffer buffer;
Page page; Page page;
@ -2320,7 +2324,8 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
return; return;
} }
} }
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */ else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_delete_undo: bad page LSN"); elog(STOP, "heap_delete_undo: bad page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid)); offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@ -2350,7 +2355,7 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void static void
heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record) heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
{ {
xl_heap_insert *xlrec = (xl_heap_insert*) XLogRecGetData(record); xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node); Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
Buffer buffer; Buffer buffer;
Page page; Page page;
@ -2396,9 +2401,9 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_insert_redo: invalid max offset number"); elog(STOP, "heap_insert_redo: invalid max offset number");
newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader; newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapInsert, SizeOfHeapHeader); memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapInsert, SizeOfHeapHeader);
memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits), memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
(char*)xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen); (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits); newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf; htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid; htup->t_oid = xlhdr.t_oid;
@ -2409,7 +2414,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
htup->t_xmax = htup->t_cmax = 0; htup->t_xmax = htup->t_cmax = 0;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask; htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
offnum = PageAddItem(page, (Item)htup, newlen, offnum, offnum = PageAddItem(page, (Item) htup, newlen, offnum,
LP_USED | OverwritePageMode); LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber) if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_insert_redo: failed to add tuple"); elog(STOP, "heap_insert_redo: failed to add tuple");
@ -2420,7 +2425,8 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
} }
/* undo insert */ /* undo insert */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */ if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_insert_undo: bad page LSN"); elog(STOP, "heap_insert_undo: bad page LSN");
elog(STOP, "heap_insert_undo: unimplemented"); elog(STOP, "heap_insert_undo: unimplemented");
@ -2432,7 +2438,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void static void
heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move) heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
{ {
xl_heap_update *xlrec = (xl_heap_update*) XLogRecGetData(record); xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node); Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
Buffer buffer; Buffer buffer;
bool samepage = bool samepage =
@ -2470,7 +2476,8 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
goto newt; goto newt;
} }
} }
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */ else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_update_undo: bad old tuple page LSN"); elog(STOP, "heap_update_undo: bad old tuple page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid)); offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@ -2557,9 +2564,9 @@ newsame:;
hsize += (2 * sizeof(TransactionId)); hsize += (2 * sizeof(TransactionId));
newlen = record->xl_len - hsize; newlen = record->xl_len - hsize;
memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapUpdate, SizeOfHeapHeader); memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapUpdate, SizeOfHeapHeader);
memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits), memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
(char*)xlrec + hsize, newlen); (char *) xlrec + hsize, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits); newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf; htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid; htup->t_oid = xlhdr.t_oid;
@ -2568,9 +2575,9 @@ newsame:;
if (move) if (move)
{ {
hsize = SizeOfHeapUpdate + SizeOfHeapHeader; hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
memcpy(&(htup->t_xmax), (char*)xlrec + hsize, sizeof(TransactionId)); memcpy(&(htup->t_xmax), (char *) xlrec + hsize, sizeof(TransactionId));
memcpy(&(htup->t_xmin), memcpy(&(htup->t_xmin),
(char*)xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId)); (char *) xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId));
TransactionIdStore(record->xl_xid, (TransactionId *) &(htup->t_cmin)); TransactionIdStore(record->xl_xid, (TransactionId *) &(htup->t_cmin));
htup->t_infomask = xlhdr.mask; htup->t_infomask = xlhdr.mask;
htup->t_infomask &= ~(HEAP_XMIN_COMMITTED | htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
@ -2585,7 +2592,7 @@ newsame:;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask; htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
} }
offnum = PageAddItem(page, (Item)htup, newlen, offnum, offnum = PageAddItem(page, (Item) htup, newlen, offnum,
LP_USED | OverwritePageMode); LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber) if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_update_redo: failed to add tuple"); elog(STOP, "heap_update_redo: failed to add tuple");
@ -2596,7 +2603,8 @@ newsame:;
} }
/* undo */ /* undo */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */ if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_update_undo: bad new tuple page LSN"); elog(STOP, "heap_update_undo: bad new tuple page LSN");
elog(STOP, "heap_update_undo: unimplemented"); elog(STOP, "heap_update_undo: unimplemented");
@ -2606,7 +2614,7 @@ newsame:;
static void static void
_heap_unlock_tuple(void *data) _heap_unlock_tuple(void *data)
{ {
xl_heaptid *xltid = (xl_heaptid*) data; xl_heaptid *xltid = (xl_heaptid *) data;
Relation reln = XLogOpenRelation(false, RM_HEAP_ID, xltid->node); Relation reln = XLogOpenRelation(false, RM_HEAP_ID, xltid->node);
Buffer buffer; Buffer buffer;
Page page; Page page;
@ -2645,7 +2653,8 @@ _heap_unlock_tuple(void *data)
return; return;
} }
void heap_redo(XLogRecPtr lsn, XLogRecord *record) void
heap_redo(XLogRecPtr lsn, XLogRecord *record)
{ {
uint8 info = record->xl_info & ~XLR_INFO_MASK; uint8 info = record->xl_info & ~XLR_INFO_MASK;
@ -2664,7 +2673,8 @@ void heap_redo(XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_redo: unknown op code %u", info); elog(STOP, "heap_redo: unknown op code %u", info);
} }
void heap_undo(XLogRecPtr lsn, XLogRecord *record) void
heap_undo(XLogRecPtr lsn, XLogRecord *record)
{ {
uint8 info = record->xl_info & ~XLR_INFO_MASK; uint8 info = record->xl_info & ~XLR_INFO_MASK;
@ -2693,26 +2703,29 @@ out_target(char *buf, xl_heaptid *target)
} }
void void
heap_desc(char *buf, uint8 xl_info, char* rec) heap_desc(char *buf, uint8 xl_info, char *rec)
{ {
uint8 info = xl_info & ~XLR_INFO_MASK; uint8 info = xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK; info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT) if (info == XLOG_HEAP_INSERT)
{ {
xl_heap_insert *xlrec = (xl_heap_insert*) rec; xl_heap_insert *xlrec = (xl_heap_insert *) rec;
strcat(buf, "insert: "); strcat(buf, "insert: ");
out_target(buf, &(xlrec->target)); out_target(buf, &(xlrec->target));
} }
else if (info == XLOG_HEAP_DELETE) else if (info == XLOG_HEAP_DELETE)
{ {
xl_heap_delete *xlrec = (xl_heap_delete*) rec; xl_heap_delete *xlrec = (xl_heap_delete *) rec;
strcat(buf, "delete: "); strcat(buf, "delete: ");
out_target(buf, &(xlrec->target)); out_target(buf, &(xlrec->target));
} }
else if (info == XLOG_HEAP_UPDATE || info == XLOG_HEAP_MOVE) else if (info == XLOG_HEAP_UPDATE || info == XLOG_HEAP_MOVE)
{ {
xl_heap_update *xlrec = (xl_heap_update*) rec; xl_heap_update *xlrec = (xl_heap_update *) rec;
if (info == XLOG_HEAP_UPDATE) if (info == XLOG_HEAP_UPDATE)
strcat(buf, "update: "); strcat(buf, "update: ");
else else
@ -2724,7 +2737,8 @@ heap_desc(char *buf, uint8 xl_info, char* rec)
} }
else if (info == XLOG_HEAP_CLEAN) else if (info == XLOG_HEAP_CLEAN)
{ {
xl_heap_clean *xlrec = (xl_heap_clean*) rec; xl_heap_clean *xlrec = (xl_heap_clean *) rec;
sprintf(buf + strlen(buf), "clean: node %u/%u; blk %u", sprintf(buf + strlen(buf), "clean: node %u/%u; blk %u",
xlrec->node.tblNode, xlrec->node.relNode, xlrec->block); xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
} }
View File
@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Id: hio.c,v 1.35 2001/01/24 19:42:48 momjian Exp $ * $Id: hio.c,v 1.36 2001/03/22 03:59:13 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -92,7 +92,7 @@ RelationGetBufferForTuple(Relation relation, Size len)
*/ */
if (len > MaxTupleSize) if (len > MaxTupleSize)
elog(ERROR, "Tuple is too big: size %lu, max size %ld", elog(ERROR, "Tuple is too big: size %lu, max size %ld",
(unsigned long)len, MaxTupleSize); (unsigned long) len, MaxTupleSize);
if (!relation->rd_myxactonly) if (!relation->rd_myxactonly)
LockPage(relation, 0, ExclusiveLock); LockPage(relation, 0, ExclusiveLock);
@ -140,13 +140,13 @@ RelationGetBufferForTuple(Relation relation, Size len)
{ {
/* We should not get here given the test at the top */ /* We should not get here given the test at the top */
elog(STOP, "Tuple is too big: size %lu", elog(STOP, "Tuple is too big: size %lu",
(unsigned long)len); (unsigned long) len);
} }
} }
if (!relation->rd_myxactonly) if (!relation->rd_myxactonly)
UnlockPage(relation, 0, ExclusiveLock); UnlockPage(relation, 0, ExclusiveLock);
return(buffer); return (buffer);
} }
View File
@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.17 2001/02/15 20:57:01 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.18 2001/03/22 03:59:13 momjian Exp $
* *
* *
* INTERFACE ROUTINES * INTERFACE ROUTINES
@ -124,11 +124,11 @@ heap_tuple_untoast_attr(varattrib *attr)
varattrib *tmp; varattrib *tmp;
tmp = toast_fetch_datum(attr); tmp = toast_fetch_datum(attr);
result = (varattrib *)palloc(attr->va_content.va_external.va_rawsize result = (varattrib *) palloc(attr->va_content.va_external.va_rawsize
+ VARHDRSZ); + VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_external.va_rawsize VARATT_SIZEP(result) = attr->va_content.va_external.va_rawsize
+ VARHDRSZ; + VARHDRSZ;
pglz_decompress((PGLZ_Header *)tmp, VARATT_DATA(result)); pglz_decompress((PGLZ_Header *) tmp, VARATT_DATA(result));
pfree(tmp); pfree(tmp);
} }
@ -147,11 +147,11 @@ heap_tuple_untoast_attr(varattrib *attr)
* This is a compressed value inside of the main tuple * This is a compressed value inside of the main tuple
* ---------- * ----------
*/ */
result = (varattrib *)palloc(attr->va_content.va_compressed.va_rawsize result = (varattrib *) palloc(attr->va_content.va_compressed.va_rawsize
+ VARHDRSZ); + VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_compressed.va_rawsize VARATT_SIZEP(result) = attr->va_content.va_compressed.va_rawsize
+ VARHDRSZ; + VARHDRSZ;
pglz_decompress((PGLZ_Header *)attr, VARATT_DATA(result)); pglz_decompress((PGLZ_Header *) attr, VARATT_DATA(result));
} }
else else
/* ---------- /* ----------
@ -270,11 +270,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* For UPDATE get the old and new values of this attribute * For UPDATE get the old and new values of this attribute
* ---------- * ----------
*/ */
old_value = (varattrib *)DatumGetPointer( old_value = (varattrib *) DatumGetPointer(
heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull)); heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
toast_values[i] = toast_values[i] =
heap_getattr(newtup, i + 1, tupleDesc, &new_isnull); heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
new_value = (varattrib *)DatumGetPointer(toast_values[i]); new_value = (varattrib *) DatumGetPointer(toast_values[i]);
/* ---------- /* ----------
* If the old value is an external stored one, check if it * If the old value is an external stored one, check if it
@ -356,7 +356,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i]))) if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i])))
{ {
toast_values[i] = PointerGetDatum(heap_tuple_untoast_attr( toast_values[i] = PointerGetDatum(heap_tuple_untoast_attr(
(varattrib *)DatumGetPointer(toast_values[i]))); (varattrib *) DatumGetPointer(toast_values[i])));
toast_free[i] = true; toast_free[i] = true;
need_change = true; need_change = true;
need_free = true; need_free = true;
@ -448,7 +448,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
} }
else else
{ {
/* incompressible data, ignore on subsequent compression passes */
/*
* incompressible data, ignore on subsequent compression
* passes
*/
toast_action[i] = 'x'; toast_action[i] = 'x';
} }
} }
@ -565,7 +569,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
} }
else else
{ {
/* incompressible data, ignore on subsequent compression passes */
/*
* incompressible data, ignore on subsequent compression
* passes
*/
toast_action[i] = 'x'; toast_action[i] = 'x';
} }
} }
@ -662,10 +670,10 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ---------- * ----------
*/ */
memcpy(new_data, newtup->t_data, newtup->t_data->t_hoff); memcpy(new_data, newtup->t_data, newtup->t_data->t_hoff);
newtup->t_data = (HeapTupleHeader)new_data; newtup->t_data = (HeapTupleHeader) new_data;
newtup->t_len = new_len; newtup->t_len = new_len;
DataFill((char *)(MAXALIGN((long)new_data + DataFill((char *) (MAXALIGN((long) new_data +
offsetof(HeapTupleHeaderData, t_bits) + offsetof(HeapTupleHeaderData, t_bits) +
((has_nulls) ? BITMAPLEN(numAttrs) : 0))), ((has_nulls) ? BITMAPLEN(numAttrs) : 0))),
tupleDesc, tupleDesc,
@ -679,7 +687,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* free the memory from the previous run * free the memory from the previous run
* ---------- * ----------
*/ */
if ((char *)olddata != ((char *)newtup + HEAPTUPLESIZE)) if ((char *) olddata != ((char *) newtup + HEAPTUPLESIZE))
pfree(olddata); pfree(olddata);
/* ---------- /* ----------
@ -772,14 +780,14 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
* Create the varattrib reference * Create the varattrib reference
* ---------- * ----------
*/ */
result = (varattrib *)palloc(sizeof(varattrib)); result = (varattrib *) palloc(sizeof(varattrib));
result->va_header = sizeof(varattrib) | VARATT_FLAG_EXTERNAL; result->va_header = sizeof(varattrib) | VARATT_FLAG_EXTERNAL;
if (VARATT_IS_COMPRESSED(value)) if (VARATT_IS_COMPRESSED(value))
{ {
result->va_header |= VARATT_FLAG_COMPRESSED; result->va_header |= VARATT_FLAG_COMPRESSED;
result->va_content.va_external.va_rawsize = result->va_content.va_external.va_rawsize =
((varattrib *)value)->va_content.va_compressed.va_rawsize; ((varattrib *) value)->va_content.va_compressed.va_rawsize;
} }
else else
result->va_content.va_external.va_rawsize = VARATT_SIZE(value); result->va_content.va_external.va_rawsize = VARATT_SIZE(value);
@ -888,7 +896,7 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
static void static void
toast_delete_datum(Relation rel, Datum value) toast_delete_datum(Relation rel, Datum value)
{ {
register varattrib *attr = (varattrib *)value; register varattrib *attr = (varattrib *) value;
Relation toastrel; Relation toastrel;
Relation toastidx; Relation toastidx;
ScanKeyData toastkey; ScanKeyData toastkey;
@ -990,7 +998,7 @@ toast_fetch_datum(varattrib *attr)
memset(chunks_found, 0, numchunks); memset(chunks_found, 0, numchunks);
memset(chunks_expected, 1, numchunks); memset(chunks_expected, 1, numchunks);
result = (varattrib *)palloc(ressize + VARHDRSZ); result = (varattrib *) palloc(ressize + VARHDRSZ);
VARATT_SIZEP(result) = ressize + VARHDRSZ; VARATT_SIZEP(result) = ressize + VARHDRSZ;
if (VARATT_IS_COMPRESSED(attr)) if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED; VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
@ -1049,7 +1057,7 @@ toast_fetch_datum(varattrib *attr)
elog(ERROR, "unexpected chunk number %d for toast value %d", elog(ERROR, "unexpected chunk number %d for toast value %d",
residx, residx,
attr->va_content.va_external.va_valueid); attr->va_content.va_external.va_valueid);
if (residx < numchunks-1) if (residx < numchunks - 1)
{ {
if (chunksize != TOAST_MAX_CHUNK_SIZE) if (chunksize != TOAST_MAX_CHUNK_SIZE)
elog(ERROR, "unexpected chunk size %d in chunk %d for toast value %d", elog(ERROR, "unexpected chunk size %d in chunk %d for toast value %d",
@ -1072,7 +1080,7 @@ toast_fetch_datum(varattrib *attr)
* Copy the data into proper place in our result * Copy the data into proper place in our result
* ---------- * ----------
*/ */
memcpy(((char *)VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE, memcpy(((char *) VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
VARATT_DATA(chunk), VARATT_DATA(chunk),
chunksize); chunksize);
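
The toast_fetch_datum() hunks above validate each chunk's sequence number and size, then copy it into the result at residx * TOAST_MAX_CHUNK_SIZE. A minimal standalone sketch of that reassembly pattern follows; the chunk size and data are made up and none of this is the real TOAST on-disk layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical chunk size; the real value comes from the toast machinery. */
#define CHUNK_SIZE 8

/*
 * Reassemble a value from fixed-size chunks: chunk i lands at offset
 * i * CHUNK_SIZE, and only the last chunk may be shorter.
 */
static char *
reassemble(const char *const *chunks, const size_t *lens, int nchunks,
		   size_t rawsize)
{
	char	   *result = malloc(rawsize);
	int			i;

	if (result == NULL)
		return NULL;

	for (i = 0; i < nchunks; i++)
	{
		/* every chunk except the last must be full-sized */
		if (i < nchunks - 1 && lens[i] != CHUNK_SIZE)
		{
			free(result);
			return NULL;
		}
		memcpy(result + (size_t) i * CHUNK_SIZE, chunks[i], lens[i]);
	}
	return result;
}

int
main(void)
{
	const char *chunks[] = {"abcdefgh", "ijklmnop", "qrs"};
	size_t		lens[] = {8, 8, 3};
	char	   *value = reassemble(chunks, lens, 3, 19);

	if (value != NULL)
	{
		printf("%.19s\n", value);
		free(value);
	}
	return 0;
}
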
View File
@ -9,7 +9,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.48 2001/01/24 19:42:48 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.49 2001/03/22 03:59:13 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -239,7 +239,7 @@ StrategyTermEvaluate(StrategyTerm term,
break; break;
case SK_NEGATE: case SK_NEGATE:
result = ! DatumGetBool(FunctionCall2(&entry->sk_func, result = !DatumGetBool(FunctionCall2(&entry->sk_func,
left, right)); left, right));
break; break;
@ -249,7 +249,7 @@ StrategyTermEvaluate(StrategyTerm term,
break; break;
case SK_NEGATE | SK_COMMUTE: case SK_NEGATE | SK_COMMUTE:
result = ! DatumGetBool(FunctionCall2(&entry->sk_func, result = !DatumGetBool(FunctionCall2(&entry->sk_func,
right, left)); right, left));
break; break;
@ -263,6 +263,7 @@ StrategyTermEvaluate(StrategyTerm term,
return result; return result;
} }
#endif #endif
/* ---------------- /* ----------------
@ -465,6 +466,7 @@ RelationInvokeStrategy(Relation relation,
} }
#endif #endif
/* ---------------- /* ----------------
@ -597,9 +599,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
} }
if (cachesearch) if (cachesearch)
{
ReleaseSysCache(tuple); ReleaseSysCache(tuple);
}
else else
{ {
heap_endscan(scan); heap_endscan(scan);
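
The StrategyTermEvaluate() hunk above dispatches on SK_NEGATE and SK_COMMUTE: commuted entries call the support function with the operands swapped, negated entries invert the boolean result. Below is a standalone sketch of the same idea that collapses the four-way switch into flag composition; the flag names and comparison callback are hypothetical, not the real ScanKey API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag bits mirroring the cases handled in the hunk above. */
#define FLAG_NEGATE		0x1
#define FLAG_COMMUTE	0x2

typedef bool (*cmp_fn) (int left, int right);

/*
 * Evaluate a two-argument predicate under the negate/commute flags:
 * COMMUTE swaps the operands, NEGATE inverts the result.
 */
static bool
evaluate(cmp_fn fn, unsigned flags, int left, int right)
{
	bool		result;

	if (flags & FLAG_COMMUTE)
		result = fn(right, left);
	else
		result = fn(left, right);

	if (flags & FLAG_NEGATE)
		result = !result;

	return result;
}

static bool
less_than(int a, int b)
{
	return a < b;
}

int
main(void)
{
	/* negated and commuted "3 < 5": equivalent to !(5 < 3), i.e. true */
	printf("%d\n", (int) evaluate(less_than, FLAG_NEGATE | FLAG_COMMUTE, 3, 5));
	return 0;
}
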
View File
@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.40 2001/01/24 19:42:48 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.41 2001/03/22 03:59:14 momjian Exp $
* *
* NOTES * NOTES
* *
@ -236,9 +236,10 @@ bttextcmp(PG_FUNCTION_ARGS)
if (res == 0 && VARSIZE(a) != VARSIZE(b)) if (res == 0 && VARSIZE(a) != VARSIZE(b))
{ {
/* /*
* The two strings are the same in the first len bytes, * The two strings are the same in the first len bytes, and they
* and they are of different lengths. * are of different lengths.
*/ */
if (VARSIZE(a) < VARSIZE(b)) if (VARSIZE(a) < VARSIZE(b))
res = -1; res = -1;
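
The bttextcmp() hunk above breaks ties between values that match on their common prefix by letting the shorter one sort first. A self-contained sketch of that compare-prefix-then-length rule on plain length-counted buffers (not the varlena layout):

#include <stdio.h>
#include <string.h>

/*
 * Compare two length-counted byte strings: compare the common prefix
 * first, then break ties on length so the shorter value sorts first.
 */
static int
varlen_cmp(const char *a, size_t alen, const char *b, size_t blen)
{
	size_t		minlen = (alen < blen) ? alen : blen;
	int			res = memcmp(a, b, minlen);

	if (res == 0 && alen != blen)
		res = (alen < blen) ? -1 : 1;

	return res;
}

int
main(void)
{
	printf("%d\n", varlen_cmp("abc", 3, "abcd", 4));	/* -1: equal prefix, shorter first */
	printf("%d\n", varlen_cmp("abd", 3, "abcd", 4));	/* positive: 'd' > 'c' decides */
	return 0;
}
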
View File
@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.81 2001/02/07 23:35:33 vadim Exp $ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.82 2001/03/22 03:59:14 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -114,8 +114,8 @@ top:
buf = _bt_moveright(rel, buf, natts, itup_scankey, BT_WRITE); buf = _bt_moveright(rel, buf, natts, itup_scankey, BT_WRITE);
/* /*
* If we're not allowing duplicates, make sure the key isn't * If we're not allowing duplicates, make sure the key isn't already
* already in the index. XXX this belongs somewhere else, likely * in the index. XXX this belongs somewhere else, likely
*/ */
if (index_is_unique) if (index_is_unique)
{ {
@ -171,8 +171,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
maxoff = PageGetMaxOffsetNumber(page); maxoff = PageGetMaxOffsetNumber(page);
/* /*
* Find first item >= proposed new item. Note we could also get * Find first item >= proposed new item. Note we could also get a
* a pointer to end-of-page here. * pointer to end-of-page here.
*/ */
offset = _bt_binsrch(rel, buf, natts, itup_scankey); offset = _bt_binsrch(rel, buf, natts, itup_scankey);
@ -187,24 +187,24 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
BlockNumber nblkno; BlockNumber nblkno;
/* /*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's how we
* how we handling NULLs - and so we must not use _bt_compare * handling NULLs - and so we must not use _bt_compare in real
* in real comparison, but only for ordering/finding items on * comparison, but only for ordering/finding items on pages. -
* pages. - vadim 03/24/97 * vadim 03/24/97
* *
* make sure the offset points to an actual key * make sure the offset points to an actual key before trying to
* before trying to compare it... * compare it...
*/ */
if (offset <= maxoff) if (offset <= maxoff)
{ {
if (! _bt_isequal(itupdesc, page, offset, natts, itup_scankey)) if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */ break; /* we're past all the equal tuples */
/* /*
* Have to check is inserted heap tuple deleted one (i.e. * Have to check is inserted heap tuple deleted one (i.e. just
* just moved to another place by vacuum)! We only need to * moved to another place by vacuum)! We only need to do this
* do this once, but don't want to do it at all unless * once, but don't want to do it at all unless we see equal
* we see equal tuples, so as not to slow down unequal case. * tuples, so as not to slow down unequal case.
*/ */
if (chtup) if (chtup)
{ {
@ -238,6 +238,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/* Tell _bt_doinsert to wait... */ /* Tell _bt_doinsert to wait... */
return xwait; return xwait;
} }
/* /*
* Otherwise we have a definite conflict. * Otherwise we have a definite conflict.
*/ */
@ -358,16 +359,14 @@ _bt_insertonpg(Relation rel,
*/ */
if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData)) if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %lu", elog(ERROR, "btree: index item size %lu exceeds maximum %lu",
(unsigned long)itemsz, (unsigned long) itemsz,
(PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData)); (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
/* /*
* Determine exactly where new item will go. * Determine exactly where new item will go.
*/ */
if (afteritem > 0) if (afteritem > 0)
{
newitemoff = afteritem + 1; newitemoff = afteritem + 1;
}
else else
{ {
/*---------- /*----------
@ -404,10 +403,11 @@ _bt_insertonpg(Relation rel,
lpageop = (BTPageOpaque) PageGetSpecialPointer(page); lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
movedright = true; movedright = true;
} }
/* /*
* Now we are on the right page, so find the insert position. * Now we are on the right page, so find the insert position. If
* If we moved right at all, we know we should insert at the * we moved right at all, we know we should insert at the start of
* start of the page, else must find the position by searching. * the page, else must find the position by searching.
*/ */
if (movedright) if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop); newitemoff = P_FIRSTDATAKEY(lpageop);
@ -418,9 +418,9 @@ _bt_insertonpg(Relation rel,
/* /*
* Do we need to split the page to fit the item on it? * Do we need to split the page to fit the item on it?
* *
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
* result, so this comparison is correct even though we appear to * so this comparison is correct even though we appear to be
* be accounting only for the item and not for its line pointer. * accounting only for the item and not for its line pointer.
*/ */
if (PageGetFreeSpace(page) < itemsz) if (PageGetFreeSpace(page) < itemsz)
{ {
@ -489,10 +489,11 @@ _bt_insertonpg(Relation rel,
if (stack == (BTStack) NULL) if (stack == (BTStack) NULL)
{ {
elog(DEBUG, "btree: concurrent ROOT page split"); elog(DEBUG, "btree: concurrent ROOT page split");
/* /*
* If root page splitter failed to create new root page * If root page splitter failed to create new root page
* then old root' btpo_parent still points to metapage. * then old root' btpo_parent still points to metapage. We
* We have to fix root page in this case. * have to fix root page in this case.
*/ */
if (BTreeInvalidParent(lpageop)) if (BTreeInvalidParent(lpageop))
{ {
@ -531,9 +532,9 @@ _bt_insertonpg(Relation rel,
* item! We want to find parent pointing to where we are, * item! We want to find parent pointing to where we are,
* right ? - vadim 05/27/97 * right ? - vadim 05/27/97
* *
* Interestingly, this means we didn't *really* need to stack * Interestingly, this means we didn't *really* need to stack the
* the parent key at all; all we really care about is the * parent key at all; all we really care about is the saved
* saved block and offset as a starting point for our search... * block and offset as a starting point for our search...
*/ */
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid), ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY); bknum, P_HIKEY);
@ -598,10 +599,11 @@ _bt_insertuple(Relation rel, Buffer buf,
XLogRecPtr recptr; XLogRecPtr recptr;
XLogRecData rdata[2]; XLogRecData rdata[2];
BTItemData truncitem; BTItemData truncitem;
xlrec.target.node = rel->rd_node; xlrec.target.node = rel->rd_node;
ItemPointerSet(&(xlrec.target.tid), BufferGetBlockNumber(buf), newitemoff); ItemPointerSet(&(xlrec.target.tid), BufferGetBlockNumber(buf), newitemoff);
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeInsert; rdata[0].len = SizeOfBtreeInsert;
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);
@ -610,12 +612,12 @@ _bt_insertuple(Relation rel, Buffer buf,
{ {
truncitem = *btitem; truncitem = *btitem;
truncitem.bti_itup.t_info = sizeof(BTItemData); truncitem.bti_itup.t_info = sizeof(BTItemData);
rdata[1].data = (char*)&truncitem; rdata[1].data = (char *) &truncitem;
rdata[1].len = sizeof(BTItemData); rdata[1].len = sizeof(BTItemData);
} }
else else
{ {
rdata[1].data = (char*)btitem; rdata[1].data = (char *) btitem;
rdata[1].len = IndexTupleDSize(btitem->bti_itup) + rdata[1].len = IndexTupleDSize(btitem->bti_itup) +
(sizeof(BTItemData) - sizeof(IndexTupleData)); (sizeof(BTItemData) - sizeof(IndexTupleData));
} }
@ -700,8 +702,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
/* /*
* If the page we're splitting is not the rightmost page at its level * If the page we're splitting is not the rightmost page at its level
* in the tree, then the first entry on the page is the high key * in the tree, then the first entry on the page is the high key for
* for the page. We need to copy that to the right half. Otherwise * the page. We need to copy that to the right half. Otherwise
* (meaning the rightmost page case), all the items on the right half * (meaning the rightmost page case), all the items on the right half
* will be user data. * will be user data.
*/ */
@ -812,11 +814,11 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
} }
/* /*
* We have to grab the right sibling (if any) and fix the prev * We have to grab the right sibling (if any) and fix the prev pointer
* pointer there. We are guaranteed that this is deadlock-free * there. We are guaranteed that this is deadlock-free since no other
* since no other writer will be holding a lock on that page * writer will be holding a lock on that page and trying to move left,
* and trying to move left, and all readers release locks on a page * and all readers release locks on a page before trying to fetch its
* before trying to fetch its neighbors. * neighbors.
*/ */
if (!P_RIGHTMOST(ropaque)) if (!P_RIGHTMOST(ropaque))
@ -856,31 +858,33 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
BlockIdSet(&(xlrec.parentblk), lopaque->btpo_parent); BlockIdSet(&(xlrec.parentblk), lopaque->btpo_parent);
BlockIdSet(&(xlrec.leftblk), lopaque->btpo_prev); BlockIdSet(&(xlrec.leftblk), lopaque->btpo_prev);
BlockIdSet(&(xlrec.rightblk), ropaque->btpo_next); BlockIdSet(&(xlrec.rightblk), ropaque->btpo_next);
/* /*
* Dirrect access to page is not good but faster - we should * Dirrect access to page is not good but faster - we should
* implement some new func in page API. * implement some new func in page API.
*/ */
xlrec.leftlen = ((PageHeader)leftpage)->pd_special - xlrec.leftlen = ((PageHeader) leftpage)->pd_special -
((PageHeader)leftpage)->pd_upper; ((PageHeader) leftpage)->pd_upper;
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeSplit; rdata[0].len = SizeOfBtreeSplit;
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);
rdata[1].buffer = InvalidBuffer; rdata[1].buffer = InvalidBuffer;
rdata[1].data = (char*)leftpage + ((PageHeader)leftpage)->pd_upper; rdata[1].data = (char *) leftpage + ((PageHeader) leftpage)->pd_upper;
rdata[1].len = xlrec.leftlen; rdata[1].len = xlrec.leftlen;
rdata[1].next = &(rdata[2]); rdata[1].next = &(rdata[2]);
rdata[2].buffer = InvalidBuffer; rdata[2].buffer = InvalidBuffer;
rdata[2].data = (char*)rightpage + ((PageHeader)rightpage)->pd_upper; rdata[2].data = (char *) rightpage + ((PageHeader) rightpage)->pd_upper;
rdata[2].len = ((PageHeader)rightpage)->pd_special - rdata[2].len = ((PageHeader) rightpage)->pd_special -
((PageHeader)rightpage)->pd_upper; ((PageHeader) rightpage)->pd_upper;
rdata[2].next = NULL; rdata[2].next = NULL;
if (!P_RIGHTMOST(ropaque)) if (!P_RIGHTMOST(ropaque))
{ {
BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage); BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
sopaque->btpo_prev = BufferGetBlockNumber(rbuf); sopaque->btpo_prev = BufferGetBlockNumber(rbuf);
rdata[2].next = &(rdata[3]); rdata[2].next = &(rdata[3]);
@ -968,23 +972,23 @@ _bt_findsplitloc(Relation rel,
/* Passed-in newitemsz is MAXALIGNED but does not include line pointer */ /* Passed-in newitemsz is MAXALIGNED but does not include line pointer */
newitemsz += sizeof(ItemIdData); newitemsz += sizeof(ItemIdData);
state.newitemsz = newitemsz; state.newitemsz = newitemsz;
state.non_leaf = ! P_ISLEAF(opaque); state.non_leaf = !P_ISLEAF(opaque);
state.have_split = false; state.have_split = false;
/* Total free space available on a btree page, after fixed overhead */ /* Total free space available on a btree page, after fixed overhead */
leftspace = rightspace = leftspace = rightspace =
PageGetPageSize(page) - sizeof(PageHeaderData) - PageGetPageSize(page) - sizeof(PageHeaderData) -
MAXALIGN(sizeof(BTPageOpaqueData)) MAXALIGN(sizeof(BTPageOpaqueData))
+ sizeof(ItemIdData); +sizeof(ItemIdData);
/* /*
* Finding the best possible split would require checking all the possible * Finding the best possible split would require checking all the
* split points, because of the high-key and left-key special cases. * possible split points, because of the high-key and left-key special
* That's probably more work than it's worth; instead, stop as soon as * cases. That's probably more work than it's worth; instead, stop as
* we find a "good-enough" split, where good-enough is defined as an * soon as we find a "good-enough" split, where good-enough is defined
* imbalance in free space of no more than pagesize/16 (arbitrary...) * as an imbalance in free space of no more than pagesize/16
* This should let us stop near the middle on most pages, instead of * (arbitrary...) This should let us stop near the middle on most
* plowing to the end. * pages, instead of plowing to the end.
*/ */
goodenough = leftspace / 16; goodenough = leftspace / 16;
@ -1024,6 +1028,7 @@ _bt_findsplitloc(Relation rel,
*/ */
leftfree = leftspace - dataitemstoleft - (int) itemsz; leftfree = leftspace - dataitemstoleft - (int) itemsz;
rightfree = rightspace - (dataitemtotal - dataitemstoleft); rightfree = rightspace - (dataitemtotal - dataitemstoleft);
/* /*
* Will the new item go to left or right of split? * Will the new item go to left or right of split?
*/ */
@ -1051,10 +1056,10 @@ _bt_findsplitloc(Relation rel,
} }
/* /*
* I believe it is not possible to fail to find a feasible split, * I believe it is not possible to fail to find a feasible split, but
* but just in case ... * just in case ...
*/ */
if (! state.have_split) if (!state.have_split)
elog(FATAL, "_bt_findsplitloc: can't find a feasible split point for %s", elog(FATAL, "_bt_findsplitloc: can't find a feasible split point for %s",
RelationGetRelationName(rel)); RelationGetRelationName(rel));
@ -1071,6 +1076,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
int leftfree, int rightfree, int leftfree, int rightfree,
bool newitemonleft, Size firstrightitemsz) bool newitemonleft, Size firstrightitemsz)
{ {
/* /*
* Account for the new item on whichever side it is to be put. * Account for the new item on whichever side it is to be put.
*/ */
@ -1078,13 +1084,15 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
leftfree -= (int) state->newitemsz; leftfree -= (int) state->newitemsz;
else else
rightfree -= (int) state->newitemsz; rightfree -= (int) state->newitemsz;
/* /*
* If we are not on the leaf level, we will be able to discard the * If we are not on the leaf level, we will be able to discard the key
* key data from the first item that winds up on the right page. * data from the first item that winds up on the right page.
*/ */
if (state->non_leaf) if (state->non_leaf)
rightfree += (int) firstrightitemsz - rightfree += (int) firstrightitemsz -
(int) (MAXALIGN(sizeof(BTItemData)) + sizeof(ItemIdData)); (int) (MAXALIGN(sizeof(BTItemData)) + sizeof(ItemIdData));
/* /*
* If feasible split point, remember best delta. * If feasible split point, remember best delta.
*/ */
@ -1134,10 +1142,11 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
maxoff = PageGetMaxOffsetNumber(page); maxoff = PageGetMaxOffsetNumber(page);
start = stack->bts_offset; start = stack->bts_offset;
/* /*
* _bt_insertonpg set bts_offset to InvalidOffsetNumber in the * _bt_insertonpg set bts_offset to InvalidOffsetNumber in the case of
* case of concurrent ROOT page split. Also, watch out for * concurrent ROOT page split. Also, watch out for possibility that
* possibility that page has a high key now when it didn't before. * page has a high key now when it didn't before.
*/ */
if (start < P_FIRSTDATAKEY(opaque)) if (start < P_FIRSTDATAKEY(opaque))
start = P_FIRSTDATAKEY(opaque); start = P_FIRSTDATAKEY(opaque);
@ -1159,11 +1168,15 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
return buf; return buf;
} }
} }
/* by here, the item we're looking for moved right at least one page */
/*
* by here, the item we're looking for moved right at least one
* page
*/
if (P_RIGHTMOST(opaque)) if (P_RIGHTMOST(opaque))
{ {
_bt_relbuf(rel, buf, access); _bt_relbuf(rel, buf, access);
return(InvalidBuffer); return (InvalidBuffer);
} }
blkno = opaque->btpo_next; blkno = opaque->btpo_next;
@ -1236,9 +1249,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
rpage = BufferGetPage(rbuf); rpage = BufferGetPage(rbuf);
/* /*
* Make sure pages in old root level have valid parent links --- we will * Make sure pages in old root level have valid parent links --- we
* need this in _bt_insertonpg() if a concurrent root split happens (see * will need this in _bt_insertonpg() if a concurrent root split
* README). * happens (see README).
*/ */
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent = ((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent = ((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
@ -1264,8 +1277,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
pfree(new_item); pfree(new_item);
/* /*
* Create downlink item for right page. The key for it is obtained from * Create downlink item for right page. The key for it is obtained
* the "high key" position in the left page. * from the "high key" position in the left page.
*/ */
itemid = PageGetItemId(lpage, P_HIKEY); itemid = PageGetItemId(lpage, P_HIKEY);
itemsz = ItemIdGetLength(itemid); itemsz = ItemIdGetLength(itemid);
@ -1293,7 +1306,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
xlrec.level = metad->btm_level; xlrec.level = metad->btm_level;
BlockIdSet(&(xlrec.rootblk), rootblknum); BlockIdSet(&(xlrec.rootblk), rootblknum);
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeNewroot; rdata[0].len = SizeOfBtreeNewroot;
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);
@ -1302,9 +1315,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
* implement some new func in page API. * implement some new func in page API.
*/ */
rdata[1].buffer = InvalidBuffer; rdata[1].buffer = InvalidBuffer;
rdata[1].data = (char*)rootpage + ((PageHeader) rootpage)->pd_upper; rdata[1].data = (char *) rootpage + ((PageHeader) rootpage)->pd_upper;
rdata[1].len = ((PageHeader)rootpage)->pd_special - rdata[1].len = ((PageHeader) rootpage)->pd_special -
((PageHeader)rootpage)->pd_upper; ((PageHeader) rootpage)->pd_upper;
rdata[1].next = NULL; rdata[1].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, rdata); recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, rdata);
@ -1325,7 +1338,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
/* write and let go of metapage buffer */ /* write and let go of metapage buffer */
_bt_wrtbuf(rel, metabuf); _bt_wrtbuf(rel, metabuf);
return(rootbuf); return (rootbuf);
} }
/* /*
@ -1346,17 +1359,24 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
Page oldrootpage = BufferGetPage(oldrootbuf); Page oldrootpage = BufferGetPage(oldrootbuf);
BTPageOpaque oldrootopaque = (BTPageOpaque) BTPageOpaque oldrootopaque = (BTPageOpaque)
PageGetSpecialPointer(oldrootpage); PageGetSpecialPointer(oldrootpage);
Buffer buf, leftbuf, rightbuf; Buffer buf,
Page page, leftpage, rightpage; leftbuf,
BTPageOpaque opaque, leftopaque, rightopaque; rightbuf;
Page page,
leftpage,
rightpage;
BTPageOpaque opaque,
leftopaque,
rightopaque;
OffsetNumber newitemoff; OffsetNumber newitemoff;
BTItem btitem, ritem; BTItem btitem,
ritem;
Size itemsz; Size itemsz;
if (! P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque)) if (!P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
elog(ERROR, "bt_fixroot: not valid old root page"); elog(ERROR, "bt_fixroot: not valid old root page");
/* Read right neighbor and create new root page*/ /* Read right neighbor and create new root page */
leftbuf = _bt_getbuf(rel, oldrootopaque->btpo_next, BT_WRITE); leftbuf = _bt_getbuf(rel, oldrootopaque->btpo_next, BT_WRITE);
leftpage = BufferGetPage(leftbuf); leftpage = BufferGetPage(leftbuf);
leftopaque = (BTPageOpaque) PageGetSpecialPointer(leftpage); leftopaque = (BTPageOpaque) PageGetSpecialPointer(leftpage);
@ -1377,26 +1397,26 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
* *
* If concurrent process will split one of pages on this level then it * If concurrent process will split one of pages on this level then it
* will see either btpo_parent == metablock or btpo_parent == rootblk. * will see either btpo_parent == metablock or btpo_parent == rootblk.
* In first case it will give up its locks and walk to the leftmost page * In first case it will give up its locks and walk to the leftmost
* (oldrootbuf) in _bt_fixup() - ie it will wait for us and let us * page (oldrootbuf) in _bt_fixup() - ie it will wait for us and let
* continue. In second case it will try to lock rootbuf keeping its locks * us continue. In second case it will try to lock rootbuf keeping its
* on buffers we already passed, also waiting for us. If we'll have to * locks on buffers we already passed, also waiting for us. If we'll
* unlock rootbuf (split it) and that process will have to split page * have to unlock rootbuf (split it) and that process will have to
* of new level we created (level of rootbuf) then it will wait while * split page of new level we created (level of rootbuf) then it will
* we create upper level. Etc. * wait while we create upper level. Etc.
*/ */
while(! P_RIGHTMOST(leftopaque)) while (!P_RIGHTMOST(leftopaque))
{ {
rightbuf = _bt_getbuf(rel, leftopaque->btpo_next, BT_WRITE); rightbuf = _bt_getbuf(rel, leftopaque->btpo_next, BT_WRITE);
rightpage = BufferGetPage(rightbuf); rightpage = BufferGetPage(rightbuf);
rightopaque = (BTPageOpaque) PageGetSpecialPointer(rightpage); rightopaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
/* /*
* Update LSN & StartUpID of child page buffer to ensure that * Update LSN & StartUpID of child page buffer to ensure that it
* it will be written on disk after flushing log record for new * will be written on disk after flushing log record for new root
* root creation. Unfortunately, for the moment (?) we do not * creation. Unfortunately, for the moment (?) we do not log this
* log this operation and so possibly break our rule to log entire * operation and so possibly break our rule to log entire page
* page content on first after checkpoint modification. * content on first after checkpoint modification.
*/ */
HOLD_INTERRUPTS(); HOLD_INTERRUPTS();
rightopaque->btpo_parent = rootblk; rightopaque->btpo_parent = rootblk;
@ -1450,10 +1470,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
/* /*
* Here we hold locks on old root buffer, new root buffer we've * Here we hold locks on old root buffer, new root buffer we've
* created with _bt_newroot() - rootbuf, - and buf we've used * created with _bt_newroot() - rootbuf, - and buf we've used for last
* for last insert ops - buf. If rootbuf != buf then we have to * insert ops - buf. If rootbuf != buf then we have to create at least
* create at least one more level. And if "release" is TRUE * one more level. And if "release" is TRUE then we give up
* then we give up oldrootbuf. * oldrootbuf.
*/ */
if (release) if (release)
_bt_wrtbuf(rel, oldrootbuf); _bt_wrtbuf(rel, oldrootbuf);
@ -1461,10 +1481,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
if (rootbuf != buf) if (rootbuf != buf)
{ {
_bt_wrtbuf(rel, buf); _bt_wrtbuf(rel, buf);
return(_bt_fixroot(rel, rootbuf, true)); return (_bt_fixroot(rel, rootbuf, true));
} }
return(rootbuf); return (rootbuf);
} }
/* /*
@ -1479,12 +1499,12 @@ _bt_fixtree(Relation rel, BlockNumber blkno)
BTPageOpaque opaque; BTPageOpaque opaque;
BlockNumber pblkno; BlockNumber pblkno;
for ( ; ; ) for (;;)
{ {
buf = _bt_getbuf(rel, blkno, BT_READ); buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf); page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page); opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (! P_LEFTMOST(opaque) || P_ISLEAF(opaque)) if (!P_LEFTMOST(opaque) || P_ISLEAF(opaque))
elog(ERROR, "bt_fixtree[%s]: invalid start page (need to recreate index)", RelationGetRelationName(rel)); elog(ERROR, "bt_fixtree[%s]: invalid start page (need to recreate index)", RelationGetRelationName(rel));
pblkno = opaque->btpo_parent; pblkno = opaque->btpo_parent;
@ -1543,7 +1563,8 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
Page cpage[3]; Page cpage[3];
BTPageOpaque copaque[3]; BTPageOpaque copaque[3];
BTItem btitem; BTItem btitem;
int cidx, i; int cidx,
i;
bool goodbye = false; bool goodbye = false;
char tbuf[BLCKSZ]; char tbuf[BLCKSZ];
@ -1552,7 +1573,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
memmove(tbuf, page, PageGetPageSize(page)); memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_READ); _bt_relbuf(rel, buf, BT_READ);
page = (Page)tbuf; page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page); opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Initialize first child data */ /* Initialize first child data */
@ -1564,20 +1585,21 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
cbuf[0] = _bt_getbuf(rel, cblkno[0], BT_READ); cbuf[0] = _bt_getbuf(rel, cblkno[0], BT_READ);
cpage[0] = BufferGetPage(cbuf[0]); cpage[0] = BufferGetPage(cbuf[0]);
copaque[0] = (BTPageOpaque) PageGetSpecialPointer(cpage[0]); copaque[0] = (BTPageOpaque) PageGetSpecialPointer(cpage[0]);
if (P_LEFTMOST(opaque) && ! P_LEFTMOST(copaque[0])) if (P_LEFTMOST(opaque) && !P_LEFTMOST(copaque[0]))
elog(ERROR, "bt_fixtlevel[%s]: non-leftmost child page of leftmost parent (need to recreate index)", RelationGetRelationName(rel)); elog(ERROR, "bt_fixtlevel[%s]: non-leftmost child page of leftmost parent (need to recreate index)", RelationGetRelationName(rel));
/* caller should take care and avoid this */ /* caller should take care and avoid this */
if (P_RIGHTMOST(copaque[0])) if (P_RIGHTMOST(copaque[0]))
elog(ERROR, "bt_fixtlevel[%s]: invalid start child (need to recreate index)", RelationGetRelationName(rel)); elog(ERROR, "bt_fixtlevel[%s]: invalid start child (need to recreate index)", RelationGetRelationName(rel));
for ( ; ; ) for (;;)
{ {
/* /*
* Read up to 2 more child pages and look for pointers * Read up to 2 more child pages and look for pointers to them in
* to them in *saved* parent page * *saved* parent page
*/ */
coff[1] = coff[2] = InvalidOffsetNumber; coff[1] = coff[2] = InvalidOffsetNumber;
for (cidx = 0; cidx < 2; ) for (cidx = 0; cidx < 2;)
{ {
cidx++; cidx++;
cblkno[cidx] = (copaque[cidx - 1])->btpo_next; cblkno[cidx] = (copaque[cidx - 1])->btpo_next;
@ -1649,7 +1671,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
continue; continue;
} }
/* Have to check next page ? */ /* Have to check next page ? */
if ((! P_RIGHTMOST(opaque)) && if ((!P_RIGHTMOST(opaque)) &&
coff[i - 1] == PageGetMaxOffsetNumber(page)) /* yes */ coff[i - 1] == PageGetMaxOffsetNumber(page)) /* yes */
{ {
newbuf = _bt_getbuf(rel, opaque->btpo_next, BT_WRITE); newbuf = _bt_getbuf(rel, opaque->btpo_next, BT_WRITE);
@ -1720,7 +1742,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
/* copy page with pointer to cblkno[cidx] to temp storage */ /* copy page with pointer to cblkno[cidx] to temp storage */
memmove(tbuf, page, PageGetPageSize(page)); memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_WRITE); _bt_relbuf(rel, buf, BT_WRITE);
page = (Page)tbuf; page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page); opaque = (BTPageOpaque) PageGetSpecialPointer(page);
} }
@ -1766,12 +1788,13 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
BlockNumber blkno = true_stack->bts_blkno; BlockNumber blkno = true_stack->bts_blkno;
BTStackData stack; BTStackData stack;
BTPageOpaque opaque; BTPageOpaque opaque;
Buffer buf, rbuf; Buffer buf,
rbuf;
Page page; Page page;
OffsetNumber offnum; OffsetNumber offnum;
true_stack = true_stack->bts_parent; true_stack = true_stack->bts_parent;
for ( ; ; ) for (;;)
{ {
buf = _bt_getbuf(rel, blkno, BT_READ); buf = _bt_getbuf(rel, blkno, BT_READ);
@ -1779,8 +1802,8 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
_bt_fixlevel(rel, buf, rblkno); _bt_fixlevel(rel, buf, rblkno);
/* /*
* Here parent level should have pointers for both * Here parent level should have pointers for both lblkno and
* lblkno and rblkno and we have to find them. * rblkno and we have to find them.
*/ */
stack.bts_parent = NULL; stack.bts_parent = NULL;
stack.bts_blkno = blkno; stack.bts_blkno = blkno;
@ -1829,10 +1852,10 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
} }
/* /*
* Well, we are on the level that was root or unexistent when * Well, we are on the level that was root or unexistent when we
* we started traversing tree down. If btpo_parent is updated * started traversing tree down. If btpo_parent is updated then
* then we'll use it to continue, else we'll fix/restore upper * we'll use it to continue, else we'll fix/restore upper levels
* levels entirely. * entirely.
*/ */
if (!BTreeInvalidParent(opaque)) if (!BTreeInvalidParent(opaque))
{ {
@ -1878,14 +1901,14 @@ _bt_fixup(Relation rel, Buffer buf)
BTPageOpaque opaque; BTPageOpaque opaque;
BlockNumber blkno; BlockNumber blkno;
for ( ; ; ) for (;;)
{ {
page = BufferGetPage(buf); page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page); opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* /*
* If someone else already created parent pages * If someone else already created parent pages then it's time for
* then it's time for _bt_fixtree() to check upper * _bt_fixtree() to check upper levels and fix them, if required.
* levels and fix them, if required.
*/ */
if (!BTreeInvalidParent(opaque)) if (!BTreeInvalidParent(opaque))
{ {
@ -1904,9 +1927,8 @@ _bt_fixup(Relation rel, Buffer buf)
} }
/* /*
* Ok, we are on the leftmost page, it's write locked * Ok, we are on the leftmost page, it's write locked by us and its
* by us and its btpo_parent points to meta page - time * btpo_parent points to meta page - time for _bt_fixroot().
* for _bt_fixroot().
*/ */
elog(NOTICE, "bt_fixup[%s]: fixing root page", RelationGetRelationName(rel)); elog(NOTICE, "bt_fixup[%s]: fixing root page", RelationGetRelationName(rel));
buf = _bt_fixroot(rel, buf, true); buf = _bt_fixroot(rel, buf, true);
@ -1925,16 +1947,16 @@ _bt_getoff(Page page, BlockNumber blkno)
ItemId itemid; ItemId itemid;
BTItem item; BTItem item;
for ( ; offnum <= maxoff; offnum++) for (; offnum <= maxoff; offnum++)
{ {
itemid = PageGetItemId(page, offnum); itemid = PageGetItemId(page, offnum);
item = (BTItem) PageGetItem(page, itemid); item = (BTItem) PageGetItem(page, itemid);
curblkno = ItemPointerGetBlockNumber(&(item->bti_itup.t_tid)); curblkno = ItemPointerGetBlockNumber(&(item->bti_itup.t_tid));
if (curblkno == blkno) if (curblkno == blkno)
return(offnum); return (offnum);
} }
return(InvalidOffsetNumber); return (InvalidOffsetNumber);
} }
/* /*
@ -1963,7 +1985,7 @@ _bt_pgaddtup(Relation rel,
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page); BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
BTItemData truncitem; BTItemData truncitem;
if (! P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque)) if (!P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
{ {
memcpy(&truncitem, btitem, sizeof(BTItemData)); memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData); truncitem.bti_itup.t_info = sizeof(BTItemData);
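
The _bt_findsplitloc() hunks above stop at the first "good enough" split point, defined as a free-space imbalance of no more than pagesize/16, instead of scanning every candidate for the true optimum, while remembering the best split seen as a fallback. A toy standalone version of that heuristic with invented page and item sizes (this is an illustration of the search strategy only, not the btree code):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_FREE	100				/* made-up usable space per page */
#define GOODENOUGH	(PAGE_FREE / 16)	/* acceptable free-space imbalance */

static int
find_split(const int *itemsz, int nitems)
{
	int			total = 0;
	int			tosplit = 0;
	int			best = -1;
	int			bestdelta = 0;
	int			i;

	for (i = 0; i < nitems; i++)
		total += itemsz[i];

	for (i = 1; i < nitems; i++)	/* keep at least one item on each side */
	{
		int			leftfree,
					rightfree,
					delta;

		tosplit += itemsz[i - 1];
		leftfree = PAGE_FREE - tosplit;
		rightfree = PAGE_FREE - (total - tosplit);
		if (leftfree < 0 || rightfree < 0)
			continue;			/* infeasible: one side overflows */
		delta = abs(leftfree - rightfree);
		if (delta <= GOODENOUGH)
			return i;			/* good enough: stop searching early */
		if (best < 0 || delta < bestdelta)
		{
			best = i;			/* remember the best split seen so far */
			bestdelta = delta;
		}
	}
	return best;				/* fall back to the best feasible split */
}

int
main(void)
{
	int			items[] = {30, 25, 20, 25, 30};

	printf("split before item %d\n", find_split(items, 5));
	return 0;
}
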
View File
@ -9,7 +9,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.50 2001/02/07 23:35:33 vadim Exp $ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.51 2001/03/22 03:59:14 momjian Exp $
* *
* NOTES * NOTES
* Postgres btree pages look like ordinary relation pages. The opaque * Postgres btree pages look like ordinary relation pages. The opaque
@ -186,12 +186,12 @@ _bt_getroot(Relation rel, int access)
xlrec.level = 1; xlrec.level = 1;
BlockIdSet(&(xlrec.rootblk), rootblkno); BlockIdSet(&(xlrec.rootblk), rootblkno);
rdata.buffer = InvalidBuffer; rdata.buffer = InvalidBuffer;
rdata.data = (char*)&xlrec; rdata.data = (char *) &xlrec;
rdata.len = SizeOfBtreeNewroot; rdata.len = SizeOfBtreeNewroot;
rdata.next = NULL; rdata.next = NULL;
recptr = XLogInsert(RM_BTREE_ID, recptr = XLogInsert(RM_BTREE_ID,
XLOG_BTREE_NEWROOT|XLOG_BTREE_LEAF, &rdata); XLOG_BTREE_NEWROOT | XLOG_BTREE_LEAF, &rdata);
PageSetLSN(rootpage, recptr); PageSetLSN(rootpage, recptr);
PageSetSUI(rootpage, ThisStartUpID); PageSetSUI(rootpage, ThisStartUpID);
@ -212,6 +212,7 @@ _bt_getroot(Relation rel, int access)
} }
else else
{ {
/* /*
* Metadata initialized by someone else. In order to * Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata * guarantee no deadlocks, we have to release the metadata
@ -237,18 +238,19 @@ _bt_getroot(Relation rel, int access)
rootpage = BufferGetPage(rootbuf); rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage); rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
if (! P_ISROOT(rootopaque)) if (!P_ISROOT(rootopaque))
{ {
/* /*
* It happened, but if root page splitter failed to create * It happened, but if root page splitter failed to create new
* new root page then we'll go in loop trying to call * root page then we'll go in loop trying to call _bt_getroot
* _bt_getroot again and again. * again and again.
*/ */
if (FixBTree) if (FixBTree)
{ {
Buffer newrootbuf; Buffer newrootbuf;
check_parent:; check_parent:;
if (BTreeInvalidParent(rootopaque)) /* unupdated! */ if (BTreeInvalidParent(rootopaque)) /* unupdated! */
{ {
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK); LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
@ -266,20 +268,22 @@ check_parent:;
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage); rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
/* New root might be splitted while changing lock */ /* New root might be splitted while changing lock */
if (P_ISROOT(rootopaque)) if (P_ISROOT(rootopaque))
return(rootbuf); return (rootbuf);
/* rootbuf is read locked */ /* rootbuf is read locked */
goto check_parent; goto check_parent;
} }
else /* someone else already fixed root */ else
/* someone else already fixed root */
{ {
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK); LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_READ); LockBuffer(rootbuf, BT_READ);
} }
} }
/* /*
* Ok, here we have old root page with btpo_parent pointing * Ok, here we have old root page with btpo_parent pointing to
* to upper level - check parent page because of there is * upper level - check parent page because of there is good
* good chance that parent is root page. * chance that parent is root page.
*/ */
newrootbuf = _bt_getbuf(rel, rootopaque->btpo_parent, BT_READ); newrootbuf = _bt_getbuf(rel, rootopaque->btpo_parent, BT_READ);
_bt_relbuf(rel, rootbuf, BT_READ); _bt_relbuf(rel, rootbuf, BT_READ);
@ -287,7 +291,7 @@ check_parent:;
rootpage = BufferGetPage(rootbuf); rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage); rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
if (P_ISROOT(rootopaque)) if (P_ISROOT(rootopaque))
return(rootbuf); return (rootbuf);
/* no luck -:( */ /* no luck -:( */
} }
@ -475,7 +479,7 @@ _bt_pagedel(Relation rel, ItemPointer tid)
xlrec.target.node = rel->rd_node; xlrec.target.node = rel->rd_node;
xlrec.target.tid = *tid; xlrec.target.tid = *tid;
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeDelete; rdata[0].len = SizeOfBtreeDelete;
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);


@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.78 2001/02/07 23:35:33 vadim Exp $ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.79 2001/03/22 03:59:15 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -30,6 +30,7 @@
bool BuildingBtree = false; /* see comment in btbuild() */ bool BuildingBtree = false; /* see comment in btbuild() */
bool FastBuild = true; /* use sort/build instead */ bool FastBuild = true; /* use sort/build instead */
/* of insertion build */ /* of insertion build */
@ -56,8 +57,10 @@ btbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1); Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2); IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3); Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED #ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4); IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif #endif
HeapScanDesc hscan; HeapScanDesc hscan;
HeapTuple htup; HeapTuple htup;
@ -69,9 +72,11 @@ btbuild(PG_FUNCTION_ARGS)
int nhtups, int nhtups,
nitups; nitups;
Node *pred = indexInfo->ii_Predicate; Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable; TupleTable tupleTable;
TupleTableSlot *slot; TupleTableSlot *slot;
#endif #endif
ExprContext *econtext; ExprContext *econtext;
InsertIndexResult res = NULL; InsertIndexResult res = NULL;
@ -80,10 +85,11 @@ btbuild(PG_FUNCTION_ARGS)
bool usefast; bool usefast;
Snapshot snapshot; Snapshot snapshot;
TransactionId XmaxRecent; TransactionId XmaxRecent;
/* /*
* spool2 is needed only when the index is an unique index. * spool2 is needed only when the index is an unique index. Dead
* Dead tuples are put into spool2 instead of spool in * tuples are put into spool2 instead of spool in order to avoid
* order to avoid uniqueness check. * uniqueness check.
*/ */
BTSpool *spool2 = NULL; BTSpool *spool2 = NULL;
bool tupleIsAlive; bool tupleIsAlive;
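The comment above explains why a unique-index build keeps a second spool: dead tuples go to spool2 so they bypass the uniqueness check applied to live tuples. A toy standalone sketch of that routing decision (two fixed-size arrays, with duplicate detection applied only to the "live" one); the names here are made up for illustration and are not the backend's BTSpool API.

    #include <stdio.h>
    #include <stdbool.h>

    #define MAXKEYS 100

    typedef struct { int keys[MAXKEYS]; int n; } spool_t;

    /* Append to the live spool, refusing duplicates (the "uniqueness check"). */
    static bool spool_add_unique(spool_t *s, int key)
    {
        for (int i = 0; i < s->n; i++)
            if (s->keys[i] == key)
                return false;       /* would violate uniqueness */
        s->keys[s->n++] = key;
        return true;
    }

    /* Dead tuples go to spool2 with no uniqueness check at all. */
    static void spool_add(spool_t *s, int key) { s->keys[s->n++] = key; }

    int main(void)
    {
        spool_t live = {.n = 0}, dead = {.n = 0};
        struct { int key; bool alive; } tuples[] =
            {{1, true}, {2, false}, {2, true}, {3, false}};

        for (int i = 0; i < 4; i++)
        {
            if (tuples[i].alive)
            {
                if (!spool_add_unique(&live, tuples[i].key))
                    fprintf(stderr, "duplicate key %d\n", tuples[i].key);
            }
            else
                spool_add(&dead, tuples[i].key);   /* dead: skip the check */
        }
        printf("live=%d dead=%d\n", live.n, dead.n);
        return 0;
    }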
@ -155,9 +161,9 @@ btbuild(PG_FUNCTION_ARGS)
if (usefast) if (usefast)
{ {
spool = _bt_spoolinit(index, indexInfo->ii_Unique); spool = _bt_spoolinit(index, indexInfo->ii_Unique);
/* /*
* Different from spool,the uniqueness isn't checked * Different from spool,the uniqueness isn't checked for spool2.
* for spool2.
*/ */
if (indexInfo->ii_Unique) if (indexInfo->ii_Unique)
spool2 = _bt_spoolinit(index, false); spool2 = _bt_spoolinit(index, false);
@ -193,6 +199,7 @@ btbuild(PG_FUNCTION_ARGS)
nhtups++; nhtups++;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
/* /*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip * If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index * this tuple if it was already in the existing partial index
@ -253,8 +260,7 @@ btbuild(PG_FUNCTION_ARGS)
* btree pages - NULLs greater NOT_NULLs and NULL = NULL is TRUE. * btree pages - NULLs greater NOT_NULLs and NULL = NULL is TRUE.
* Sure, it's just rule for placing/finding items and no more - * Sure, it's just rule for placing/finding items and no more -
* keytest'll return FALSE for a = 5 for items having 'a' isNULL. * keytest'll return FALSE for a = 5 for items having 'a' isNULL.
* Look at _bt_compare for how it works. * Look at _bt_compare for how it works. - vadim 03/23/97
* - vadim 03/23/97
* *
* if (itup->t_info & INDEX_NULL_MASK) { pfree(itup); continue; } * if (itup->t_info & INDEX_NULL_MASK) { pfree(itup); continue; }
*/ */
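The comment block above states the placement rule the btree code relies on: NULLs sort after non-NULLs and compare equal to each other. Below is a self-contained qsort comparator that implements exactly that ordering for nullable integers; the nullable_int struct is invented for the example.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    typedef struct { int value; bool isnull; } nullable_int;

    /* NULLs greater than NOT_NULLs, and NULL == NULL, per the rule above. */
    static int cmp_nulls_last(const void *pa, const void *pb)
    {
        const nullable_int *a = pa, *b = pb;
        if (a->isnull && b->isnull) return 0;
        if (a->isnull) return 1;        /* NULL sorts after any value */
        if (b->isnull) return -1;
        return (a->value > b->value) - (a->value < b->value);
    }

    int main(void)
    {
        nullable_int v[] = {{5, false}, {0, true}, {2, false}, {0, true}};
        qsort(v, 4, sizeof v[0], cmp_nulls_last);
        for (int i = 0; i < 4; i++)
            v[i].isnull ? printf("NULL ") : printf("%d ", v[i].value);
        printf("\n");                   /* prints: 2 5 NULL NULL */
        return 0;
    }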
@ -271,7 +277,8 @@ btbuild(PG_FUNCTION_ARGS)
{ {
if (tupleIsAlive || !spool2) if (tupleIsAlive || !spool2)
_bt_spool(btitem, spool); _bt_spool(btitem, spool);
else /* dead tuples are put into spool2 */ else
/* dead tuples are put into spool2 */
{ {
dead_count++; dead_count++;
_bt_spool(btitem, spool2); _bt_spool(btitem, spool2);
@ -296,9 +303,7 @@ btbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL) if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true); ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */ #endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext); FreeExprContext(econtext);
@ -408,10 +413,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (ItemPointerIsValid(&(scan->currentItemData))) if (ItemPointerIsValid(&(scan->currentItemData)))
{ {
/* /*
* Restore scan position using heap TID returned by previous call * Restore scan position using heap TID returned by previous call
* to btgettuple(). _bt_restscan() re-grabs the read lock on * to btgettuple(). _bt_restscan() re-grabs the read lock on the
* the buffer, too. * buffer, too.
*/ */
_bt_restscan(scan); _bt_restscan(scan);
res = _bt_next(scan, dir); res = _bt_next(scan, dir);
@ -421,8 +427,8 @@ btgettuple(PG_FUNCTION_ARGS)
/* /*
* Save heap TID to use it in _bt_restscan. Then release the read * Save heap TID to use it in _bt_restscan. Then release the read
* lock on the buffer so that we aren't blocking other backends. * lock on the buffer so that we aren't blocking other backends. NOTE:
* NOTE: we do keep the pin on the buffer! * we do keep the pin on the buffer!
*/ */
if (res) if (res)
{ {
@ -462,8 +468,10 @@ Datum
btrescan(PG_FUNCTION_ARGS) btrescan(PG_FUNCTION_ARGS)
{ {
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */ #ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
bool fromEnd = PG_GETARG_BOOL(1); bool fromEnd = PG_GETARG_BOOL(1);
#endif #endif
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2); ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr; ItemPointer iptr;
@ -671,8 +679,8 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno; BlockNumber blkno;
/* /*
* Get back the read lock we were holding on the buffer. * Get back the read lock we were holding on the buffer. (We still
* (We still have a reference-count pin on it, though.) * have a reference-count pin on it, though.)
*/ */
LockBuffer(buf, BT_READ); LockBuffer(buf, BT_READ);
@ -694,8 +702,8 @@ _bt_restscan(IndexScanDesc scan)
} }
/* /*
* The item we were on may have moved right due to insertions. * The item we were on may have moved right due to insertions. Find it
* Find it again. * again.
*/ */
for (;;) for (;;)
{ {
@ -717,7 +725,8 @@ _bt_restscan(IndexScanDesc scan)
} }
/* /*
* By here, the item we're looking for moved right at least one page * By here, the item we're looking for moved right at least one
* page
*/ */
if (P_RIGHTMOST(opaque)) if (P_RIGHTMOST(opaque))
elog(FATAL, "_bt_restscan: my bits moved right off the end of the world!" elog(FATAL, "_bt_restscan: my bits moved right off the end of the world!"
@ -742,7 +751,7 @@ _bt_restore_page(Page page, char *from, int len)
Size itemsz; Size itemsz;
char *end = from + len; char *end = from + len;
for ( ; from < end; ) for (; from < end;)
{ {
memcpy(&btdata, from, sizeof(BTItemData)); memcpy(&btdata, from, sizeof(BTItemData));
itemsz = IndexTupleDSize(btdata.bti_itup) + itemsz = IndexTupleDSize(btdata.bti_itup) +
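_bt_restore_page above walks a byte buffer holding back-to-back variable-length items: copy a fixed-size header out of the stream, read the item's length from it, and advance by that much. A standalone sketch of the same traversal pattern over an invented record layout (a 4-byte length plus payload), not the actual BTItem format:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Invented on-wire layout: a 4-byte length followed by that many data bytes. */
    typedef struct { uint32_t len; } rec_header;

    static void walk_records(const char *buf, size_t total)
    {
        const char *p = buf, *end = buf + total;
        while (p < end)
        {
            rec_header hdr;
            memcpy(&hdr, p, sizeof hdr);           /* header may be unaligned */
            printf("record of %u payload bytes\n", (unsigned) hdr.len);
            p += sizeof hdr + hdr.len;             /* step to the next record */
        }
    }

    int main(void)
    {
        char   buf[64];
        size_t off = 0;

        for (uint32_t len = 1; len <= 3; len++)    /* pack three dummy records */
        {
            rec_header hdr = {len};
            memcpy(buf + off, &hdr, sizeof hdr);
            memset(buf + off + sizeof hdr, 'x', len);
            off += sizeof hdr + len;
        }
        walk_records(buf, off);
        return 0;
    }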
@ -766,7 +775,7 @@ btree_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1)) if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
return; return;
xlrec = (xl_btree_delete*) XLogRecGetData(record); xlrec = (xl_btree_delete *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node); reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln)) if (!RelationIsValid(reln))
return; return;
@ -805,7 +814,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (redo && (record->xl_info & XLR_BKP_BLOCK_1)) if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return; return;
xlrec = (xl_btree_insert*) XLogRecGetData(record); xlrec = (xl_btree_insert *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node); reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln)) if (!RelationIsValid(reln))
return; return;
@ -825,7 +834,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return; return;
} }
if (PageAddItem(page, (Item)((char*)xlrec + SizeOfBtreeInsert), if (PageAddItem(page, (Item) ((char *) xlrec + SizeOfBtreeInsert),
record->xl_len - SizeOfBtreeInsert, record->xl_len - SizeOfBtreeInsert,
ItemPointerGetOffsetNumber(&(xlrec->target.tid)), ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber) LP_USED) == InvalidOffsetNumber)
@ -840,7 +849,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (XLByteLT(PageGetLSN(page), lsn)) if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_insert_undo: bad page LSN"); elog(STOP, "btree_insert_undo: bad page LSN");
if (! P_ISLEAF(pageop)) if (!P_ISLEAF(pageop))
{ {
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return; return;
@ -855,7 +864,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void static void
btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record) btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
{ {
xl_btree_split *xlrec = (xl_btree_split*) XLogRecGetData(record); xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
Relation reln; Relation reln;
BlockNumber blkno; BlockNumber blkno;
Buffer buffer; Buffer buffer;
@ -892,13 +901,14 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
pageop->btpo_next = ItemPointerGetBlockNumber(&(xlrec->target.tid)); pageop->btpo_next = ItemPointerGetBlockNumber(&(xlrec->target.tid));
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0; pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
_bt_restore_page(page, (char*)xlrec + SizeOfBtreeSplit, xlrec->leftlen); _bt_restore_page(page, (char *) xlrec + SizeOfBtreeSplit, xlrec->leftlen);
PageSetLSN(page, lsn); PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID); PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer); UnlockAndWriteBuffer(buffer);
} }
else /* undo */ else
/* undo */
{ {
if (XLByteLT(PageGetLSN(page), lsn)) if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad left sibling LSN"); elog(STOP, "btree_split_undo: bad left sibling LSN");
@ -929,14 +939,15 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0; pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
_bt_restore_page(page, _bt_restore_page(page,
(char*)xlrec + SizeOfBtreeSplit + xlrec->leftlen, (char *) xlrec + SizeOfBtreeSplit + xlrec->leftlen,
record->xl_len - SizeOfBtreeSplit - xlrec->leftlen); record->xl_len - SizeOfBtreeSplit - xlrec->leftlen);
PageSetLSN(page, lsn); PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID); PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer); UnlockAndWriteBuffer(buffer);
} }
else /* undo */ else
/* undo */
{ {
if (XLByteLT(PageGetLSN(page), lsn)) if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad right sibling LSN"); elog(STOP, "btree_split_undo: bad right sibling LSN");
@ -977,7 +988,7 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
static void static void
btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record) btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
{ {
xl_btree_newroot *xlrec = (xl_btree_newroot*) XLogRecGetData(record); xl_btree_newroot *xlrec = (xl_btree_newroot *) XLogRecGetData(record);
Relation reln; Relation reln;
Buffer buffer; Buffer buffer;
Page page; Page page;
@ -1011,7 +1022,7 @@ btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfBtreeNewroot) if (record->xl_len > SizeOfBtreeNewroot)
_bt_restore_page(page, _bt_restore_page(page,
(char*)xlrec + SizeOfBtreeNewroot, (char *) xlrec + SizeOfBtreeNewroot,
record->xl_len - SizeOfBtreeNewroot); record->xl_len - SizeOfBtreeNewroot);
PageSetLSN(page, lsn); PageSetLSN(page, lsn);
@ -1065,7 +1076,7 @@ btree_undo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_BTREE_INSERT) else if (info == XLOG_BTREE_INSERT)
btree_xlog_insert(false, lsn, record); btree_xlog_insert(false, lsn, record);
else if (info == XLOG_BTREE_SPLIT) else if (info == XLOG_BTREE_SPLIT)
btree_xlog_split(false, false, lsn, record);/* new item on the right */ btree_xlog_split(false, false, lsn, record); /* new item on the right */
else if (info == XLOG_BTREE_SPLEFT) else if (info == XLOG_BTREE_SPLEFT)
btree_xlog_split(false, true, lsn, record); /* new item on the left */ btree_xlog_split(false, true, lsn, record); /* new item on the left */
else if (info == XLOG_BTREE_NEWROOT) else if (info == XLOG_BTREE_NEWROOT)
@ -1084,26 +1095,29 @@ out_target(char *buf, xl_btreetid *target)
} }
void void
btree_desc(char *buf, uint8 xl_info, char* rec) btree_desc(char *buf, uint8 xl_info, char *rec)
{ {
uint8 info = xl_info & ~XLR_INFO_MASK; uint8 info = xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF; info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_INSERT) if (info == XLOG_BTREE_INSERT)
{ {
xl_btree_insert *xlrec = (xl_btree_insert*) rec; xl_btree_insert *xlrec = (xl_btree_insert *) rec;
strcat(buf, "insert: "); strcat(buf, "insert: ");
out_target(buf, &(xlrec->target)); out_target(buf, &(xlrec->target));
} }
else if (info == XLOG_BTREE_DELETE) else if (info == XLOG_BTREE_DELETE)
{ {
xl_btree_delete *xlrec = (xl_btree_delete*) rec; xl_btree_delete *xlrec = (xl_btree_delete *) rec;
strcat(buf, "delete: "); strcat(buf, "delete: ");
out_target(buf, &(xlrec->target)); out_target(buf, &(xlrec->target));
} }
else if (info == XLOG_BTREE_SPLIT || info == XLOG_BTREE_SPLEFT) else if (info == XLOG_BTREE_SPLIT || info == XLOG_BTREE_SPLEFT)
{ {
xl_btree_split *xlrec = (xl_btree_split*) rec; xl_btree_split *xlrec = (xl_btree_split *) rec;
sprintf(buf + strlen(buf), "split(%s): ", sprintf(buf + strlen(buf), "split(%s): ",
(info == XLOG_BTREE_SPLIT) ? "right" : "left"); (info == XLOG_BTREE_SPLIT) ? "right" : "left");
out_target(buf, &(xlrec->target)); out_target(buf, &(xlrec->target));
@ -1113,7 +1127,8 @@ btree_desc(char *buf, uint8 xl_info, char* rec)
} }
else if (info == XLOG_BTREE_NEWROOT) else if (info == XLOG_BTREE_NEWROOT)
{ {
xl_btree_newroot *xlrec = (xl_btree_newroot*) rec; xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
sprintf(buf + strlen(buf), "root: node %u/%u; blk %u", sprintf(buf + strlen(buf), "root: node %u/%u; blk %u",
xlrec->node.tblNode, xlrec->node.relNode, xlrec->node.tblNode, xlrec->node.relNode,
BlockIdGetBlockNumber(&xlrec->rootblk)); BlockIdGetBlockNumber(&xlrec->rootblk));


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.63 2001/01/24 19:42:49 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.64 2001/03/22 03:59:15 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -45,7 +45,7 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*bufP = _bt_getroot(rel, access); *bufP = _bt_getroot(rel, access);
/* If index is empty and access = BT_READ, no root page is created. */ /* If index is empty and access = BT_READ, no root page is created. */
if (! BufferIsValid(*bufP)) if (!BufferIsValid(*bufP))
return (BTStack) NULL; return (BTStack) NULL;
/* Loop iterates once per level descended in the tree */ /* Loop iterates once per level descended in the tree */
@ -79,13 +79,13 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP); par_blkno = BufferGetBlockNumber(*bufP);
/* /*
* We need to save the bit image of the index entry we chose in the * We need to save the bit image of the index entry we chose in
* parent page on a stack. In case we split the tree, we'll use this * the parent page on a stack. In case we split the tree, we'll
* bit image to figure out what our real parent page is, in case the * use this bit image to figure out what our real parent page is,
* parent splits while we're working lower in the tree. See the paper * in case the parent splits while we're working lower in the
* by Lehman and Yao for how this is detected and handled. (We use the * tree. See the paper by Lehman and Yao for how this is detected
* child link to disambiguate duplicate keys in the index -- Lehman * and handled. (We use the child link to disambiguate duplicate
* and Yao disallow duplicate keys.) * keys in the index -- Lehman and Yao disallow duplicate keys.)
*/ */
new_stack = (BTStack) palloc(sizeof(BTStackData)); new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno; new_stack->bts_blkno = par_blkno;
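The comment above explains why the descent remembers, for each level, both the block it came from and a copy of the index entry it followed: if the parent splits underneath us, the saved image lets us re-find the true parent later (the Lehman and Yao technique). A minimal sketch of such a descent-stack node and push step; this is a simplification with an int key standing in for the saved item, not the backend's BTStack.

    #include <stdlib.h>
    #include <stdint.h>

    typedef struct stack_node
    {
        uint32_t blkno;            /* parent block we descended from          */
        int      saved_key;        /* image of the entry we followed (an int  */
                                   /* here instead of a full index item)      */
        struct stack_node *parent; /* link to the level above                 */
    } stack_node;

    /* Push one level onto the descent stack before moving down the tree. */
    static stack_node *push_level(stack_node *top, uint32_t blkno, int key)
    {
        stack_node *n = malloc(sizeof *n);
        n->blkno = blkno;
        n->saved_key = key;
        n->parent = top;
        return n;
    }

    int main(void)
    {
        stack_node *stack = NULL;
        stack = push_level(stack, 3, 42);   /* remember where we came from */
        stack = push_level(stack, 7, 17);
        while (stack) { stack_node *up = stack->parent; free(stack); stack = up; }
        return 0;
    }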
@ -98,9 +98,9 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*bufP = _bt_getbuf(rel, blkno, BT_READ); *bufP = _bt_getbuf(rel, blkno, BT_READ);
/* /*
* Race -- the page we just grabbed may have split since we read its * Race -- the page we just grabbed may have split since we read
* pointer in the parent. If it has, we may need to move right to its * its pointer in the parent. If it has, we may need to move
* new sibling. Do that. * right to its new sibling. Do that.
*/ */
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ); *bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);
@ -299,7 +299,7 @@ _bt_compare(Relation rel,
* Force result ">" if target item is first data item on an internal * Force result ">" if target item is first data item on an internal
* page --- see NOTE above. * page --- see NOTE above.
*/ */
if (! P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque)) if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
return 1; return 1;
btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum)); btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum));
@ -458,10 +458,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
_bt_orderkeys(rel, so); _bt_orderkeys(rel, so);
/* /*
* Quit now if _bt_orderkeys() discovered that the scan keys can * Quit now if _bt_orderkeys() discovered that the scan keys can never
* never be satisfied (eg, x == 1 AND x > 2). * be satisfied (eg, x == 1 AND x > 2).
*/ */
if (! so->qual_ok) if (!so->qual_ok)
return (RetrieveIndexResult) NULL; return (RetrieveIndexResult) NULL;
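The hunk above bails out when the ordered scan keys turn out to be mutually contradictory (the "x == 1 AND x > 2" case). A tiny standalone check for that specific contradiction between an equality key and a strict lower bound, purely illustrative and not _bt_orderkeys itself:

    #include <stdio.h>
    #include <stdbool.h>

    /* Returns false when "col == eq_val AND col > gt_val" can never be true. */
    static bool qual_ok(int eq_val, int gt_val)
    {
        return eq_val > gt_val;     /* x == 1 AND x > 2  ->  1 > 2  ->  false */
    }

    int main(void)
    {
        printf("x == 1 AND x > 2: %s\n", qual_ok(1, 2) ? "satisfiable" : "never");
        printf("x == 5 AND x > 2: %s\n", qual_ok(5, 2) ? "satisfiable" : "never");
        return 0;
    }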
/* /*
@ -484,17 +484,16 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break; break;
strat = _bt_getstrat(rel, attno, strat = _bt_getstrat(rel, attno,
so->keyData[i].sk_procedure); so->keyData[i].sk_procedure);
/* /*
* Can we use this key as a starting boundary for this attr? * Can we use this key as a starting boundary for this attr?
* *
* We can use multiple keys if they look like, say, = >= = * We can use multiple keys if they look like, say, = >= = but we
* but we have to stop after accepting a > or < boundary. * have to stop after accepting a > or < boundary.
*/ */
if (strat == strat_total || if (strat == strat_total ||
strat == BTEqualStrategyNumber) strat == BTEqualStrategyNumber)
{
nKeyIs[keysCount++] = i; nKeyIs[keysCount++] = i;
}
else if (ScanDirectionIsBackward(dir) && else if (ScanDirectionIsBackward(dir) &&
(strat == BTLessStrategyNumber || (strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber)) strat == BTLessEqualStrategyNumber))
@ -536,7 +535,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
for (i = 0; i < keysCount; i++) for (i = 0; i < keysCount; i++)
{ {
j = nKeyIs[i]; j = nKeyIs[i];
/* _bt_orderkeys disallows it, but it's place to add some code later */
/*
* _bt_orderkeys disallows it, but it's place to add some code
* later
*/
if (so->keyData[j].sk_flags & SK_ISNULL) if (so->keyData[j].sk_flags & SK_ISNULL)
{ {
pfree(nKeyIs); pfree(nKeyIs);
@ -562,7 +565,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/* don't need to keep the stack around... */ /* don't need to keep the stack around... */
_bt_freestack(stack); _bt_freestack(stack);
if (! BufferIsValid(buf)) if (!BufferIsValid(buf))
{ {
/* Only get here if index is completely empty */ /* Only get here if index is completely empty */
ItemPointerSetInvalid(current); ItemPointerSetInvalid(current);
@ -601,6 +604,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
switch (strat_total) switch (strat_total)
{ {
case BTLessStrategyNumber: case BTLessStrategyNumber:
/* /*
* Back up one to arrive at last item < scankey * Back up one to arrive at last item < scankey
*/ */
@ -612,6 +616,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break; break;
case BTLessEqualStrategyNumber: case BTLessEqualStrategyNumber:
/* /*
* We need to find the last item <= scankey, so step forward * We need to find the last item <= scankey, so step forward
* till we find one > scankey, then step back one. * till we find one > scankey, then step back one.
@ -645,9 +650,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break; break;
case BTEqualStrategyNumber: case BTEqualStrategyNumber:
/* /*
* Make sure we are on the first equal item; might have to step * Make sure we are on the first equal item; might have to
* forward if currently at end of page. * step forward if currently at end of page.
*/ */
if (offnum > PageGetMaxOffsetNumber(page)) if (offnum > PageGetMaxOffsetNumber(page))
{ {
@ -662,6 +668,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
result = _bt_compare(rel, keysCount, scankeys, page, offnum); result = _bt_compare(rel, keysCount, scankeys, page, offnum);
if (result != 0) if (result != 0)
goto nomatches; /* no equal items! */ goto nomatches; /* no equal items! */
/* /*
* If a backward scan was specified, need to start with last * If a backward scan was specified, need to start with last
* equal item not first one. * equal item not first one.
@ -685,6 +692,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break; break;
case BTGreaterEqualStrategyNumber: case BTGreaterEqualStrategyNumber:
/* /*
* We want the first item >= scankey, which is where we are... * We want the first item >= scankey, which is where we are...
* unless we're not anywhere at all... * unless we're not anywhere at all...
@ -700,9 +708,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break; break;
case BTGreaterStrategyNumber: case BTGreaterStrategyNumber:
/* /*
* We want the first item > scankey, so make sure we are on * We want the first item > scankey, so make sure we are on an
* an item and then step over any equal items. * item and then step over any equal items.
*/ */
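The case arms above position the scan at the first item matching each strategy: first item >= key, first item > key, first or last equal item. On a single sorted array those are the classic lower-bound/upper-bound binary searches, sketched here as plain C functions (illustrative only; the real code searches page by page, not an array):

    #include <stdio.h>

    /* First index whose element is >= key (the ">=" start position). */
    static int lower_bound(const int *a, int n, int key)
    {
        int lo = 0, hi = n;
        while (lo < hi)
        {
            int mid = lo + (hi - lo) / 2;
            if (a[mid] < key) lo = mid + 1; else hi = mid;
        }
        return lo;
    }

    /* First index whose element is > key (the ">" start; also one past the
     * last equal item, which is where a backward "=" scan would begin). */
    static int upper_bound(const int *a, int n, int key)
    {
        int lo = 0, hi = n;
        while (lo < hi)
        {
            int mid = lo + (hi - lo) / 2;
            if (a[mid] <= key) lo = mid + 1; else hi = mid;
        }
        return lo;
    }

    int main(void)
    {
        int a[] = {1, 3, 3, 3, 7, 9};
        printf(">= 3 starts at %d, > 3 starts at %d\n",
               lower_bound(a, 6, 3), upper_bound(a, 6, 3));   /* 1 and 4 */
        return 0;
    }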
if (offnum > PageGetMaxOffsetNumber(page)) if (offnum > PageGetMaxOffsetNumber(page))
{ {
@ -850,11 +859,12 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
*bufP = _bt_getbuf(rel, blkno, BT_READ); *bufP = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(*bufP); page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page); opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* /*
* If the adjacent page just split, then we have to walk * If the adjacent page just split, then we have to walk
* right to find the block that's now adjacent to where * right to find the block that's now adjacent to where we
* we were. Because pages only split right, we don't have * were. Because pages only split right, we don't have to
* to worry about this failing to terminate. * worry about this failing to terminate.
*/ */
while (opaque->btpo_next != obknum) while (opaque->btpo_next != obknum)
{ {
@ -917,7 +927,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
*/ */
buf = _bt_getroot(rel, BT_READ); buf = _bt_getroot(rel, BT_READ);
if (! BufferIsValid(buf)) if (!BufferIsValid(buf))
{ {
/* empty index... */ /* empty index... */
ItemPointerSetInvalid(current); ItemPointerSetInvalid(current);
@ -981,7 +991,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Assert(P_RIGHTMOST(opaque)); Assert(P_RIGHTMOST(opaque));
start = PageGetMaxOffsetNumber(page); start = PageGetMaxOffsetNumber(page);
if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty page */ if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty
* page */
start = P_FIRSTDATAKEY(opaque); start = P_FIRSTDATAKEY(opaque);
} }
else else
@ -995,8 +1006,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
so->btso_curbuf = buf; so->btso_curbuf = buf;
/* /*
* Left/rightmost page could be empty due to deletions, * Left/rightmost page could be empty due to deletions, if so step
* if so step till we find a nonempty page. * till we find a nonempty page.
*/ */
if (start > maxoff) if (start > maxoff)
{ {


@ -35,7 +35,7 @@
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.59 2001/01/24 19:42:49 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.60 2001/03/22 03:59:15 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -73,10 +73,12 @@ typedef struct BTPageState
{ {
Buffer btps_buf; /* current buffer & page */ Buffer btps_buf; /* current buffer & page */
Page btps_page; Page btps_page;
BTItem btps_minkey; /* copy of minimum key (first item) on page */ BTItem btps_minkey; /* copy of minimum key (first item) on
* page */
OffsetNumber btps_lastoff; /* last item offset loaded */ OffsetNumber btps_lastoff; /* last item offset loaded */
int btps_level; /* tree level (0 = leaf) */ int btps_level; /* tree level (0 = leaf) */
Size btps_full; /* "full" if less than this much free space */ Size btps_full; /* "full" if less than this much free
* space */
struct BTPageState *btps_next; /* link to parent level, if any */ struct BTPageState *btps_next; /* link to parent level, if any */
} BTPageState; } BTPageState;
@ -271,7 +273,7 @@ _bt_sortaddtup(Page page,
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page); BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
BTItemData truncitem; BTItemData truncitem;
if (! P_ISLEAF(opaque) && itup_off == P_FIRSTKEY) if (!P_ISLEAF(opaque) && itup_off == P_FIRSTKEY)
{ {
memcpy(&truncitem, btitem, sizeof(BTItemData)); memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData); truncitem.bti_itup.t_info = sizeof(BTItemData);
@ -347,11 +349,12 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
*/ */
if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData)) if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %ld", elog(ERROR, "btree: index item size %lu exceeds maximum %ld",
(unsigned long)btisz, (unsigned long) btisz,
(PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData)); (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
if (pgspc < btisz || pgspc < state->btps_full) if (pgspc < btisz || pgspc < state->btps_full)
{ {
/* /*
* Item won't fit on this page, or we feel the page is full enough * Item won't fit on this page, or we feel the page is full enough
* already. Finish off the page and write it out. * already. Finish off the page and write it out.
@ -388,9 +391,9 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
((PageHeader) opage)->pd_lower -= sizeof(ItemIdData); ((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
/* /*
* Link the old buffer into its parent, using its minimum key. * Link the old buffer into its parent, using its minimum key. If
* If we don't have a parent, we have to create one; * we don't have a parent, we have to create one; this adds a new
* this adds a new btree level. * btree level.
*/ */
if (state->btps_next == (BTPageState *) NULL) if (state->btps_next == (BTPageState *) NULL)
{ {
@ -405,8 +408,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/* /*
* Save a copy of the minimum key for the new page. We have to * Save a copy of the minimum key for the new page. We have to
* copy it off the old page, not the new one, in case we are * copy it off the old page, not the new one, in case we are not
* not at leaf level. * at leaf level.
*/ */
state->btps_minkey = _bt_formitem(&(obti->bti_itup)); state->btps_minkey = _bt_formitem(&(obti->bti_itup));
@ -414,13 +417,13 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
* Set the sibling links for both pages, and parent links too. * Set the sibling links for both pages, and parent links too.
* *
* It's not necessary to set the parent link at all, because it's * It's not necessary to set the parent link at all, because it's
* only used for handling concurrent root splits, but we may as well * only used for handling concurrent root splits, but we may as
* do it as a debugging aid. Note we set new page's link as well * well do it as a debugging aid. Note we set new page's link as
* as old's, because if the new page turns out to be the last of * well as old's, because if the new page turns out to be the last
* the level, _bt_uppershutdown won't change it. The links may be * of the level, _bt_uppershutdown won't change it. The links may
* out of date by the time the build finishes, but that's OK; they * be out of date by the time the build finishes, but that's OK;
* need only point to a left-sibling of the true parent. See the * they need only point to a left-sibling of the true parent. See
* README file for more info. * the README file for more info.
*/ */
{ {
BTPageOpaque oopaque = (BTPageOpaque) PageGetSpecialPointer(opage); BTPageOpaque oopaque = (BTPageOpaque) PageGetSpecialPointer(opage);
@ -449,8 +452,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/* /*
* If the new item is the first for its page, stash a copy for later. * If the new item is the first for its page, stash a copy for later.
* Note this will only happen for the first item on a level; on later * Note this will only happen for the first item on a level; on later
* pages, the first item for a page is copied from the prior page * pages, the first item for a page is copied from the prior page in
* in the code above. * the code above.
*/ */
if (last_off == P_HIKEY) if (last_off == P_HIKEY)
{ {
@ -493,8 +496,8 @@ _bt_uppershutdown(Relation index, BTPageState *state)
* *
* If we're at the top, it's the root, so attach it to the metapage. * If we're at the top, it's the root, so attach it to the metapage.
* Otherwise, add an entry for it to its parent using its minimum * Otherwise, add an entry for it to its parent using its minimum
* key. This may cause the last page of the parent level to split, * key. This may cause the last page of the parent level to
* but that's not a problem -- we haven't gotten to it yet. * split, but that's not a problem -- we haven't gotten to it yet.
*/ */
if (s->btps_next == (BTPageState *) NULL) if (s->btps_next == (BTPageState *) NULL)
{ {
@ -529,21 +532,28 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
{ {
BTPageState *state = NULL; BTPageState *state = NULL;
bool merge = (btspool2 != NULL); bool merge = (btspool2 != NULL);
BTItem bti, bti2 = NULL; BTItem bti,
bool should_free, should_free2, load1; bti2 = NULL;
bool should_free,
should_free2,
load1;
TupleDesc tupdes = RelationGetDescr(index); TupleDesc tupdes = RelationGetDescr(index);
int i, keysz = RelationGetNumberOfAttributes(index); int i,
keysz = RelationGetNumberOfAttributes(index);
ScanKey indexScanKey = NULL; ScanKey indexScanKey = NULL;
if (merge) if (merge)
{ {
/* /*
* Another BTSpool for dead tuples exists. * Another BTSpool for dead tuples exists. Now we have to merge
* Now we have to merge btspool and btspool2. * btspool and btspool2.
*/ */
ScanKey entry; ScanKey entry;
Datum attrDatum1, attrDatum2; Datum attrDatum1,
bool isFirstNull, isSecondNull; attrDatum2;
bool isFirstNull,
isSecondNull;
int32 compare; int32 compare;
/* the preparation of merge */ /* the preparation of merge */
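Merging btspool and btspool2 as described above is an ordinary two-way merge of two sorted inputs: at each step compare the current heads and emit the smaller one. A standalone version over two sorted int arrays, with invented names, just to show the control flow:

    #include <stdio.h>

    /* Merge two ascending arrays into out[]; returns the number emitted. */
    static int merge2(const int *a, int na, const int *b, int nb, int *out)
    {
        int i = 0, j = 0, k = 0;

        while (i < na && j < nb)
            out[k++] = (a[i] <= b[j]) ? a[i++] : b[j++];   /* take the smaller head */
        while (i < na) out[k++] = a[i++];                  /* drain whichever remains */
        while (j < nb) out[k++] = b[j++];
        return k;
    }

    int main(void)
    {
        int live[] = {1, 4, 6}, dead[] = {2, 4, 9}, out[6];
        int n = merge2(live, 3, dead, 3, out);

        for (int k = 0; k < n; k++)
            printf("%d ", out[k]);                         /* 1 2 4 4 6 9 */
        printf("\n");
        return 0;
    }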
@ -564,8 +574,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
for (i = 1; i <= keysz; i++) for (i = 1; i <= keysz; i++)
{ {
entry = indexScanKey + i - 1; entry = indexScanKey + i - 1;
attrDatum1 = index_getattr((IndexTuple)bti, i, tupdes, &isFirstNull); attrDatum1 = index_getattr((IndexTuple) bti, i, tupdes, &isFirstNull);
attrDatum2 = index_getattr((IndexTuple)bti2, i, tupdes, &isSecondNull); attrDatum2 = index_getattr((IndexTuple) bti2, i, tupdes, &isSecondNull);
if (isFirstNull) if (isFirstNull)
{ {
if (!isSecondNull) if (!isSecondNull)
@ -613,7 +623,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
} }
_bt_freeskey(indexScanKey); _bt_freeskey(indexScanKey);
} }
else /* merge is unnecessary */ else
/* merge is unnecessary */
{ {
while (bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free), bti != (BTItem) NULL) while (bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free), bti != (BTItem) NULL)
{ {


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.42 2001/01/24 19:42:49 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.43 2001/03/22 03:59:15 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -240,8 +240,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
/* /*
* Initialize for processing of keys for attr 1. * Initialize for processing of keys for attr 1.
* *
* xform[i] holds a copy of the current scan key of strategy type i+1, * xform[i] holds a copy of the current scan key of strategy type i+1, if
* if any; init[i] is TRUE if we have found such a key for this attr. * any; init[i] is TRUE if we have found such a key for this attr.
*/ */
attno = 1; attno = 1;
map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation), map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
@ -255,7 +255,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* pass to handle after-last-key processing. Actual exit from the * pass to handle after-last-key processing. Actual exit from the
* loop is at the "break" statement below. * loop is at the "break" statement below.
*/ */
for (i = 0; ; cur++, i++) for (i = 0;; cur++, i++)
{ {
if (i < numberOfKeys) if (i < numberOfKeys)
{ {
@ -263,7 +263,9 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (cur->sk_flags & SK_ISNULL) if (cur->sk_flags & SK_ISNULL)
{ {
so->qual_ok = false; so->qual_ok = false;
/* Quit processing so we don't try to invoke comparison
/*
* Quit processing so we don't try to invoke comparison
* routines on NULLs. * routines on NULLs.
*/ */
return; return;
@ -271,8 +273,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
} }
/* /*
* If we are at the end of the keys for a particular attr, * If we are at the end of the keys for a particular attr, finish
* finish up processing and emit the cleaned-up keys. * up processing and emit the cleaned-up keys.
*/ */
if (i == numberOfKeys || cur->sk_attno != attno) if (i == numberOfKeys || cur->sk_attno != attno)
{ {
@ -296,7 +298,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
eq = &xform[BTEqualStrategyNumber - 1]; eq = &xform[BTEqualStrategyNumber - 1];
for (j = BTMaxStrategyNumber; --j >= 0;) for (j = BTMaxStrategyNumber; --j >= 0;)
{ {
if (! init[j] || if (!init[j] ||
j == (BTEqualStrategyNumber - 1)) j == (BTEqualStrategyNumber - 1))
continue; continue;
chk = &xform[j]; chk = &xform[j];
@ -313,6 +315,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
} }
else else
{ {
/* /*
* No "=" for this key, so we're done with required keys * No "=" for this key, so we're done with required keys
*/ */
@ -355,8 +358,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* Emit the cleaned-up keys back into the key[] array in the * Emit the cleaned-up keys back into the key[] array in the
* correct order. Note we are overwriting our input here! * correct order. Note we are overwriting our input here!
* It's OK because (a) xform[] is a physical copy of the keys * It's OK because (a) xform[] is a physical copy of the keys
* we want, (b) we cannot emit more keys than we input, so * we want, (b) we cannot emit more keys than we input, so we
* we won't overwrite as-yet-unprocessed keys. * won't overwrite as-yet-unprocessed keys.
*/ */
for (j = BTMaxStrategyNumber; --j >= 0;) for (j = BTMaxStrategyNumber; --j >= 0;)
{ {
@ -409,7 +412,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (DatumGetBool(test)) if (DatumGetBool(test))
xform[j].sk_argument = cur->sk_argument; xform[j].sk_argument = cur->sk_argument;
else if (j == (BTEqualStrategyNumber - 1)) else if (j == (BTEqualStrategyNumber - 1))
so->qual_ok = false; /* key == a && key == b, but a != b */ so->qual_ok = false; /* key == a && key == b, but a !=
* b */
} }
else else
{ {
@ -473,16 +477,18 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (isNull) if (isNull)
{ {
/* /*
* Since NULLs are sorted after non-NULLs, we know we have * Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this * reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual * index attr. On a forward scan, we can stop if this qual is
* is one of the "must match" subset. On a backward scan, * one of the "must match" subset. On a backward scan,
* however, we should keep going. * however, we should keep going.
*/ */
if (keysok < so->numberOfRequiredKeys && if (keysok < so->numberOfRequiredKeys &&
ScanDirectionIsForward(dir)) ScanDirectionIsForward(dir))
*continuescan = false; *continuescan = false;
/* /*
* In any case, this indextuple doesn't match the qual. * In any case, this indextuple doesn't match the qual.
*/ */
@ -498,9 +504,10 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (DatumGetBool(test) == !!(key->sk_flags & SK_NEGATE)) if (DatumGetBool(test) == !!(key->sk_flags & SK_NEGATE))
{ {
/* /*
* Tuple fails this qual. If it's a required qual, then * Tuple fails this qual. If it's a required qual, then we
* we can conclude no further tuples will pass, either. * can conclude no further tuples will pass, either.
*/ */
if (keysok < so->numberOfRequiredKeys) if (keysok < so->numberOfRequiredKeys)
*continuescan = false; *continuescan = false;
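_bt_checkkeys above encodes the stopping rule: once a tuple fails one of the keys that the scan direction guarantees can only get worse (a "required" key), no later tuple can pass either, so the whole scan can stop. A simplified standalone scan loop over a sorted array showing that early exit; the qual here is just "value <= limit" and stands in for a required scan key.

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
        int  values[] = {1, 3, 5, 8, 9};   /* sorted, like a forward index scan */
        int  limit = 5;                    /* required qual: value <= limit */
        bool continuescan = true;

        for (int i = 0; i < 5 && continuescan; i++)
        {
            if (values[i] > limit)
            {
                /* A required key failed; in sorted order every later tuple
                 * fails too, so stop instead of checking the rest. */
                continuescan = false;
                break;
            }
            printf("match %d\n", values[i]);
        }
        return 0;
    }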


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.24 2001/01/24 19:42:49 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.25 2001/03/22 03:59:16 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */


@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.31 2001/01/24 19:42:49 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.32 2001/03/22 03:59:16 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -70,6 +70,7 @@ Datum
rt_box_size(PG_FUNCTION_ARGS) rt_box_size(PG_FUNCTION_ARGS)
{ {
BOX *a = PG_GETARG_BOX_P(0); BOX *a = PG_GETARG_BOX_P(0);
/* NB: size is an output argument */ /* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1); float *size = (float *) PG_GETARG_POINTER(1);
@ -155,13 +156,15 @@ Datum
rt_poly_size(PG_FUNCTION_ARGS) rt_poly_size(PG_FUNCTION_ARGS)
{ {
Pointer aptr = PG_GETARG_POINTER(0); Pointer aptr = PG_GETARG_POINTER(0);
/* NB: size is an output argument */ /* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1); float *size = (float *) PG_GETARG_POINTER(1);
POLYGON *a; POLYGON *a;
double xdim, double xdim,
ydim; ydim;
/* Can't just use GETARG because of possibility that input is NULL; /*
* Can't just use GETARG because of possibility that input is NULL;
* since POLYGON is toastable, GETARG will try to inspect its value * since POLYGON is toastable, GETARG will try to inspect its value
*/ */
if (aptr == NULL) if (aptr == NULL)


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.60 2001/03/07 21:20:26 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.61 2001/03/22 03:59:16 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -88,8 +88,10 @@ rtbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1); Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2); IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3); Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED #ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4); IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif #endif
HeapScanDesc hscan; HeapScanDesc hscan;
HeapTuple htup; HeapTuple htup;
@ -101,9 +103,11 @@ rtbuild(PG_FUNCTION_ARGS)
int nhtups, int nhtups,
nitups; nitups;
Node *pred = indexInfo->ii_Predicate; Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable; TupleTable tupleTable;
TupleTableSlot *slot; TupleTableSlot *slot;
#endif #endif
ExprContext *econtext; ExprContext *econtext;
InsertIndexResult res = NULL; InsertIndexResult res = NULL;
@ -171,6 +175,7 @@ rtbuild(PG_FUNCTION_ARGS)
nhtups++; nhtups++;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
/* /*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip * If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index * this tuple if it was already in the existing partial index
@ -232,9 +237,7 @@ rtbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL) if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true); ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */ #endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext); FreeExprContext(econtext);
@ -282,8 +285,10 @@ rtinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1); Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2); char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3); ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED #ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4); Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif #endif
InsertIndexResult res; InsertIndexResult res;
IndexTuple itup; IndexTuple itup;
@ -564,7 +569,7 @@ rtdosplit(Relation r,
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData)); res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
/* now insert the new index tuple */ /* now insert the new index tuple */
if (*spl_left == maxoff+1) if (*spl_left == maxoff + 1)
{ {
if (PageAddItem(left, (Item) itup, IndexTupleSize(itup), if (PageAddItem(left, (Item) itup, IndexTupleSize(itup),
leftoff, LP_USED) == InvalidOffsetNumber) leftoff, LP_USED) == InvalidOffsetNumber)
@ -576,7 +581,7 @@ rtdosplit(Relation r,
} }
else else
{ {
Assert(*spl_right == maxoff+1); Assert(*spl_right == maxoff + 1);
if (PageAddItem(right, (Item) itup, IndexTupleSize(itup), if (PageAddItem(right, (Item) itup, IndexTupleSize(itup),
rightoff, LP_USED) == InvalidOffsetNumber) rightoff, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "rtdosplit: failed to add index item to %s", elog(ERROR, "rtdosplit: failed to add index item to %s",
@ -665,10 +670,10 @@ rtintinsert(Relation r,
old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child)); old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));
/* /*
* This is a hack. Right now, we force rtree internal keys to be constant * This is a hack. Right now, we force rtree internal keys to be
* size. To fix this, need delete the old key and add both left and * constant size. To fix this, need delete the old key and add both
* right for the two new pages. The insertion of left may force a * left and right for the two new pages. The insertion of left may
* split if the new left key is bigger than the old key. * force a split if the new left key is bigger than the old key.
*/ */
if (IndexTupleSize(old) != IndexTupleSize(ltup)) if (IndexTupleSize(old) != IndexTupleSize(ltup))
@ -794,9 +799,10 @@ rtpicksplit(Relation r,
right_avail_space; right_avail_space;
/* /*
* First, make sure the new item is not so large that we can't possibly * First, make sure the new item is not so large that we can't
* fit it on a page, even by itself. (It's sufficient to make this test * possibly fit it on a page, even by itself. (It's sufficient to
* here, since any oversize tuple must lead to a page split attempt.) * make this test here, since any oversize tuple must lead to a page
* split attempt.)
*/ */
newitemsz = IndexTupleTotalSize(itup); newitemsz = IndexTupleTotalSize(itup);
if (newitemsz > RTPageAvailSpace) if (newitemsz > RTPageAvailSpace)
@ -804,7 +810,8 @@ rtpicksplit(Relation r,
(unsigned long) newitemsz, (unsigned long) RTPageAvailSpace); (unsigned long) newitemsz, (unsigned long) RTPageAvailSpace);
maxoff = PageGetMaxOffsetNumber(page); maxoff = PageGetMaxOffsetNumber(page);
newitemoff = OffsetNumberNext(maxoff); /* phony index for new item */ newitemoff = OffsetNumberNext(maxoff); /* phony index for new
* item */
/* Make arrays big enough for worst case, including sentinel */ /* Make arrays big enough for worst case, including sentinel */
nbytes = (maxoff + 2) * sizeof(OffsetNumber); nbytes = (maxoff + 2) * sizeof(OffsetNumber);
@ -827,8 +834,8 @@ rtpicksplit(Relation r,
item_2_sz = IndexTupleTotalSize(item_2); item_2_sz = IndexTupleTotalSize(item_2);
/* /*
* Ignore seed pairs that don't leave room for the new item * Ignore seed pairs that don't leave room for the new item on
* on either split page. * either split page.
*/ */
if (newitemsz + item_1_sz > RTPageAvailSpace && if (newitemsz + item_1_sz > RTPageAvailSpace &&
newitemsz + item_2_sz > RTPageAvailSpace) newitemsz + item_2_sz > RTPageAvailSpace)
@ -841,8 +848,10 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_union)); PointerGetDatum(&size_union));
inter_d = FunctionCall2(&rtstate->interFn, inter_d = FunctionCall2(&rtstate->interFn,
datum_alpha, datum_beta); datum_alpha, datum_beta);
/* The interFn may return a NULL pointer (not an SQL null!)
* to indicate no intersection. sizeFn must cope with this. /*
* The interFn may return a NULL pointer (not an SQL null!) to
* indicate no intersection. sizeFn must cope with this.
*/ */
FunctionCall2(&rtstate->sizeFn, inter_d, FunctionCall2(&rtstate->sizeFn, inter_d,
PointerGetDatum(&size_inter)); PointerGetDatum(&size_inter));
@ -869,6 +878,7 @@ rtpicksplit(Relation r,
if (firsttime) if (firsttime)
{ {
/* /*
* There is no possible split except to put the new item on its * There is no possible split except to put the new item on its
* own page. Since we still have to compute the union rectangles, * own page. Since we still have to compute the union rectangles,
@ -922,8 +932,8 @@ rtpicksplit(Relation r,
/* /*
* If we've already decided where to place this item, just put it * If we've already decided where to place this item, just put it
* on the correct list. Otherwise, we need to figure out which page * on the correct list. Otherwise, we need to figure out which
* needs the least enlargement in order to store the item. * page needs the least enlargement in order to store the item.
*/ */
if (i == seed_1) if (i == seed_1)
@ -961,12 +971,13 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_beta)); PointerGetDatum(&size_beta));
/* /*
* We prefer the page that shows smaller enlargement of its union area * We prefer the page that shows smaller enlargement of its union
* (Guttman's algorithm), but we must take care that at least one page * area (Guttman's algorithm), but we must take care that at least
* will still have room for the new item after this one is added. * one page will still have room for the new item after this one
* is added.
* *
* (We know that all the old items together can fit on one page, * (We know that all the old items together can fit on one page, so
* so we need not worry about any other problem than failing to fit * we need not worry about any other problem than failing to fit
* the new item.) * the new item.)
*/ */
left_feasible = (left_avail_space >= item_1_sz && left_feasible = (left_avail_space >= item_1_sz &&
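The comment spells out the split heuristic: assign the item to whichever half enlarges its bounding rectangle least (Guttman's rule), but never pick a page that would no longer have room for the new item. A compact standalone sketch of that decision for axis-aligned rectangles; the types and the feasibility flags are illustrative only, not rtpicksplit's actual bookkeeping.

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct { double xlo, ylo, xhi, yhi; } rect;

    static double area(rect r) { return (r.xhi - r.xlo) * (r.yhi - r.ylo); }

    /* Bounding box of two rectangles. */
    static rect runion(rect a, rect b)
    {
        rect u = {a.xlo < b.xlo ? a.xlo : b.xlo, a.ylo < b.ylo ? a.ylo : b.ylo,
                  a.xhi > b.xhi ? a.xhi : b.xhi, a.yhi > b.yhi ? a.yhi : b.yhi};
        return u;
    }

    /* true => put the item on the left page, false => right page. */
    static bool choose_left(rect left, rect right, rect item,
                            bool left_has_room, bool right_has_room)
    {
        double grow_left  = area(runion(left, item))  - area(left);
        double grow_right = area(runion(right, item)) - area(right);

        /* Prefer the smaller enlargement, but respect feasibility. */
        if (!left_has_room)  return false;
        if (!right_has_room) return true;
        return grow_left <= grow_right;
    }

    int main(void)
    {
        rect l = {0, 0, 2, 2}, r = {5, 5, 9, 9}, item = {1, 1, 3, 3};
        printf("goes %s\n", choose_left(l, r, item, true, true) ? "left" : "right");
        return 0;
    }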
@ -987,7 +998,7 @@ rtpicksplit(Relation r,
else else
{ {
elog(ERROR, "rtpicksplit: failed to find a workable page split"); elog(ERROR, "rtpicksplit: failed to find a workable page split");
choose_left = false; /* keep compiler quiet */ choose_left = false;/* keep compiler quiet */
} }
if (choose_left) if (choose_left)
@ -1211,6 +1222,6 @@ rtree_undo(XLogRecPtr lsn, XLogRecord *record)
} }
void void
rtree_desc(char *buf, uint8 xl_info, char* rec) rtree_desc(char *buf, uint8 xl_info, char *rec)
{ {
} }


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.35 2001/01/24 19:42:50 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.36 2001/03/22 03:59:16 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */


@ -10,20 +10,20 @@
#include "commands/sequence.h" #include "commands/sequence.h"
RmgrData RmgrTable[] = { RmgrData RmgrTable[] = {
{"XLOG", xlog_redo, xlog_undo, xlog_desc}, {"XLOG", xlog_redo, xlog_undo, xlog_desc},
{"Transaction", xact_redo, xact_undo, xact_desc}, {"Transaction", xact_redo, xact_undo, xact_desc},
{"Storage", smgr_redo, smgr_undo, smgr_desc}, {"Storage", smgr_redo, smgr_undo, smgr_desc},
{"Reserved 3", NULL, NULL, NULL}, {"Reserved 3", NULL, NULL, NULL},
{"Reserved 4", NULL, NULL, NULL}, {"Reserved 4", NULL, NULL, NULL},
{"Reserved 5", NULL, NULL, NULL}, {"Reserved 5", NULL, NULL, NULL},
{"Reserved 6", NULL, NULL, NULL}, {"Reserved 6", NULL, NULL, NULL},
{"Reserved 7", NULL, NULL, NULL}, {"Reserved 7", NULL, NULL, NULL},
{"Reserved 8", NULL, NULL, NULL}, {"Reserved 8", NULL, NULL, NULL},
{"Reserved 9", NULL, NULL, NULL}, {"Reserved 9", NULL, NULL, NULL},
{"Heap", heap_redo, heap_undo, heap_desc}, {"Heap", heap_redo, heap_undo, heap_desc},
{"Btree", btree_redo, btree_undo, btree_desc}, {"Btree", btree_redo, btree_undo, btree_desc},
{"Hash", hash_redo, hash_undo, hash_desc}, {"Hash", hash_redo, hash_undo, hash_desc},
{"Rtree", rtree_redo, rtree_undo, rtree_desc}, {"Rtree", rtree_redo, rtree_undo, rtree_desc},
{"Gist", gist_redo, gist_undo, gist_desc}, {"Gist", gist_redo, gist_undo, gist_desc},
{"Sequence", seq_redo, seq_undo, seq_desc} {"Sequence", seq_redo, seq_undo, seq_desc}
}; };


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.41 2001/03/18 20:18:59 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.42 2001/03/22 03:59:17 momjian Exp $
* *
* NOTES * NOTES
* This file contains the high level access-method interface to the * This file contains the high level access-method interface to the


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.28 2001/01/24 19:42:51 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.29 2001/03/22 03:59:17 momjian Exp $
* *
* NOTES * NOTES
* This file contains support functions for the high * This file contains support functions for the high
@ -186,7 +186,7 @@ TransBlockGetXidStatus(Block tblock,
bits8 bit2; bits8 bit2;
BitIndex offset; BitIndex offset;
tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr)); tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ---------------- /* ----------------
* calculate the index into the transaction data where * calculate the index into the transaction data where
@ -229,7 +229,7 @@ TransBlockSetXidStatus(Block tblock,
Index index; Index index;
BitIndex offset; BitIndex offset;
tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr)); tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ---------------- /* ----------------
* calculate the index into the transaction data where * calculate the index into the transaction data where


@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group * Copyright (c) 2000, PostgreSQL Global Development Group
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.37 2001/03/18 20:18:59 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.38 2001/03/22 03:59:17 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -32,9 +32,10 @@ VariableCache ShmemVariableCache = NULL;
void void
GetNewTransactionId(TransactionId *xid) GetNewTransactionId(TransactionId *xid)
{ {
/* /*
* During bootstrap initialization, we return the special * During bootstrap initialization, we return the special bootstrap
* bootstrap transaction id. * transaction id.
*/ */
if (AMI_OVERRIDE) if (AMI_OVERRIDE)
{ {
@ -60,9 +61,10 @@ GetNewTransactionId(TransactionId *xid)
void void
ReadNewTransactionId(TransactionId *xid) ReadNewTransactionId(TransactionId *xid)
{ {
/* /*
* During bootstrap initialization, we return the special * During bootstrap initialization, we return the special bootstrap
* bootstrap transaction id. * transaction id.
*/ */
if (AMI_OVERRIDE) if (AMI_OVERRIDE)
{ {
@ -130,10 +132,9 @@ CheckMaxObjectId(Oid assigned_oid)
} }
/* /*
* We have exceeded the logged oid range. * We have exceeded the logged oid range. We should lock the database
* We should lock the database and kill all other backends * and kill all other backends but we are loading oid's that we can
* but we are loading oid's that we can not guarantee are unique * not guarantee are unique anyway, so we must rely on the user.
* anyway, so we must rely on the user.
*/ */
XLogPutNextOid(assigned_oid + VAR_OID_PREFETCH); XLogPutNextOid(assigned_oid + VAR_OID_PREFETCH);


@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.99 2001/03/13 01:17:05 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.100 2001/03/22 03:59:18 momjian Exp $
* *
* NOTES * NOTES
* Transaction aborts can now occur two ways: * Transaction aborts can now occur two ways:
@ -222,9 +222,10 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel; int XactIsoLevel;
int CommitDelay = 0; /* precommit delay in microseconds */ int CommitDelay = 0; /* precommit delay in microseconds */
int CommitSiblings = 5; /* number of concurrent xacts needed to sleep */ int CommitSiblings = 5; /* number of concurrent xacts needed to
* sleep */
static void (*_RollbackFunc)(void*) = NULL; static void (*_RollbackFunc) (void *) = NULL;
static void *_RollbackData = NULL; static void *_RollbackData = NULL;
/* ---------------- /* ----------------
@ -674,25 +675,26 @@ RecordTransactionCommit()
xlrec.xtime = time(NULL); xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer; rdata.buffer = InvalidBuffer;
rdata.data = (char *)(&xlrec); rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactCommit; rdata.len = SizeOfXactCommit;
rdata.next = NULL; rdata.next = NULL;
START_CRIT_SECTION(); START_CRIT_SECTION();
/* /*
* SHOULD SAVE ARRAY OF RELFILENODE-s TO DROP * SHOULD SAVE ARRAY OF RELFILENODE-s TO DROP
*/ */
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT, &rdata); recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT, &rdata);
/* /*
* Sleep before commit! So we can flush more than one * Sleep before commit! So we can flush more than one commit
* commit records per single fsync. (The idea is some other * records per single fsync. (The idea is some other backend may
* backend may do the XLogFlush while we're sleeping. This * do the XLogFlush while we're sleeping. This needs work still,
* needs work still, because on most Unixen, the minimum * because on most Unixen, the minimum select() delay is 10msec or
* select() delay is 10msec or more, which is way too long.) * more, which is way too long.)
* *
* We do not sleep if enableFsync is not turned on, nor if there * We do not sleep if enableFsync is not turned on, nor if there are
* are fewer than CommitSiblings other backends with active * fewer than CommitSiblings other backends with active
* transactions. * transactions.
*/ */
if (CommitDelay > 0 && enableFsync && if (CommitDelay > 0 && enableFsync &&
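The rewrapped comment explains the commit-delay trick: sleep briefly before flushing the commit record so several backends can share one fsync, but only when fsync is enabled and enough sibling transactions are active. A schematic version of that guard, with the sleep approximated by select() as the comment itself suggests and the backend-count and flush helpers stubbed out:

#include <stdbool.h>
#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>

/* Assumed knobs mirroring CommitDelay / CommitSiblings in the hunk above. */
static int  commit_delay_us = 10000;  /* microseconds to sleep before flush */
static int  commit_siblings = 5;      /* other active xacts needed to sleep */
static bool enable_fsync    = true;

/* Hypothetical stand-ins for backend counting and the WAL flush itself. */
static int  count_active_backends(void) { return 8; }
static void flush_wal(void)              { puts("fsync of WAL segment"); }

static void
commit_record_flush(void)
{
    /*
     * Sleep only when it can plausibly pay off: fsync enabled, a delay
     * configured, and enough concurrent transactions that other backends
     * are likely to add commit records (or flush for us) while we wait.
     * The comment in the hunk above notes select()'s ~10ms floor as a caveat.
     */
    if (commit_delay_us > 0 && enable_fsync &&
        count_active_backends() >= commit_siblings)
    {
        struct timeval tv = { 0, commit_delay_us };

        (void) select(0, NULL, NULL, NULL, &tv);
    }
    flush_wal();
}

int
main(void)
{
    commit_record_flush();
    return 0;
}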
@ -818,7 +820,7 @@ RecordTransactionAbort(void)
xlrec.xtime = time(NULL); xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer; rdata.buffer = InvalidBuffer;
rdata.data = (char *)(&xlrec); rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactAbort; rdata.len = SizeOfXactAbort;
rdata.next = NULL; rdata.next = NULL;
@ -896,9 +898,7 @@ AtAbort_Memory(void)
MemoryContextResetAndDeleteChildren(TransactionCommandContext); MemoryContextResetAndDeleteChildren(TransactionCommandContext);
} }
else else
{
MemoryContextSwitchTo(TopMemoryContext); MemoryContextSwitchTo(TopMemoryContext);
}
} }
@ -1021,6 +1021,7 @@ CurrentXactInProgress(void)
{ {
return CurrentTransactionState->state == TRANS_INPROGRESS; return CurrentTransactionState->state == TRANS_INPROGRESS;
} }
#endif #endif
/* -------------------------------- /* --------------------------------
@ -1106,7 +1107,7 @@ CommitTransaction(void)
AtCommit_Memory(); AtCommit_Memory();
AtEOXact_Files(); AtEOXact_Files();
SharedBufferChanged = false; /* safest place to do it */ SharedBufferChanged = false;/* safest place to do it */
/* ---------------- /* ----------------
* done with commit processing, set current transaction * done with commit processing, set current transaction
@ -1143,15 +1144,16 @@ AbortTransaction(void)
/* /*
* Release any spinlocks or buffer context locks we might be holding * Release any spinlocks or buffer context locks we might be holding
* as quickly as possible. (Real locks, however, must be held till * as quickly as possible. (Real locks, however, must be held till we
* we finish aborting.) Releasing spinlocks is critical since we * finish aborting.) Releasing spinlocks is critical since we might
* might try to grab them again while cleaning up! * try to grab them again while cleaning up!
*/ */
ProcReleaseSpins(NULL); ProcReleaseSpins(NULL);
UnlockBuffers(); UnlockBuffers();
/* /*
* Also clean up any open wait for lock, since the lock manager * Also clean up any open wait for lock, since the lock manager will
* will choke if we try to wait for another lock before doing this. * choke if we try to wait for another lock before doing this.
*/ */
LockWaitCancel(); LockWaitCancel();
@ -1203,7 +1205,7 @@ AbortTransaction(void)
AtEOXact_Files(); AtEOXact_Files();
AtAbort_Locks(); AtAbort_Locks();
SharedBufferChanged = false; /* safest place to do it */ SharedBufferChanged = false;/* safest place to do it */
/* ---------------- /* ----------------
* State remains TRANS_ABORT until CleanupTransaction(). * State remains TRANS_ABORT until CleanupTransaction().
@ -1327,8 +1329,8 @@ StartTransactionCommand(void)
} }
/* /*
* We must switch to TransactionCommandContext before returning. * We must switch to TransactionCommandContext before returning. This
* This is already done if we called StartTransaction, otherwise not. * is already done if we called StartTransaction, otherwise not.
*/ */
Assert(TransactionCommandContext != NULL); Assert(TransactionCommandContext != NULL);
MemoryContextSwitchTo(TransactionCommandContext); MemoryContextSwitchTo(TransactionCommandContext);
@ -1765,9 +1767,7 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
/* SHOULD REMOVE FILES OF ALL DROPPED RELATIONS */ /* SHOULD REMOVE FILES OF ALL DROPPED RELATIONS */
} }
else if (info == XLOG_XACT_ABORT) else if (info == XLOG_XACT_ABORT)
{
TransactionIdAbort(record->xl_xid); TransactionIdAbort(record->xl_xid);
}
else else
elog(STOP, "xact_redo: unknown op code %u", info); elog(STOP, "xact_redo: unknown op code %u", info);
} }
@ -1784,13 +1784,13 @@ xact_undo(XLogRecPtr lsn, XLogRecord *record)
} }
void void
xact_desc(char *buf, uint8 xl_info, char* rec) xact_desc(char *buf, uint8 xl_info, char *rec)
{ {
uint8 info = xl_info & ~XLR_INFO_MASK; uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT) if (info == XLOG_XACT_COMMIT)
{ {
xl_xact_commit *xlrec = (xl_xact_commit*) rec; xl_xact_commit *xlrec = (xl_xact_commit *) rec;
struct tm *tm = localtime(&xlrec->xtime); struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "commit: %04u-%02u-%02u %02u:%02u:%02u", sprintf(buf + strlen(buf), "commit: %04u-%02u-%02u %02u:%02u:%02u",
@ -1799,7 +1799,7 @@ xact_desc(char *buf, uint8 xl_info, char* rec)
} }
else if (info == XLOG_XACT_ABORT) else if (info == XLOG_XACT_ABORT)
{ {
xl_xact_abort *xlrec = (xl_xact_abort*) rec; xl_xact_abort *xlrec = (xl_xact_abort *) rec;
struct tm *tm = localtime(&xlrec->xtime); struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "abort: %04u-%02u-%02u %02u:%02u:%02u", sprintf(buf + strlen(buf), "abort: %04u-%02u-%02u %02u:%02u:%02u",
@ -1811,7 +1811,7 @@ xact_desc(char *buf, uint8 xl_info, char* rec)
} }
void void
XactPushRollback(void (*func) (void *), void* data) XactPushRollback(void (*func) (void *), void *data)
{ {
#ifdef XLOG_II #ifdef XLOG_II
if (_RollbackFunc != NULL) if (_RollbackFunc != NULL)

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* $Id: xid.c,v 1.29 2001/01/24 19:42:51 momjian Exp $ * $Id: xid.c,v 1.30 2001/03/22 03:59:18 momjian Exp $
* *
* OLD COMMENTS * OLD COMMENTS
* XXX WARNING * XXX WARNING
@ -49,6 +49,7 @@ Datum
xidout(PG_FUNCTION_ARGS) xidout(PG_FUNCTION_ARGS)
{ {
TransactionId transactionId = PG_GETARG_TRANSACTIONID(0); TransactionId transactionId = PG_GETARG_TRANSACTIONID(0);
/* maximum 32 bit unsigned integer representation takes 10 chars */ /* maximum 32 bit unsigned integer representation takes 10 chars */
char *representation = palloc(11); char *representation = palloc(11);

@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.62 2001/03/18 20:18:59 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.63 2001/03/22 03:59:18 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -50,36 +50,37 @@
*/ */
#define SYNC_METHOD_FSYNC 0 #define SYNC_METHOD_FSYNC 0
#define SYNC_METHOD_FDATASYNC 1 #define SYNC_METHOD_FDATASYNC 1
#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and O_DSYNC */ #define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and
* O_DSYNC */
#if defined(O_SYNC) #if defined(O_SYNC)
# define OPEN_SYNC_FLAG O_SYNC #define OPEN_SYNC_FLAG O_SYNC
#else #else
# if defined(O_FSYNC) #if defined(O_FSYNC)
# define OPEN_SYNC_FLAG O_FSYNC #define OPEN_SYNC_FLAG O_FSYNC
# endif #endif
#endif #endif
#if defined(OPEN_SYNC_FLAG) #if defined(OPEN_SYNC_FLAG)
# if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG) #if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG)
# define OPEN_DATASYNC_FLAG O_DSYNC #define OPEN_DATASYNC_FLAG O_DSYNC
# endif #endif
#endif #endif
#if defined(OPEN_DATASYNC_FLAG) #if defined(OPEN_DATASYNC_FLAG)
# define DEFAULT_SYNC_METHOD_STR "open_datasync" #define DEFAULT_SYNC_METHOD_STR "open_datasync"
# define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN #define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
# define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG #define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
#else #else
# if defined(HAVE_FDATASYNC) #if defined(HAVE_FDATASYNC)
# define DEFAULT_SYNC_METHOD_STR "fdatasync" #define DEFAULT_SYNC_METHOD_STR "fdatasync"
# define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC #define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
# define DEFAULT_SYNC_FLAGBIT 0 #define DEFAULT_SYNC_FLAGBIT 0
# else #else
# define DEFAULT_SYNC_METHOD_STR "fsync" #define DEFAULT_SYNC_METHOD_STR "fsync"
# define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC #define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC
# define DEFAULT_SYNC_FLAGBIT 0 #define DEFAULT_SYNC_FLAGBIT 0
# endif #endif
#endif #endif
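The reindented preprocessor block above picks a default WAL sync method from whatever the platform offers (O_DSYNC, fdatasync, plain fsync, or an O_SYNC-style open flag). The sketch below shows how such a selection typically plays out at flush time; the function and enum names are invented, and only HAVE_FDATASYNC is taken from the hunk above.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative method codes; the real ones are the SYNC_METHOD_* defines. */
enum wal_sync { WS_FSYNC, WS_FDATASYNC, WS_OPEN_SYNC };

/* Flush one WAL file descriptor according to the configured method. */
static int
wal_sync_file(int fd, enum wal_sync method)
{
    switch (method)
    {
        case WS_FSYNC:
            return fsync(fd);
#ifdef HAVE_FDATASYNC                   /* configure-style macro, as above */
        case WS_FDATASYNC:
            return fdatasync(fd);
#endif
        case WS_OPEN_SYNC:
            /* Nothing to do: the file was opened with O_SYNC/O_DSYNC,
             * so every write() is already synchronous. */
            return 0;
        default:
            return -1;
    }
}

int
main(void)
{
    int fd = open("/tmp/walfile.demo", O_RDWR | O_CREAT, 0600);

    if (fd >= 0)
    {
        printf("fsync result: %d\n", wal_sync_file(fd, WS_FSYNC));
        close(fd);
    }
    return 0;
}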
@ -91,11 +92,13 @@
/* User-settable parameters */ /* User-settable parameters */
int CheckPointSegments = 3; int CheckPointSegments = 3;
int XLOGbuffers = 8; int XLOGbuffers = 8;
int XLOGfiles = 0; /* how many files to pre-allocate during ckpt */ int XLOGfiles = 0; /* how many files to pre-allocate during
* ckpt */
int XLOG_DEBUG = 0; int XLOG_DEBUG = 0;
char *XLOG_sync_method = NULL; char *XLOG_sync_method = NULL;
const char XLOG_sync_method_default[] = DEFAULT_SYNC_METHOD_STR; const char XLOG_sync_method_default[] = DEFAULT_SYNC_METHOD_STR;
char XLOG_archive_dir[MAXPGPATH]; /* null string means delete 'em */ char XLOG_archive_dir[MAXPGPATH]; /* null string means
* delete 'em */
/* these are derived from XLOG_sync_method by assign_xlog_sync_method */ /* these are derived from XLOG_sync_method by assign_xlog_sync_method */
static int sync_method = DEFAULT_SYNC_METHOD; static int sync_method = DEFAULT_SYNC_METHOD;
@ -229,6 +232,7 @@ typedef struct XLogCtlData
XLogwrtResult LogwrtResult; XLogwrtResult LogwrtResult;
/* Protected by logwrt_lck: */ /* Protected by logwrt_lck: */
XLogCtlWrite Write; XLogCtlWrite Write;
/* /*
* These values do not change after startup, although the pointed-to * These values do not change after startup, although the pointed-to
* pages and xlblocks values certainly do. Permission to read/write * pages and xlblocks values certainly do. Permission to read/write
@ -384,8 +388,10 @@ static int readFile = -1;
static uint32 readId = 0; static uint32 readId = 0;
static uint32 readSeg = 0; static uint32 readSeg = 0;
static uint32 readOff = 0; static uint32 readOff = 0;
/* Buffer for currently read page (BLCKSZ bytes) */ /* Buffer for currently read page (BLCKSZ bytes) */
static char *readBuf = NULL; static char *readBuf = NULL;
/* State information for XLOG reading */ /* State information for XLOG reading */
static XLogRecPtr ReadRecPtr; static XLogRecPtr ReadRecPtr;
static XLogRecPtr EndRecPtr; static XLogRecPtr EndRecPtr;
@ -463,8 +469,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
} }
/* /*
* In bootstrap mode, we don't actually log anything but XLOG resources; * In bootstrap mode, we don't actually log anything but XLOG
* return a phony record pointer. * resources; return a phony record pointer.
*/ */
if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID) if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
{ {
@ -479,16 +485,17 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* header isn't added into the CRC yet since we don't know the final * header isn't added into the CRC yet since we don't know the final
* length or info bits quite yet. * length or info bits quite yet.
* *
* We may have to loop back to here if a race condition is detected below. * We may have to loop back to here if a race condition is detected
* We could prevent the race by doing all this work while holding the * below. We could prevent the race by doing all this work while
* insert spinlock, but it seems better to avoid doing CRC calculations * holding the insert spinlock, but it seems better to avoid doing CRC
* while holding the lock. This means we have to be careful about * calculations while holding the lock. This means we have to be
* modifying the rdata list until we know we aren't going to loop back * careful about modifying the rdata list until we know we aren't
* again. The only change we allow ourselves to make earlier is to set * going to loop back again. The only change we allow ourselves to
* rdt->data = NULL in list items we have decided we will have to back * make earlier is to set rdt->data = NULL in list items we have
* up the whole buffer for. This is OK because we will certainly decide * decided we will have to back up the whole buffer for. This is OK
* the same thing again for those items if we do it over; doing it here * because we will certainly decide the same thing again for those
* saves an extra pass over the list later. * items if we do it over; doing it here saves an extra pass over the
* list later.
*/ */
begin:; begin:;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++) for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
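The comment describes how XLogInsert first walks the rdata chain to CRC the record payload, deferring the header and possibly repeating the pass if the redo pointer moved underneath it. The sketch below covers only the accumulation step, with a toy CRC-64 routine in place of the COMP_CRC64 macro family and a much-simplified chain node:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the chained XLogRecData entries in the hunk. */
typedef struct rec_data
{
    const void      *data;   /* payload fragment (NULL if backed-up page) */
    size_t           len;
    struct rec_data *next;
} rec_data;

/* Toy 64-bit CRC update; the real code uses the COMP_CRC64 macro family. */
static uint64_t
crc64_update(uint64_t crc, const uint8_t *p, size_t n)
{
    while (n--)
    {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (-(int64_t) (crc & 1) & 0xC96C5795D7870F42ULL);
    }
    return crc;
}

/* Accumulate the payload CRC and total length over the whole chain. */
static uint64_t
crc_of_chain(const rec_data *rdt, size_t *total_len)
{
    uint64_t crc = ~0ULL;          /* INIT step */

    *total_len = 0;
    for (; rdt != NULL; rdt = rdt->next)
    {
        if (rdt->data == NULL)
            continue;              /* fragment replaced by a backup block */
        crc = crc64_update(crc, rdt->data, rdt->len);
        *total_len += rdt->len;
    }
    return ~crc;                   /* FIN step */
}

int
main(void)
{
    rec_data b = { "world", 5, NULL };
    rec_data a = { "hello ", 6, &b };
    size_t   len;
    uint64_t crc = crc_of_chain(&a, &len);

    printf("len=%zu crc=%016llx\n", len, (unsigned long long) crc);
    return 0;
}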
@ -499,7 +506,7 @@ begin:;
INIT_CRC64(rdata_crc); INIT_CRC64(rdata_crc);
len = 0; len = 0;
for (rdt = rdata; ; ) for (rdt = rdata;;)
{ {
if (rdt->buffer == InvalidBuffer) if (rdt->buffer == InvalidBuffer)
{ {
@ -528,10 +535,11 @@ begin:;
{ {
/* OK, put it in this slot */ /* OK, put it in this slot */
dtbuf[i] = rdt->buffer; dtbuf[i] = rdt->buffer;
/* /*
* XXX We assume page LSN is first data on page * XXX We assume page LSN is first data on page
*/ */
dtbuf_lsn[i] = *((XLogRecPtr*)BufferGetBlock(rdt->buffer)); dtbuf_lsn[i] = *((XLogRecPtr *) BufferGetBlock(rdt->buffer));
if (XLByteLE(dtbuf_lsn[i], RedoRecPtr)) if (XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{ {
crc64 dtcrc; crc64 dtcrc;
@ -545,7 +553,7 @@ begin:;
dtbuf_xlg[i].node = BufferGetFileNode(dtbuf[i]); dtbuf_xlg[i].node = BufferGetFileNode(dtbuf[i]);
dtbuf_xlg[i].block = BufferGetBlockNumber(dtbuf[i]); dtbuf_xlg[i].block = BufferGetBlockNumber(dtbuf[i]);
COMP_CRC64(dtcrc, COMP_CRC64(dtcrc,
(char*) &(dtbuf_xlg[i]) + sizeof(crc64), (char *) &(dtbuf_xlg[i]) + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64)); sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(dtcrc); FIN_CRC64(dtcrc);
dtbuf_xlg[i].crc = dtcrc; dtbuf_xlg[i].crc = dtcrc;
@ -596,9 +604,9 @@ begin:;
S_UNLOCK(&(XLogCtl->info_lck)); S_UNLOCK(&(XLogCtl->info_lck));
/* /*
* If cache is half filled then try to acquire logwrt lock * If cache is half filled then try to acquire logwrt lock and
* and do LOGWRT work, but only once per XLogInsert call. * do LOGWRT work, but only once per XLogInsert call. Ignore
* Ignore any fractional blocks in performing this check. * any fractional blocks in performing this check.
*/ */
LogwrtRqst.Write.xrecoff -= LogwrtRqst.Write.xrecoff % BLCKSZ; LogwrtRqst.Write.xrecoff -= LogwrtRqst.Write.xrecoff % BLCKSZ;
if (do_logwrt && if (do_logwrt &&
@ -625,8 +633,9 @@ begin:;
/* /*
* Check to see if my RedoRecPtr is out of date. If so, may have to * Check to see if my RedoRecPtr is out of date. If so, may have to
* go back and recompute everything. This can only happen just after a * go back and recompute everything. This can only happen just after
* checkpoint, so it's better to be slow in this case and fast otherwise. * a checkpoint, so it's better to be slow in this case and fast
* otherwise.
*/ */
if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr)) if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
{ {
@ -640,9 +649,10 @@ begin:;
if (dtbuf_bkp[i] == false && if (dtbuf_bkp[i] == false &&
XLByteLE(dtbuf_lsn[i], RedoRecPtr)) XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{ {
/* /*
* Oops, this buffer now needs to be backed up, but we didn't * Oops, this buffer now needs to be backed up, but we
* think so above. Start over. * didn't think so above. Start over.
*/ */
S_UNLOCK(&(XLogCtl->insert_lck)); S_UNLOCK(&(XLogCtl->insert_lck));
END_CRIT_SECTION(); END_CRIT_SECTION();
@ -658,8 +668,9 @@ begin:;
* this loop, write_len includes the backup block data. * this loop, write_len includes the backup block data.
* *
* Also set the appropriate info bits to show which buffers were backed * Also set the appropriate info bits to show which buffers were backed
* up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th
* buffer value (ignoring InvalidBuffer) appearing in the rdata list. * distinct buffer value (ignoring InvalidBuffer) appearing in the
* rdata list.
*/ */
write_len = len; write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++) for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
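Several hunks above concern backing up whole data pages into the WAL record: a touched page whose LSN is still at or before the redo pointer gets copied in full, and the matching XLR_SET_BKP_BLOCK bit is set in the record's info byte. A bare-bones sketch of that decision, with WAL positions collapsed to integers and all names invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A WAL location reduced to one 64-bit number for the sketch. */
typedef uint64_t lsn_t;

#define MAX_BKP_BLOCKS   2                  /* XLR_MAX_BKP_BLOCKS in the hunk */
#define SET_BKP_BLOCK(i) (1u << (i))        /* info bit for backed-up block i */

/*
 * Decide which touched pages need a full copy.  A page whose LSN is still
 * at or before the redo pointer has not been written out since the last
 * checkpoint began, so a torn write of it could not be repaired from the
 * ordinary per-tuple WAL data alone.
 */
static unsigned
choose_backup_blocks(const lsn_t *page_lsn, int npages, lsn_t redo_ptr,
                     bool *needs_backup)
{
    unsigned info = 0;

    for (int i = 0; i < npages && i < MAX_BKP_BLOCKS; i++)
    {
        needs_backup[i] = (page_lsn[i] <= redo_ptr);
        if (needs_backup[i])
            info |= SET_BKP_BLOCK(i);
    }
    return info;
}

int
main(void)
{
    lsn_t    lsns[2] = { 90, 250 };
    bool     bkp[2];
    unsigned info = choose_backup_blocks(lsns, 2, 100 /* redo */, bkp);

    printf("info bits: 0x%x (page 0 backed up: %d, page 1: %d)\n",
           info, bkp[0], bkp[1]);
    return 0;
}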
@ -671,13 +682,13 @@ begin:;
rdt->next = &(dtbuf_rdt[2 * i]); rdt->next = &(dtbuf_rdt[2 * i]);
dtbuf_rdt[2 * i].data = (char*) &(dtbuf_xlg[i]); dtbuf_rdt[2 * i].data = (char *) &(dtbuf_xlg[i]);
dtbuf_rdt[2 * i].len = sizeof(BkpBlock); dtbuf_rdt[2 * i].len = sizeof(BkpBlock);
write_len += sizeof(BkpBlock); write_len += sizeof(BkpBlock);
rdt = dtbuf_rdt[2 * i].next = &(dtbuf_rdt[2 * i + 1]); rdt = dtbuf_rdt[2 * i].next = &(dtbuf_rdt[2 * i + 1]);
dtbuf_rdt[2 * i + 1].data = (char*) BufferGetBlock(dtbuf[i]); dtbuf_rdt[2 * i + 1].data = (char *) BufferGetBlock(dtbuf[i]);
dtbuf_rdt[2 * i + 1].len = BLCKSZ; dtbuf_rdt[2 * i + 1].len = BLCKSZ;
write_len += BLCKSZ; write_len += BLCKSZ;
dtbuf_rdt[2 * i + 1].next = NULL; dtbuf_rdt[2 * i + 1].next = NULL;
@ -711,7 +722,7 @@ begin:;
record->xl_rmid = rmid; record->xl_rmid = rmid;
/* Now we can finish computing the main CRC */ /* Now we can finish computing the main CRC */
COMP_CRC64(rdata_crc, (char*) record + sizeof(crc64), COMP_CRC64(rdata_crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64)); SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(rdata_crc); FIN_CRC64(rdata_crc);
record->xl_crc = rdata_crc; record->xl_crc = rdata_crc;
@ -795,14 +806,15 @@ begin:;
freespace = INSERT_FREESPACE(Insert); freespace = INSERT_FREESPACE(Insert);
/* /*
* The recptr I return is the beginning of the *next* record. * The recptr I return is the beginning of the *next* record. This
* This will be stored as LSN for changed data pages... * will be stored as LSN for changed data pages...
*/ */
INSERT_RECPTR(RecPtr, Insert, curridx); INSERT_RECPTR(RecPtr, Insert, curridx);
/* Need to update shared LogwrtRqst if some block was filled up */ /* Need to update shared LogwrtRqst if some block was filled up */
if (freespace < SizeOfXLogRecord) if (freespace < SizeOfXLogRecord)
updrqst = true; /* curridx is filled and available for writing out */ updrqst = true; /* curridx is filled and available for
* writing out */
else else
curridx = PrevBufIdx(curridx); curridx = PrevBufIdx(curridx);
WriteRqst = XLogCtl->xlblocks[curridx]; WriteRqst = XLogCtl->xlblocks[curridx];
@ -850,9 +862,9 @@ AdvanceXLInsertBuffer(void)
LogwrtResult = Insert->LogwrtResult; LogwrtResult = Insert->LogwrtResult;
/* /*
* Get ending-offset of the buffer page we need to replace (this may be * Get ending-offset of the buffer page we need to replace (this may
* zero if the buffer hasn't been used yet). Fall through if it's already * be zero if the buffer hasn't been used yet). Fall through if it's
* written out. * already written out.
*/ */
OldPageRqstPtr = XLogCtl->xlblocks[nextidx]; OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write)) if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
@ -883,8 +895,8 @@ AdvanceXLInsertBuffer(void)
} }
/* /*
* LogwrtResult lock is busy or we know the page is still dirty. * LogwrtResult lock is busy or we know the page is still
* Try to acquire logwrt lock and write full blocks. * dirty. Try to acquire logwrt lock and write full blocks.
*/ */
if (!TAS(&(XLogCtl->logwrt_lck))) if (!TAS(&(XLogCtl->logwrt_lck)))
{ {
@ -896,9 +908,10 @@ AdvanceXLInsertBuffer(void)
Insert->LogwrtResult = LogwrtResult; Insert->LogwrtResult = LogwrtResult;
break; break;
} }
/* /*
* Have to write buffers while holding insert lock. * Have to write buffers while holding insert lock. This
* This is not good, so only write as much as we absolutely * is not good, so only write as much as we absolutely
* must. * must.
*/ */
WriteRqst.Write = OldPageRqstPtr; WriteRqst.Write = OldPageRqstPtr;
@ -933,14 +946,15 @@ AdvanceXLInsertBuffer(void)
} }
Insert->curridx = nextidx; Insert->curridx = nextidx;
Insert->currpage = (XLogPageHeader) (XLogCtl->pages + nextidx * BLCKSZ); Insert->currpage = (XLogPageHeader) (XLogCtl->pages + nextidx * BLCKSZ);
Insert->currpos = ((char*) Insert->currpage) + SizeOfXLogPHD; Insert->currpos = ((char *) Insert->currpage) + SizeOfXLogPHD;
/* /*
* Be sure to re-zero the buffer so that bytes beyond what we've written * Be sure to re-zero the buffer so that bytes beyond what we've
* will look like zeroes and not valid XLOG records... * written will look like zeroes and not valid XLOG records...
*/ */
MemSet((char*) Insert->currpage, 0, BLCKSZ); MemSet((char *) Insert->currpage, 0, BLCKSZ);
Insert->currpage->xlp_magic = XLOG_PAGE_MAGIC; Insert->currpage->xlp_magic = XLOG_PAGE_MAGIC;
/* Insert->currpage->xlp_info = 0; */ /* done by memset */ /* Insert->currpage->xlp_info = 0; *//* done by memset */
Insert->currpage->xlp_sui = ThisStartUpID; Insert->currpage->xlp_sui = ThisStartUpID;
return update_needed; return update_needed;
@ -959,11 +973,15 @@ XLogWrite(XLogwrtRqst WriteRqst)
bool ispartialpage; bool ispartialpage;
bool use_existent; bool use_existent;
/* Update local LogwrtResult (caller probably did this already, but...) */ /*
* Update local LogwrtResult (caller probably did this already,
* but...)
*/
LogwrtResult = Write->LogwrtResult; LogwrtResult = Write->LogwrtResult;
while (XLByteLT(LogwrtResult.Write, WriteRqst.Write)) while (XLByteLT(LogwrtResult.Write, WriteRqst.Write))
{ {
/* /*
* Make sure we're not ahead of the insert process. This could * Make sure we're not ahead of the insert process. This could
* happen if we're passed a bogus WriteRqst.Write that is past the * happen if we're passed a bogus WriteRqst.Write that is past the
@ -979,6 +997,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg)) if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{ {
/* /*
* Switch to new logfile segment. * Switch to new logfile segment.
*/ */
@ -1011,11 +1030,12 @@ XLogWrite(XLogwrtRqst WriteRqst)
ControlFile->logSeg = openLogSeg + 1; ControlFile->logSeg = openLogSeg + 1;
ControlFile->time = time(NULL); ControlFile->time = time(NULL);
UpdateControlFile(); UpdateControlFile();
/* /*
* Signal postmaster to start a checkpoint if it's been too * Signal postmaster to start a checkpoint if it's been
* long since the last one. (We look at local copy of * too long since the last one. (We look at local copy of
* RedoRecPtr which might be a little out of date, but should * RedoRecPtr which might be a little out of date, but
* be close enough for this purpose.) * should be close enough for this purpose.)
*/ */
if (IsUnderPostmaster && if (IsUnderPostmaster &&
(openLogId != RedoRecPtr.xlogid || (openLogId != RedoRecPtr.xlogid ||
@ -1056,9 +1076,9 @@ XLogWrite(XLogwrtRqst WriteRqst)
/* /*
* If we just wrote the whole last page of a logfile segment, * If we just wrote the whole last page of a logfile segment,
* fsync the segment immediately. This avoids having to go back * fsync the segment immediately. This avoids having to go back
* and re-open prior segments when an fsync request comes along later. * and re-open prior segments when an fsync request comes along
* Doing it here ensures that one and only one backend will perform * later. Doing it here ensures that one and only one backend will
* this fsync. * perform this fsync.
*/ */
if (openLogOff >= XLogSegSize && !ispartialpage) if (openLogOff >= XLogSegSize && !ispartialpage)
{ {
@ -1081,10 +1101,11 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) && if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) &&
XLByteLT(LogwrtResult.Flush, LogwrtResult.Write)) XLByteLT(LogwrtResult.Flush, LogwrtResult.Write))
{ {
/* /*
* Could get here without iterating above loop, in which case * Could get here without iterating above loop, in which case we
* we might have no open file or the wrong one. However, we do * might have no open file or the wrong one. However, we do not
* not need to fsync more than one file. * need to fsync more than one file.
*/ */
if (sync_method != SYNC_METHOD_OPEN) if (sync_method != SYNC_METHOD_OPEN)
{ {
@ -1110,8 +1131,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
/* /*
* Update shared-memory status * Update shared-memory status
* *
* We make sure that the shared 'request' values do not fall behind * We make sure that the shared 'request' values do not fall behind the
* the 'result' values. This is not absolutely essential, but it saves * 'result' values. This is not absolutely essential, but it saves
* some code in a couple of places. * some code in a couple of places.
*/ */
S_LOCK(&(XLogCtl->info_lck)); S_LOCK(&(XLogCtl->info_lck));
@ -1163,8 +1184,9 @@ XLogFlush(XLogRecPtr record)
* Since fsync is usually a horribly expensive operation, we try to * Since fsync is usually a horribly expensive operation, we try to
* piggyback as much data as we can on each fsync: if we see any more * piggyback as much data as we can on each fsync: if we see any more
* data entered into the xlog buffer, we'll write and fsync that too, * data entered into the xlog buffer, we'll write and fsync that too,
* so that the final value of LogwrtResult.Flush is as large as possible. * so that the final value of LogwrtResult.Flush is as large as
* This gives us some chance of avoiding another fsync immediately after. * possible. This gives us some chance of avoiding another fsync
* immediately after.
*/ */
/* initialize to given target; may increase below */ /* initialize to given target; may increase below */
@ -1192,9 +1214,7 @@ XLogFlush(XLogRecPtr record)
uint32 freespace = INSERT_FREESPACE(Insert); uint32 freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord) /* buffer is full */ if (freespace < SizeOfXLogRecord) /* buffer is full */
{
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx]; WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
}
else else
{ {
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx]; WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
@ -1257,7 +1277,8 @@ XLogFileInit(uint32 log, uint32 seg,
XLogFileName(path, log, seg); XLogFileName(path, log, seg);
/* /*
* Try to use existent file (checkpoint maker may have created it already) * Try to use existent file (checkpoint maker may have created it
* already)
*/ */
if (*use_existent) if (*use_existent)
{ {
@ -1270,14 +1291,14 @@ XLogFileInit(uint32 log, uint32 seg,
log, seg); log, seg);
} }
else else
return(fd); return (fd);
} }
/* /*
* Initialize an empty (all zeroes) segment. NOTE: it is possible that * Initialize an empty (all zeroes) segment. NOTE: it is possible
* another process is doing the same thing. If so, we will end up * that another process is doing the same thing. If so, we will end
* pre-creating an extra log segment. That seems OK, and better than * up pre-creating an extra log segment. That seems OK, and better
* holding the spinlock throughout this lengthy process. * than holding the spinlock throughout this lengthy process.
*/ */
snprintf(tmppath, MAXPGPATH, "%s%cxlogtemp.%d", snprintf(tmppath, MAXPGPATH, "%s%cxlogtemp.%d",
XLogDir, SEP_CHAR, (int) getpid()); XLogDir, SEP_CHAR, (int) getpid());
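The hunk above belongs to XLogFileInit, which reuses an already pre-created segment when possible and otherwise builds a zero-filled file under a temporary name, so a concurrent creator at worst leaves one spare segment behind. A condensed sketch of the zero-fill-then-install pattern; paths, sizes, and the simple rename() install are illustrative only, since the real code also deals with link()-based installation and error reporting:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SEG_SIZE (16 * 1024 * 1024)     /* illustrative segment size */
#define BLK_SIZE 8192

/* Create a zero-filled segment at final_path via a temp file. */
static int
create_zeroed_segment(const char *final_path)
{
    char tmp_path[256];
    char zeroes[BLK_SIZE];
    int  fd;

    snprintf(tmp_path, sizeof(tmp_path), "%s.tmp.%d", final_path, (int) getpid());
    memset(zeroes, 0, sizeof(zeroes));

    fd = open(tmp_path, O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd < 0)
        return -1;

    /* Write real zeroes so the space is allocated up front, not a hole. */
    for (long written = 0; written < SEG_SIZE; written += BLK_SIZE)
    {
        if (write(fd, zeroes, BLK_SIZE) != BLK_SIZE)
        {
            close(fd);
            unlink(tmp_path);          /* release the disk space on failure */
            return -1;
        }
    }
    if (fsync(fd) != 0 || close(fd) != 0)
    {
        unlink(tmp_path);
        return -1;
    }

    /* Install under the final name; a racing creator just wastes one file. */
    if (rename(tmp_path, final_path) != 0)
    {
        unlink(tmp_path);
        return -1;
    }
    return 0;
}

int
main(void)
{
    return create_zeroed_segment("/tmp/demo_xlog_segment") == 0 ? 0 : 1;
}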
@ -1306,7 +1327,10 @@ XLogFileInit(uint32 log, uint32 seg,
{ {
int save_errno = errno; int save_errno = errno;
/* If we fail to make the file, delete it to release disk space */ /*
* If we fail to make the file, delete it to release disk
* space
*/
unlink(tmppath); unlink(tmppath);
errno = save_errno; errno = save_errno;
@ -1336,10 +1360,8 @@ XLogFileInit(uint32 log, uint32 seg,
targseg = seg; targseg = seg;
strcpy(targpath, path); strcpy(targpath, path);
if (! *use_existent) if (!*use_existent)
{
unlink(targpath); unlink(targpath);
}
else else
{ {
while ((fd = BasicOpenFile(targpath, O_RDWR | PG_BINARY, while ((fd = BasicOpenFile(targpath, O_RDWR | PG_BINARY,
@ -1499,13 +1521,13 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
char *blk; char *blk;
int i; int i;
blk = (char*)XLogRecGetData(record) + record->xl_len; blk = (char *) XLogRecGetData(record) + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++) for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{ {
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i))) if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
continue; continue;
memcpy((char*)&bkpb, blk, sizeof(BkpBlock)); memcpy((char *) &bkpb, blk, sizeof(BkpBlock));
blk += sizeof(BkpBlock); blk += sizeof(BkpBlock);
reln = XLogOpenRelation(true, record->xl_rmid, bkpb.node); reln = XLogOpenRelation(true, record->xl_rmid, bkpb.node);
@ -1516,7 +1538,7 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
if (BufferIsValid(buffer)) if (BufferIsValid(buffer))
{ {
page = (Page) BufferGetPage(buffer); page = (Page) BufferGetPage(buffer);
memcpy((char*)page, blk, BLCKSZ); memcpy((char *) page, blk, BLCKSZ);
PageSetLSN(page, lsn); PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID); PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer); UnlockAndWriteBuffer(buffer);
@ -1546,7 +1568,7 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
/* Check CRC of rmgr data and record header */ /* Check CRC of rmgr data and record header */
INIT_CRC64(crc); INIT_CRC64(crc);
COMP_CRC64(crc, XLogRecGetData(record), len); COMP_CRC64(crc, XLogRecGetData(record), len);
COMP_CRC64(crc, (char*) record + sizeof(crc64), COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64)); SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc); FIN_CRC64(crc);
@ -1554,11 +1576,11 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
{ {
elog(emode, "ReadRecord: bad rmgr data CRC in record at %u/%u", elog(emode, "ReadRecord: bad rmgr data CRC in record at %u/%u",
recptr.xlogid, recptr.xrecoff); recptr.xlogid, recptr.xrecoff);
return(false); return (false);
} }
/* Check CRCs of backup blocks, if any */ /* Check CRCs of backup blocks, if any */
blk = (char*)XLogRecGetData(record) + len; blk = (char *) XLogRecGetData(record) + len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++) for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{ {
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i))) if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
@ -1569,18 +1591,19 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
COMP_CRC64(crc, blk + sizeof(crc64), COMP_CRC64(crc, blk + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64)); sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(crc); FIN_CRC64(crc);
memcpy((char*)&cbuf, blk, sizeof(crc64)); /* don't assume alignment */ memcpy((char *) &cbuf, blk, sizeof(crc64)); /* don't assume
* alignment */
if (!EQ_CRC64(cbuf, crc)) if (!EQ_CRC64(cbuf, crc))
{ {
elog(emode, "ReadRecord: bad bkp block %d CRC in record at %u/%u", elog(emode, "ReadRecord: bad bkp block %d CRC in record at %u/%u",
i + 1, recptr.xlogid, recptr.xrecoff); i + 1, recptr.xlogid, recptr.xrecoff);
return(false); return (false);
} }
blk += sizeof(BkpBlock) + BLCKSZ; blk += sizeof(BkpBlock) + BLCKSZ;
} }
return(true); return (true);
} }
/* /*
@ -1609,13 +1632,14 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
if (readBuf == NULL) if (readBuf == NULL)
{ {
/* /*
* First time through, permanently allocate readBuf. We do it * First time through, permanently allocate readBuf. We do it
* this way, rather than just making a static array, for two * this way, rather than just making a static array, for two
* reasons: (1) no need to waste the storage in most instantiations * reasons: (1) no need to waste the storage in most
* of the backend; (2) a static char array isn't guaranteed to * instantiations of the backend; (2) a static char array isn't
* have any particular alignment, whereas malloc() will provide * guaranteed to have any particular alignment, whereas malloc()
* MAXALIGN'd storage. * will provide MAXALIGN'd storage.
*/ */
readBuf = (char *) malloc(BLCKSZ); readBuf = (char *) malloc(BLCKSZ);
Assert(readBuf != NULL); Assert(readBuf != NULL);
@ -1656,7 +1680,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
readFile = XLogFileOpen(readId, readSeg, (emode == LOG)); readFile = XLogFileOpen(readId, readSeg, (emode == LOG));
if (readFile < 0) if (readFile < 0)
goto next_record_is_invalid; goto next_record_is_invalid;
readOff = (uint32) (-1); /* force read to occur below */ readOff = (uint32) (-1);/* force read to occur below */
} }
targetPageOff = ((RecPtr->xrecoff % XLogSegSize) / BLCKSZ) * BLCKSZ; targetPageOff = ((RecPtr->xrecoff % XLogSegSize) / BLCKSZ) * BLCKSZ;
@ -1688,9 +1712,10 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ); record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);
got_record:; got_record:;
/* /*
* Currently, xl_len == 0 must be bad data, but that might not be * Currently, xl_len == 0 must be bad data, but that might not be true
* true forever. See note in XLogInsert. * forever. See note in XLogInsert.
*/ */
if (record->xl_len == 0) if (record->xl_len == 0)
{ {
@ -1698,8 +1723,10 @@ got_record:;
RecPtr->xlogid, RecPtr->xrecoff); RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid; goto next_record_is_invalid;
} }
/* /*
* Compute total length of record including any appended backup blocks. * Compute total length of record including any appended backup
* blocks.
*/ */
total_len = SizeOfXLogRecord + record->xl_len; total_len = SizeOfXLogRecord + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++) for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@ -1708,6 +1735,7 @@ got_record:;
continue; continue;
total_len += sizeof(BkpBlock) + BLCKSZ; total_len += sizeof(BkpBlock) + BLCKSZ;
} }
/* /*
* Make sure it will fit in buffer (currently, it is mechanically * Make sure it will fit in buffer (currently, it is mechanically
* impossible for this test to fail, but it seems like a good idea * impossible for this test to fail, but it seems like a good idea
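As reflowed above, ReadRecord sizes its workspace as the fixed record header plus xl_len bytes of resource-manager data plus one BkpBlock header and one full page for every backup-block bit set in xl_info. The same arithmetic in one small function, with invented stand-ins for the PostgreSQL constants:

#include <stdint.h>
#include <stdio.h>

#define BLK_SIZE            8192      /* stands in for BLCKSZ             */
#define RECORD_HEADER_SIZE  32        /* stands in for SizeOfXLogRecord   */
#define BKP_HEADER_SIZE     24        /* stands in for sizeof(BkpBlock)   */
#define MAX_BKP_BLOCKS      2         /* stands in for XLR_MAX_BKP_BLOCKS */
#define SET_BKP_BLOCK(i)    (1u << (i))

/* Total on-disk length of a record: header + rmgr data + backup pages. */
static uint32_t
record_total_len(uint32_t xl_len, unsigned xl_info)
{
    uint32_t total = RECORD_HEADER_SIZE + xl_len;

    for (int i = 0; i < MAX_BKP_BLOCKS; i++)
        if (xl_info & SET_BKP_BLOCK(i))
            total += BKP_HEADER_SIZE + BLK_SIZE;
    return total;
}

int
main(void)
{
    printf("%u\n", (unsigned) record_total_len(100, SET_BKP_BLOCK(0)));
    return 0;
}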
@ -1774,7 +1802,7 @@ got_record:;
len = BLCKSZ - SizeOfXLogPHD - SizeOfXLogContRecord; len = BLCKSZ - SizeOfXLogPHD - SizeOfXLogContRecord;
if (contrecord->xl_rem_len > len) if (contrecord->xl_rem_len > len)
{ {
memcpy(buffer, (char *)contrecord + SizeOfXLogContRecord, len); memcpy(buffer, (char *) contrecord + SizeOfXLogContRecord, len);
gotlen += len; gotlen += len;
buffer += len; buffer += len;
continue; continue;
@ -1839,14 +1867,16 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_info, readId, readSeg, readOff); hdr->xlp_info, readId, readSeg, readOff);
return false; return false;
} }
/* /*
* We disbelieve a SUI less than the previous page's SUI, or more * We disbelieve a SUI less than the previous page's SUI, or more than
* than a few counts greater. In theory as many as 512 shutdown * a few counts greater. In theory as many as 512 shutdown checkpoint
* checkpoint records could appear on a 32K-sized xlog page, so * records could appear on a 32K-sized xlog page, so that's the most
* that's the most differential there could legitimately be. * differential there could legitimately be.
* *
* Note this check can only be applied when we are reading the next page * Note this check can only be applied when we are reading the next page
* in sequence, so ReadRecord passes a flag indicating whether to check. * in sequence, so ReadRecord passes a flag indicating whether to
* check.
*/ */
if (checkSUI) if (checkSUI)
{ {
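The comment spells out the sanity window for a page's startup ID: it may not be smaller than the previous page's, and may not exceed it by more than the 512 shutdown-checkpoint records one 32K xlog page could hold. A literal sketch of that bounds check, with invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SUI_JUMP 512   /* most shutdown checkpoints one page could hold */

/* Accept a page's startup id only if it stays within a plausible window. */
static bool
sui_is_plausible(uint32_t page_sui, uint32_t prev_sui)
{
    return page_sui >= prev_sui && page_sui <= prev_sui + MAX_SUI_JUMP;
}

int
main(void)
{
    printf("%d %d\n", sui_is_plausible(10, 9), sui_is_plausible(600, 9));
    return 0;
}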
@ -1891,8 +1921,10 @@ WriteControlFile(void)
{ {
int fd; int fd;
char buffer[BLCKSZ]; /* need not be aligned */ char buffer[BLCKSZ]; /* need not be aligned */
#ifdef USE_LOCALE #ifdef USE_LOCALE
char *localeptr; char *localeptr;
#endif #endif
/* /*
@ -1911,10 +1943,11 @@ WriteControlFile(void)
if (!localeptr) if (!localeptr)
elog(STOP, "Invalid LC_CTYPE setting"); elog(STOP, "Invalid LC_CTYPE setting");
StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN); StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
/* /*
* Issue warning notice if initdb'ing in a locale that will not permit * Issue warning notice if initdb'ing in a locale that will not permit
* LIKE index optimization. This is not a clean place to do it, but * LIKE index optimization. This is not a clean place to do it, but I
* I don't see a better place either... * don't see a better place either...
*/ */
if (!locale_is_like_safe()) if (!locale_is_like_safe())
elog(NOTICE, "Initializing database with %s collation order." elog(NOTICE, "Initializing database with %s collation order."
@ -1931,16 +1964,16 @@ WriteControlFile(void)
/* Contents are protected with a CRC */ /* Contents are protected with a CRC */
INIT_CRC64(ControlFile->crc); INIT_CRC64(ControlFile->crc);
COMP_CRC64(ControlFile->crc, COMP_CRC64(ControlFile->crc,
(char*) ControlFile + sizeof(crc64), (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64)); sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc); FIN_CRC64(ControlFile->crc);
/* /*
* We write out BLCKSZ bytes into pg_control, zero-padding the * We write out BLCKSZ bytes into pg_control, zero-padding the excess
* excess over sizeof(ControlFileData). This reduces the odds * over sizeof(ControlFileData). This reduces the odds of
* of premature-EOF errors when reading pg_control. We'll still * premature-EOF errors when reading pg_control. We'll still fail
* fail when we check the contents of the file, but hopefully with * when we check the contents of the file, but hopefully with a more
* a more specific error than "couldn't read pg_control". * specific error than "couldn't read pg_control".
*/ */
if (sizeof(ControlFileData) > BLCKSZ) if (sizeof(ControlFileData) > BLCKSZ)
elog(STOP, "sizeof(ControlFileData) is too large ... fix xlog.c"); elog(STOP, "sizeof(ControlFileData) is too large ... fix xlog.c");
@ -1994,7 +2027,7 @@ ReadControlFile(void)
/* Now check the CRC. */ /* Now check the CRC. */
INIT_CRC64(crc); INIT_CRC64(crc);
COMP_CRC64(crc, COMP_CRC64(crc,
(char*) ControlFile + sizeof(crc64), (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64)); sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(crc); FIN_CRC64(crc);
@ -2002,10 +2035,11 @@ ReadControlFile(void)
elog(STOP, "Invalid CRC in control file"); elog(STOP, "Invalid CRC in control file");
/* /*
* Do compatibility checking immediately. We do this here for 2 reasons: * Do compatibility checking immediately. We do this here for 2
* reasons:
* *
* (1) if the database isn't compatible with the backend executable, * (1) if the database isn't compatible with the backend executable, we
* we want to abort before we can possibly do any damage; * want to abort before we can possibly do any damage;
* *
* (2) this code is executed in the postmaster, so the setlocale() will * (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file * propagate to forked backends, which aren't going to read this file
@ -2043,7 +2077,7 @@ UpdateControlFile(void)
INIT_CRC64(ControlFile->crc); INIT_CRC64(ControlFile->crc);
COMP_CRC64(ControlFile->crc, COMP_CRC64(ControlFile->crc,
(char*) ControlFile + sizeof(crc64), (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64)); sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc); FIN_CRC64(ControlFile->crc);
@ -2096,6 +2130,7 @@ XLOGShmemInit(void)
Assert(!found); Assert(!found);
memset(XLogCtl, 0, sizeof(XLogCtlData)); memset(XLogCtl, 0, sizeof(XLogCtlData));
/* /*
* Since XLogCtlData contains XLogRecPtr fields, its sizeof should be * Since XLogCtlData contains XLogRecPtr fields, its sizeof should be
* a multiple of the alignment for same, so no extra alignment padding * a multiple of the alignment for same, so no extra alignment padding
@ -2104,9 +2139,10 @@ XLOGShmemInit(void)
XLogCtl->xlblocks = (XLogRecPtr *) XLogCtl->xlblocks = (XLogRecPtr *)
(((char *) XLogCtl) + sizeof(XLogCtlData)); (((char *) XLogCtl) + sizeof(XLogCtlData));
memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers); memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
/* /*
* Here, on the other hand, we must MAXALIGN to ensure the page buffers * Here, on the other hand, we must MAXALIGN to ensure the page
* have worst-case alignment. * buffers have worst-case alignment.
*/ */
XLogCtl->pages = XLogCtl->pages =
((char *) XLogCtl) + MAXALIGN(sizeof(XLogCtlData) + ((char *) XLogCtl) + MAXALIGN(sizeof(XLogCtlData) +
@ -2114,8 +2150,8 @@ XLOGShmemInit(void)
memset(XLogCtl->pages, 0, BLCKSZ * XLOGbuffers); memset(XLogCtl->pages, 0, BLCKSZ * XLOGbuffers);
/* /*
* Do basic initialization of XLogCtl shared data. * Do basic initialization of XLogCtl shared data. (StartupXLOG will
* (StartupXLOG will fill in additional info.) * fill in additional info.)
*/ */
XLogCtl->XLogCacheByte = BLCKSZ * XLOGbuffers; XLogCtl->XLogCacheByte = BLCKSZ * XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1; XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
@ -2180,7 +2216,7 @@ BootStrapXLOG(void)
INIT_CRC64(crc); INIT_CRC64(crc);
COMP_CRC64(crc, &checkPoint, sizeof(checkPoint)); COMP_CRC64(crc, &checkPoint, sizeof(checkPoint));
COMP_CRC64(crc, (char*) record + sizeof(crc64), COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64)); SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc); FIN_CRC64(crc);
record->xl_crc = crc; record->xl_crc = crc;
@ -2246,8 +2282,8 @@ StartupXLOG(void)
/* /*
* Read control file and check XLOG status looks valid. * Read control file and check XLOG status looks valid.
* *
* Note: in most control paths, *ControlFile is already valid and we * Note: in most control paths, *ControlFile is already valid and we need
* need not do ReadControlFile() here, but might as well do it to be sure. * not do ReadControlFile() here, but might as well do it to be sure.
*/ */
ReadControlFile(); ReadControlFile();
@ -2297,10 +2333,8 @@ StartupXLOG(void)
InRecovery = true; /* force recovery even if SHUTDOWNED */ InRecovery = true; /* force recovery even if SHUTDOWNED */
} }
else else
{
elog(STOP, "Unable to locate a valid CheckPoint record"); elog(STOP, "Unable to locate a valid CheckPoint record");
} }
}
LastRec = RecPtr = checkPointLoc; LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint)); memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN); wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
@ -2336,9 +2370,7 @@ StartupXLOG(void)
InRecovery = true; InRecovery = true;
} }
else if (ControlFile->state != DB_SHUTDOWNED) else if (ControlFile->state != DB_SHUTDOWNED)
{
InRecovery = true; InRecovery = true;
}
/* REDO */ /* REDO */
if (InRecovery) if (InRecovery)
@ -2355,7 +2387,8 @@ StartupXLOG(void)
/* Is REDO required ? */ /* Is REDO required ? */
if (XLByteLT(checkPoint.redo, RecPtr)) if (XLByteLT(checkPoint.redo, RecPtr))
record = ReadRecord(&(checkPoint.redo), STOP, buffer); record = ReadRecord(&(checkPoint.redo), STOP, buffer);
else /* read past CheckPoint record */ else
/* read past CheckPoint record */
record = ReadRecord(NULL, LOG, buffer); record = ReadRecord(NULL, LOG, buffer);
if (record != NULL) if (record != NULL)
@ -2411,8 +2444,11 @@ StartupXLOG(void)
XLogCtl->xlblocks[0].xrecoff = XLogCtl->xlblocks[0].xrecoff =
((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ; ((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
Insert = &XLogCtl->Insert; Insert = &XLogCtl->Insert;
/* Tricky point here: readBuf contains the *last* block that the LastRec
* record spans, not the one it starts in, which is what we want. /*
* Tricky point here: readBuf contains the *last* block that the
* LastRec record spans, not the one it starts in, which is what we
* want.
*/ */
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize); Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ); memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@ -2458,6 +2494,7 @@ StartupXLOG(void)
if (InRecovery) if (InRecovery)
{ {
/* /*
* In case we had to use the secondary checkpoint, make sure that * In case we had to use the secondary checkpoint, make sure that
* it will still be shown as the secondary checkpoint after this * it will still be shown as the secondary checkpoint after this
@ -2639,17 +2676,17 @@ CreateCheckPoint(bool shutdown)
/* /*
* If this isn't a shutdown, and we have not inserted any XLOG records * If this isn't a shutdown, and we have not inserted any XLOG records
* since the start of the last checkpoint, skip the checkpoint. The * since the start of the last checkpoint, skip the checkpoint. The
* idea here is to avoid inserting duplicate checkpoints when the system * idea here is to avoid inserting duplicate checkpoints when the
* is idle. That wastes log space, and more importantly it exposes us to * system is idle. That wastes log space, and more importantly it
* possible loss of both current and previous checkpoint records if the * exposes us to possible loss of both current and previous checkpoint
* machine crashes just as we're writing the update. (Perhaps it'd make * records if the machine crashes just as we're writing the update.
* even more sense to checkpoint only when the previous checkpoint record * (Perhaps it'd make even more sense to checkpoint only when the
* is in a different xlog page?) * previous checkpoint record is in a different xlog page?)
* *
* We have to make two tests to determine that nothing has happened since * We have to make two tests to determine that nothing has happened since
* the start of the last checkpoint: current insertion point must match * the start of the last checkpoint: current insertion point must
* the end of the last checkpoint record, and its redo pointer must point * match the end of the last checkpoint record, and its redo pointer
* to itself. * must point to itself.
*/ */
if (!shutdown) if (!shutdown)
{ {
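The rewrapped comment gives the two tests for skipping an online checkpoint on an idle system: the current insertion point must still equal the end of the previous checkpoint record, and that checkpoint's redo pointer must point at the record itself. A compact sketch of the predicate, with WAL positions reduced to plain integers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lsn_t;   /* a WAL position collapsed to one number */

/*
 * Nothing has been inserted since the last checkpoint if (a) the insert
 * pointer still sits exactly at the end of that checkpoint record and
 * (b) the checkpoint's redo pointer referred to the record itself.
 */
static bool
can_skip_checkpoint(bool shutdown, lsn_t insert_ptr,
                    lsn_t last_ckpt_end, lsn_t last_ckpt_redo,
                    lsn_t last_ckpt_start)
{
    if (shutdown)
        return false;                       /* shutdown always checkpoints */
    return insert_ptr == last_ckpt_end &&
           last_ckpt_redo == last_ckpt_start;
}

int
main(void)
{
    printf("%d\n", can_skip_checkpoint(false, 500, 500, 400, 400));
    return 0;
}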
@ -2687,16 +2724,18 @@ CreateCheckPoint(bool shutdown)
freespace = BLCKSZ - SizeOfXLogPHD; freespace = BLCKSZ - SizeOfXLogPHD;
} }
INSERT_RECPTR(checkPoint.redo, Insert, Insert->curridx); INSERT_RECPTR(checkPoint.redo, Insert, Insert->curridx);
/* /*
* Here we update the shared RedoRecPtr for future XLogInsert calls; * Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock. * this must be done while holding the insert lock.
*/ */
RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo; RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
/* /*
* Get UNDO record ptr - this is oldest of PROC->logRec values. * Get UNDO record ptr - this is oldest of PROC->logRec values. We do
* We do this while holding insert lock to ensure that we won't miss * this while holding insert lock to ensure that we won't miss any
* any about-to-commit transactions (UNDO must include all xacts that * about-to-commit transactions (UNDO must include all xacts that have
* have commits after REDO point). * commits after REDO point).
*/ */
checkPoint.undo = GetUndoRecPtr(); checkPoint.undo = GetUndoRecPtr();
@ -2720,8 +2759,8 @@ CreateCheckPoint(bool shutdown)
SpinRelease(OidGenLockId); SpinRelease(OidGenLockId);
/* /*
* Having constructed the checkpoint record, ensure all shmem disk buffers * Having constructed the checkpoint record, ensure all shmem disk
* are flushed to disk. * buffers are flushed to disk.
*/ */
FlushBufferPool(); FlushBufferPool();
@ -2729,7 +2768,7 @@ CreateCheckPoint(bool shutdown)
* Now insert the checkpoint record into XLOG. * Now insert the checkpoint record into XLOG.
*/ */
rdata.buffer = InvalidBuffer; rdata.buffer = InvalidBuffer;
rdata.data = (char *)(&checkPoint); rdata.data = (char *) (&checkPoint);
rdata.len = sizeof(checkPoint); rdata.len = sizeof(checkPoint);
rdata.next = NULL; rdata.next = NULL;
@ -2748,9 +2787,9 @@ CreateCheckPoint(bool shutdown)
elog(STOP, "XLog concurrent activity while data base is shutting down"); elog(STOP, "XLog concurrent activity while data base is shutting down");
/* /*
* Remember location of prior checkpoint's earliest info. * Remember location of prior checkpoint's earliest info. Oldest item
* Oldest item is redo or undo, whichever is older; but watch out * is redo or undo, whichever is older; but watch out for case that
* for case that undo = 0. * undo = 0.
*/ */
if (ControlFile->checkPointCopy.undo.xrecoff != 0 && if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
XLByteLT(ControlFile->checkPointCopy.undo, XLByteLT(ControlFile->checkPointCopy.undo,
@ -2804,7 +2843,7 @@ XLogPutNextOid(Oid nextOid)
XLogRecData rdata; XLogRecData rdata;
rdata.buffer = InvalidBuffer; rdata.buffer = InvalidBuffer;
rdata.data = (char *)(&nextOid); rdata.data = (char *) (&nextOid);
rdata.len = sizeof(Oid); rdata.len = sizeof(Oid);
rdata.next = NULL; rdata.next = NULL;
(void) XLogInsert(RM_XLOG_ID, XLOG_NEXTOID, &rdata); (void) XLogInsert(RM_XLOG_ID, XLOG_NEXTOID, &rdata);
@ -2846,9 +2885,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint)); memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
/* In an ONLINE checkpoint, treat the counters like NEXTOID */ /* In an ONLINE checkpoint, treat the counters like NEXTOID */
if (ShmemVariableCache->nextXid < checkPoint.nextXid) if (ShmemVariableCache->nextXid < checkPoint.nextXid)
{
ShmemVariableCache->nextXid = checkPoint.nextXid; ShmemVariableCache->nextXid = checkPoint.nextXid;
}
if (ShmemVariableCache->nextOid < checkPoint.nextOid) if (ShmemVariableCache->nextOid < checkPoint.nextOid)
{ {
ShmemVariableCache->nextOid = checkPoint.nextOid; ShmemVariableCache->nextOid = checkPoint.nextOid;
@ -2863,14 +2900,15 @@ xlog_undo(XLogRecPtr lsn, XLogRecord *record)
} }
void void
xlog_desc(char *buf, uint8 xl_info, char* rec) xlog_desc(char *buf, uint8 xl_info, char *rec)
{ {
uint8 info = xl_info & ~XLR_INFO_MASK; uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_CHECKPOINT_SHUTDOWN || if (info == XLOG_CHECKPOINT_SHUTDOWN ||
info == XLOG_CHECKPOINT_ONLINE) info == XLOG_CHECKPOINT_ONLINE)
{ {
CheckPoint *checkpoint = (CheckPoint*) rec; CheckPoint *checkpoint = (CheckPoint *) rec;
sprintf(buf + strlen(buf), "checkpoint: redo %u/%u; undo %u/%u; " sprintf(buf + strlen(buf), "checkpoint: redo %u/%u; undo %u/%u; "
"sui %u; xid %u; oid %u; %s", "sui %u; xid %u; oid %u; %s",
checkpoint->redo.xlogid, checkpoint->redo.xrecoff, checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
@ -2923,15 +2961,19 @@ xlog_outrec(char *buf, XLogRecord *record)
bool bool
check_xlog_sync_method(const char *method) check_xlog_sync_method(const char *method)
{ {
if (strcasecmp(method, "fsync") == 0) return true; if (strcasecmp(method, "fsync") == 0)
return true;
#ifdef HAVE_FDATASYNC #ifdef HAVE_FDATASYNC
if (strcasecmp(method, "fdatasync") == 0) return true; if (strcasecmp(method, "fdatasync") == 0)
return true;
#endif #endif
#ifdef OPEN_SYNC_FLAG #ifdef OPEN_SYNC_FLAG
if (strcasecmp(method, "open_sync") == 0) return true; if (strcasecmp(method, "open_sync") == 0)
return true;
#endif #endif
#ifdef OPEN_DATASYNC_FLAG #ifdef OPEN_DATASYNC_FLAG
if (strcasecmp(method, "open_datasync") == 0) return true; if (strcasecmp(method, "open_datasync") == 0)
return true;
#endif #endif
return false; return false;
} }
@ -2978,11 +3020,12 @@ assign_xlog_sync_method(const char *method)
if (sync_method != new_sync_method || open_sync_bit != new_sync_bit) if (sync_method != new_sync_method || open_sync_bit != new_sync_bit)
{ {
/* /*
* To ensure that no blocks escape unsynced, force an fsync on * To ensure that no blocks escape unsynced, force an fsync on the
* the currently open log segment (if any). Also, if the open * currently open log segment (if any). Also, if the open flag is
* flag is changing, close the log file so it will be reopened * changing, close the log file so it will be reopened (with new
* (with new flag bit) at next use. * flag bit) at next use.
*/ */
if (openLogFile >= 0) if (openLogFile >= 0)
{ {
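assign_xlog_sync_method, shown above, fsyncs the currently open log segment when the sync method changes, and closes it if the O_SYNC-style open flag changes so the next open picks up the new flag. A rough sketch of that handover with the module state reduced to a single descriptor; all names are invented:

#include <stdio.h>
#include <unistd.h>

/* Hypothetical module state: the open WAL segment and sync settings. */
static int open_log_fd   = -1;
static int sync_method   = 0;
static int open_sync_bit = 0;

static void
switch_sync_method(int new_method, int new_sync_bit)
{
    if (sync_method == new_method && open_sync_bit == new_sync_bit)
        return;                                   /* nothing changes */

    if (open_log_fd >= 0)
    {
        /* Flush whatever was written under the old method's guarantees. */
        if (fsync(open_log_fd) != 0)
            perror("fsync");

        /* If the O_SYNC-style open flag changes, the file must be
         * reopened with the new flag, so close it here and let the
         * next write path open it again. */
        if (open_sync_bit != new_sync_bit)
        {
            close(open_log_fd);
            open_log_fd = -1;
        }
    }
    sync_method   = new_method;
    open_sync_bit = new_sync_bit;
}

int
main(void)
{
    switch_sync_method(1, 0);   /* e.g. fsync -> fdatasync, same open flag */
    return 0;
}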

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.14 2001/03/13 01:17:05 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.15 2001/03/22 03:59:18 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -52,11 +52,11 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode); reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln)) if (!RelationIsValid(reln))
return(0); return (0);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr)); buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer)) if (!BufferIsValid(buffer))
return(0); return (0);
LockBuffer(buffer, BUFFER_LOCK_SHARE); LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer); page = (Page) BufferGetPage(buffer);
@ -64,13 +64,13 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page)) ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{ {
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return(0); return (0);
} }
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr)); lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp)) if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{ {
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return(0); return (0);
} }
htup = (HeapTupleHeader) PageGetItem(page, lp); htup = (HeapTupleHeader) PageGetItem(page, lp);
@ -79,11 +79,11 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
if (htup->t_xmin != xid || htup->t_cmin != cid) if (htup->t_xmin != xid || htup->t_cmin != cid)
{ {
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return(-1); return (-1);
} }
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return(1); return (1);
} }
/* /*
@ -103,11 +103,11 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode); reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln)) if (!RelationIsValid(reln))
return(false); return (false);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr)); buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer)) if (!BufferIsValid(buffer))
return(false); return (false);
LockBuffer(buffer, BUFFER_LOCK_SHARE); LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer); page = (Page) BufferGetPage(buffer);
@ -115,21 +115,21 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page)) ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{ {
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return(false); return (false);
} }
if (PageGetSUI(page) != ThisStartUpID) if (PageGetSUI(page) != ThisStartUpID)
{ {
Assert(PageGetSUI(page) < ThisStartUpID); Assert(PageGetSUI(page) < ThisStartUpID);
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return(true); return (true);
} }
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr)); lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp)) if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{ {
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return(false); return (false);
} }
htup = (HeapTupleHeader) PageGetItem(page, lp); htup = (HeapTupleHeader) PageGetItem(page, lp);
@ -140,16 +140,16 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
{ {
if (htup->t_infomask & HEAP_XMIN_INVALID || if (htup->t_infomask & HEAP_XMIN_INVALID ||
(htup->t_infomask & HEAP_MOVED_IN && (htup->t_infomask & HEAP_MOVED_IN &&
TransactionIdDidAbort((TransactionId)htup->t_cmin)) || TransactionIdDidAbort((TransactionId) htup->t_cmin)) ||
TransactionIdDidAbort(htup->t_xmin)) TransactionIdDidAbort(htup->t_xmin))
{ {
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return(false); return (false);
} }
} }
UnlockAndReleaseBuffer(buffer); UnlockAndReleaseBuffer(buffer);
return(true); return (true);
} }
/* /*
@ -208,13 +208,13 @@ XLogReadBuffer(bool extend, Relation reln, BlockNumber blkno)
} }
if (buffer != InvalidBuffer) if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
return(buffer); return (buffer);
} }
buffer = ReadBuffer(reln, blkno); buffer = ReadBuffer(reln, blkno);
if (buffer != InvalidBuffer) if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
return(buffer); return (buffer);
} }
/* /*
@ -239,6 +239,7 @@ static XLogRelDesc *_xlrelarr = NULL;
static Form_pg_class _xlpgcarr = NULL; static Form_pg_class _xlpgcarr = NULL;
static int _xlast = 0; static int _xlast = 0;
static int _xlcnt = 0; static int _xlcnt = 0;
#define _XLOG_RELCACHESIZE 512 #define _XLOG_RELCACHESIZE 512
static void static void
@ -248,7 +249,7 @@ _xl_init_rel_cache(void)
_xlcnt = _XLOG_RELCACHESIZE; _xlcnt = _XLOG_RELCACHESIZE;
_xlast = 0; _xlast = 0;
_xlrelarr = (XLogRelDesc*) malloc(sizeof(XLogRelDesc) * _xlcnt); _xlrelarr = (XLogRelDesc *) malloc(sizeof(XLogRelDesc) * _xlcnt);
memset(_xlrelarr, 0, sizeof(XLogRelDesc) * _xlcnt); memset(_xlrelarr, 0, sizeof(XLogRelDesc) * _xlcnt);
_xlpgcarr = (Form_pg_class) malloc(sizeof(FormData_pg_class) * _xlcnt); _xlpgcarr = (Form_pg_class) malloc(sizeof(FormData_pg_class) * _xlcnt);
memset(_xlpgcarr, 0, sizeof(FormData_pg_class) * _xlcnt); memset(_xlpgcarr, 0, sizeof(FormData_pg_class) * _xlcnt);
@ -258,7 +259,7 @@ _xl_init_rel_cache(void)
memset(&ctl, 0, (int) sizeof(ctl)); memset(&ctl, 0, (int) sizeof(ctl));
ctl.keysize = sizeof(RelFileNode); ctl.keysize = sizeof(RelFileNode);
ctl.datasize = sizeof(XLogRelDesc*); ctl.datasize = sizeof(XLogRelDesc *);
ctl.hash = tag_hash; ctl.hash = tag_hash;
_xlrelcache = hash_create(_XLOG_RELCACHESIZE, &ctl, _xlrelcache = hash_create(_XLOG_RELCACHESIZE, &ctl,
@ -276,8 +277,8 @@ _xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
rdesc->lessRecently->moreRecently = rdesc->moreRecently; rdesc->lessRecently->moreRecently = rdesc->moreRecently;
rdesc->moreRecently->lessRecently = rdesc->lessRecently; rdesc->moreRecently->lessRecently = rdesc->lessRecently;
hentry = (XLogRelCacheEntry*) hash_search(_xlrelcache, hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
(char*)&(rdesc->reldata.rd_node), HASH_REMOVE, &found); (char *) &(rdesc->reldata.rd_node), HASH_REMOVE, &found);
if (hentry == NULL) if (hentry == NULL)
elog(STOP, "_xl_remove_hash_entry: can't delete from cache"); elog(STOP, "_xl_remove_hash_entry: can't delete from cache");
@ -294,7 +295,7 @@ _xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
return; return;
} }
static XLogRelDesc* static XLogRelDesc *
_xl_new_reldesc(void) _xl_new_reldesc(void)
{ {
XLogRelDesc *res; XLogRelDesc *res;
@ -303,7 +304,7 @@ _xl_new_reldesc(void)
if (_xlast < _xlcnt) if (_xlast < _xlcnt)
{ {
_xlrelarr[_xlast].reldata.rd_rel = &(_xlpgcarr[_xlast]); _xlrelarr[_xlast].reldata.rd_rel = &(_xlpgcarr[_xlast]);
return(&(_xlrelarr[_xlast])); return (&(_xlrelarr[_xlast]));
} }
/* reuse */ /* reuse */
@ -312,7 +313,7 @@ _xl_new_reldesc(void)
_xl_remove_hash_entry(&res, 0); _xl_remove_hash_entry(&res, 0);
_xlast--; _xlast--;
return(res); return (res);
} }
@ -348,8 +349,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
XLogRelCacheEntry *hentry; XLogRelCacheEntry *hentry;
bool found; bool found;
hentry = (XLogRelCacheEntry*) hentry = (XLogRelCacheEntry *)
hash_search(_xlrelcache, (char*)&rnode, HASH_FIND, &found); hash_search(_xlrelcache, (char *) &rnode, HASH_FIND, &found);
if (hentry == NULL) if (hentry == NULL)
elog(STOP, "XLogOpenRelation: error in cache"); elog(STOP, "XLogOpenRelation: error in cache");
@ -372,8 +373,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode; res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;
res->reldata.rd_node = rnode; res->reldata.rd_node = rnode;
hentry = (XLogRelCacheEntry*) hentry = (XLogRelCacheEntry *)
hash_search(_xlrelcache, (char*)&rnode, HASH_ENTER, &found); hash_search(_xlrelcache, (char *) &rnode, HASH_ENTER, &found);
if (hentry == NULL) if (hentry == NULL)
elog(STOP, "XLogOpenRelation: can't insert into cache"); elog(STOP, "XLogOpenRelation: can't insert into cache");
@ -385,7 +386,7 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_fd = -1; res->reldata.rd_fd = -1;
res->reldata.rd_fd = smgropen(DEFAULT_SMGR, &(res->reldata), res->reldata.rd_fd = smgropen(DEFAULT_SMGR, &(res->reldata),
true /* allow failure */); true /* allow failure */ );
} }
res->moreRecently = &(_xlrelarr[0]); res->moreRecently = &(_xlrelarr[0]);
@ -394,7 +395,7 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->lessRecently->moreRecently = res; res->lessRecently->moreRecently = res;
if (res->reldata.rd_fd < 0) /* file doesn't exist */ if (res->reldata.rd_fd < 0) /* file doesn't exist */
return(NULL); return (NULL);
return(&(res->reldata)); return (&(res->reldata));
} }
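The xlogutils.c hunks above show two halves of the small WAL-redo relation cache: _xl_remove_hash_entry() unlinks a descriptor from a doubly linked recency list, and XLogOpenRelation() relinks the descriptor it returns right behind _xlrelarr[0], which acts as the list header. Below is a minimal sketch of how those pointer moves combine into a move-to-front step; the struct, the header variable, and the helper names are illustrative stand-ins, not identifiers from the commit, and the helper assumes the node is already linked into the ring.

typedef struct MruNodeSketch
{
	struct MruNodeSketch *lessRecently;
	struct MruNodeSketch *moreRecently;
} MruNodeSketch;

static MruNodeSketch mru_header;	/* plays the role of _xlrelarr[0] */

static void
mru_init(void)
{
	/* empty ring: the header points at itself in both directions */
	mru_header.lessRecently = mru_header.moreRecently = &mru_header;
}

static void
mru_touch(MruNodeSketch *res)
{
	/* unlink from its current position (cf. _xl_remove_hash_entry) */
	res->lessRecently->moreRecently = res->moreRecently;
	res->moreRecently->lessRecently = res->lessRecently;

	/* relink immediately behind the header (cf. XLogOpenRelation) */
	res->moreRecently = &mru_header;
	res->lessRecently = mru_header.lessRecently;
	res->moreRecently->lessRecently = res;
	res->lessRecently->moreRecently = res;
}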

src/backend/catalog/aclchk.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.46 2001/01/24 19:42:51 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.47 2001/03/22 03:59:18 momjian Exp $
* *
* NOTES * NOTES
* See acl.h. * See acl.h.
@ -250,8 +250,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
num; num;
/* /*
* If ACL is null, default to "OK" --- this should not happen, * If ACL is null, default to "OK" --- this should not happen, since
* since caller should have inserted appropriate default * caller should have inserted appropriate default
*/ */
if (!acl) if (!acl)
{ {
@ -265,8 +265,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
/* /*
* We'll treat the empty ACL like that, too, although this is more * We'll treat the empty ACL like that, too, although this is more
* like an error (i.e., you manually blew away your ACL array) -- the * like an error (i.e., you manually blew away your ACL array) -- the
* system never creates an empty ACL, since there must always be * system never creates an empty ACL, since there must always be a
* a "world" entry in the first slot. * "world" entry in the first slot.
*/ */
if (num < 1) if (num < 1)
{ {

src/backend/catalog/catalog.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.39 2001/01/24 19:42:51 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.40 2001/03/22 03:59:19 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */

src/backend/catalog/heap.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.160 2001/02/14 21:34:59 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.161 2001/03/22 03:59:19 momjian Exp $
* *
* *
* INTERFACE ROUTINES * INTERFACE ROUTINES
@ -270,7 +270,11 @@ heap_create(char *relname,
if (istemp) if (istemp)
{ {
/* replace relname of caller with a unique name for a temp relation */
/*
* replace relname of caller with a unique name for a temp
* relation
*/
snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u", snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u",
(int) MyProcPid, uniqueId++); (int) MyProcPid, uniqueId++);
} }
@ -738,6 +742,7 @@ AddNewRelationTuple(Relation pg_class_desc,
static void static void
AddNewRelationType(char *typeName, Oid new_rel_oid, Oid new_type_oid) AddNewRelationType(char *typeName, Oid new_rel_oid, Oid new_type_oid)
{ {
/* /*
* The sizes are set to oid size because it makes implementing sets * The sizes are set to oid size because it makes implementing sets
* MUCH easier, and no one (we hope) uses these fields to figure out * MUCH easier, and no one (we hope) uses these fields to figure out
@ -1025,9 +1030,7 @@ RelationRemoveInheritance(Relation relation)
&entry); &entry);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0))) while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
{
simple_heap_delete(catalogRelation, &tuple->t_self); simple_heap_delete(catalogRelation, &tuple->t_self);
}
heap_endscan(scan); heap_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock); heap_close(catalogRelation, RowExclusiveLock);
@ -1152,8 +1155,8 @@ RelationTruncateIndexes(Oid heapId)
/* /*
* We have to re-open the heap rel each time through this loop * We have to re-open the heap rel each time through this loop
* because index_build will close it again. We need grab no lock, * because index_build will close it again. We need grab no lock,
* however, because we assume heap_truncate is holding an exclusive * however, because we assume heap_truncate is holding an
* lock on the heap rel. * exclusive lock on the heap rel.
*/ */
heapRelation = heap_open(heapId, NoLock); heapRelation = heap_open(heapId, NoLock);
@ -1164,8 +1167,8 @@ RelationTruncateIndexes(Oid heapId)
LockRelation(currentIndex, AccessExclusiveLock); LockRelation(currentIndex, AccessExclusiveLock);
/* /*
* Drop any buffers associated with this index. If they're * Drop any buffers associated with this index. If they're dirty,
* dirty, they're just dropped without bothering to flush to disk. * they're just dropped without bothering to flush to disk.
*/ */
DropRelationBuffers(currentIndex); DropRelationBuffers(currentIndex);
@ -1177,6 +1180,7 @@ RelationTruncateIndexes(Oid heapId)
InitIndexStrategy(indexInfo->ii_NumIndexAttrs, InitIndexStrategy(indexInfo->ii_NumIndexAttrs,
currentIndex, accessMethodId); currentIndex, accessMethodId);
index_build(heapRelation, currentIndex, indexInfo, NULL); index_build(heapRelation, currentIndex, indexInfo, NULL);
/* /*
* index_build will close both the heap and index relations (but * index_build will close both the heap and index relations (but
* not give up the locks we hold on them). * not give up the locks we hold on them).
@ -1981,9 +1985,7 @@ RemoveAttrDefault(Relation rel)
adscan = heap_beginscan(adrel, 0, SnapshotNow, 1, &key); adscan = heap_beginscan(adrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(adscan, 0))) while (HeapTupleIsValid(tup = heap_getnext(adscan, 0)))
{
simple_heap_delete(adrel, &tup->t_self); simple_heap_delete(adrel, &tup->t_self);
}
heap_endscan(adscan); heap_endscan(adscan);
heap_close(adrel, RowExclusiveLock); heap_close(adrel, RowExclusiveLock);
@ -2005,9 +2007,7 @@ RemoveRelCheck(Relation rel)
rcscan = heap_beginscan(rcrel, 0, SnapshotNow, 1, &key); rcscan = heap_beginscan(rcrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(rcscan, 0))) while (HeapTupleIsValid(tup = heap_getnext(rcscan, 0)))
{
simple_heap_delete(rcrel, &tup->t_self); simple_heap_delete(rcrel, &tup->t_self);
}
heap_endscan(rcscan); heap_endscan(rcscan);
heap_close(rcrel, RowExclusiveLock); heap_close(rcrel, RowExclusiveLock);
@ -2044,9 +2044,7 @@ RemoveStatistics(Relation rel)
scan = heap_beginscan(pgstatistic, false, SnapshotNow, 1, &key); scan = heap_beginscan(pgstatistic, false, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0))) while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
{
simple_heap_delete(pgstatistic, &tuple->t_self); simple_heap_delete(pgstatistic, &tuple->t_self);
}
heap_endscan(scan); heap_endscan(scan);
heap_close(pgstatistic, RowExclusiveLock); heap_close(pgstatistic, RowExclusiveLock);

src/backend/catalog/index.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.142 2001/02/23 09:31:52 inoue Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.143 2001/03/22 03:59:19 momjian Exp $
* *
* *
* INTERFACE ROUTINES * INTERFACE ROUTINES
@ -301,7 +301,8 @@ ConstructTupleDescriptor(Relation heapRelation,
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE); memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);
/* /*
* Fix the stuff that should not be the same as the underlying attr * Fix the stuff that should not be the same as the underlying
* attr
*/ */
to->attnum = i + 1; to->attnum = i + 1;
@ -311,9 +312,9 @@ ConstructTupleDescriptor(Relation heapRelation,
to->attcacheoff = -1; to->attcacheoff = -1;
/* /*
* We do not yet have the correct relation OID for the index, * We do not yet have the correct relation OID for the index, so
* so just set it invalid for now. InitializeAttributeOids() * just set it invalid for now. InitializeAttributeOids() will
* will fix it later. * fix it later.
*/ */
to->attrelid = InvalidOid; to->attrelid = InvalidOid;
} }
@ -1008,9 +1009,7 @@ index_create(char *heapRelationName,
/* XXX shouldn't we close the heap and index rels here? */ /* XXX shouldn't we close the heap and index rels here? */
} }
else else
{
index_build(heapRelation, indexRelation, indexInfo, NULL); index_build(heapRelation, indexRelation, indexInfo, NULL);
}
} }
/* ---------------------------------------------------------------- /* ----------------------------------------------------------------
@ -1081,12 +1080,12 @@ index_drop(Oid indexId)
heap_freetuple(tuple); heap_freetuple(tuple);
/* /*
* Update the pg_class tuple for the owning relation. We are presently * Update the pg_class tuple for the owning relation. We are
* too lazy to attempt to compute the new correct value of relhasindex * presently too lazy to attempt to compute the new correct value of
* (the next VACUUM will fix it if necessary). But we must send out a * relhasindex (the next VACUUM will fix it if necessary). But we
* shared-cache-inval notice on the owning relation to ensure other * must send out a shared-cache-inval notice on the owning relation to
* backends update their relcache lists of indexes. So, unconditionally * ensure other backends update their relcache lists of indexes. So,
* do setRelhasindex(true). * unconditionally do setRelhasindex(true).
*/ */
setRelhasindex(heapId, true); setRelhasindex(heapId, true);
@ -1199,7 +1198,7 @@ BuildIndexInfo(HeapTuple indexTuple)
{ {
ii->ii_NumIndexAttrs = 1; ii->ii_NumIndexAttrs = 1;
/* Do a lookup on the function, too */ /* Do a lookup on the function, too */
fmgr_info(indexStruct->indproc, & ii->ii_FuncInfo); fmgr_info(indexStruct->indproc, &ii->ii_FuncInfo);
} }
else else
ii->ii_NumIndexAttrs = numKeys; ii->ii_NumIndexAttrs = numKeys;
@ -1326,8 +1325,8 @@ LockClassinfoForUpdate(Oid relid, HeapTuple rtup,
Relation relationRelation; Relation relationRelation;
/* /*
* NOTE: get and hold RowExclusiveLock on pg_class, because caller will * NOTE: get and hold RowExclusiveLock on pg_class, because caller
* probably modify the rel's pg_class tuple later on. * will probably modify the rel's pg_class tuple later on.
*/ */
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock); relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
classTuple = SearchSysCache(RELOID, PointerGetDatum(relid), classTuple = SearchSysCache(RELOID, PointerGetDatum(relid),
@ -1513,7 +1512,8 @@ setRelhasindex(Oid relid, bool hasindex)
void void
setNewRelfilenode(Relation relation) setNewRelfilenode(Relation relation)
{ {
Relation pg_class, idescs[Num_pg_class_indices]; Relation pg_class,
idescs[Num_pg_class_indices];
Oid newrelfilenode; Oid newrelfilenode;
bool in_place_update = false; bool in_place_update = false;
HeapTupleData lockTupleData; HeapTupleData lockTupleData;
@ -1577,6 +1577,7 @@ setNewRelfilenode(Relation relation)
/* Make sure the relfilenode change */ /* Make sure the relfilenode change */
CommandCounterIncrement(); CommandCounterIncrement();
} }
#endif /* OLD_FILE_NAMING */ #endif /* OLD_FILE_NAMING */
/* ---------------- /* ----------------
@ -1713,6 +1714,7 @@ UpdateStats(Oid relid, long reltuples)
*/ */
if (in_place_upd) if (in_place_upd)
{ {
/* /*
* At bootstrap time, we don't need to worry about concurrency or * At bootstrap time, we don't need to worry about concurrency or
* visibility of changes, so we cheat. Also cheat if REINDEX. * visibility of changes, so we cheat. Also cheat if REINDEX.
@ -1787,9 +1789,11 @@ DefaultBuild(Relation heapRelation,
long reltuples, long reltuples,
indtuples; indtuples;
Node *predicate = indexInfo->ii_Predicate; Node *predicate = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable; TupleTable tupleTable;
TupleTableSlot *slot; TupleTableSlot *slot;
#endif #endif
ExprContext *econtext; ExprContext *econtext;
InsertIndexResult insertResult; InsertIndexResult insertResult;
@ -1855,6 +1859,7 @@ DefaultBuild(Relation heapRelation,
reltuples++; reltuples++;
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
/* /*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip * If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index * this tuple if it was already in the existing partial index
@ -1906,9 +1911,7 @@ DefaultBuild(Relation heapRelation,
#ifndef OMIT_PARTIAL_INDEX #ifndef OMIT_PARTIAL_INDEX
if (predicate != NULL || oldPred != NULL) if (predicate != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true); ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */ #endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext); FreeExprContext(econtext);
@ -2098,9 +2101,10 @@ reindex_index(Oid indexId, bool force, bool inplace)
if (inplace) if (inplace)
{ {
/* /*
* Release any buffers associated with this index. If they're dirty, * Release any buffers associated with this index. If they're
* they're just dropped without bothering to flush to disk. * dirty, they're just dropped without bothering to flush to disk.
*/ */
DropRelationBuffers(iRel); DropRelationBuffers(iRel);
@ -2164,18 +2168,24 @@ reindex_relation(Oid relid, bool force)
bool old, bool old,
reindexed; reindexed;
bool deactivate_needed, overwrite, upd_pg_class_inplace; bool deactivate_needed,
overwrite,
upd_pg_class_inplace;
#ifdef OLD_FILE_NAMING #ifdef OLD_FILE_NAMING
overwrite = upd_pg_class_inplace = deactivate_needed = true; overwrite = upd_pg_class_inplace = deactivate_needed = true;
#else #else
Relation rel; Relation rel;
overwrite = upd_pg_class_inplace = deactivate_needed = false; overwrite = upd_pg_class_inplace = deactivate_needed = false;
/* /*
* avoid heap_update() pg_class tuples while processing * avoid heap_update() pg_class tuples while processing reindex for
* reindex for pg_class. * pg_class.
*/ */
if (IsIgnoringSystemIndexes()) if (IsIgnoringSystemIndexes())
upd_pg_class_inplace = true; upd_pg_class_inplace = true;
/* /*
* ignore the indexes of the target system relation while processing * ignore the indexes of the target system relation while processing
* reindex. * reindex.
@ -2184,10 +2194,10 @@ reindex_relation(Oid relid, bool force)
if (!IsIgnoringSystemIndexes() && IsSystemRelationName(NameStr(rel->rd_rel->relname))) if (!IsIgnoringSystemIndexes() && IsSystemRelationName(NameStr(rel->rd_rel->relname)))
deactivate_needed = true; deactivate_needed = true;
#ifndef ENABLE_REINDEX_NAILED_RELATIONS #ifndef ENABLE_REINDEX_NAILED_RELATIONS
/* /*
* nailed relations are never updated. * nailed relations are never updated. We couldn't keep the
* We couldn't keep the consistency between the relation * consistency between the relation descriptors and pg_class tuples.
* descriptors and pg_class tuples.
*/ */
if (rel->rd_isnailed) if (rel->rd_isnailed)
{ {
@ -2200,9 +2210,10 @@ reindex_relation(Oid relid, bool force)
elog(ERROR, "the target relation %u is nailed", relid); elog(ERROR, "the target relation %u is nailed", relid);
} }
#endif /* ENABLE_REINDEX_NAILED_RELATIONS */ #endif /* ENABLE_REINDEX_NAILED_RELATIONS */
/* /*
* Shared system indexes must be overwritten because it's * Shared system indexes must be overwritten because it's impossible
* impossible to update pg_class tuples of all databases. * to update pg_class tuples of all databases.
*/ */
if (IsSharedSystemRelationName(NameStr(rel->rd_rel->relname))) if (IsSharedSystemRelationName(NameStr(rel->rd_rel->relname)))
{ {
@ -2252,24 +2263,27 @@ reindex_relation(Oid relid, bool force)
heap_endscan(scan); heap_endscan(scan);
heap_close(indexRelation, AccessShareLock); heap_close(indexRelation, AccessShareLock);
if (reindexed) if (reindexed)
/* /*
* Ok,we could use the reindexed indexes of the target * Ok,we could use the reindexed indexes of the target system
* system relation now. * relation now.
*/ */
{ {
if (deactivate_needed) if (deactivate_needed)
{ {
if (!overwrite && relid == RelOid_pg_class) if (!overwrite && relid == RelOid_pg_class)
{ {
/* /*
* For pg_class, relhasindex should be set * For pg_class, relhasindex should be set to true here in
* to true here in place. * place.
*/ */
setRelhasindex(relid, true); setRelhasindex(relid, true);
CommandCounterIncrement(); CommandCounterIncrement();
/* /*
* However the following setRelhasindex() * However the following setRelhasindex() is needed to
* is needed to keep consistency with WAL. * keep consistency with WAL.
*/ */
} }
setRelhasindex(relid, true); setRelhasindex(relid, true);

src/backend/catalog/indexing.c

@ -9,7 +9,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.76 2001/01/24 19:42:51 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.77 2001/03/22 03:59:20 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */

src/backend/catalog/pg_aggregate.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.37 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.38 2001/03/22 03:59:20 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -79,8 +79,8 @@ AggregateCreate(char *aggName,
/* /*
* Handle the aggregate's base type (input data type). This can be * Handle the aggregate's base type (input data type). This can be
* specified as 'ANY' for a data-independent transition function, * specified as 'ANY' for a data-independent transition function, such
* such as COUNT(*). * as COUNT(*).
*/ */
basetype = GetSysCacheOid(TYPENAME, basetype = GetSysCacheOid(TYPENAME,
PointerGetDatum(aggbasetypeName), PointerGetDatum(aggbasetypeName),
@ -118,9 +118,7 @@ AggregateCreate(char *aggName,
nargs = 2; nargs = 2;
} }
else else
{
nargs = 1; nargs = 1;
}
tup = SearchSysCache(PROCNAME, tup = SearchSysCache(PROCNAME,
PointerGetDatum(aggtransfnName), PointerGetDatum(aggtransfnName),
Int32GetDatum(nargs), Int32GetDatum(nargs),
@ -134,16 +132,17 @@ AggregateCreate(char *aggName,
if (proc->prorettype != transtype) if (proc->prorettype != transtype)
elog(ERROR, "AggregateCreate: return type of '%s' is not '%s'", elog(ERROR, "AggregateCreate: return type of '%s' is not '%s'",
aggtransfnName, aggtranstypeName); aggtransfnName, aggtranstypeName);
/* /*
* If the transfn is strict and the initval is NULL, make sure * If the transfn is strict and the initval is NULL, make sure input
* input type and transtype are the same (or at least binary- * type and transtype are the same (or at least binary- compatible),
* compatible), so that it's OK to use the first input value * so that it's OK to use the first input value as the initial
* as the initial transValue. * transValue.
*/ */
if (proc->proisstrict && agginitval == NULL) if (proc->proisstrict && agginitval == NULL)
{ {
if (basetype != transtype && if (basetype != transtype &&
! IS_BINARY_COMPATIBLE(basetype, transtype)) !IS_BINARY_COMPATIBLE(basetype, transtype))
elog(ERROR, "AggregateCreate: must not omit initval when transfn is strict and transtype is not compatible with input type"); elog(ERROR, "AggregateCreate: must not omit initval when transfn is strict and transtype is not compatible with input type");
} }
ReleaseSysCache(tup); ReleaseSysCache(tup);
@ -168,6 +167,7 @@ AggregateCreate(char *aggName,
} }
else else
{ {
/* /*
* If no finalfn, aggregate result type is type of the state value * If no finalfn, aggregate result type is type of the state value
*/ */
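The AggregateCreate() hunk above only accepts a strict transition function with a NULL initval when the input type matches (or is binary-compatible with) the transition type. The reason lives on the executor side: with no initial state, the first non-null input value is adopted as the running state. The following is a minimal sketch of that per-row transition step under that stated assumption about the aggregate executor's behavior; the struct and function names are illustrative, not the executor's, and NULL handling for non-strict transition functions is glossed over.

#include "postgres.h"
#include "fmgr.h"

typedef struct AggRunSketch
{
	FmgrInfo	transfn;			/* pg_aggregate.aggtransfn */
	bool		transfnIsStrict;
	Datum		transValue;			/* running state, of type aggtranstype */
	bool		transValueIsNull;	/* true when initval was NULL */
} AggRunSketch;

static void
advance_sketch(AggRunSketch *s, Datum input, bool inputIsNull)
{
	if (s->transfnIsStrict)
	{
		if (inputIsNull)
			return;				/* a strict transfn never sees NULLs */
		if (s->transValueIsNull)
		{
			/*
			 * No initval: the first non-null input becomes the initial
			 * state verbatim, which is only type-safe when basetype and
			 * transtype agree -- exactly what AggregateCreate checks.
			 */
			s->transValue = input;
			s->transValueIsNull = false;
			return;
		}
	}
	s->transValue = FunctionCall2(&s->transfn, s->transValue, input);
	s->transValueIsNull = false;
}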

src/backend/catalog/pg_largeobject.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.7 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.8 2001/03/22 03:59:20 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -51,7 +51,7 @@ LargeObjectCreate(Oid loid)
*/ */
for (i = 0; i < Natts_pg_largeobject; i++) for (i = 0; i < Natts_pg_largeobject; i++)
{ {
values[i] = (Datum)NULL; values[i] = (Datum) NULL;
nulls[i] = ' '; nulls[i] = ' ';
} }

src/backend/catalog/pg_operator.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.55 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.56 2001/03/22 03:59:20 momjian Exp $
* *
* NOTES * NOTES
* these routines moved here from commands/define.c and somewhat cleaned up. * these routines moved here from commands/define.c and somewhat cleaned up.

src/backend/catalog/pg_proc.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.53 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.54 2001/03/22 03:59:20 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -247,8 +247,8 @@ ProcedureCreate(char *procedureName,
* symbol. Also check for a valid function information record. * symbol. Also check for a valid function information record.
* *
* We used to perform these checks only when the function was first * We used to perform these checks only when the function was first
* called, but it seems friendlier to verify the library's validity * called, but it seems friendlier to verify the library's validity at
* at CREATE FUNCTION time. * CREATE FUNCTION time.
*/ */
if (languageObjectId == ClanguageId) if (languageObjectId == ClanguageId)
@ -355,7 +355,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlist = parse->targetList; tlist = parse->targetList;
/* /*
* The last query must be a SELECT if and only if there is a return type. * The last query must be a SELECT if and only if there is a return
* type.
*/ */
if (rettype == InvalidOid) if (rettype == InvalidOid)
{ {
@ -375,8 +376,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlistlen = ExecCleanTargetListLength(tlist); tlistlen = ExecCleanTargetListLength(tlist);
/* /*
* For base-type returns, the target list should have exactly one entry, * For base-type returns, the target list should have exactly one
* and its type should agree with what the user declared. * entry, and its type should agree with what the user declared.
*/ */
typerelid = typeidTypeRelid(rettype); typerelid = typeidTypeRelid(rettype);
if (typerelid == InvalidOid) if (typerelid == InvalidOid)
@ -397,8 +398,8 @@ checkretval(Oid rettype, List *queryTreeList)
* If the target list is of length 1, and the type of the varnode in * If the target list is of length 1, and the type of the varnode in
* the target list is the same as the declared return type, this is * the target list is the same as the declared return type, this is
* okay. This can happen, for example, where the body of the function * okay. This can happen, for example, where the body of the function
* is 'SELECT (x = func2())', where func2 has the same return type * is 'SELECT (x = func2())', where func2 has the same return type as
* as the function that's calling it. * the function that's calling it.
*/ */
if (tlistlen == 1) if (tlistlen == 1)
{ {
@ -408,10 +409,10 @@ checkretval(Oid rettype, List *queryTreeList)
} }
/* /*
* By here, the procedure returns a tuple or set of tuples. This part of * By here, the procedure returns a tuple or set of tuples. This part
* the typechecking is a hack. We look up the relation that is the * of the typechecking is a hack. We look up the relation that is the
* declared return type, and be sure that attributes 1 .. n in the target * declared return type, and be sure that attributes 1 .. n in the
* list match the declared types. * target list match the declared types.
*/ */
reln = heap_open(typerelid, AccessShareLock); reln = heap_open(typerelid, AccessShareLock);
relid = reln->rd_id; relid = reln->rd_id;
@ -436,7 +437,7 @@ checkretval(Oid rettype, List *queryTreeList)
typeidTypeName(rettype), typeidTypeName(rettype),
typeidTypeName(tletype), typeidTypeName(tletype),
typeidTypeName(reln->rd_att->attrs[i]->atttypid), typeidTypeName(reln->rd_att->attrs[i]->atttypid),
i+1); i + 1);
i++; i++;
} }

src/backend/catalog/pg_type.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.59 2001/02/12 20:07:21 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.60 2001/03/22 03:59:20 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */

src/backend/commands/analyze.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.14 2001/02/16 03:16:58 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.15 2001/03/22 03:59:20 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -86,9 +86,10 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
CommitTransactionCommand(); CommitTransactionCommand();
return; return;
} }
/* /*
* We can VACUUM ANALYZE any table except pg_statistic. * We can VACUUM ANALYZE any table except pg_statistic. see
* see update_relstats * update_relstats
*/ */
if (strcmp(NameStr(((Form_pg_class) GETSTRUCT(tuple))->relname), if (strcmp(NameStr(((Form_pg_class) GETSTRUCT(tuple))->relname),
StatisticRelationName) == 0) StatisticRelationName) == 0)
@ -104,9 +105,11 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
if (!pg_ownercheck(GetUserId(), RelationGetRelationName(onerel), if (!pg_ownercheck(GetUserId(), RelationGetRelationName(onerel),
RELNAME)) RELNAME))
{ {
/* we already did an elog during vacuum
elog(NOTICE, "Skipping \"%s\" --- only table owner can VACUUM it", /*
RelationGetRelationName(onerel)); * we already did an elog during vacuum elog(NOTICE, "Skipping
* \"%s\" --- only table owner can VACUUM it",
* RelationGetRelationName(onerel));
*/ */
heap_close(onerel, NoLock); heap_close(onerel, NoLock);
CommitTransactionCommand(); CommitTransactionCommand();
@ -295,15 +298,16 @@ attr_stats(Relation onerel, int attr_cnt, VacAttrStats *vacattrstats, HeapTuple
stats->nonnull_cnt++; stats->nonnull_cnt++;
/* /*
* If the value is toasted, detoast it to avoid repeated detoastings * If the value is toasted, detoast it to avoid repeated
* and resultant memory leakage inside the comparison routines. * detoastings and resultant memory leakage inside the comparison
* routines.
*/ */
if (!stats->attr->attbyval && stats->attr->attlen == -1) if (!stats->attr->attbyval && stats->attr->attlen == -1)
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue)); value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
else else
value = origvalue; value = origvalue;
if (! stats->initialized) if (!stats->initialized)
{ {
bucketcpy(stats->attr, value, &stats->best, &stats->best_len); bucketcpy(stats->attr, value, &stats->best, &stats->best_len);
/* best_cnt gets incremented below */ /* best_cnt gets incremented below */
@ -489,22 +493,21 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
{ {
/* /*
* empty relation, so put a dummy value in * empty relation, so put a dummy value in attdispersion
* attdispersion
*/ */
selratio = 0; selratio = 0;
} }
else if (stats->null_cnt <= 1 && stats->best_cnt == 1) else if (stats->null_cnt <= 1 && stats->best_cnt == 1)
{ {
/* /*
* looks like we have a unique-key attribute --- flag * looks like we have a unique-key attribute --- flag this
* this with special -1.0 flag value. * with special -1.0 flag value.
* *
* The correct dispersion is 1.0/numberOfRows, but since * The correct dispersion is 1.0/numberOfRows, but since the
* the relation row count can get updated without * relation row count can get updated without recomputing
* recomputing dispersion, we want to store a * dispersion, we want to store a "symbolic" value and
* "symbolic" value and figure 1.0/numberOfRows on the * figure 1.0/numberOfRows on the fly.
* fly.
*/ */
selratio = -1; selratio = -1;
} }
@ -515,8 +518,7 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
{ {
/* /*
* exact result when there are just 1 or 2 * exact result when there are just 1 or 2 values...
* values...
*/ */
double min_cnt_d = stats->min_cnt, double min_cnt_d = stats->min_cnt,
max_cnt_d = stats->max_cnt, max_cnt_d = stats->max_cnt,
@ -552,12 +554,12 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
/* /*
* Create pg_statistic tuples for the relation, if we have * Create pg_statistic tuples for the relation, if we have
* gathered the right data. del_stats() previously * gathered the right data. del_stats() previously deleted
* deleted all the pg_statistic tuples for the rel, so we * all the pg_statistic tuples for the rel, so we just have to
* just have to insert new ones here. * insert new ones here.
* *
* Note analyze_rel() has seen to it that we won't come here * Note analyze_rel() has seen to it that we won't come here when
* when vacuuming pg_statistic itself. * vacuuming pg_statistic itself.
*/ */
if (VacAttrStatsLtGtValid(stats) && stats->initialized) if (VacAttrStatsLtGtValid(stats) && stats->initialized)
{ {
@ -682,6 +684,3 @@ del_stats(Oid relid, int attcnt, int *attnums)
*/ */
heap_close(pgstatistic, NoLock); heap_close(pgstatistic, NoLock);
} }

src/backend/commands/async.c

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.76 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.77 2001/03/22 03:59:21 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -161,6 +161,7 @@ Async_Notify(char *relname)
/* no point in making duplicate entries in the list ... */ /* no point in making duplicate entries in the list ... */
if (!AsyncExistsPendingNotify(relname)) if (!AsyncExistsPendingNotify(relname))
{ {
/* /*
* We allocate list memory from the global malloc pool to ensure * We allocate list memory from the global malloc pool to ensure
* that it will live until we want to use it. This is probably * that it will live until we want to use it. This is probably
@ -349,9 +350,7 @@ Async_UnlistenAll()
sRel = heap_beginscan(lRel, 0, SnapshotNow, 1, key); sRel = heap_beginscan(lRel, 0, SnapshotNow, 1, key);
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0))) while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0)))
{
simple_heap_delete(lRel, &lTuple->t_self); simple_heap_delete(lRel, &lTuple->t_self);
}
heap_endscan(sRel); heap_endscan(sRel);
heap_close(lRel, AccessExclusiveLock); heap_close(lRel, AccessExclusiveLock);
@ -499,6 +498,7 @@ AtCommit_Notify()
*/ */
if (kill(listenerPID, SIGUSR2) < 0) if (kill(listenerPID, SIGUSR2) < 0)
{ {
/* /*
* Get rid of pg_listener entry if it refers to a PID * Get rid of pg_listener entry if it refers to a PID
* that no longer exists. Presumably, that backend * that no longer exists. Presumably, that backend

src/backend/commands/cluster.c

@ -15,7 +15,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.64 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.65 2001/03/22 03:59:21 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -75,8 +75,8 @@ cluster(char *oldrelname, char *oldindexname)
StrNCpy(saveoldindexname, oldindexname, NAMEDATALEN); StrNCpy(saveoldindexname, oldindexname, NAMEDATALEN);
/* /*
* We grab exclusive access to the target rel and index for the duration * We grab exclusive access to the target rel and index for the
* of the transaction. * duration of the transaction.
*/ */
OldHeap = heap_openr(saveoldrelname, AccessExclusiveLock); OldHeap = heap_openr(saveoldrelname, AccessExclusiveLock);
OIDOldHeap = RelationGetRelid(OldHeap); OIDOldHeap = RelationGetRelid(OldHeap);
@ -154,8 +154,8 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
OldHeapDesc = RelationGetDescr(OldHeap); OldHeapDesc = RelationGetDescr(OldHeap);
/* /*
* Need to make a copy of the tuple descriptor, * Need to make a copy of the tuple descriptor, since
* since heap_create_with_catalog modifies it. * heap_create_with_catalog modifies it.
*/ */
tupdesc = CreateTupleDescCopyConstr(OldHeapDesc); tupdesc = CreateTupleDescCopyConstr(OldHeapDesc);
@ -164,16 +164,15 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
allowSystemTableMods); allowSystemTableMods);
/* /*
* Advance command counter so that the newly-created * Advance command counter so that the newly-created relation's
* relation's catalog tuples will be visible to heap_open. * catalog tuples will be visible to heap_open.
*/ */
CommandCounterIncrement(); CommandCounterIncrement();
/* /*
* If necessary, create a TOAST table for the new relation. * If necessary, create a TOAST table for the new relation. Note that
* Note that AlterTableCreateToastTable ends with * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
* CommandCounterIncrement(), so that the TOAST table will * that the TOAST table will be visible for insertion.
* be visible for insertion.
*/ */
AlterTableCreateToastTable(NewName, true); AlterTableCreateToastTable(NewName, true);
@ -198,12 +197,12 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName)
/* /*
* Create a new index like the old one. To do this I get the info * Create a new index like the old one. To do this I get the info
* from pg_index, and add a new index with a temporary name (that * from pg_index, and add a new index with a temporary name (that will
* will be changed later). * be changed later).
* *
* NOTE: index_create will cause the new index to be a temp relation * NOTE: index_create will cause the new index to be a temp relation if
* if its parent table is, so we don't need to do anything special * its parent table is, so we don't need to do anything special for
* for the temp-table case here. * the temp-table case here.
*/ */
Old_pg_index_Tuple = SearchSysCache(INDEXRELID, Old_pg_index_Tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(OIDOldIndex), ObjectIdGetDatum(OIDOldIndex),
@ -266,13 +265,15 @@ rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
LocalHeapTuple.t_datamcxt = NULL; LocalHeapTuple.t_datamcxt = NULL;
LocalHeapTuple.t_data = NULL; LocalHeapTuple.t_data = NULL;
heap_fetch(LocalOldHeap, SnapshotNow, &LocalHeapTuple, &LocalBuffer); heap_fetch(LocalOldHeap, SnapshotNow, &LocalHeapTuple, &LocalBuffer);
if (LocalHeapTuple.t_data != NULL) { if (LocalHeapTuple.t_data != NULL)
{
/* /*
* We must copy the tuple because heap_insert() will overwrite * We must copy the tuple because heap_insert() will overwrite
* the commit-status fields of the tuple it's handed, and the * the commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus, * retrieved tuple will actually be in a disk buffer! Thus,
* the source relation would get trashed, which is bad news * the source relation would get trashed, which is bad news if
* if we abort later on. (This was a bug in releases thru 7.0) * we abort later on. (This was a bug in releases thru 7.0)
*/ */
HeapTuple copiedTuple = heap_copytuple(&LocalHeapTuple); HeapTuple copiedTuple = heap_copytuple(&LocalHeapTuple);

src/backend/commands/command.c

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.122 2001/02/27 22:07:34 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.123 2001/03/22 03:59:21 momjian Exp $
* *
* NOTES * NOTES
* The PerformAddAttribute() code, like most of the relation * The PerformAddAttribute() code, like most of the relation
@ -180,7 +180,7 @@ PerformPortalFetch(char *name,
*/ */
if (forward) if (forward)
{ {
if (! portal->atEnd) if (!portal->atEnd)
{ {
ExecutorRun(queryDesc, estate, EXEC_FOR, (long) count); ExecutorRun(queryDesc, estate, EXEC_FOR, (long) count);
if (estate->es_processed > 0) if (estate->es_processed > 0)
@ -191,7 +191,7 @@ PerformPortalFetch(char *name,
} }
else else
{ {
if (! portal->atStart) if (!portal->atStart)
{ {
ExecutorRun(queryDesc, estate, EXEC_BACK, (long) count); ExecutorRun(queryDesc, estate, EXEC_BACK, (long) count);
if (estate->es_processed > 0) if (estate->es_processed > 0)
@ -502,8 +502,8 @@ AlterTableAddColumn(const char *relationName,
heap_close(rel, NoLock); heap_close(rel, NoLock);
/* /*
* Automatically create the secondary relation for TOAST * Automatically create the secondary relation for TOAST if it
* if it formerly had no such but now has toastable attributes. * formerly had no such but now has toastable attributes.
*/ */
CommandCounterIncrement(); CommandCounterIncrement();
AlterTableCreateToastTable(relationName, true); AlterTableCreateToastTable(relationName, true);
@ -1106,7 +1106,7 @@ AlterTableAddConstraint(char *relationName,
#endif #endif
/* Disallow ADD CONSTRAINT on views, indexes, sequences, etc */ /* Disallow ADD CONSTRAINT on views, indexes, sequences, etc */
if (! is_relation(relationName)) if (!is_relation(relationName))
elog(ERROR, "ALTER TABLE ADD CONSTRAINT: %s is not a table", elog(ERROR, "ALTER TABLE ADD CONSTRAINT: %s is not a table",
relationName); relationName);
@ -1147,15 +1147,17 @@ AlterTableAddConstraint(char *relationName,
elog(ERROR, "ALTER TABLE: cannot add constraint to a view"); elog(ERROR, "ALTER TABLE: cannot add constraint to a view");
/* /*
* Scan all of the rows, looking for a false match * Scan all of the rows, looking for a false
* match
*/ */
scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL); scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
AssertState(scan != NULL); AssertState(scan != NULL);
/* /*
* We need to make a parse state and range table to allow * We need to make a parse state and range
* us to transformExpr and fix_opids to get a version of * table to allow us to transformExpr and
* the expression we can pass to ExecQual * fix_opids to get a version of the
* expression we can pass to ExecQual
*/ */
pstate = make_parsestate(NULL); pstate = make_parsestate(NULL);
rte = addRangeTableEntry(pstate, relationName, NULL, rte = addRangeTableEntry(pstate, relationName, NULL,
@ -1174,14 +1176,16 @@ AlterTableAddConstraint(char *relationName,
name); name);
/* /*
* Make sure no outside relations are referred to. * Make sure no outside relations are referred
* to.
*/ */
if (length(pstate->p_rtable) != 1) if (length(pstate->p_rtable) != 1)
elog(ERROR, "Only relation '%s' can be referenced in CHECK", elog(ERROR, "Only relation '%s' can be referenced in CHECK",
relationName); relationName);
/* /*
* Might as well try to reduce any constant expressions. * Might as well try to reduce any constant
* expressions.
*/ */
expr = eval_const_expressions(expr); expr = eval_const_expressions(expr);
@ -1197,15 +1201,15 @@ AlterTableAddConstraint(char *relationName,
econtext = MakeExprContext(slot, CurrentMemoryContext); econtext = MakeExprContext(slot, CurrentMemoryContext);
/* /*
* Scan through the rows now, checking the expression * Scan through the rows now, checking the
* at each row. * expression at each row.
*/ */
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0))) while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
{ {
ExecStoreTuple(tuple, slot, InvalidBuffer, false); ExecStoreTuple(tuple, slot, InvalidBuffer, false);
if (!ExecQual(qual, econtext, true)) if (!ExecQual(qual, econtext, true))
{ {
successful=false; successful = false;
break; break;
} }
ResetExprContext(econtext); ResetExprContext(econtext);
@ -1221,10 +1225,12 @@ AlterTableAddConstraint(char *relationName,
heap_close(rel, NoLock); heap_close(rel, NoLock);
elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name); elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
} }
/* /*
* Call AddRelationRawConstraints to do the real adding -- * Call AddRelationRawConstraints to do the
* It duplicates some of the above, but does not check the * real adding -- It duplicates some of the
* validity of the constraint against tuples already in * above, but does not check the validity of
* the constraint against tuples already in
* the table. * the table.
*/ */
AddRelationRawConstraints(rel, NIL, constlist); AddRelationRawConstraints(rel, NIL, constlist);
@ -1241,7 +1247,8 @@ AlterTableAddConstraint(char *relationName,
case T_FkConstraint: case T_FkConstraint:
{ {
FkConstraint *fkconstraint = (FkConstraint *) newConstraint; FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
Relation rel, pkrel; Relation rel,
pkrel;
HeapScanDesc scan; HeapScanDesc scan;
HeapTuple tuple; HeapTuple tuple;
Trigger trig; Trigger trig;
@ -1279,7 +1286,10 @@ AlterTableAddConstraint(char *relationName,
elog(ERROR, "referencing table \"%s\" not a relation", elog(ERROR, "referencing table \"%s\" not a relation",
relationName); relationName);
/* First we check for limited correctness of the constraint */ /*
* First we check for limited correctness of the
* constraint
*/
rel_attrs = pkrel->rd_att->attrs; rel_attrs = pkrel->rd_att->attrs;
indexoidlist = RelationGetIndexList(pkrel); indexoidlist = RelationGetIndexList(pkrel);
@ -1302,24 +1312,30 @@ AlterTableAddConstraint(char *relationName,
{ {
List *attrl; List *attrl;
/* Make sure this index has the same number of keys -- It obviously /*
* won't match otherwise. */ * Make sure this index has the same number of
* keys -- It obviously won't match otherwise.
*/
for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++); for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
if (i!=length(fkconstraint->pk_attrs)) if (i != length(fkconstraint->pk_attrs))
found=false; found = false;
else { else
{
/* go through the fkconstraint->pk_attrs list */ /* go through the fkconstraint->pk_attrs list */
foreach(attrl, fkconstraint->pk_attrs) foreach(attrl, fkconstraint->pk_attrs)
{ {
Ident *attr=lfirst(attrl); Ident *attr = lfirst(attrl);
found = false; found = false;
for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++) for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
{ {
int pkattno = indexStruct->indkey[i]; int pkattno = indexStruct->indkey[i];
if (pkattno>0)
if (pkattno > 0)
{ {
char *name = NameStr(rel_attrs[pkattno-1]->attname); char *name = NameStr(rel_attrs[pkattno - 1]->attname);
if (strcmp(name, attr->name)==0)
if (strcmp(name, attr->name) == 0)
{ {
found = true; found = true;
break; break;
@ -1344,18 +1360,24 @@ AlterTableAddConstraint(char *relationName,
heap_close(pkrel, NoLock); heap_close(pkrel, NoLock);
rel_attrs = rel->rd_att->attrs; rel_attrs = rel->rd_att->attrs;
if (fkconstraint->fk_attrs!=NIL) { if (fkconstraint->fk_attrs != NIL)
{
List *fkattrs; List *fkattrs;
Ident *fkattr; Ident *fkattr;
found = false; found = false;
foreach(fkattrs, fkconstraint->fk_attrs) { foreach(fkattrs, fkconstraint->fk_attrs)
{
int count; int count;
found = false; found = false;
fkattr=lfirst(fkattrs); fkattr = lfirst(fkattrs);
for (count = 0; count < rel->rd_att->natts; count++) { for (count = 0; count < rel->rd_att->natts; count++)
{
char *name = NameStr(rel->rd_att->attrs[count]->attname); char *name = NameStr(rel->rd_att->attrs[count]->attname);
if (strcmp(name, fkattr->name)==0) {
if (strcmp(name, fkattr->name) == 0)
{
found = true; found = true;
break; break;
} }
@ -1396,7 +1418,7 @@ AlterTableAddConstraint(char *relationName,
Ident *fk_at = lfirst(list); Ident *fk_at = lfirst(list);
trig.tgargs[count] = fk_at->name; trig.tgargs[count] = fk_at->name;
count+=2; count += 2;
} }
count = 5; count = 5;
foreach(list, fkconstraint->pk_attrs) foreach(list, fkconstraint->pk_attrs)
@ -1404,9 +1426,9 @@ AlterTableAddConstraint(char *relationName,
Ident *pk_at = lfirst(list); Ident *pk_at = lfirst(list);
trig.tgargs[count] = pk_at->name; trig.tgargs[count] = pk_at->name;
count+=2; count += 2;
} }
trig.tgnargs = count-1; trig.tgnargs = count - 1;
scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL); scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
AssertState(scan != NULL); AssertState(scan != NULL);
@ -1472,7 +1494,7 @@ AlterTableOwner(const char *relationName, const char *newOwnerName)
/* /*
* first check that we are a superuser * first check that we are a superuser
*/ */
if (! superuser()) if (!superuser())
elog(ERROR, "ALTER TABLE: permission denied"); elog(ERROR, "ALTER TABLE: permission denied");
/* /*
@ -1618,7 +1640,7 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
/* /*
* Check to see whether the table actually needs a TOAST table. * Check to see whether the table actually needs a TOAST table.
*/ */
if (! needs_toast_table(rel)) if (!needs_toast_table(rel))
{ {
if (silent) if (silent)
{ {
@ -1652,10 +1674,11 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
"chunk_data", "chunk_data",
BYTEAOID, BYTEAOID,
-1, 0, false); -1, 0, false);
/* /*
* Ensure that the toast table doesn't itself get toasted, * Ensure that the toast table doesn't itself get toasted, or we'll be
* or we'll be toast :-(. This is essential for chunk_data because * toast :-(. This is essential for chunk_data because type bytea is
* type bytea is toastable; hit the other two just to be sure. * toastable; hit the other two just to be sure.
*/ */
tupdesc->attrs[0]->attstorage = 'p'; tupdesc->attrs[0]->attstorage = 'p';
tupdesc->attrs[1]->attstorage = 'p'; tupdesc->attrs[1]->attstorage = 'p';

src/backend/commands/comment.c

@ -7,7 +7,7 @@
* Copyright (c) 1999, PostgreSQL Global Development Group * Copyright (c) 1999, PostgreSQL Global Development Group
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.26 2001/01/23 04:32:21 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.27 2001/03/22 03:59:21 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */

src/backend/commands/copy.c

@ -7,7 +7,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.134 2001/03/14 21:47:50 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.135 2001/03/22 03:59:21 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -76,6 +76,7 @@ static StringInfoData attribute_buf;
#ifdef MULTIBYTE #ifdef MULTIBYTE
static int client_encoding; static int client_encoding;
static int server_encoding; static int server_encoding;
#endif #endif
@ -285,6 +286,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
elog(ERROR, "You must have Postgres superuser privilege to do a COPY " elog(ERROR, "You must have Postgres superuser privilege to do a COPY "
"directly to or from a file. Anyone can COPY to stdout or " "directly to or from a file. Anyone can COPY to stdout or "
"from stdin. Psql's \\copy command also works for anyone."); "from stdin. Psql's \\copy command also works for anyone.");
/* /*
* This restriction is unfortunate, but necessary until the frontend * This restriction is unfortunate, but necessary until the frontend
* COPY protocol is redesigned to be binary-safe... * COPY protocol is redesigned to be binary-safe...
@ -344,8 +346,8 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
mode_t oumask; /* Pre-existing umask value */ mode_t oumask; /* Pre-existing umask value */
/* /*
* Prevent write to relative path ... too easy to shoot oneself * Prevent write to relative path ... too easy to shoot
* in the foot by overwriting a database file ... * oneself in the foot by overwriting a database file ...
*/ */
if (filename[0] != '/') if (filename[0] != '/')
elog(ERROR, "Relative path not allowed for server side" elog(ERROR, "Relative path not allowed for server side"
@ -408,7 +410,10 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
attr_count = rel->rd_att->natts; attr_count = rel->rd_att->natts;
attr = rel->rd_att->attrs; attr = rel->rd_att->attrs;
/* For binary copy we really only need isvarlena, but compute it all... */ /*
* For binary copy we really only need isvarlena, but compute it
* all...
*/
out_functions = (FmgrInfo *) palloc(attr_count * sizeof(FmgrInfo)); out_functions = (FmgrInfo *) palloc(attr_count * sizeof(FmgrInfo));
elements = (Oid *) palloc(attr_count * sizeof(Oid)); elements = (Oid *) palloc(attr_count * sizeof(Oid));
isvarlena = (bool *) palloc(attr_count * sizeof(bool)); isvarlena = (bool *) palloc(attr_count * sizeof(bool));
@ -507,10 +512,12 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
} }
else else
{ {
/* /*
* If we have a toasted datum, forcibly detoast it to avoid * If we have a toasted datum, forcibly detoast it to
* memory leakage inside the type's output routine (or * avoid memory leakage inside the type's output routine
* for binary case, becase we must output untoasted value). * (or for binary case, becase we must output untoasted
* value).
*/ */
if (isvarlena[i]) if (isvarlena[i])
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue)); value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
@ -552,8 +559,9 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf; Datum datumBuf;
/* /*
* We need this horsing around because we don't know * We need this horsing around because we don't
* how shorter data values are aligned within a Datum. * know how shorter data values are aligned within
* a Datum.
*/ */
store_att_byval(&datumBuf, value, fld_size); store_att_byval(&datumBuf, value, fld_size);
CopySendData(&datumBuf, CopySendData(&datumBuf,
@ -622,8 +630,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
/* /*
* We need a ResultRelInfo so we can use the regular executor's * We need a ResultRelInfo so we can use the regular executor's
* index-entry-making machinery. (There used to be a huge amount * index-entry-making machinery. (There used to be a huge amount of
* of code here that basically duplicated execUtils.c ...) * code here that basically duplicated execUtils.c ...)
*/ */
resultRelInfo = makeNode(ResultRelInfo); resultRelInfo = makeNode(ResultRelInfo);
resultRelInfo->ri_RangeTableIndex = 1; /* dummy */ resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
@ -673,7 +681,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
if (CopyGetEof(fp)) if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: bogus file header (missing flags)"); elog(ERROR, "COPY BINARY: bogus file header (missing flags)");
file_has_oids = (tmp & (1 << 16)) != 0; file_has_oids = (tmp & (1 << 16)) != 0;
tmp &= ~ (1 << 16); tmp &= ~(1 << 16);
if ((tmp >> 16) != 0) if ((tmp >> 16) != 0)
elog(ERROR, "COPY BINARY: unrecognized critical flags in header"); elog(ERROR, "COPY BINARY: unrecognized critical flags in header");
/* Header extension length */ /* Header extension length */
@ -794,7 +802,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
continue; /* it's NULL; nulls[i] already set */ continue; /* it's NULL; nulls[i] already set */
if (fld_size != attr[i]->attlen) if (fld_size != attr[i]->attlen)
elog(ERROR, "COPY BINARY: sizeof(field %d) is %d, expected %d", elog(ERROR, "COPY BINARY: sizeof(field %d) is %d, expected %d",
i+1, (int) fld_size, (int) attr[i]->attlen); i + 1, (int) fld_size, (int) attr[i]->attlen);
if (fld_size == -1) if (fld_size == -1)
{ {
/* varlena field */ /* varlena field */
@ -833,8 +841,9 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf; Datum datumBuf;
/* /*
* We need this horsing around because we don't know * We need this horsing around because we don't
* how shorter data values are aligned within a Datum. * know how shorter data values are aligned within
* a Datum.
*/ */
Assert(fld_size > 0 && fld_size <= sizeof(Datum)); Assert(fld_size > 0 && fld_size <= sizeof(Datum));
CopyGetData(&datumBuf, fld_size, fp); CopyGetData(&datumBuf, fld_size, fp);
@ -1163,6 +1172,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
char *string_start; char *string_start;
int mblen; int mblen;
int i; int i;
#endif #endif
#ifdef MULTIBYTE #ifdef MULTIBYTE
@ -1182,7 +1192,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
#endif #endif
#ifdef MULTIBYTE #ifdef MULTIBYTE
for (; (mblen = (server_encoding == client_encoding? 1 : pg_encoding_mblen(client_encoding, string))) && for (; (mblen = (server_encoding == client_encoding ? 1 : pg_encoding_mblen(client_encoding, string))) &&
((c = *string) != '\0'); string += mblen) ((c = *string) != '\0'); string += mblen)
#else #else
for (; (c = *string) != '\0'; string++) for (; (c = *string) != '\0'; string++)
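
Both the CopyTo and CopyFrom hunks above keep the comment about needing to "horse around" because it is unknown how shorter pass-by-value values sit inside a Datum. A minimal standalone sketch of that idiom in plain C, using invented names (DatumLike, emit_short_field) rather than the real store_att_byval/CopySendData routines: the value is re-stored at its declared width into an aligned scratch buffer, and only that many bytes are written out.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uintptr_t DatumLike;        /* stand-in for a pointer-sized Datum */

void
emit_short_field(FILE *fp, DatumLike value, size_t fld_size)
{
    union                           /* word-aligned scratch buffer */
    {
        DatumLike     d;
        unsigned char bytes[sizeof(DatumLike)];
    } buf;

    switch (fld_size)               /* re-store the value at its declared width */
    {
        case 1:
        {
            uint8_t v = (uint8_t) value;
            memcpy(buf.bytes, &v, 1);
            break;
        }
        case 2:
        {
            uint16_t v = (uint16_t) value;
            memcpy(buf.bytes, &v, 2);
            break;
        }
        case 4:
        {
            uint32_t v = (uint32_t) value;
            memcpy(buf.bytes, &v, 4);
            break;
        }
        default:
            buf.d = value;          /* already full width */
            break;
    }
    fwrite(buf.bytes, fld_size, 1, fp);     /* emit only the declared width */
}

Because the narrow value is written back out at its own width, the bytes emitted are independent of how the host packs short values inside a wider word.
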

View File

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.72 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.73 2001/03/22 03:59:22 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -152,8 +152,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
/* /*
* Open the new relation and acquire exclusive lock on it. This isn't * Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't * really necessary for locking out other backends (since they can't
* see the new rel anyway until we commit), but it keeps the lock manager * see the new rel anyway until we commit), but it keeps the lock
* from complaining about deadlock risks. * manager from complaining about deadlock risks.
*/ */
rel = heap_openr(relname, AccessExclusiveLock); rel = heap_openr(relname, AccessExclusiveLock);
@ -255,11 +255,11 @@ change_varattnos_walker(Node *node, const AttrNumber *newattno)
if (var->varlevelsup == 0 && var->varno == 1) if (var->varlevelsup == 0 && var->varno == 1)
{ {
/* /*
* ??? the following may be a problem when the * ??? the following may be a problem when the node is
* node is multiply referenced though * multiply referenced though stringToNode() doesn't create
* stringToNode() doesn't create such a node * such a node currently.
* currently.
*/ */
Assert(newattno[var->varattno - 1] > 0); Assert(newattno[var->varattno - 1] > 0);
var->varattno = newattno[var->varattno - 1]; var->varattno = newattno[var->varattno - 1];
@ -373,9 +373,12 @@ MergeAttributes(List *schema, List *supers, bool istemp,
AttrNumber attrno; AttrNumber attrno;
TupleDesc tupleDesc; TupleDesc tupleDesc;
TupleConstr *constr; TupleConstr *constr;
AttrNumber *newattno, *partialAttidx; AttrNumber *newattno,
*partialAttidx;
Node *expr; Node *expr;
int i, attidx, attno_exist; int i,
attidx,
attno_exist;
relation = heap_openr(name, AccessShareLock); relation = heap_openr(name, AccessShareLock);
@ -385,7 +388,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && is_temp_rel_name(name)) if (!istemp && is_temp_rel_name(name))
elog(ERROR, "CREATE TABLE: cannot inherit from temp relation \"%s\"", name); elog(ERROR, "CREATE TABLE: cannot inherit from temp relation \"%s\"", name);
/* We should have an UNDER permission flag for this, but for now, /*
* We should have an UNDER permission flag for this, but for now,
* demand that creator of a child table own the parent. * demand that creator of a child table own the parent.
*/ */
if (!pg_ownercheck(GetUserId(), name, RELNAME)) if (!pg_ownercheck(GetUserId(), name, RELNAME))
@ -397,14 +401,15 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/* allocate a new attribute number table and initialize */ /* allocate a new attribute number table and initialize */
newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber)); newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++) for (i = 0; i < tupleDesc->natts; i++)
newattno [i] = 0; newattno[i] = 0;
/* /*
* searching and storing order are different. * searching and storing order are different. another table is
* another table is needed. * needed.
*/ */
partialAttidx = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber)); partialAttidx = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++) for (i = 0; i < tupleDesc->natts; i++)
partialAttidx [i] = 0; partialAttidx[i] = 0;
constr = tupleDesc->constr; constr = tupleDesc->constr;
attidx = 0; attidx = 0;
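
The change_varattnos_walker hunk above renumbers inherited column references through a parent-to-child mapping table. A tiny sketch of that lookup; AttrNumber here is just a local typedef for the example, and the assertion mirrors the one visible in the diff.

#include <assert.h>

typedef short AttrNumber;

AttrNumber
remap_attno(AttrNumber old, const AttrNumber *newattno)
{
    assert(newattno[old - 1] > 0);  /* mapping entry must be populated */
    return newattno[old - 1];       /* table is indexed by old attno - 1 */
}
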

View File

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.73 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.74 2001/03/22 03:59:22 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -84,10 +84,10 @@ createdb(const char *dbname, const char *dbpath,
/* /*
* Check for db name conflict. There is a race condition here, since * Check for db name conflict. There is a race condition here, since
* another backend could create the same DB name before we commit. * another backend could create the same DB name before we commit.
* However, holding an exclusive lock on pg_database for the whole time * However, holding an exclusive lock on pg_database for the whole
* we are copying the source database doesn't seem like a good idea, * time we are copying the source database doesn't seem like a good
* so accept possibility of race to create. We will check again after * idea, so accept possibility of race to create. We will check again
* we grab the exclusive lock. * after we grab the exclusive lock.
*/ */
if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL)) if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL))
elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname); elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname);
@ -102,9 +102,10 @@ createdb(const char *dbname, const char *dbpath,
&src_istemplate, &src_lastsysoid, src_dbpath)) &src_istemplate, &src_lastsysoid, src_dbpath))
elog(ERROR, "CREATE DATABASE: template \"%s\" does not exist", elog(ERROR, "CREATE DATABASE: template \"%s\" does not exist",
dbtemplate); dbtemplate);
/* /*
* Permission check: to copy a DB that's not marked datistemplate, * Permission check: to copy a DB that's not marked datistemplate, you
* you must be superuser or the owner thereof. * must be superuser or the owner thereof.
*/ */
if (!src_istemplate) if (!src_istemplate)
{ {
@ -112,6 +113,7 @@ createdb(const char *dbname, const char *dbpath,
elog(ERROR, "CREATE DATABASE: permission to copy \"%s\" denied", elog(ERROR, "CREATE DATABASE: permission to copy \"%s\" denied",
dbtemplate); dbtemplate);
} }
/* /*
* Determine physical path of source database * Determine physical path of source database
*/ */
@ -134,13 +136,15 @@ createdb(const char *dbname, const char *dbpath,
encoding = src_encoding; encoding = src_encoding;
/* /*
* Preassign OID for pg_database tuple, so that we can compute db path. * Preassign OID for pg_database tuple, so that we can compute db
* path.
*/ */
dboid = newoid(); dboid = newoid();
/* /*
* Compute nominal location (where we will try to access the database), * Compute nominal location (where we will try to access the
* and resolve alternate physical location if one is specified. * database), and resolve alternate physical location if one is
* specified.
*/ */
nominal_loc = GetDatabasePath(dboid); nominal_loc = GetDatabasePath(dboid);
alt_loc = resolve_alt_dbpath(dbpath, dboid); alt_loc = resolve_alt_dbpath(dbpath, dboid);
@ -155,8 +159,8 @@ createdb(const char *dbname, const char *dbpath,
/* /*
* Force dirty buffers out to disk, to ensure source database is * Force dirty buffers out to disk, to ensure source database is
* up-to-date for the copy. (We really only need to flush buffers * up-to-date for the copy. (We really only need to flush buffers for
* for the source database...) * the source database...)
*/ */
BufferSync(); BufferSync();
@ -231,7 +235,8 @@ createdb(const char *dbname, const char *dbpath,
tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls); tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
tuple->t_data->t_oid = dboid; /* override heap_insert's OID selection */ tuple->t_data->t_oid = dboid; /* override heap_insert's OID
* selection */
heap_insert(pg_database_rel, tuple); heap_insert(pg_database_rel, tuple);
@ -311,8 +316,8 @@ dropdb(const char *dbname)
elog(ERROR, "DROP DATABASE: permission denied"); elog(ERROR, "DROP DATABASE: permission denied");
/* /*
* Disallow dropping a DB that is marked istemplate. This is just * Disallow dropping a DB that is marked istemplate. This is just to
* to prevent people from accidentally dropping template0 or template1; * prevent people from accidentally dropping template0 or template1;
* they can do so if they're really determined ... * they can do so if they're really determined ...
*/ */
if (db_istemplate) if (db_istemplate)
@ -338,6 +343,7 @@ dropdb(const char *dbname)
tup = heap_getnext(pgdbscan, 0); tup = heap_getnext(pgdbscan, 0);
if (!HeapTupleIsValid(tup)) if (!HeapTupleIsValid(tup))
{ {
/* /*
* This error should never come up since the existence of the * This error should never come up since the existence of the
* database is checked earlier * database is checked earlier
@ -481,10 +487,10 @@ get_user_info(Oid use_sysid, bool *use_super, bool *use_createdb)
static char * static char *
resolve_alt_dbpath(const char * dbpath, Oid dboid) resolve_alt_dbpath(const char *dbpath, Oid dboid)
{ {
const char * prefix; const char *prefix;
char * ret; char *ret;
size_t len; size_t len;
if (dbpath == NULL || dbpath[0] == '\0') if (dbpath == NULL || dbpath[0] == '\0')
@ -502,7 +508,8 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
else else
{ {
/* must be environment variable */ /* must be environment variable */
char * var = getenv(dbpath); char *var = getenv(dbpath);
if (!var) if (!var)
elog(ERROR, "Postmaster environment variable '%s' not set", dbpath); elog(ERROR, "Postmaster environment variable '%s' not set", dbpath);
if (var[0] != '/') if (var[0] != '/')
@ -519,7 +526,7 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
static bool static bool
remove_dbdirs(const char * nominal_loc, const char * alt_loc) remove_dbdirs(const char *nominal_loc, const char *alt_loc)
{ {
const char *target_dir; const char *target_dir;
char buf[MAXPGPATH + 100]; char buf[MAXPGPATH + 100];
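
The resolve_alt_dbpath hunks above distinguish an absolute dbpath from the name of a postmaster environment variable whose value must itself be an absolute path. A hedged standalone sketch of that rule in plain C; resolve_location and the "/base/<oid>" layout are illustrative stand-ins, not the real function.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

char *
resolve_location(const char *dbpath, unsigned int dboid)
{
    const char *prefix;
    char       *ret;

    if (dbpath == NULL || dbpath[0] == '\0')
        return NULL;                    /* no alternate location given */

    if (dbpath[0] == '/')
        prefix = dbpath;                /* absolute path supplied directly */
    else
    {
        const char *var = getenv(dbpath);   /* otherwise: environment variable */

        if (var == NULL || var[0] != '/')
            return NULL;                /* unset, or not absolute: reject */
        prefix = var;
    }

    ret = malloc(strlen(prefix) + 32);  /* room for "/base/" + digits + NUL */
    if (ret != NULL)
        sprintf(ret, "%s/base/%u", prefix, dboid);
    return ret;
}
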

View File

@ -10,7 +10,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.52 2001/02/12 20:07:21 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.53 2001/03/22 03:59:22 momjian Exp $
* *
* DESCRIPTION * DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the * The "DefineFoo" routines take the parse tree and pick out the
@ -70,7 +70,7 @@ case_translate_language_name(const char *input, char *output)
--------------------------------------------------------------------------*/ --------------------------------------------------------------------------*/
int i; int i;
for (i = 0; i < NAMEDATALEN-1 && input[i]; ++i) for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]); output[i] = tolower((unsigned char) input[i]);
output[i] = '\0'; output[i] = '\0';
@ -217,21 +217,26 @@ void
CreateFunction(ProcedureStmt *stmt, CommandDest dest) CreateFunction(ProcedureStmt *stmt, CommandDest dest)
{ {
char *probin_str; char *probin_str;
/* pathname of executable file that executes this function, if any */ /* pathname of executable file that executes this function, if any */
char *prosrc_str; char *prosrc_str;
/* SQL that executes this function, if any */ /* SQL that executes this function, if any */
char *prorettype; char *prorettype;
/* Type of return value (or member of set of values) from function */ /* Type of return value (or member of set of values) from function */
char languageName[NAMEDATALEN]; char languageName[NAMEDATALEN];
/* /*
* name of language of function, with case adjusted: "C", * name of language of function, with case adjusted: "C", "internal",
* "internal", "sql", etc. * "sql", etc.
*/ */
bool returnsSet; bool returnsSet;
/* The function returns a set of values, as opposed to a singleton. */ /* The function returns a set of values, as opposed to a singleton. */
/* /*
@ -380,14 +385,14 @@ DefineOperator(char *oprName,
{ {
typeName1 = defGetString(defel); typeName1 = defGetString(defel);
if (IsA(defel->arg, TypeName) if (IsA(defel->arg, TypeName)
&& ((TypeName *) defel->arg)->setof) &&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for leftarg"); elog(ERROR, "setof type not implemented for leftarg");
} }
else if (strcasecmp(defel->defname, "rightarg") == 0) else if (strcasecmp(defel->defname, "rightarg") == 0)
{ {
typeName2 = defGetString(defel); typeName2 = defGetString(defel);
if (IsA(defel->arg, TypeName) if (IsA(defel->arg, TypeName)
&& ((TypeName *) defel->arg)->setof) &&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for rightarg"); elog(ERROR, "setof type not implemented for rightarg");
} }
else if (strcasecmp(defel->defname, "procedure") == 0) else if (strcasecmp(defel->defname, "procedure") == 0)
@ -478,8 +483,8 @@ DefineAggregate(char *aggName, List *parameters)
DefElem *defel = (DefElem *) lfirst(pl); DefElem *defel = (DefElem *) lfirst(pl);
/* /*
* sfunc1, stype1, and initcond1 are accepted as obsolete spellings * sfunc1, stype1, and initcond1 are accepted as obsolete
* for sfunc, stype, initcond. * spellings for sfunc, stype, initcond.
*/ */
if (strcasecmp(defel->defname, "sfunc") == 0) if (strcasecmp(defel->defname, "sfunc") == 0)
transfuncName = defGetString(defel); transfuncName = defGetString(defel);
@ -543,13 +548,13 @@ DefineType(char *typeName, List *parameters)
char delimiter = DEFAULT_TYPDELIM; char delimiter = DEFAULT_TYPDELIM;
char *shadow_type; char *shadow_type;
List *pl; List *pl;
char alignment = 'i'; /* default alignment */ char alignment = 'i';/* default alignment */
char storage = 'p'; /* default storage in TOAST */ char storage = 'p'; /* default storage in TOAST */
/* /*
* Type names must be one character shorter than other names, * Type names must be one character shorter than other names, allowing
* allowing room to create the corresponding array type name with * room to create the corresponding array type name with prepended
* prepended "_". * "_".
*/ */
if (strlen(typeName) > (NAMEDATALEN - 2)) if (strlen(typeName) > (NAMEDATALEN - 2))
{ {
@ -699,7 +704,9 @@ defGetString(DefElem *def)
return str; return str;
} }
case T_Float: case T_Float:
/* T_Float values are kept in string form, so this type cheat
/*
* T_Float values are kept in string form, so this type cheat
* works (and doesn't risk losing precision) * works (and doesn't risk losing precision)
*/ */
return strVal(def->arg); return strVal(def->arg);
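
The case_translate_language_name hunk above lower-cases a language name into a fixed NAMEDATALEN buffer, truncating if needed and always NUL-terminating. The same loop as a self-contained example; NAMEDATALEN is hard-coded here purely for illustration.

#include <ctype.h>

#define NAMEDATALEN 32              /* example value only */

void
lowercase_name(const char *input, char output[NAMEDATALEN])
{
    int i;

    for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
        output[i] = (char) tolower((unsigned char) input[i]);
    output[i] = '\0';               /* always terminate, even when truncated */
}
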

View File

@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California * Portions Copyright (c) 1994-5, Regents of the University of California
* *
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.64 2001/01/27 01:41:19 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.65 2001/03/22 03:59:22 momjian Exp $
* *
*/ */

View File

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.45 2001/02/23 09:26:14 inoue Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.46 2001/03/22 03:59:23 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -118,9 +118,9 @@ DefineIndex(char *heapRelationName,
accessMethodName); accessMethodName);
/* /*
* XXX Hardwired hacks to check for limitations on supported index types. * XXX Hardwired hacks to check for limitations on supported index
* We really ought to be learning this info from entries in the pg_am * types. We really ought to be learning this info from entries in the
* table, instead of having it wired in here! * pg_am table, instead of having it wired in here!
*/ */
if (unique && accessMethodId != BTREE_AM_OID) if (unique && accessMethodId != BTREE_AM_OID)
elog(ERROR, "DefineIndex: unique indices are only available with the btree access method"); elog(ERROR, "DefineIndex: unique indices are only available with the btree access method");
@ -161,7 +161,8 @@ DefineIndex(char *heapRelationName,
elog(ERROR, "Existing indexes are inactive. REINDEX first"); elog(ERROR, "Existing indexes are inactive. REINDEX first");
/* /*
* Prepare arguments for index_create, primarily an IndexInfo structure * Prepare arguments for index_create, primarily an IndexInfo
* structure
*/ */
indexInfo = makeNode(IndexInfo); indexInfo = makeNode(IndexInfo);
indexInfo->ii_Predicate = (Node *) cnfPred; indexInfo->ii_Predicate = (Node *) cnfPred;
@ -415,7 +416,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
* has exact-match or binary-compatible input types. * has exact-match or binary-compatible input types.
* ---------------- * ----------------
*/ */
if (! func_get_detail(funcIndex->name, nargs, argTypes, if (!func_get_detail(funcIndex->name, nargs, argTypes,
&funcid, &rettype, &retset, &true_typeids)) &funcid, &rettype, &retset, &true_typeids))
func_error("DefineIndex", funcIndex->name, nargs, argTypes, NULL); func_error("DefineIndex", funcIndex->name, nargs, argTypes, NULL);
@ -425,7 +426,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
for (i = 0; i < nargs; i++) for (i = 0; i < nargs; i++)
{ {
if (argTypes[i] != true_typeids[i] && if (argTypes[i] != true_typeids[i] &&
! IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i])) !IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i]))
func_error("DefineIndex", funcIndex->name, nargs, argTypes, func_error("DefineIndex", funcIndex->name, nargs, argTypes,
"Index function must be binary-compatible with table datatype"); "Index function must be binary-compatible with table datatype");
} }
@ -439,7 +440,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
indexInfo->ii_FuncOid = funcid; indexInfo->ii_FuncOid = funcid;
/* Need to do the fmgr function lookup now, too */ /* Need to do the fmgr function lookup now, too */
fmgr_info(funcid, & indexInfo->ii_FuncInfo); fmgr_info(funcid, &indexInfo->ii_FuncInfo);
} }
static void static void
@ -515,8 +516,8 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
attribute->class); attribute->class);
/* /*
* Assume the opclass is supported by this index access method * Assume the opclass is supported by this index access method if we
* if we can find at least one relevant entry in pg_amop. * can find at least one relevant entry in pg_amop.
*/ */
ScanKeyEntryInitialize(&entry[0], 0, ScanKeyEntryInitialize(&entry[0], 0,
Anum_pg_amop_amopid, Anum_pg_amop_amopid,
@ -530,7 +531,7 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
relation = heap_openr(AccessMethodOperatorRelationName, AccessShareLock); relation = heap_openr(AccessMethodOperatorRelationName, AccessShareLock);
scan = heap_beginscan(relation, false, SnapshotNow, 2, entry); scan = heap_beginscan(relation, false, SnapshotNow, 2, entry);
if (! HeapTupleIsValid(tuple = heap_getnext(scan, 0))) if (!HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
elog(ERROR, "DefineIndex: opclass \"%s\" not supported by access method \"%s\"", elog(ERROR, "DefineIndex: opclass \"%s\" not supported by access method \"%s\"",
attribute->class, accessMethodName); attribute->class, accessMethodName);
@ -540,17 +541,18 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
heap_close(relation, AccessShareLock); heap_close(relation, AccessShareLock);
/* /*
* Make sure the operators associated with this opclass actually accept * Make sure the operators associated with this opclass actually
* the column data type. This prevents possible coredumps caused by * accept the column data type. This prevents possible coredumps
* user errors like applying text_ops to an int4 column. We will accept * caused by user errors like applying text_ops to an int4 column. We
* an opclass as OK if the operator's input datatype is binary-compatible * will accept an opclass as OK if the operator's input datatype is
* with the actual column datatype. Note we assume that all the operators * binary-compatible with the actual column datatype. Note we assume
* associated with an opclass accept the same datatypes, so checking the * that all the operators associated with an opclass accept the same
* first one we happened to find in the table is sufficient. * datatypes, so checking the first one we happened to find in the
* table is sufficient.
* *
* If the opclass was the default for the datatype, assume we can skip * If the opclass was the default for the datatype, assume we can skip
* this check --- that saves a few cycles in the most common case. * this check --- that saves a few cycles in the most common case. If
* If pg_opclass is wrong then we're probably screwed anyway... * pg_opclass is wrong then we're probably screwed anyway...
*/ */
if (doTypeCheck) if (doTypeCheck)
{ {
@ -564,7 +566,7 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
optup->oprright : optup->oprleft; optup->oprright : optup->oprleft;
if (attrType != opInputType && if (attrType != opInputType &&
! IS_BINARY_COMPATIBLE(attrType, opInputType)) !IS_BINARY_COMPATIBLE(attrType, opInputType))
elog(ERROR, "DefineIndex: opclass \"%s\" does not accept datatype \"%s\"", elog(ERROR, "DefineIndex: opclass \"%s\" does not accept datatype \"%s\"",
attribute->class, typeidTypeName(attrType)); attribute->class, typeidTypeName(attrType));
ReleaseSysCache(tuple); ReleaseSysCache(tuple);
@ -752,18 +754,18 @@ ReindexDatabase(const char *dbname, bool force, bool all)
elog(ERROR, "REINDEX DATABASE: Can be executed only on the currently open database."); elog(ERROR, "REINDEX DATABASE: Can be executed only on the currently open database.");
/* /*
* We cannot run inside a user transaction block; if we were * We cannot run inside a user transaction block; if we were inside a
* inside a transaction, then our commit- and * transaction, then our commit- and start-transaction-command calls
* start-transaction-command calls would not have the intended effect! * would not have the intended effect!
*/ */
if (IsTransactionBlock()) if (IsTransactionBlock())
elog(ERROR, "REINDEX DATABASE cannot run inside a BEGIN/END block"); elog(ERROR, "REINDEX DATABASE cannot run inside a BEGIN/END block");
/* /*
* Create a memory context that will survive forced transaction commits * Create a memory context that will survive forced transaction
* we do below. Since it is a child of QueryContext, it will go away * commits we do below. Since it is a child of QueryContext, it will
* eventually even if we suffer an error; there's no need for special * go away eventually even if we suffer an error; there's no need for
* abort cleanup logic. * special abort cleanup logic.
*/ */
private_context = AllocSetContextCreate(QueryContext, private_context = AllocSetContextCreate(QueryContext,
"ReindexDatabase", "ReindexDatabase",

View File

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.59 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.60 2001/03/22 03:59:23 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */

View File

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.55 2001/01/24 19:42:52 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.56 2001/03/22 03:59:23 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -189,15 +189,15 @@ renamerel(const char *oldrelname, const char *newrelname)
newrelname); newrelname);
/* /*
* Check for renaming a temp table, which only requires altering * Check for renaming a temp table, which only requires altering the
* the temp-table mapping, not the underlying table. * temp-table mapping, not the underlying table.
*/ */
if (rename_temp_relation(oldrelname, newrelname)) if (rename_temp_relation(oldrelname, newrelname))
return; /* all done... */ return; /* all done... */
/* /*
* Instead of using heap_openr(), do it the hard way, so that we * Instead of using heap_openr(), do it the hard way, so that we can
* can rename indexes as well as regular relations. * rename indexes as well as regular relations.
*/ */
targetrelation = RelationNameGetRelation(oldrelname); targetrelation = RelationNameGetRelation(oldrelname);
@ -219,8 +219,9 @@ renamerel(const char *oldrelname, const char *newrelname)
heap_close(targetrelation, NoLock); heap_close(targetrelation, NoLock);
/* /*
* Flush the relcache entry (easier than trying to change it at exactly * Flush the relcache entry (easier than trying to change it at
* the right instant). It'll get rebuilt on next access to relation. * exactly the right instant). It'll get rebuilt on next access to
* relation.
* *
* XXX What if relation is myxactonly? * XXX What if relation is myxactonly?
* *
@ -244,8 +245,8 @@ renamerel(const char *oldrelname, const char *newrelname)
elog(ERROR, "renamerel: relation \"%s\" exists", newrelname); elog(ERROR, "renamerel: relation \"%s\" exists", newrelname);
/* /*
* Update pg_class tuple with new relname. (Scribbling on reltup * Update pg_class tuple with new relname. (Scribbling on reltup is
* is OK because it's a copy...) * OK because it's a copy...)
*/ */
StrNCpy(NameStr(((Form_pg_class) GETSTRUCT(reltup))->relname), StrNCpy(NameStr(((Form_pg_class) GETSTRUCT(reltup))->relname),
newrelname, NAMEDATALEN); newrelname, NAMEDATALEN);

View File

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.51 2001/03/07 21:20:26 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.52 2001/03/22 03:59:23 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -140,7 +140,7 @@ DefineSequence(CreateSeqStmt *seq)
case SEQ_COL_LOG: case SEQ_COL_LOG:
typnam->name = "int4"; typnam->name = "int4";
coldef->colname = "log_cnt"; coldef->colname = "log_cnt";
value[i - 1] = Int32GetDatum((int32)1); value[i - 1] = Int32GetDatum((int32) 1);
break; break;
case SEQ_COL_CYCLE: case SEQ_COL_CYCLE:
typnam->name = "char"; typnam->name = "char";
@ -311,7 +311,7 @@ nextval(PG_FUNCTION_ARGS)
xlrec.node = elm->rel->rd_node; xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec); rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);
@ -319,12 +319,12 @@ nextval(PG_FUNCTION_ARGS)
seq->is_called = 't'; seq->is_called = 't';
seq->log_cnt = 0; seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer; rdata[1].buffer = InvalidBuffer;
rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper; rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
rdata[1].len = ((PageHeader)page)->pd_special - rdata[1].len = ((PageHeader) page)->pd_special -
((PageHeader)page)->pd_upper; ((PageHeader) page)->pd_upper;
rdata[1].next = NULL; rdata[1].next = NULL;
recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata); recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr); PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID); PageSetSUI(page, ThisStartUpID);
@ -409,7 +409,8 @@ do_setval(char *seqname, int32 next, bool iscalled)
/* save info in local cache */ /* save info in local cache */
elm->last = next; /* last returned number */ elm->last = next; /* last returned number */
elm->cached = next; /* last cached number (forget cached values) */ elm->cached = next; /* last cached number (forget cached
* values) */
START_CRIT_SECTION(); START_CRIT_SECTION();
{ {
@ -420,7 +421,7 @@ do_setval(char *seqname, int32 next, bool iscalled)
xlrec.node = elm->rel->rd_node; xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer; rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec; rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec); rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]); rdata[0].next = &(rdata[1]);
@ -428,12 +429,12 @@ do_setval(char *seqname, int32 next, bool iscalled)
seq->is_called = 't'; seq->is_called = 't';
seq->log_cnt = 0; seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer; rdata[1].buffer = InvalidBuffer;
rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper; rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
rdata[1].len = ((PageHeader)page)->pd_special - rdata[1].len = ((PageHeader) page)->pd_special -
((PageHeader)page)->pd_upper; ((PageHeader) page)->pd_upper;
rdata[1].next = NULL; rdata[1].next = NULL;
recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata); recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr); PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID); PageSetSUI(page, ThisStartUpID);
@ -511,6 +512,7 @@ get_seq_name(text *seqin)
else else
{ {
seqname = rawname; seqname = rawname;
/* /*
* It's important that this match the identifier downcasing code * It's important that this match the identifier downcasing code
* used by backend/parser/scan.l. * used by backend/parser/scan.l.
@ -752,7 +754,8 @@ get_param(DefElem *def)
return -1; return -1;
} }
void seq_redo(XLogRecPtr lsn, XLogRecord *record) void
seq_redo(XLogRecPtr lsn, XLogRecord *record)
{ {
uint8 info = record->xl_info & ~XLR_INFO_MASK; uint8 info = record->xl_info & ~XLR_INFO_MASK;
Relation reln; Relation reln;
@ -760,7 +763,7 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
Page page; Page page;
char *item; char *item;
Size itemsz; Size itemsz;
xl_seq_rec *xlrec = (xl_seq_rec*) XLogRecGetData(record); xl_seq_rec *xlrec = (xl_seq_rec *) XLogRecGetData(record);
sequence_magic *sm; sequence_magic *sm;
if (info != XLOG_SEQ_LOG) if (info != XLOG_SEQ_LOG)
@ -781,10 +784,10 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
sm = (sequence_magic *) PageGetSpecialPointer(page); sm = (sequence_magic *) PageGetSpecialPointer(page);
sm->magic = SEQ_MAGIC; sm->magic = SEQ_MAGIC;
item = (char*)xlrec + sizeof(xl_seq_rec); item = (char *) xlrec + sizeof(xl_seq_rec);
itemsz = record->xl_len - sizeof(xl_seq_rec); itemsz = record->xl_len - sizeof(xl_seq_rec);
itemsz = MAXALIGN(itemsz); itemsz = MAXALIGN(itemsz);
if (PageAddItem(page, (Item)item, itemsz, if (PageAddItem(page, (Item) item, itemsz,
FirstOffsetNumber, LP_USED) == InvalidOffsetNumber) FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(STOP, "seq_redo: failed to add item to page"); elog(STOP, "seq_redo: failed to add item to page");
@ -795,14 +798,16 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
return; return;
} }
void seq_undo(XLogRecPtr lsn, XLogRecord *record) void
seq_undo(XLogRecPtr lsn, XLogRecord *record)
{ {
} }
void seq_desc(char *buf, uint8 xl_info, char* rec) void
seq_desc(char *buf, uint8 xl_info, char *rec)
{ {
uint8 info = xl_info & ~XLR_INFO_MASK; uint8 info = xl_info & ~XLR_INFO_MASK;
xl_seq_rec *xlrec = (xl_seq_rec*) rec; xl_seq_rec *xlrec = (xl_seq_rec *) rec;
if (info == XLOG_SEQ_LOG) if (info == XLOG_SEQ_LOG)
strcat(buf, "log: "); strcat(buf, "log: ");
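
The nextval, do_setval, and seq_redo hunks above all log the live part of the sequence page, whose length is the distance between pd_upper and pd_special in the page header. A simplified stand-in for that computation; MiniPageHeader is not the real PageHeaderData layout.

#include <stdint.h>
#include <stddef.h>

typedef struct
{
    uint16_t pd_lower;      /* end of the line-pointer array */
    uint16_t pd_upper;      /* start of tuple data */
    uint16_t pd_special;    /* start of the special space */
} MiniPageHeader;

size_t
page_data_len(const MiniPageHeader *hdr)
{
    /* live data occupies [pd_upper, pd_special) */
    return (size_t) (hdr->pd_special - hdr->pd_upper);
}

const char *
page_data_start(const char *page, const MiniPageHeader *hdr)
{
    return page + hdr->pd_upper;
}
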

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.88 2001/03/14 21:50:32 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.89 2001/03/22 03:59:23 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -87,7 +87,9 @@ CreateTrigger(CreateTrigStmt *stmt)
constrrelid = InvalidOid; constrrelid = InvalidOid;
else else
{ {
/* NoLock is probably sufficient here, since we're only
/*
* NoLock is probably sufficient here, since we're only
* interested in getting the relation's OID... * interested in getting the relation's OID...
*/ */
rel = heap_openr(stmt->constrrelname, NoLock); rel = heap_openr(stmt->constrrelname, NoLock);
@ -211,7 +213,7 @@ CreateTrigger(CreateTrigStmt *stmt)
foreach(le, stmt->args) foreach(le, stmt->args)
{ {
char *ar = ((Value*) lfirst(le))->val.str; char *ar = ((Value *) lfirst(le))->val.str;
len += strlen(ar) + 4; len += strlen(ar) + 4;
for (; *ar; ar++) for (; *ar; ar++)
@ -224,7 +226,7 @@ CreateTrigger(CreateTrigStmt *stmt)
args[0] = '\0'; args[0] = '\0';
foreach(le, stmt->args) foreach(le, stmt->args)
{ {
char *s = ((Value*) lfirst(le))->val.str; char *s = ((Value *) lfirst(le))->val.str;
char *d = args + strlen(args); char *d = args + strlen(args);
while (*s) while (*s)
@ -577,7 +579,8 @@ RelationBuildTriggers(Relation relation)
DatumGetCString(DirectFunctionCall1(nameout, DatumGetCString(DirectFunctionCall1(nameout,
NameGetDatum(&pg_trigger->tgname)))); NameGetDatum(&pg_trigger->tgname))));
build->tgfoid = pg_trigger->tgfoid; build->tgfoid = pg_trigger->tgfoid;
build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as uninitialized */ build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as
* uninitialized */
build->tgtype = pg_trigger->tgtype; build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled; build->tgenabled = pg_trigger->tgenabled;
build->tgisconstraint = pg_trigger->tgisconstraint; build->tgisconstraint = pg_trigger->tgisconstraint;
@ -841,17 +844,17 @@ ExecCallTriggerFunc(Trigger *trigger,
MemoryContext oldContext; MemoryContext oldContext;
/* /*
* Fmgr lookup info is cached in the Trigger structure, * Fmgr lookup info is cached in the Trigger structure, so that we
* so that we need not repeat the lookup on every call. * need not repeat the lookup on every call.
*/ */
if (trigger->tgfunc.fn_oid == InvalidOid) if (trigger->tgfunc.fn_oid == InvalidOid)
fmgr_info(trigger->tgfoid, &trigger->tgfunc); fmgr_info(trigger->tgfoid, &trigger->tgfunc);
/* /*
* Do the function evaluation in the per-tuple memory context, * Do the function evaluation in the per-tuple memory context, so that
* so that leaked memory will be reclaimed once per tuple. * leaked memory will be reclaimed once per tuple. Note in particular
* Note in particular that any new tuple created by the trigger function * that any new tuple created by the trigger function will live till
* will live till the end of the tuple cycle. * the end of the tuple cycle.
*/ */
oldContext = MemoryContextSwitchTo(per_tuple_context); oldContext = MemoryContextSwitchTo(per_tuple_context);
@ -868,8 +871,8 @@ ExecCallTriggerFunc(Trigger *trigger,
MemoryContextSwitchTo(oldContext); MemoryContextSwitchTo(oldContext);
/* /*
* Trigger protocol allows function to return a null pointer, * Trigger protocol allows function to return a null pointer, but NOT
* but NOT to set the isnull result flag. * to set the isnull result flag.
*/ */
if (fcinfo.isnull) if (fcinfo.isnull)
elog(ERROR, "ExecCallTriggerFunc: function %u returned NULL", elog(ERROR, "ExecCallTriggerFunc: function %u returned NULL",
@ -915,9 +918,7 @@ ExecARInsertTriggers(EState *estate, Relation rel, HeapTuple trigtuple)
if (rel->trigdesc->n_after_row[TRIGGER_EVENT_INSERT] > 0 || if (rel->trigdesc->n_after_row[TRIGGER_EVENT_INSERT] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] > 0 || rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE] > 0) rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE] > 0)
{
DeferredTriggerSaveEvent(rel, TRIGGER_EVENT_INSERT, NULL, trigtuple); DeferredTriggerSaveEvent(rel, TRIGGER_EVENT_INSERT, NULL, trigtuple);
}
} }
bool bool
@ -1240,10 +1241,11 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
static void static void
deferredTriggerAddEvent(DeferredTriggerEvent event) deferredTriggerAddEvent(DeferredTriggerEvent event)
{ {
/* /*
* Since the event list could grow quite long, we keep track of the * Since the event list could grow quite long, we keep track of the
* list tail and append there, rather than just doing a stupid "lappend". * list tail and append there, rather than just doing a stupid
* This avoids O(N^2) behavior for large numbers of events. * "lappend". This avoids O(N^2) behavior for large numbers of events.
*/ */
event->dte_next = NULL; event->dte_next = NULL;
if (deftrig_event_tail == NULL) if (deftrig_event_tail == NULL)

View File

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.73 2001/01/24 19:42:53 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.74 2001/03/22 03:59:24 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -603,6 +603,7 @@ DropUser(DropUserStmt *stmt)
} }
heap_endscan(scan); heap_endscan(scan);
heap_close(pg_rel, AccessExclusiveLock); heap_close(pg_rel, AccessExclusiveLock);
/* /*
* Advance command counter so that later iterations of this loop * Advance command counter so that later iterations of this loop
* will see the changes already made. This is essential if, for * will see the changes already made. This is essential if, for

View File

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.187 2001/03/14 08:40:57 inoue Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.188 2001/03/22 03:59:24 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -120,9 +120,9 @@ vacuum(char *vacrel, bool verbose, bool analyze, List *anal_cols)
/* /*
* Create special memory context for cross-transaction storage. * Create special memory context for cross-transaction storage.
* *
* Since it is a child of QueryContext, it will go away eventually * Since it is a child of QueryContext, it will go away eventually even
* even if we suffer an error; there's no need for special abort * if we suffer an error; there's no need for special abort cleanup
* cleanup logic. * logic.
*/ */
vac_context = AllocSetContextCreate(QueryContext, vac_context = AllocSetContextCreate(QueryContext,
"Vacuum", "Vacuum",
@ -215,8 +215,8 @@ vacuum_shutdown()
/* /*
* Clean up working storage --- note we must do this after * Clean up working storage --- note we must do this after
* StartTransactionCommand, else we might be trying to delete * StartTransactionCommand, else we might be trying to delete the
* the active context! * active context!
*/ */
MemoryContextDelete(vac_context); MemoryContextDelete(vac_context);
vac_context = NULL; vac_context = NULL;
@ -360,10 +360,10 @@ vacuum_rel(Oid relid)
{ {
Relation onerel; Relation onerel;
LockRelId onerelid; LockRelId onerelid;
VacPageListData vacuum_pages; /* List of pages to vacuum and/or clean VacPageListData vacuum_pages; /* List of pages to vacuum and/or
* indices */ * clean indices */
VacPageListData fraged_pages; /* List of pages with space enough for VacPageListData fraged_pages; /* List of pages with space enough
* re-using */ * for re-using */
Relation *Irel; Relation *Irel;
int32 nindices, int32 nindices,
i; i;
@ -412,9 +412,9 @@ vacuum_rel(Oid relid)
/* /*
* Get a session-level exclusive lock too. This will protect our * Get a session-level exclusive lock too. This will protect our
* exclusive access to the relation across multiple transactions, * exclusive access to the relation across multiple transactions, so
* so that we can vacuum the relation's TOAST table (if any) secure * that we can vacuum the relation's TOAST table (if any) secure in
* in the knowledge that no one is diddling the parent relation. * the knowledge that no one is diddling the parent relation.
* *
* NOTE: this cannot block, even if someone else is waiting for access, * NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the * because the lock manager knows that both lock requests are from the
@ -459,9 +459,10 @@ vacuum_rel(Oid relid)
else else
vacrelstats->hasindex = false; vacrelstats->hasindex = false;
#ifdef NOT_USED #ifdef NOT_USED
/* /*
* reindex in VACUUM is dangerous under WAL. * reindex in VACUUM is dangerous under WAL. ifdef out until it
* ifdef out until it becomes safe. * becomes safe.
*/ */
if (reindex) if (reindex)
{ {
@ -506,6 +507,7 @@ vacuum_rel(Oid relid)
} }
else else
{ {
/* /*
* Flush dirty pages out to disk. We must do this even if we * Flush dirty pages out to disk. We must do this even if we
* didn't do anything else, because we want to ensure that all * didn't do anything else, because we want to ensure that all
@ -537,11 +539,11 @@ vacuum_rel(Oid relid)
CommitTransactionCommand(); CommitTransactionCommand();
/* /*
* If the relation has a secondary toast one, vacuum that too * If the relation has a secondary toast one, vacuum that too while we
* while we still hold the session lock on the master table. * still hold the session lock on the master table. We don't need to
* We don't need to propagate "analyze" to it, because the toaster * propagate "analyze" to it, because the toaster always uses
* always uses hardcoded index access and statistics are * hardcoded index access and statistics are totally unimportant for
* totally unimportant for toast relations * toast relations
*/ */
if (toast_relid != InvalidOid) if (toast_relid != InvalidOid)
vacuum_rel(toast_relid); vacuum_rel(toast_relid);
@ -964,8 +966,8 @@ Re-using: Free/Avail. Space %lu/%lu; EndEmpty/Avail. Pages %u/%u. %s",
nblocks, changed_pages, vacuum_pages->num_pages, empty_pages, nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
new_pages, num_tuples, tups_vacuumed, new_pages, num_tuples, tups_vacuumed,
nkeep, vacrelstats->num_vtlinks, ncrash, nkeep, vacrelstats->num_vtlinks, ncrash,
nunused, (unsigned long)min_tlen, (unsigned long)max_tlen, nunused, (unsigned long) min_tlen, (unsigned long) max_tlen,
(unsigned long)free_size, (unsigned long)usable_free_size, (unsigned long) free_size, (unsigned long) usable_free_size,
empty_end_pages, fraged_pages->num_pages, empty_end_pages, fraged_pages->num_pages,
show_rusage(&ru0)); show_rusage(&ru0));
@ -1142,8 +1144,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* /*
* If this (chain) tuple is moved by me already then I * If this (chain) tuple is moved by me already then I
* have to check is it in vacpage or not - i.e. is it moved * have to check is it in vacpage or not - i.e. is it
* while cleaning this page or some previous one. * moved while cleaning this page or some previous one.
*/ */
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF) if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{ {
@ -1232,8 +1234,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* xaction and this tuple is already deleted by * xaction and this tuple is already deleted by
* me. Actually, upper part of chain should be * me. Actually, upper part of chain should be
* removed and seems that this should be handled * removed and seems that this should be handled
* in scan_heap(), but it's not implemented at * in scan_heap(), but it's not implemented at the
* the moment and so we just stop shrinking here. * moment and so we just stop shrinking here.
*/ */
ReleaseBuffer(Cbuf); ReleaseBuffer(Cbuf);
pfree(vtmove); pfree(vtmove);
@ -1256,8 +1258,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{ {
/* /*
* if to_vacpage no longer has enough free space to be * if to_vacpage no longer has enough free space
* useful, remove it from fraged_pages list * to be useful, remove it from fraged_pages list
*/ */
if (to_vacpage != NULL && if (to_vacpage != NULL &&
!enough_space(to_vacpage, vacrelstats->min_tlen)) !enough_space(to_vacpage, vacrelstats->min_tlen))
@ -1460,21 +1462,22 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* *
* NOTE: a nasty bug used to lurk here. It is possible * NOTE: a nasty bug used to lurk here. It is possible
* for the source and destination pages to be the same * for the source and destination pages to be the same
* (since this tuple-chain member can be on a page lower * (since this tuple-chain member can be on a page
* than the one we're currently processing in the outer * lower than the one we're currently processing in
* loop). If that's true, then after vacuum_page() the * the outer loop). If that's true, then after
* source tuple will have been moved, and tuple.t_data * vacuum_page() the source tuple will have been
* will be pointing at garbage. Therefore we must do * moved, and tuple.t_data will be pointing at
* everything that uses tuple.t_data BEFORE this step!! * garbage. Therefore we must do everything that uses
* tuple.t_data BEFORE this step!!
* *
* This path is different from the other callers of * This path is different from the other callers of
* vacuum_page, because we have already incremented the * vacuum_page, because we have already incremented
* vacpage's offsets_used field to account for the * the vacpage's offsets_used field to account for the
* tuple(s) we expect to move onto the page. Therefore * tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is * vacuum_page's check for offsets_used == 0 is wrong.
* wrong. But since that's a good debugging check for * But since that's a good debugging check for all
* all other callers, we work around it here rather * other callers, we work around it here rather than
* than remove it. * remove it.
*/ */
if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd) if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
{ {
@ -1498,7 +1501,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (newoff == InvalidOffsetNumber) if (newoff == InvalidOffsetNumber)
{ {
elog(STOP, "moving chain: failed to add item with len = %lu to page %u", elog(STOP, "moving chain: failed to add item with len = %lu to page %u",
(unsigned long)tuple_len, destvacpage->blkno); (unsigned long) tuple_len, destvacpage->blkno);
} }
newitemid = PageGetItemId(ToPage, newoff); newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data); pfree(newtup.t_data);
@ -1526,7 +1529,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* /*
* Set new tuple's t_ctid pointing to itself for last * Set new tuple's t_ctid pointing to itself for last
* tuple in chain, and to next tuple in chain otherwise. * tuple in chain, and to next tuple in chain
* otherwise.
*/ */
if (!ItemPointerIsValid(&Ctid)) if (!ItemPointerIsValid(&Ctid))
newtup.t_data->t_ctid = newtup.t_self; newtup.t_data->t_ctid = newtup.t_self;
@ -1552,13 +1556,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (Irel != (Relation *) NULL) if (Irel != (Relation *) NULL)
{ {
/* /*
* XXX using CurrentMemoryContext here means * XXX using CurrentMemoryContext here means
* intra-vacuum memory leak for functional indexes. * intra-vacuum memory leak for functional
* Should fix someday. * indexes. Should fix someday.
* *
* XXX This code fails to handle partial indexes! * XXX This code fails to handle partial indexes!
* Probably should change it to use ExecOpenIndices. * Probably should change it to use
* ExecOpenIndices.
*/ */
for (i = 0; i < nindices; i++) for (i = 0; i < nindices; i++)
{ {
@ -1653,7 +1659,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{ {
elog(STOP, "\ elog(STOP, "\
failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)", failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
(unsigned long)tuple_len, cur_page->blkno, (unsigned long)cur_page->free, (unsigned long) tuple_len, cur_page->blkno, (unsigned long) cur_page->free,
cur_page->offsets_used, cur_page->offsets_free); cur_page->offsets_used, cur_page->offsets_free);
} }
newitemid = PageGetItemId(ToPage, newoff); newitemid = PageGetItemId(ToPage, newoff);
@ -1698,13 +1704,13 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* insert index' tuples if needed */ /* insert index' tuples if needed */
if (Irel != (Relation *) NULL) if (Irel != (Relation *) NULL)
{ {
/* /*
* XXX using CurrentMemoryContext here means * XXX using CurrentMemoryContext here means intra-vacuum
* intra-vacuum memory leak for functional indexes. * memory leak for functional indexes. Should fix someday.
* Should fix someday.
* *
* XXX This code fails to handle partial indexes! * XXX This code fails to handle partial indexes! Probably
* Probably should change it to use ExecOpenIndices. * should change it to use ExecOpenIndices.
*/ */
for (i = 0; i < nindices; i++) for (i = 0; i < nindices; i++)
{ {
@ -1803,14 +1809,15 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
if (num_moved > 0) if (num_moved > 0)
{ {
/* /*
* We have to commit our tuple movings before we truncate the * We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand * relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our * here, relying on the session-level table lock to protect our
* exclusive access to the relation. However, that would require * exclusive access to the relation. However, that would require
* a lot of extra code to close and re-open the relation, indices, * a lot of extra code to close and re-open the relation, indices,
* etc. For now, a quick hack: record status of current transaction * etc. For now, a quick hack: record status of current
* as committed, and continue. * transaction as committed, and continue.
*/ */
RecordTransactionCommit(); RecordTransactionCommit();
} }
@ -1907,7 +1914,7 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
vacpage->offsets_free > 0) vacpage->offsets_free > 0)
{ {
char unbuf[BLCKSZ]; char unbuf[BLCKSZ];
OffsetNumber *unused = (OffsetNumber*)unbuf; OffsetNumber *unused = (OffsetNumber *) unbuf;
int uncnt; int uncnt;
buf = ReadBuffer(onerel, vacpage->blkno); buf = ReadBuffer(onerel, vacpage->blkno);
@ -1943,8 +1950,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
uncnt = PageRepairFragmentation(page, unused); uncnt = PageRepairFragmentation(page, unused);
{ {
XLogRecPtr recptr; XLogRecPtr recptr;
recptr = log_heap_clean(onerel, buf, (char*)unused,
(char*)(&(unused[uncnt])) - (char*)unused); recptr = log_heap_clean(onerel, buf, (char *) unused,
(char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr); PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID); PageSetSUI(page, ThisStartUpID);
} }
@ -1962,9 +1970,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* /*
* Flush dirty pages out to disk. We do this unconditionally, even if * Flush dirty pages out to disk. We do this unconditionally, even if
* we don't need to truncate, because we want to ensure that all tuples * we don't need to truncate, because we want to ensure that all
* have correct on-row commit status on disk (see bufmgr.c's comments * tuples have correct on-row commit status on disk (see bufmgr.c's
* for FlushRelationBuffers()). * comments for FlushRelationBuffers()).
*/ */
i = FlushRelationBuffers(onerel, blkno); i = FlushRelationBuffers(onerel, blkno);
if (i < 0) if (i < 0)
@ -2005,8 +2013,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
int i; int i;
nblocks = vacuum_pages->num_pages; nblocks = vacuum_pages->num_pages;
nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with them */
* them */
for (i = 0, vacpage = vacuum_pages->pagedesc; i < nblocks; i++, vacpage++) for (i = 0, vacpage = vacuum_pages->pagedesc; i < nblocks; i++, vacpage++)
{ {
@ -2022,9 +2029,9 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
/* /*
* Flush dirty pages out to disk. We do this unconditionally, even if * Flush dirty pages out to disk. We do this unconditionally, even if
* we don't need to truncate, because we want to ensure that all tuples * we don't need to truncate, because we want to ensure that all
* have correct on-row commit status on disk (see bufmgr.c's comments * tuples have correct on-row commit status on disk (see bufmgr.c's
* for FlushRelationBuffers()). * comments for FlushRelationBuffers()).
*/ */
Assert(vacrelstats->num_pages >= vacuum_pages->empty_end_pages); Assert(vacrelstats->num_pages >= vacuum_pages->empty_end_pages);
nblocks = vacrelstats->num_pages - vacuum_pages->empty_end_pages; nblocks = vacrelstats->num_pages - vacuum_pages->empty_end_pages;
@ -2042,7 +2049,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
vacrelstats->num_pages, nblocks); vacrelstats->num_pages, nblocks);
nblocks = smgrtruncate(DEFAULT_SMGR, onerel, nblocks); nblocks = smgrtruncate(DEFAULT_SMGR, onerel, nblocks);
Assert(nblocks >= 0); Assert(nblocks >= 0);
vacrelstats->num_pages = nblocks; /* set new number of blocks */ vacrelstats->num_pages = nblocks; /* set new number of
* blocks */
} }
} }
@ -2054,7 +2062,7 @@ static void
vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage) vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
{ {
char unbuf[BLCKSZ]; char unbuf[BLCKSZ];
OffsetNumber *unused = (OffsetNumber*)unbuf; OffsetNumber *unused = (OffsetNumber *) unbuf;
int uncnt; int uncnt;
Page page = BufferGetPage(buffer); Page page = BufferGetPage(buffer);
ItemId itemid; ItemId itemid;
@ -2072,8 +2080,9 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
uncnt = PageRepairFragmentation(page, unused); uncnt = PageRepairFragmentation(page, unused);
{ {
XLogRecPtr recptr; XLogRecPtr recptr;
recptr = log_heap_clean(onerel, buffer, (char*)unused,
(char*)(&(unused[uncnt])) - (char*)unused); recptr = log_heap_clean(onerel, buffer, (char *) unused,
(char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr); PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID); PageSetSUI(page, ThisStartUpID);
} }
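The vacuum_page hunk above collects the unused line-pointer offsets into a page-sized scratch buffer (char unbuf[BLCKSZ] reinterpreted as an OffsetNumber array) and hands log_heap_clean a byte length computed by pointer subtraction over that array. A minimal standalone sketch of the same pattern; apart from the OffsetNumber and BLCKSZ names it is invented for illustration and is not PostgreSQL code.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint16_t OffsetNumber;   /* same width as the backend's OffsetNumber */
    #define BLCKSZ 8192              /* page-sized scratch buffer, as in the hunk */

    int
    main(void)
    {
        char        unbuf[BLCKSZ];
        OffsetNumber *unused = (OffsetNumber *) unbuf;
        size_t      len;
        int         uncnt = 0;

        /* pretend a page-compaction pass reported these line pointers as unused */
        unused[uncnt++] = 3;
        unused[uncnt++] = 7;
        unused[uncnt++] = 12;

        /*
         * Byte length of the filled portion, computed the way the log_heap_clean
         * call does it: one past the last element minus the start of the array.
         */
        len = (char *) (&unused[uncnt]) - (char *) unused;

        printf("%d offsets, %lu bytes\n", uncnt, (unsigned long) len);
        return 0;
    }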
File: src/backend/commands/variable.c
@ -9,7 +9,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.45 2001/01/24 19:42:53 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.46 2001/03/22 03:59:25 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -453,6 +453,7 @@ parse_DefaultXactIsoLevel(char *value)
{ {
#if 0 #if 0
TransactionState s = CurrentTransactionState; TransactionState s = CurrentTransactionState;
#endif #endif
if (value == NULL) if (value == NULL)
@ -701,25 +702,24 @@ reset_server_encoding(void)
void void
SetPGVariable(const char *name, const char *value) SetPGVariable(const char *name, const char *value)
{ {
char *mvalue = value ? pstrdup(value) : ((char*) NULL); char *mvalue = value ? pstrdup(value) : ((char *) NULL);
/* /*
* Special cases ought to be removed and handled separately * Special cases ought to be removed and handled separately by TCOP
* by TCOP
*/ */
if (strcasecmp(name, "datestyle")==0) if (strcasecmp(name, "datestyle") == 0)
parse_date(mvalue); parse_date(mvalue);
else if (strcasecmp(name, "timezone")==0) else if (strcasecmp(name, "timezone") == 0)
parse_timezone(mvalue); parse_timezone(mvalue);
else if (strcasecmp(name, "DefaultXactIsoLevel")==0) else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
parse_DefaultXactIsoLevel(mvalue); parse_DefaultXactIsoLevel(mvalue);
else if (strcasecmp(name, "XactIsoLevel")==0) else if (strcasecmp(name, "XactIsoLevel") == 0)
parse_XactIsoLevel(mvalue); parse_XactIsoLevel(mvalue);
else if (strcasecmp(name, "client_encoding")==0) else if (strcasecmp(name, "client_encoding") == 0)
parse_client_encoding(mvalue); parse_client_encoding(mvalue);
else if (strcasecmp(name, "server_encoding")==0) else if (strcasecmp(name, "server_encoding") == 0)
parse_server_encoding(mvalue); parse_server_encoding(mvalue);
else if (strcasecmp(name, "random_seed")==0) else if (strcasecmp(name, "random_seed") == 0)
parse_random_seed(mvalue); parse_random_seed(mvalue);
else else
SetConfigOption(name, value, superuser() ? PGC_SUSET : PGC_USERSET); SetConfigOption(name, value, superuser() ? PGC_SUSET : PGC_USERSET);
@ -732,23 +732,24 @@ SetPGVariable(const char *name, const char *value)
void void
GetPGVariable(const char *name) GetPGVariable(const char *name)
{ {
if (strcasecmp(name, "datestyle")==0) if (strcasecmp(name, "datestyle") == 0)
show_date(); show_date();
else if (strcasecmp(name, "timezone")==0) else if (strcasecmp(name, "timezone") == 0)
show_timezone(); show_timezone();
else if (strcasecmp(name, "DefaultXactIsoLevel")==0) else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
show_DefaultXactIsoLevel(); show_DefaultXactIsoLevel();
else if (strcasecmp(name, "XactIsoLevel")==0) else if (strcasecmp(name, "XactIsoLevel") == 0)
show_XactIsoLevel(); show_XactIsoLevel();
else if (strcasecmp(name, "client_encoding")==0) else if (strcasecmp(name, "client_encoding") == 0)
show_client_encoding(); show_client_encoding();
else if (strcasecmp(name, "server_encoding")==0) else if (strcasecmp(name, "server_encoding") == 0)
show_server_encoding(); show_server_encoding();
else if (strcasecmp(name, "random_seed")==0) else if (strcasecmp(name, "random_seed") == 0)
show_random_seed(); show_random_seed();
else else
{ {
const char * val = GetConfigOption(name); const char *val = GetConfigOption(name);
elog(NOTICE, "%s is %s", name, val); elog(NOTICE, "%s is %s", name, val);
} }
} }
@ -756,19 +757,19 @@ GetPGVariable(const char *name)
void void
ResetPGVariable(const char *name) ResetPGVariable(const char *name)
{ {
if (strcasecmp(name, "datestyle")==0) if (strcasecmp(name, "datestyle") == 0)
reset_date(); reset_date();
else if (strcasecmp(name, "timezone")==0) else if (strcasecmp(name, "timezone") == 0)
reset_timezone(); reset_timezone();
else if (strcasecmp(name, "DefaultXactIsoLevel")==0) else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
reset_DefaultXactIsoLevel(); reset_DefaultXactIsoLevel();
else if (strcasecmp(name, "XactIsoLevel")==0) else if (strcasecmp(name, "XactIsoLevel") == 0)
reset_XactIsoLevel(); reset_XactIsoLevel();
else if (strcasecmp(name, "client_encoding")==0) else if (strcasecmp(name, "client_encoding") == 0)
reset_client_encoding(); reset_client_encoding();
else if (strcasecmp(name, "server_encoding")==0) else if (strcasecmp(name, "server_encoding") == 0)
reset_server_encoding(); reset_server_encoding();
else if (strcasecmp(name, "random_seed")==0) else if (strcasecmp(name, "random_seed") == 0)
reset_random_seed(); reset_random_seed();
else else
SetConfigOption(name, NULL, superuser() ? PGC_SUSET : PGC_USERSET); SetConfigOption(name, NULL, superuser() ? PGC_SUSET : PGC_USERSET);
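The variable.c hunks above reduce SET/SHOW/RESET handling to a chain of case-insensitive name comparisons that either hit a special-case handler or fall through to the generic configuration path. A small self-contained sketch of that dispatch shape; the handler names (show_datestyle and friends) are made up for the example and are not the backend's functions.

    #include <stdio.h>
    #include <strings.h>            /* strcasecmp */

    static void show_datestyle(void) { puts("DateStyle is ISO"); }
    static void show_timezone(void)  { puts("Time zone is UTC"); }
    static void show_generic(const char *name) { printf("%s handled generically\n", name); }

    /* dispatch on a case-insensitive variable name, in the style of GetPGVariable */
    static void
    get_variable(const char *name)
    {
        if (strcasecmp(name, "datestyle") == 0)
            show_datestyle();
        else if (strcasecmp(name, "timezone") == 0)
            show_timezone();
        else
            show_generic(name);     /* everything else takes the generic path */
    }

    int
    main(void)
    {
        get_variable("DateStyle");  /* matches despite the different case */
        get_variable("TimeZone");
        get_variable("random_seed");
        return 0;
    }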
File: view.c
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* $Id: view.c,v 1.53 2001/01/24 19:42:53 momjian Exp $ * $Id: view.c,v 1.54 2001/03/22 03:59:25 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -57,7 +57,7 @@ DefineVirtualRelation(char *relname, List *tlist)
TargetEntry *entry = lfirst(t); TargetEntry *entry = lfirst(t);
Resdom *res = entry->resdom; Resdom *res = entry->resdom;
if (! res->resjunk) if (!res->resjunk)
{ {
char *resname = res->resname; char *resname = res->resname;
char *restypename = typeidTypeName(res->restype); char *restypename = typeidTypeName(res->restype);
@ -118,9 +118,9 @@ MakeRetrieveViewRuleName(char *viewName)
snprintf(buf, buflen, "_RET%s", viewName); snprintf(buf, buflen, "_RET%s", viewName);
/* clip to less than NAMEDATALEN bytes, if necessary */ /* clip to less than NAMEDATALEN bytes, if necessary */
#ifdef MULTIBYTE #ifdef MULTIBYTE
maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN-1); maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN - 1);
#else #else
maxlen = NAMEDATALEN-1; maxlen = NAMEDATALEN - 1;
#endif #endif
if (maxlen < buflen) if (maxlen < buflen)
buf[maxlen] = '\0'; buf[maxlen] = '\0';
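The MakeRetrieveViewRuleName hunk builds the rule name by prefixing "_RET" and then clips the result to at most NAMEDATALEN - 1 bytes, using pg_mbcliplen when multibyte support is compiled in so a multibyte character is never split. A single-byte-only sketch of that clipping; malloc stands in for the backend's allocator and the helper name is invented.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NAMEDATALEN 32          /* the identifier limit of that era */

    /* build "_RET<viewName>", clipped to NAMEDATALEN - 1 bytes (single-byte case) */
    static char *
    make_ret_rule_name(const char *viewName)
    {
        size_t      buflen = strlen(viewName) + 5;  /* "_RET" + name + '\0' */
        char       *buf = malloc(buflen);

        snprintf(buf, buflen, "_RET%s", viewName);
        if (strlen(buf) > NAMEDATALEN - 1)
            buf[NAMEDATALEN - 1] = '\0';            /* clip, keeping the terminator */
        return buf;
    }

    int
    main(void)
    {
        char       *name = make_ret_rule_name("a_view_with_a_very_long_name_indeed");

        printf("%s (%lu bytes)\n", name, (unsigned long) strlen(name));
        free(name);
        return 0;
    }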
@ -211,12 +211,12 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
*rt_entry2; *rt_entry2;
/* /*
* Make a copy of the given parsetree. It's not so much that we * Make a copy of the given parsetree. It's not so much that we don't
* don't want to scribble on our input, it's that the parser has * want to scribble on our input, it's that the parser has a bad habit
* a bad habit of outputting multiple links to the same subtree * of outputting multiple links to the same subtree for constructs
* for constructs like BETWEEN, and we mustn't have OffsetVarNodes * like BETWEEN, and we mustn't have OffsetVarNodes increment the
* increment the varno of a Var node twice. copyObject will expand * varno of a Var node twice. copyObject will expand any
* any multiply-referenced subtree into multiple copies. * multiply-referenced subtree into multiple copies.
*/ */
viewParse = (Query *) copyObject(viewParse); viewParse = (Query *) copyObject(viewParse);
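The comment rewrapped in the hunk above explains why the parse tree is copied before OffsetVarNodes runs: the parser can emit two links to the same subtree, and an in-place pass must not bump the same Var twice. A toy illustration of that aliasing hazard; the Node struct and helpers here are invented stand-ins, not the backend's node machinery.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Node
    {
        int         varno;
    } Node;

    /* an in-place pass over a node, in the spirit of OffsetVarNodes */
    static void
    offset_node(Node *n, int offset)
    {
        n->varno += offset;
    }

    static Node *
    copy_node(const Node *n)
    {
        Node       *c = malloc(sizeof(Node));

        *c = *n;
        return c;
    }

    int
    main(void)
    {
        Node        shared = {1};
        Node       *links[2];
        Node       *copies[2];
        int         i;

        /* two links to the same subtree, as constructs like BETWEEN can produce */
        links[0] = links[1] = &shared;
        for (i = 0; i < 2; i++)
            offset_node(links[i], 10);
        printf("aliased: varno = %d\n", shared.varno);  /* 21 -- bumped twice */

        /* deep-copying first expands the shared subtree into independent nodes */
        shared.varno = 1;
        for (i = 0; i < 2; i++)
            copies[i] = copy_node(&shared);
        for (i = 0; i < 2; i++)
            offset_node(copies[i], 10);
        printf("copied: varno = %d and %d\n", copies[0]->varno, copies[1]->varno);

        free(copies[0]);
        free(copies[1]);
        return 0;
    }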
@ -261,6 +261,7 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
void void
DefineView(char *viewName, Query *viewParse) DefineView(char *viewName, Query *viewParse)
{ {
/* /*
* Create the "view" relation NOTE: if it already exists, the xact * Create the "view" relation NOTE: if it already exists, the xact
* will be aborted. * will be aborted.
@ -295,9 +296,10 @@ DefineView(char *viewName, Query *viewParse)
void void
RemoveView(char *viewName) RemoveView(char *viewName)
{ {
/* /*
* We just have to drop the relation; the associated rules will * We just have to drop the relation; the associated rules will be
* be cleaned up automatically. * cleaned up automatically.
*/ */
heap_drop_with_catalog(viewName, allowSystemTableMods); heap_drop_with_catalog(viewName, allowSystemTableMods);
} }
File: execAmi.c
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* $Id: execAmi.c,v 1.56 2001/01/24 19:42:53 momjian Exp $ * $Id: execAmi.c,v 1.57 2001/03/22 03:59:25 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
File: src/backend/executor/execJunk.c
@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.25 2001/01/29 00:39:17 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.26 2001/03/22 03:59:26 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -265,6 +265,7 @@ ExecInitJunkFilter(List *targetList, TupleDesc tupType)
void void
ExecFreeJunkFilter(JunkFilter *junkfilter) ExecFreeJunkFilter(JunkFilter *junkfilter)
{ {
/* /*
* Since the junkfilter is inside its own context, we just have to * Since the junkfilter is inside its own context, we just have to
* delete the context and we're set. * delete the context and we're set.
File: src/backend/executor/execMain.c
@ -27,7 +27,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.138 2001/01/29 00:39:18 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.139 2001/03/22 03:59:26 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -287,6 +287,7 @@ ExecutorEnd(QueryDesc *queryDesc, EState *estate)
static void static void
ExecCheckQueryPerms(CmdType operation, Query *parseTree, Plan *plan) ExecCheckQueryPerms(CmdType operation, Query *parseTree, Plan *plan)
{ {
/* /*
* Check RTEs in the query's primary rangetable. * Check RTEs in the query's primary rangetable.
*/ */
@ -405,12 +406,13 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
relName = rte->relname; relName = rte->relname;
/* /*
* userid to check as: current user unless we have a setuid indication. * userid to check as: current user unless we have a setuid
* indication.
* *
* Note: GetUserId() is presently fast enough that there's no harm * Note: GetUserId() is presently fast enough that there's no harm in
* in calling it separately for each RTE. If that stops being true, * calling it separately for each RTE. If that stops being true, we
* we could call it once in ExecCheckQueryPerms and pass the userid * could call it once in ExecCheckQueryPerms and pass the userid down
* down from there. But for now, no need for the extra clutter. * from there. But for now, no need for the extra clutter.
*/ */
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
@ -426,6 +428,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
if (rte->checkForWrite) if (rte->checkForWrite)
{ {
/* /*
* Note: write access in a SELECT context means SELECT FOR UPDATE. * Note: write access in a SELECT context means SELECT FOR UPDATE.
* Right now we don't distinguish that from true update as far as * Right now we don't distinguish that from true update as far as
@ -519,6 +522,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (resultRelations != NIL) if (resultRelations != NIL)
{ {
/* /*
* Multiple result relations (due to inheritance) * Multiple result relations (due to inheritance)
* parseTree->resultRelations identifies them all * parseTree->resultRelations identifies them all
@ -541,8 +545,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
} }
else else
{ {
/* /*
* Single result relation identified by parseTree->resultRelation * Single result relation identified by
* parseTree->resultRelation
*/ */
numResultRelations = 1; numResultRelations = 1;
resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo)); resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
@ -559,6 +565,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
} }
else else
{ {
/* /*
* if no result relation, then set state appropriately * if no result relation, then set state appropriately
*/ */
@ -616,10 +623,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
tupType = ExecGetTupType(plan); /* tuple descriptor */ tupType = ExecGetTupType(plan); /* tuple descriptor */
/* /*
* Initialize the junk filter if needed. SELECT and INSERT queries need * Initialize the junk filter if needed. SELECT and INSERT queries
* a filter if there are any junk attrs in the tlist. UPDATE and * need a filter if there are any junk attrs in the tlist. UPDATE and
* DELETE always need one, since there's always a junk 'ctid' attribute * DELETE always need one, since there's always a junk 'ctid'
* present --- no need to look first. * attribute present --- no need to look first.
*/ */
{ {
bool junk_filter_needed = false; bool junk_filter_needed = false;
@ -650,11 +657,12 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (junk_filter_needed) if (junk_filter_needed)
{ {
/* /*
* If there are multiple result relations, each one needs * If there are multiple result relations, each one needs its
* its own junk filter. Note this is only possible for * own junk filter. Note this is only possible for
* UPDATE/DELETE, so we can't be fooled by some needing * UPDATE/DELETE, so we can't be fooled by some needing a
* a filter and some not. * filter and some not.
*/ */
if (parseTree->resultRelations != NIL) if (parseTree->resultRelations != NIL)
{ {
@ -678,6 +686,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
resultRelInfo++; resultRelInfo++;
subplans = lnext(subplans); subplans = lnext(subplans);
} }
/* /*
* Set active junkfilter too; at this point ExecInitAppend * Set active junkfilter too; at this point ExecInitAppend
* has already selected an active result relation... * has already selected an active result relation...
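The junk-filter hunks above apply a simple rule: UPDATE and DELETE always need a junk filter, because a junk "ctid" attribute is always present, while SELECT and INSERT need one only if some target-list entry is marked resjunk (and with inherited result relations each one gets its own filter). A simplified stand-in for that decision over toy structures, not the executor's real ones.

    #include <stdio.h>
    #include <stdbool.h>

    /* minimal stand-ins for the real target-list entries */
    typedef struct TargetEntry
    {
        const char *resname;
        bool        resjunk;        /* true for hidden bookkeeping columns like "ctid" */
    } TargetEntry;

    typedef enum CmdType { CMD_SELECT, CMD_INSERT, CMD_UPDATE, CMD_DELETE } CmdType;

    /* UPDATE/DELETE always need a filter; SELECT/INSERT only if a junk attr exists */
    static bool
    junk_filter_needed(CmdType operation, const TargetEntry *tlist, int ntlist)
    {
        int         i;

        if (operation == CMD_UPDATE || operation == CMD_DELETE)
            return true;
        for (i = 0; i < ntlist; i++)
            if (tlist[i].resjunk)
                return true;
        return false;
    }

    int
    main(void)
    {
        TargetEntry tlist[] = {
            {"a", false},
            {"b", false},
            {"ctid", true},         /* a junk attribute */
        };

        printf("SELECT needs filter: %d\n", junk_filter_needed(CMD_SELECT, tlist, 3));
        printf("DELETE needs filter: %d\n", junk_filter_needed(CMD_DELETE, tlist, 3));
        return 0;
    }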
@ -750,10 +759,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
CommandCounterIncrement(); CommandCounterIncrement();
/* /*
* If necessary, create a TOAST table for the into relation. * If necessary, create a TOAST table for the into
* Note that AlterTableCreateToastTable ends with * relation. Note that AlterTableCreateToastTable ends
* CommandCounterIncrement(), so that the TOAST table will * with CommandCounterIncrement(), so that the TOAST table
* be visible for insertion. * will be visible for insertion.
*/ */
AlterTableCreateToastTable(intoName, true); AlterTableCreateToastTable(intoName, true);
@ -817,9 +826,8 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
/* /*
* If there are indices on the result relation, open them and save * If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new * descriptors in the result relation info, so that we can add new
* index entries for the tuples we add/update. We need not do * index entries for the tuples we add/update. We need not do this
* this for a DELETE, however, since deletion doesn't affect * for a DELETE, however, since deletion doesn't affect indexes.
* indexes.
*/ */
if (resultRelationDesc->rd_rel->relhasindex && if (resultRelationDesc->rd_rel->relhasindex &&
operation != CMD_DELETE) operation != CMD_DELETE)
@ -857,8 +865,8 @@ EndPlan(Plan *plan, EState *estate)
estate->es_tupleTable = NULL; estate->es_tupleTable = NULL;
/* /*
* close the result relation(s) if any, but hold locks * close the result relation(s) if any, but hold locks until xact
* until xact commit. Also clean up junkfilters if present. * commit. Also clean up junkfilters if present.
*/ */
resultRelInfo = estate->es_result_relations; resultRelInfo = estate->es_result_relations;
for (i = estate->es_num_result_relations; i > 0; i--) for (i = estate->es_num_result_relations; i > 0; i--)
@ -1227,11 +1235,12 @@ ExecAppend(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */ if (newtuple != tuple) /* modified by Trigger(s) */
{ {
/* /*
* Insert modified tuple into tuple table slot, replacing the * Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple * original. We assume that it was allocated in per-tuple
* memory context, and therefore will go away by itself. * memory context, and therefore will go away by itself. The
* The tuple table slot should not try to clear it. * tuple table slot should not try to clear it.
*/ */
ExecStoreTuple(newtuple, slot, InvalidBuffer, false); ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple; tuple = newtuple;
@ -1411,11 +1420,12 @@ ExecReplace(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */ if (newtuple != tuple) /* modified by Trigger(s) */
{ {
/* /*
* Insert modified tuple into tuple table slot, replacing the * Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple * original. We assume that it was allocated in per-tuple
* memory context, and therefore will go away by itself. * memory context, and therefore will go away by itself. The
* The tuple table slot should not try to clear it. * tuple table slot should not try to clear it.
*/ */
ExecStoreTuple(newtuple, slot, InvalidBuffer, false); ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple; tuple = newtuple;
@ -1469,10 +1479,10 @@ lreplace:;
/* /*
* Note: instead of having to update the old index tuples associated * Note: instead of having to update the old index tuples associated
* with the heap tuple, all we do is form and insert new index * with the heap tuple, all we do is form and insert new index tuples.
* tuples. This is because replaces are actually deletes and inserts * This is because replaces are actually deletes and inserts and index
* and index tuple deletion is done automagically by the vacuum * tuple deletion is done automagically by the vacuum daemon. All we
* daemon. All we do is insert new index tuples. -cim 9/27/89 * do is insert new index tuples. -cim 9/27/89
*/ */
/* /*
@ -1525,8 +1535,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
} }
/* /*
* We will use the EState's per-tuple context for evaluating constraint * We will use the EState's per-tuple context for evaluating
* expressions (creating it if it's not already there). * constraint expressions (creating it if it's not already there).
*/ */
econtext = GetPerTupleExprContext(estate); econtext = GetPerTupleExprContext(estate);
@ -1568,10 +1578,10 @@ ExecConstraints(char *caller, ResultRelInfo *resultRelInfo,
for (attrChk = 1; attrChk <= natts; attrChk++) for (attrChk = 1; attrChk <= natts; attrChk++)
{ {
if (rel->rd_att->attrs[attrChk-1]->attnotnull && if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
heap_attisnull(tuple, attrChk)) heap_attisnull(tuple, attrChk))
elog(ERROR, "%s: Fail to add null value in not null attribute %s", elog(ERROR, "%s: Fail to add null value in not null attribute %s",
caller, NameStr(rel->rd_att->attrs[attrChk-1]->attname)); caller, NameStr(rel->rd_att->attrs[attrChk - 1]->attname));
} }
} }
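The ExecConstraints hunk rejects any row that stores NULL in a column declared NOT NULL, walking the attributes with 1-based numbers and heap_attisnull. A cut-down sketch of the same check over toy, 0-based structures; nothing below is the backend's actual API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    /* toy column descriptor: name plus a NOT NULL flag */
    typedef struct ColumnDesc
    {
        const char *attname;
        bool        attnotnull;
    } ColumnDesc;

    /* fail if any NOT NULL column holds a NULL, as in the hunk's attrChk loop */
    static void
    check_not_null(const ColumnDesc *cols, const bool *isnull, int natts,
                   const char *caller)
    {
        int         i;

        for (i = 0; i < natts; i++)
        {
            if (cols[i].attnotnull && isnull[i])
            {
                fprintf(stderr, "%s: Fail to add null value in not null attribute %s\n",
                        caller, cols[i].attname);
                exit(1);
            }
        }
    }

    int
    main(void)
    {
        ColumnDesc  cols[] = {{"id", true}, {"note", false}};
        bool        isnull[] = {false, true};   /* NULL only in the nullable column */

        check_not_null(cols, isnull, 2, "ExecAppend");
        puts("row passes its NOT NULL constraints");
        return 0;
    }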
File: src/backend/executor/execQual.c
@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.83 2001/01/29 00:39:18 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.84 2001/03/22 03:59:26 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -112,10 +112,11 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext, econtext,
isNull, isNull,
isDone)); isDone));
/* /*
* If refexpr yields NULL, result is always NULL, for now anyway. * If refexpr yields NULL, result is always NULL, for now anyway.
* (This means you cannot assign to an element or slice of an array * (This means you cannot assign to an element or slice of an
* that's NULL; it'll just stay NULL.) * array that's NULL; it'll just stay NULL.)
*/ */
if (*isNull) if (*isNull)
return (Datum) NULL; return (Datum) NULL;
@ -147,7 +148,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
/* If any index expr yields NULL, result is NULL or source array */ /* If any index expr yields NULL, result is NULL or source array */
if (*isNull) if (*isNull)
{ {
if (! isAssignment || array_source == NULL) if (!isAssignment || array_source == NULL)
return (Datum) NULL; return (Datum) NULL;
*isNull = false; *isNull = false;
return PointerGetDatum(array_source); return PointerGetDatum(array_source);
@ -166,10 +167,14 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext, econtext,
isNull, isNull,
NULL)); NULL));
/* If any index expr yields NULL, result is NULL or source array */
/*
* If any index expr yields NULL, result is NULL or source
* array
*/
if (*isNull) if (*isNull)
{ {
if (! isAssignment || array_source == NULL) if (!isAssignment || array_source == NULL)
return (Datum) NULL; return (Datum) NULL;
*isNull = false; *isNull = false;
return PointerGetDatum(array_source); return PointerGetDatum(array_source);
@ -189,9 +194,10 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext, econtext,
isNull, isNull,
NULL); NULL);
/* /*
* For now, can't cope with inserting NULL into an array, * For now, can't cope with inserting NULL into an array, so make
* so make it a no-op per discussion above... * it a no-op per discussion above...
*/ */
if (*isNull) if (*isNull)
{ {
@ -601,10 +607,12 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
if (thisArgIsDone != ExprSingleResult) if (thisArgIsDone != ExprSingleResult)
{ {
/* /*
* We allow only one argument to have a set value; we'd need * We allow only one argument to have a set value; we'd need
* much more complexity to keep track of multiple set arguments * much more complexity to keep track of multiple set
* (cf. ExecTargetList) and it doesn't seem worth it. * arguments (cf. ExecTargetList) and it doesn't seem worth
* it.
*/ */
if (argIsDone != ExprSingleResult) if (argIsDone != ExprSingleResult)
elog(ERROR, "Functions and operators can take only one set argument"); elog(ERROR, "Functions and operators can take only one set argument");
@ -639,8 +647,8 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
/* /*
* arguments is a list of expressions to evaluate before passing to * arguments is a list of expressions to evaluate before passing to
* the function manager. We skip the evaluation if it was already * the function manager. We skip the evaluation if it was already
* done in the previous call (ie, we are continuing the evaluation * done in the previous call (ie, we are continuing the evaluation of
* of a set-valued function). Otherwise, collect the current argument * a set-valued function). Otherwise, collect the current argument
* values into fcache->fcinfo. * values into fcache->fcinfo.
*/ */
if (fcache->fcinfo.nargs > 0 && !fcache->argsValid) if (fcache->fcinfo.nargs > 0 && !fcache->argsValid)
@ -664,6 +672,7 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
*/ */
if (fcache->func.fn_retset || fcache->hasSetArg) if (fcache->func.fn_retset || fcache->hasSetArg)
{ {
/* /*
* We need to return a set result. Complain if caller not ready * We need to return a set result. Complain if caller not ready
* to accept one. * to accept one.
@ -672,15 +681,16 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
elog(ERROR, "Set-valued function called in context that cannot accept a set"); elog(ERROR, "Set-valued function called in context that cannot accept a set");
/* /*
* This loop handles the situation where we have both a set argument * This loop handles the situation where we have both a set
* and a set-valued function. Once we have exhausted the function's * argument and a set-valued function. Once we have exhausted the
* value(s) for a particular argument value, we have to get the next * function's value(s) for a particular argument value, we have to
* argument value and start the function over again. We might have * get the next argument value and start the function over again.
* to do it more than once, if the function produces an empty result * We might have to do it more than once, if the function produces
* set for a particular input value. * an empty result set for a particular input value.
*/ */
for (;;) for (;;)
{ {
/* /*
* If function is strict, and there are any NULL arguments, * If function is strict, and there are any NULL arguments,
* skip calling the function (at least for this set of args). * skip calling the function (at least for this set of args).
@ -716,13 +726,15 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
if (*isDone != ExprEndResult) if (*isDone != ExprEndResult)
{ {
/* /*
* Got a result from current argument. If function itself * Got a result from current argument. If function itself
* returns set, flag that we want to reuse current argument * returns set, flag that we want to reuse current
* values on next call. * argument values on next call.
*/ */
if (fcache->func.fn_retset) if (fcache->func.fn_retset)
fcache->argsValid = true; fcache->argsValid = true;
/* /*
* Make sure we say we are returning a set, even if the * Make sure we say we are returning a set, even if the
* function itself doesn't return sets. * function itself doesn't return sets.
@ -762,11 +774,12 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
} }
else else
{ {
/* /*
* Non-set case: much easier. * Non-set case: much easier.
* *
* If function is strict, and there are any NULL arguments, * If function is strict, and there are any NULL arguments, skip
* skip calling the function and return NULL. * calling the function and return NULL.
*/ */
if (fcache->func.fn_strict) if (fcache->func.fn_strict)
{ {
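The hunk above spells out the strict-function rule for the non-set case: if the function is strict and any argument is NULL, the function is never called and the result is simply NULL. A compact standalone sketch of that short-circuit; the FuncCall struct and call_function helper are invented for the example.

    #include <stdio.h>
    #include <stdbool.h>

    /* toy call-info block: argument values plus their null flags */
    typedef struct FuncCall
    {
        int         nargs;
        int         args[4];
        bool        argnull[4];
        bool        fn_strict;      /* strict: any NULL input yields a NULL result */
    } FuncCall;

    /* skip the call entirely when strictness and a NULL argument demand it */
    static int
    call_function(FuncCall *fc, int (*fn) (int, int), bool *isNull)
    {
        int         i;

        if (fc->fn_strict)
        {
            for (i = 0; i < fc->nargs; i++)
            {
                if (fc->argnull[i])
                {
                    *isNull = true;
                    return 0;       /* the value is meaningless when *isNull is set */
                }
            }
        }
        *isNull = false;
        return fn(fc->args[0], fc->args[1]);
    }

    static int
    add(int a, int b)
    {
        return a + b;
    }

    int
    main(void)
    {
        FuncCall    fc = {2, {3, 4}, {false, true}, true};
        bool        isNull;
        int         result = call_function(&fc, add, &isNull);

        printf("isNull=%d result=%d\n", isNull, result);    /* isNull=1: add() never ran */
        return 0;
    }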
@ -852,9 +865,9 @@ ExecEvalFunc(Expr *funcClause,
FunctionCachePtr fcache; FunctionCachePtr fcache;
/* /*
* we extract the oid of the function associated with the func node and * we extract the oid of the function associated with the func node
* then pass the work onto ExecMakeFunctionResult which evaluates the * and then pass the work onto ExecMakeFunctionResult which evaluates
* arguments and returns the result of calling the function on the * the arguments and returns the result of calling the function on the
* evaluated arguments. * evaluated arguments.
* *
* this is nearly identical to the ExecEvalOper code. * this is nearly identical to the ExecEvalOper code.
@ -915,7 +928,7 @@ ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull)
* evaluation of 'not' is simple.. expr is false, then return 'true' * evaluation of 'not' is simple.. expr is false, then return 'true'
* and vice versa. * and vice versa.
*/ */
return BoolGetDatum(! DatumGetBool(expr_value)); return BoolGetDatum(!DatumGetBool(expr_value));
} }
/* ---------------------------------------------------------------- /* ----------------------------------------------------------------
@ -999,7 +1012,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
*/ */
if (*isNull) if (*isNull)
AnyNull = true; /* remember we got a null */ AnyNull = true; /* remember we got a null */
else if (! DatumGetBool(clause_value)) else if (!DatumGetBool(clause_value))
return clause_value; return clause_value;
} }
@ -1359,7 +1372,7 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
} }
else else
{ {
if (! DatumGetBool(expr_value)) if (!DatumGetBool(expr_value))
{ {
result = false; /* definitely FALSE */ result = false; /* definitely FALSE */
break; break;
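The ExecQual hunk evaluates an implicit-AND qual: a FALSE clause fails the qual immediately, and a NULL clause is resolved by the resultForNull flag (false for WHERE-style quals, true for constraint-style checks, where an unknown result is allowed to pass). A small sketch of that three-valued evaluation over toy inputs; the names are invented.

    #include <stdio.h>
    #include <stdbool.h>

    /* one clause result: a boolean datum plus a null flag */
    typedef struct QualResult
    {
        bool        value;
        bool        isnull;
    } QualResult;

    /* implicit-AND evaluation in the style of ExecQual */
    static bool
    eval_qual(const QualResult *quals, int nquals, bool resultForNull)
    {
        int         i;

        for (i = 0; i < nquals; i++)
        {
            if (quals[i].isnull)
            {
                if (!resultForNull)
                    return false;   /* NULL fails a WHERE-style qual */
            }
            else if (!quals[i].value)
                return false;       /* definitely FALSE */
        }
        return true;
    }

    int
    main(void)
    {
        QualResult  quals[] = {{true, false}, {false, true}};  /* TRUE AND NULL */

        printf("WHERE semantics:      %d\n", eval_qual(quals, 2, false));   /* 0 */
        printf("constraint semantics: %d\n", eval_qual(quals, 2, true));    /* 1 */
        return 0;
    }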
@ -1408,14 +1421,12 @@ ExecCleanTargetListLength(List *targetlist)
if (curTle->resdom != NULL) if (curTle->resdom != NULL)
{ {
if (! curTle->resdom->resjunk) if (!curTle->resdom->resjunk)
len++; len++;
} }
else else
{
len += curTle->fjoin->fj_nNodes; len += curTle->fjoin->fj_nNodes;
} }
}
return len; return len;
} }
@ -1440,6 +1451,7 @@ ExecTargetList(List *targetlist,
ExprDoneCond *isDone) ExprDoneCond *isDone)
{ {
MemoryContext oldContext; MemoryContext oldContext;
#define NPREALLOCDOMAINS 64 #define NPREALLOCDOMAINS 64
char nullsArray[NPREALLOCDOMAINS]; char nullsArray[NPREALLOCDOMAINS];
bool fjIsNullArray[NPREALLOCDOMAINS]; bool fjIsNullArray[NPREALLOCDOMAINS];
@ -1484,10 +1496,11 @@ ExecTargetList(List *targetlist,
* we have a really large targetlist. otherwise we use the stack. * we have a really large targetlist. otherwise we use the stack.
* *
* We also allocate a bool array that is used to hold fjoin result state, * We also allocate a bool array that is used to hold fjoin result state,
* and another array that holds the isDone status for each targetlist item. * and another array that holds the isDone status for each targetlist
* The isDone status is needed so that we can iterate, generating multiple * item. The isDone status is needed so that we can iterate,
* tuples, when one or more tlist items return sets. (We expect the caller * generating multiple tuples, when one or more tlist items return
* to call us again if we return *isDone = ExprMultipleResult.) * sets. (We expect the caller to call us again if we return *isDone
* = ExprMultipleResult.)
*/ */
if (nodomains > NPREALLOCDOMAINS) if (nodomains > NPREALLOCDOMAINS)
{ {
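The ExecTargetList hunk sizes its nulls, fjIsNull and itemIsDone arrays on the stack for the common case and falls back to heap allocation only when the target list exceeds NPREALLOCDOMAINS entries. A generic sketch of that stack-first pattern, with malloc standing in for palloc and the function name invented.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NPREALLOCDOMAINS 64     /* same idea as the constant in the hunk */

    /* use the stack array unless the target list is unusually large */
    static void
    process_domains(int nodomains)
    {
        char        nullsArray[NPREALLOCDOMAINS];
        char       *nulls;

        if (nodomains > NPREALLOCDOMAINS)
            nulls = malloc(nodomains);      /* rare: really large target list */
        else
            nulls = nullsArray;             /* common: no allocation at all */

        memset(nulls, ' ', nodomains);      /* ... fill in per-attribute state ... */
        printf("%d domains handled %s\n", nodomains,
               nulls == nullsArray ? "on the stack" : "on the heap");

        if (nulls != nullsArray)
            free(nulls);
    }

    int
    main(void)
    {
        process_domains(8);
        process_domains(200);
        return 0;
    }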
@ -1554,8 +1567,10 @@ ExecTargetList(List *targetlist,
ExecEvalFjoin(tle, econtext, fjIsNull, isDone); ExecEvalFjoin(tle, econtext, fjIsNull, isDone);
/* XXX this is wrong, but since fjoin code is completely broken /*
* anyway, I'm not going to worry about it now --- tgl 8/23/00 * XXX this is wrong, but since fjoin code is completely
* broken anyway, I'm not going to worry about it now --- tgl
* 8/23/00
*/ */
if (isDone && *isDone == ExprEndResult) if (isDone && *isDone == ExprEndResult)
{ {
@ -1594,6 +1609,7 @@ ExecTargetList(List *targetlist,
if (haveDoneSets) if (haveDoneSets)
{ {
/* /*
* note: can't get here unless we verified isDone != NULL * note: can't get here unless we verified isDone != NULL
*/ */
@ -1601,7 +1617,8 @@ ExecTargetList(List *targetlist,
{ {
/* /*
* all sets are done, so report that tlist expansion is complete. * all sets are done, so report that tlist expansion is
* complete.
*/ */
*isDone = ExprEndResult; *isDone = ExprEndResult;
MemoryContextSwitchTo(oldContext); MemoryContextSwitchTo(oldContext);
@ -1644,10 +1661,11 @@ ExecTargetList(List *targetlist,
} }
} }
} }
/* /*
* If we cannot make a tuple because some sets are empty, * If we cannot make a tuple because some sets are empty, we
* we still have to cycle the nonempty sets to completion, * still have to cycle the nonempty sets to completion, else
* else resources will not be released from subplans etc. * resources will not be released from subplans etc.
*/ */
if (*isDone == ExprEndResult) if (*isDone == ExprEndResult)
{ {
File: src/backend/executor/execScan.c
@ -12,7 +12,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.15 2001/01/24 19:42:54 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.16 2001/03/22 03:59:26 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
File: src/backend/executor/execTuples.c
@ -15,7 +15,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.46 2001/01/29 00:39:18 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.47 2001/03/22 03:59:26 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -422,7 +422,7 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
slot->val = (HeapTuple) NULL; slot->val = (HeapTuple) NULL;
slot->ttc_shouldFree = true; /* probably useless code... */ slot->ttc_shouldFree = true;/* probably useless code... */
/* ---------------- /* ----------------
* Drop the pin on the referenced buffer, if there is one. * Drop the pin on the referenced buffer, if there is one.
@ -541,11 +541,13 @@ ExecInitExtraTupleSlot(EState *estate)
TupleTableSlot * TupleTableSlot *
ExecInitNullTupleSlot(EState *estate, TupleDesc tupType) ExecInitNullTupleSlot(EState *estate, TupleDesc tupType)
{ {
TupleTableSlot* slot = ExecInitExtraTupleSlot(estate); TupleTableSlot *slot = ExecInitExtraTupleSlot(estate);
/* /*
* Since heap_getattr() will treat attributes beyond a tuple's t_natts * Since heap_getattr() will treat attributes beyond a tuple's t_natts
* as being NULL, we can make an all-nulls tuple just by making it be of * as being NULL, we can make an all-nulls tuple just by making it be
* zero length. However, the slot descriptor must match the real tupType. * of zero length. However, the slot descriptor must match the real
* tupType.
*/ */
HeapTuple nullTuple; HeapTuple nullTuple;
Datum values[1]; Datum values[1];
File: src/backend/executor/execUtils.c
@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.73 2001/01/29 00:39:19 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.74 2001/03/22 03:59:26 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -148,6 +148,7 @@ ExecAssignExprContext(EState *estate, CommonState *commonstate)
econtext->ecxt_innertuple = NULL; econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL; econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = CurrentMemoryContext; econtext->ecxt_per_query_memory = CurrentMemoryContext;
/* /*
* Create working memory for expression evaluation in this context. * Create working memory for expression evaluation in this context.
*/ */
@ -184,14 +185,16 @@ MakeExprContext(TupleTableSlot *slot,
econtext->ecxt_innertuple = NULL; econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL; econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = queryContext; econtext->ecxt_per_query_memory = queryContext;
/* /*
* We make the temporary context a child of current working context, * We make the temporary context a child of current working context,
* not of the specified queryContext. This seems reasonable but I'm * not of the specified queryContext. This seems reasonable but I'm
* not totally sure about it... * not totally sure about it...
* *
* Expression contexts made via this routine typically don't live long * Expression contexts made via this routine typically don't live long
* enough to get reset, so specify a minsize of 0. That avoids alloc'ing * enough to get reset, so specify a minsize of 0. That avoids
* any memory in the common case where expr eval doesn't use any. * alloc'ing any memory in the common case where expr eval doesn't use
* any.
*/ */
econtext->ecxt_per_tuple_memory = econtext->ecxt_per_tuple_memory =
AllocSetContextCreate(CurrentMemoryContext, AllocSetContextCreate(CurrentMemoryContext,
@ -467,7 +470,7 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
resultRelInfo->ri_NumIndices = 0; resultRelInfo->ri_NumIndices = 0;
/* checks for disabled indexes */ /* checks for disabled indexes */
if (! RelationGetForm(resultRelation)->relhasindex) if (!RelationGetForm(resultRelation)->relhasindex)
return; return;
if (IsIgnoringSystemIndexes() && if (IsIgnoringSystemIndexes() &&
IsSystemRelationName(RelationGetRelationName(resultRelation))) IsSystemRelationName(RelationGetRelationName(resultRelation)))
@ -635,8 +638,9 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
heapDescriptor = RelationGetDescr(heapRelation); heapDescriptor = RelationGetDescr(heapRelation);
/* /*
* We will use the EState's per-tuple context for evaluating predicates * We will use the EState's per-tuple context for evaluating
* and functional-index functions (creating it if it's not already there). * predicates and functional-index functions (creating it if it's not
* already there).
*/ */
econtext = GetPerTupleExprContext(estate); econtext = GetPerTupleExprContext(estate);
File: src/backend/executor/functions.c
@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.43 2001/01/29 00:39:19 tgl Exp $ * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.44 2001/03/22 03:59:26 momjian Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
@ -235,9 +235,7 @@ init_sql_fcache(FmgrInfo *finfo)
nargs * sizeof(Oid)); nargs * sizeof(Oid));
} }
else else
{
argOidVect = (Oid *) NULL; argOidVect = (Oid *) NULL;
}
tmp = SysCacheGetAttr(PROCOID, tmp = SysCacheGetAttr(PROCOID,
procedureTuple, procedureTuple,
@ -346,8 +344,8 @@ copy_function_result(SQLFunctionCachePtr fcache,
return resultSlot; /* no need to copy result */ return resultSlot; /* no need to copy result */
/* /*
* If first time through, we have to initialize the funcSlot's * If first time through, we have to initialize the funcSlot's tuple
* tuple descriptor. * descriptor.
*/ */
if (funcSlot->ttc_tupleDescriptor == NULL) if (funcSlot->ttc_tupleDescriptor == NULL)
{ {
@ -415,12 +413,14 @@ postquel_execute(execution_state *es,
/* /*
* If we are supposed to return a tuple, we return the tuple slot * If we are supposed to return a tuple, we return the tuple slot
* pointer converted to Datum. If we are supposed to return a simple * pointer converted to Datum. If we are supposed to return a
* value, then project out the first attribute of the result tuple * simple value, then project out the first attribute of the
* (ie, take the first result column of the final SELECT). * result tuple (ie, take the first result column of the final
* SELECT).
*/ */
if (fcache->returnsTuple) if (fcache->returnsTuple)
{ {
/* /*
* XXX do we need to remove junk attrs from the result tuple? * XXX do we need to remove junk attrs from the result tuple?
* Probably OK to leave them, as long as they are at the end. * Probably OK to leave them, as long as they are at the end.
@ -434,6 +434,7 @@ postquel_execute(execution_state *es,
1, 1,
resSlot->ttc_tupleDescriptor, resSlot->ttc_tupleDescriptor,
&(fcinfo->isnull)); &(fcinfo->isnull));
/* /*
* Note: if result type is pass-by-reference then we are * Note: if result type is pass-by-reference then we are
* returning a pointer into the tuple copied by * returning a pointer into the tuple copied by
Some files were not shown because too many files have changed in this diff.