Fix macros that were not properly surrounded by parens or braces.
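Background on the fix, as a minimal standalone sketch (illustrative only, not code from this commit; SWAP_BAD and SWAP_OK are hypothetical names): a multi-statement macro written as a bare brace block breaks when it is used as the body of an if with a trailing else, because the semicolon after the expanded block terminates the if early. Wrapping the body in do { ... } while(0) makes the expansion a single statement, which is the pattern applied throughout this commit.

/* Illustrative sketch only; not part of the PostgreSQL sources. */
#include <stdio.h>

/* Bare brace block: "SWAP_BAD(x, y);" expands to "{ ... };" and the
 * stray ';' ends the if-statement, so a following else fails to parse. */
#define SWAP_BAD(a, b) { int tmp = (a); (a) = (b); (b) = tmp; }

/* do { ... } while (0) swallows the trailing ';' and expands to exactly
 * one statement, so it composes with if/else like a function call. */
#define SWAP_OK(a, b) do { int tmp = (a); (a) = (b); (b) = tmp; } while (0)

int main(void)
{
	int x = 2, y = 1;

	if (x > y)
		SWAP_OK(x, y);          /* fine: one statement */
	else
		printf("already ordered\n");

	/* Using SWAP_BAD above instead would not compile:
	 *     if (x > y) { ... };   <- ';' closes the if
	 *     else ...              <- "else without a previous if"
	 */
	printf("x=%d y=%d\n", x, y);
	return 0;
}
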
src/backend/utils/adt/date.c

@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/utils/adt/date.c,v 1.24 1998/02/26 04:36:57 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/utils/adt/date.c,v 1.25 1998/06/15 18:39:34 momjian Exp $
*
* NOTES
* This code is actually (almost) unused.
@@ -187,8 +187,11 @@ reltimeout(int32 time)
} /* reltimeout() */


#define TMODULO(t,q,u) {q = (t / u); \
if (q != 0) t -= (q * u);}
#define TMODULO(t,q,u) \
do { \
q = (t / u); \
if (q != 0) t -= (q * u); \
} while(0)

static void
reltime2tm(int32 time, struct tm * tm)

src/backend/utils/adt/dt.c

@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/utils/adt/Attic/dt.c,v 1.53 1998/05/09 22:38:18 thomas Exp $
* $Header: /cvsroot/pgsql/src/backend/utils/adt/Attic/dt.c,v 1.54 1998/06/15 18:39:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -66,8 +66,12 @@ char *days[] = {"Sunday", "Monday", "Tuesday", "Wednesday",
/* TMODULO()
* Macro to replace modf(), which is broken on some platforms.
*/
#define TMODULO(t,q,u) {q = ((t < 0)? ceil(t / u): floor(t / u)); \
if (q != 0) t -= rint(q * u);}
#define TMODULO(t,q,u) \
do { \
q = ((t < 0)? ceil(t / u): floor(t / u)); \
if (q != 0) \
t -= rint(q * u); \
} while(0)

static void GetEpochTime(struct tm * tm);

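The dt.c TMODULO above stands in for modf(): it splits t into a whole number of units u (returned in q) and leaves the remainder in t. A small standalone sketch of how the fixed macro behaves (illustrative only; the main() and the sample values are not from the PostgreSQL sources):

#include <math.h>
#include <stdio.h>

/* The fixed dt.c macro: one statement, so it nests safely under if/else. */
#define TMODULO(t,q,u) \
do { \
	q = ((t < 0)? ceil(t / u): floor(t / u)); \
	if (q != 0) \
		t -= rint(q * u); \
} while(0)

int main(void)
{
	double t = 7325.0;          /* e.g. elapsed seconds */
	double q;

	TMODULO(t, q, 3600.0);      /* q = whole hours, t = leftover seconds */
	printf("hours=%.0f rest=%.0f\n", q, t);       /* hours=2 rest=125 */

	if (t != 0)
		TMODULO(t, q, 60.0);    /* legal as an if body, no extra braces */
	else
		q = 0;
	printf("minutes=%.0f seconds=%.0f\n", q, t);  /* minutes=2 seconds=5 */
	return 0;
}
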
src/backend/utils/cache/catcache.c

@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.27 1998/04/26 04:08:01 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.28 1998/06/15 18:39:40 momjian Exp $
*
* Notes:
* XXX This needs to use exception.h to handle recovery when
@@ -96,13 +96,17 @@ static long eqproc[] = {
*/
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
do { \
elog(DEBUG, "CatalogCacheInitializeCache: cache @%08lx", cache); \
if (relation) \
elog(DEBUG, "CatalogCacheInitializeCache: called w/relation(inval)"); \
else \
elog(DEBUG, "CatalogCacheInitializeCache: called w/relname %s", \
cache->cc_relname)
cache->cc_relname) \
} while(0)

#define CatalogCacheInitializeCache_DEBUG2 \
do { \
if (cache->cc_key[i] > 0) { \
elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %d", \
i+1, cache->cc_nkeys, cache->cc_key[i], \
@@ -110,7 +114,9 @@ static long eqproc[] = {
} else { \
elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
i+1, cache->cc_nkeys, cache->cc_key[i]); \
}
} \
} while(0)

#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
@@ -654,16 +660,20 @@ SystemCacheRelationFlushed(Oid relId)
*/
#ifdef CACHEDEBUG
#define InitSysCache_DEBUG1 \
elog(DEBUG, "InitSysCache: rid=%d id=%d nkeys=%d size=%d\n", \
cp->relationId, cp->id, cp->cc_nkeys, cp->cc_size); \
for (i = 0; i < nkeys; i += 1) { \
elog(DEBUG, "InitSysCache: key=%d len=%d skey=[%d %d %d %d]\n", \
cp->cc_key[i], cp->cc_klen[i], \
cp->cc_skey[i].sk_flags, \
cp->cc_skey[i].sk_attno, \
cp->cc_skey[i].sk_procedure, \
cp->cc_skey[i].sk_argument); \
}
do { \
elog(DEBUG, "InitSysCache: rid=%d id=%d nkeys=%d size=%d\n", \
cp->relationId, cp->id, cp->cc_nkeys, cp->cc_size); \
for (i = 0; i < nkeys; i += 1) \
{ \
elog(DEBUG, "InitSysCache: key=%d len=%d skey=[%d %d %d %d]\n", \
cp->cc_key[i], cp->cc_klen[i], \
cp->cc_skey[i].sk_flags, \
cp->cc_skey[i].sk_attno, \
cp->cc_skey[i].sk_procedure, \
cp->cc_skey[i].sk_argument); \
} \
} while(0)

#else
#define InitSysCache_DEBUG1
#endif

src/backend/utils/cache/relcache.c

@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.38 1998/04/27 04:07:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.39 1998/06/15 18:39:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -157,86 +157,80 @@ typedef struct relnamecacheent
* -----------------
*/
#define RelationCacheInsert(RELATION) \
{ RelIdCacheEnt *idhentry; RelNameCacheEnt *namehentry; \
char *relname; Oid reloid; bool found; \
relname = (RELATION->rd_rel->relname).data; \
namehentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
relname, \
HASH_ENTER, \
&found); \
if (namehentry == NULL) { \
elog(FATAL, "can't insert into relation descriptor cache"); \
} \
if (found && !IsBootstrapProcessingMode()) { \
/* used to give notice -- now just keep quiet */ ; \
} \
namehentry->reldesc = RELATION; \
reloid = RELATION->rd_id; \
idhentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
(char *)&reloid, \
do { \
RelIdCacheEnt *idhentry; RelNameCacheEnt *namehentry; \
char *relname; Oid reloid; bool found; \
relname = (RELATION->rd_rel->relname).data; \
namehentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
relname, \
HASH_ENTER, \
&found); \
if (idhentry == NULL) { \
elog(FATAL, "can't insert into relation descriptor cache"); \
} \
if (found && !IsBootstrapProcessingMode()) { \
/* used to give notice -- now just keep quiet */ ; \
} \
idhentry->reldesc = RELATION; \
}
if (namehentry == NULL) \
elog(FATAL, "can't insert into relation descriptor cache"); \
if (found && !IsBootstrapProcessingMode()) \
/* used to give notice -- now just keep quiet */ ; \
namehentry->reldesc = RELATION; \
reloid = RELATION->rd_id; \
idhentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
(char *)&reloid, \
HASH_ENTER, \
&found); \
if (idhentry == NULL) \
elog(FATAL, "can't insert into relation descriptor cache"); \
if (found && !IsBootstrapProcessingMode()) \
/* used to give notice -- now just keep quiet */ ; \
idhentry->reldesc = RELATION; \
} while(0)

#define RelationNameCacheLookup(NAME, RELATION) \
{ RelNameCacheEnt *hentry; bool found; \
hentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
(char *)NAME,HASH_FIND,&found); \
if (hentry == NULL) { \
elog(FATAL, "error in CACHE"); \
} \
if (found) { \
RELATION = hentry->reldesc; \
} \
else { \
RELATION = NULL; \
} \
}
#define RelationIdCacheLookup(ID, RELATION) \
{ RelIdCacheEnt *hentry; bool found; \
hentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
(char *)&(ID),HASH_FIND, &found); \
if (hentry == NULL) { \
elog(FATAL, "error in CACHE"); \
} \
if (found) { \
RELATION = hentry->reldesc; \
} \
else { \
RELATION = NULL; \
} \
}
#define RelationCacheDelete(RELATION) \
{ RelNameCacheEnt *namehentry; RelIdCacheEnt *idhentry; \
char *relname; Oid reloid; bool found; \
relname = (RELATION->rd_rel->relname).data; \
namehentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
relname, \
HASH_REMOVE, \
&found); \
if (namehentry == NULL) { \
elog(FATAL, "can't delete from relation descriptor cache"); \
} \
if (!found) { \
elog(NOTICE, "trying to delete a reldesc that does not exist."); \
} \
reloid = RELATION->rd_id; \
idhentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
(char *)&reloid, \
HASH_REMOVE, &found); \
if (idhentry == NULL) { \
elog(FATAL, "can't delete from relation descriptor cache"); \
} \
if (!found) { \
elog(NOTICE, "trying to delete a reldesc that does not exist."); \
} \
}
do { \
RelNameCacheEnt *hentry; bool found; \
hentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
(char *)NAME,HASH_FIND,&found); \
if (hentry == NULL) \
elog(FATAL, "error in CACHE"); \
if (found) \
RELATION = hentry->reldesc; \
else \
RELATION = NULL; \
} while(0)

#define RelationIdCacheLookup(ID, RELATION) \
do { \
RelIdCacheEnt *hentry; \
bool found; \
hentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
(char *)&(ID),HASH_FIND, &found); \
if (hentry == NULL) \
elog(FATAL, "error in CACHE"); \
if (found) \
RELATION = hentry->reldesc; \
else \
RELATION = NULL; \
} while(0)

#define RelationCacheDelete(RELATION) \
do { \
RelNameCacheEnt *namehentry; RelIdCacheEnt *idhentry; \
char *relname; Oid reloid; bool found; \
relname = (RELATION->rd_rel->relname).data; \
namehentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
relname, \
HASH_REMOVE, \
&found); \
if (namehentry == NULL) \
elog(FATAL, "can't delete from relation descriptor cache"); \
if (!found) \
elog(NOTICE, "trying to delete a reldesc that does not exist."); \
reloid = RELATION->rd_id; \
idhentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
(char *)&reloid, \
HASH_REMOVE, &found); \
if (idhentry == NULL) \
elog(FATAL, "can't delete from relation descriptor cache"); \
if (!found) \
elog(NOTICE, "trying to delete a reldesc that does not exist."); \
} while(0)

/* non-export function prototypes */
static void

src/backend/utils/mmgr/portalmem.c

@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.10 1998/02/26 04:38:23 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.11 1998/06/15 18:39:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -125,41 +125,49 @@ typedef struct portalhashent
static HTAB *PortalHashTable = NULL;

#define PortalHashTableLookup(NAME, PORTAL) \
{ PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
MemSet(key, 0, MAX_PORTALNAME_LEN); \
sprintf(key, "%s", NAME); \
hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
key, HASH_FIND, &found); \
if (hentry == NULL) \
elog(FATAL, "error in PortalHashTable"); \
if (found) \
PORTAL = hentry->portal; \
else \
PORTAL = NULL; \
}
do { \
PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
\
MemSet(key, 0, MAX_PORTALNAME_LEN); \
sprintf(key, "%s", NAME); \
hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
key, HASH_FIND, &found); \
if (hentry == NULL) \
elog(FATAL, "error in PortalHashTable"); \
if (found) \
PORTAL = hentry->portal; \
else \
PORTAL = NULL; \
} while(0)

#define PortalHashTableInsert(PORTAL) \
{ PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
MemSet(key, 0, MAX_PORTALNAME_LEN); \
sprintf(key, "%s", PORTAL->name); \
hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
key, HASH_ENTER, &found); \
if (hentry == NULL) \
elog(FATAL, "error in PortalHashTable"); \
if (found) \
elog(NOTICE, "trying to insert a portal name that exists."); \
hentry->portal = PORTAL; \
}
do { \
PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
\
MemSet(key, 0, MAX_PORTALNAME_LEN); \
sprintf(key, "%s", PORTAL->name); \
hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
key, HASH_ENTER, &found); \
if (hentry == NULL) \
elog(FATAL, "error in PortalHashTable"); \
if (found) \
elog(NOTICE, "trying to insert a portal name that exists."); \
hentry->portal = PORTAL; \
} while(0)

#define PortalHashTableDelete(PORTAL) \
{ PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
MemSet(key, 0, MAX_PORTALNAME_LEN); \
sprintf(key, "%s", PORTAL->name); \
hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
key, HASH_REMOVE, &found); \
if (hentry == NULL) \
elog(FATAL, "error in PortalHashTable"); \
if (!found) \
elog(NOTICE, "trying to delete portal name that does not exist."); \
}
{ \
PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
\
MemSet(key, 0, MAX_PORTALNAME_LEN); \
sprintf(key, "%s", PORTAL->name); \
hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
key, HASH_REMOVE, &found); \
if (hentry == NULL) \
elog(FATAL, "error in PortalHashTable"); \
if (!found) \
elog(NOTICE, "trying to delete portal name that does not exist."); \
} while(0)

static GlobalMemory PortalMemory = NULL;
static char PortalMemoryName[] = "Portal";

src/backend/utils/sort/psort.c

@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/utils/sort/Attic/psort.c,v 1.39 1998/02/26 04:38:29 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/utils/sort/Attic/psort.c,v 1.40 1998/06/15 18:39:45 momjian Exp $
*
* NOTES
* Sorts the first relation into the second relation.
@@ -222,20 +222,24 @@ inittapes(Sort *node)
*/


#define PUTTUP(NODE, TUP, FP) do {\
((Psortstate *)NODE->psortstate)->BytesWritten += (TUP)->t_len; \
fwrite((char *)TUP, (TUP)->t_len, 1, FP); \
fwrite((char *)&((TUP)->t_len), sizeof (tlendummy), 1, FP); \
} while (0)
#define PUTTUP(NODE, TUP, FP) \
( \
((Psortstate *)NODE->psortstate)->BytesWritten += (TUP)->t_len, \
fwrite((char *)TUP, (TUP)->t_len, 1, FP), \
fwrite((char *)&((TUP)->t_len), sizeof (tlendummy), 1, FP) \
)

#define ENDRUN(FP) fwrite((char *)&tlenzero, sizeof (tlenzero), 1, FP)
#define GETLEN(LEN, FP) fread((char *)&(LEN), sizeof (tlenzero), 1, FP)
#define ALLOCTUP(LEN) ((HeapTuple)palloc((unsigned)LEN))
#define GETTUP(NODE, TUP, LEN, FP) do {\
IncrProcessed(); \
((Psortstate *)NODE->psortstate)->BytesRead += (LEN) - sizeof (tlenzero); \
fread((char *)(TUP) + sizeof (tlenzero), (LEN) - sizeof (tlenzero), 1, FP); \
fread((char *)&tlendummy, sizeof (tlendummy), 1, FP); \
} while (0)
#define GETTUP(NODE, TUP, LEN, FP) \
( \
IncrProcessed(), \
((Psortstate *)NODE->psortstate)->BytesRead += (LEN) - sizeof (tlenzero), \
fread((char *)(TUP) + sizeof (tlenzero), (LEN) - sizeof (tlenzero), 1, FP), \
fread((char *)&tlendummy, sizeof (tlendummy), 1, FP) \
)

#define SETTUPLEN(TUP, LEN) (TUP)->t_len = LEN

/*

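Note that psort.c goes the other direction: PUTTUP and GETTUP become parenthesized comma expressions rather than do { ... } while(0) blocks, presumably so the macros stay usable wherever an expression is expected. A minimal standalone sketch of that idiom (illustrative only; LOG_AND_ADD is a hypothetical macro, not from psort.c):

#include <stdio.h>

/* A parenthesized comma expression groups several side effects into a
 * single expression whose value is the last operand -- the shape PUTTUP
 * and GETTUP take after this commit. */
#define LOG_AND_ADD(sum, v) \
	( \
		printf("adding %d\n", (v)), \
		(sum) += (v) \
	)

int main(void)
{
	int sum = 0;

	/* Works as a plain statement... */
	LOG_AND_ADD(sum, 3);

	/* ...and also in expression context, which a do { ... } while(0)
	 * macro cannot do. */
	int total = LOG_AND_ADD(sum, 4) + 100;

	printf("sum=%d total=%d\n", sum, total);   /* sum=7 total=107 */
	return 0;
}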