mirror of https://github.com/postgres/postgres.git synced 2025-06-05 23:56:58 +03:00

Convert macros to static inline functions (htup_details.h, itup.h)

Discussion: https://www.postgresql.org/message-id/flat/5b558da8-99fb-0a99-83dd-f72f05388517@enterprisedb.com
Peter Eisentraut 2025-01-23 12:07:38 +01:00
parent b15b8c5cf8
commit 34694ec888
3 changed files with 351 additions and 225 deletions
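The practical gain from this conversion is that the compiler now checks the argument types and each argument is evaluated exactly once; the diff below accordingly drops several "beware of multiple evaluations" notes. The standalone sketch below is not part of the commit (all names are invented for illustration); it only demonstrates the behavioral difference between a function-like macro and a static inline function.

#include <stdio.h>

static int ncalls = 0;

/* Helper with a visible side effect, so we can count evaluations. */
static int
next_value(void)
{
    ncalls++;
    return 3;
}

/* Function-like macro: the argument expression is evaluated twice. */
#define SQUARE_MACRO(x) ((x) * (x))

/* Static inline function: the argument is evaluated exactly once. */
static inline int
square_inline(int x)
{
    return x * x;
}

int
main(void)
{
    int     r;

    ncalls = 0;
    r = SQUARE_MACRO(next_value());
    printf("macro:  result %d, next_value() evaluated %d times\n", r, ncalls);

    ncalls = 0;
    r = square_inline(next_value());
    printf("inline: result %d, next_value() evaluated %d time\n", r, ncalls);

    return 0;
}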

View File

@@ -42,13 +42,14 @@
  * was used to upgrade from an older version, tuples might still have an
  * oid. Seems worthwhile to display that.
  */
-#define HeapTupleHeaderGetOidOld(tup) \
-( \
-	((tup)->t_infomask & HEAP_HASOID_OLD) ? \
-		*((Oid *) ((char *)(tup) + (tup)->t_hoff - sizeof(Oid))) \
-	: \
-		InvalidOid \
-)
+static inline Oid
+HeapTupleHeaderGetOidOld(const HeapTupleHeaderData *tup)
+{
+	if (tup->t_infomask & HEAP_HASOID_OLD)
+		return *((Oid *) ((char *) (tup) + (tup)->t_hoff - sizeof(Oid)));
+	else
+		return InvalidOid;
+}
 
 /*
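As a side note, the converted helper above is local to this file, so the sketch below (not from the commit; tuple_old_oid and report_old_oid are hypothetical names, and it assumes it is compiled as PostgreSQL backend or extension code) repeats the same logic as a local inline and shows how a caller might use it.

#include "postgres.h"
#include "access/htup_details.h"

/* Local copy of the converted helper above, for illustration only. */
static inline Oid
tuple_old_oid(const HeapTupleHeaderData *tup)
{
    if (tup->t_infomask & HEAP_HASOID_OLD)
        return *((Oid *) ((char *) tup + tup->t_hoff - sizeof(Oid)));
    return InvalidOid;
}

/* Example caller: report a pre-v12 oid if the tuple still carries one. */
static void
report_old_oid(HeapTuple tuple)
{
    Oid         oid = tuple_old_oid(tuple->t_data);

    if (OidIsValid(oid))
        elog(DEBUG1, "tuple still carries an old-style oid %u", oid);
}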

View File (htup_details.h)

@@ -225,12 +225,13 @@ struct HeapTupleHeaderData
  *
  * See also HeapTupleHeaderIsOnlyLocked, which also checks for a possible
  * aborted updater transaction.
- *
- * Beware of multiple evaluations of the argument.
  */
-#define HEAP_XMAX_IS_LOCKED_ONLY(infomask) \
-	(((infomask) & HEAP_XMAX_LOCK_ONLY) || \
-	 (((infomask) & (HEAP_XMAX_IS_MULTI | HEAP_LOCK_MASK)) == HEAP_XMAX_EXCL_LOCK))
+static inline bool
+HEAP_XMAX_IS_LOCKED_ONLY(uint16 infomask)
+{
+	return (infomask & HEAP_XMAX_LOCK_ONLY) ||
+		(infomask & (HEAP_XMAX_IS_MULTI | HEAP_LOCK_MASK)) == HEAP_XMAX_EXCL_LOCK;
+}
 
 /*
  * A tuple that has HEAP_XMAX_IS_MULTI and HEAP_XMAX_LOCK_ONLY but neither of
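For context, a minimal usage sketch of the new function, not part of the commit: it assumes backend or extension code built against the PostgreSQL headers, and xmax_is_only_a_lock is a hypothetical name.

#include "postgres.h"
#include "access/htup_details.h"

/* Hypothetical helper: does this tuple's xmax merely lock it? */
static bool
xmax_is_only_a_lock(HeapTupleHeader tuphdr)
{
    uint16      infomask = tuphdr->t_infomask;

    /* No xmax at all means nobody holds a lock or has updated it. */
    if (infomask & HEAP_XMAX_INVALID)
        return false;

    /* True for FOR UPDATE/SHARE style lockers, false for real updaters. */
    return HEAP_XMAX_IS_LOCKED_ONLY(infomask);
}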
@@ -250,22 +251,35 @@ struct HeapTupleHeaderData
  * bogus, regardless of where they stand with respect to the current valid
  * multixact range.
  */
-#define HEAP_LOCKED_UPGRADED(infomask) \
-( \
-	((infomask) & HEAP_XMAX_IS_MULTI) != 0 && \
-	((infomask) & HEAP_XMAX_LOCK_ONLY) != 0 && \
-	(((infomask) & (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK)) == 0) \
-)
+static inline bool
+HEAP_LOCKED_UPGRADED(uint16 infomask)
+{
+	return
+		(infomask & HEAP_XMAX_IS_MULTI) != 0 &&
+		(infomask & HEAP_XMAX_LOCK_ONLY) != 0 &&
+		(infomask & (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK)) == 0;
+}
 
 /*
  * Use these to test whether a particular lock is applied to a tuple
  */
-#define HEAP_XMAX_IS_SHR_LOCKED(infomask) \
-	(((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_SHR_LOCK)
-#define HEAP_XMAX_IS_EXCL_LOCKED(infomask) \
-	(((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_EXCL_LOCK)
-#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) \
-	(((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_KEYSHR_LOCK)
+static inline bool
+HEAP_XMAX_IS_SHR_LOCKED(int16 infomask)
+{
+	return (infomask & HEAP_LOCK_MASK) == HEAP_XMAX_SHR_LOCK;
+}
+
+static inline bool
+HEAP_XMAX_IS_EXCL_LOCKED(int16 infomask)
+{
+	return (infomask & HEAP_LOCK_MASK) == HEAP_XMAX_EXCL_LOCK;
+}
+
+static inline bool
+HEAP_XMAX_IS_KEYSHR_LOCKED(int16 infomask)
+{
+	return (infomask & HEAP_LOCK_MASK) == HEAP_XMAX_KEYSHR_LOCK;
+}
 
 /* turn these all off when Xmax is to change */
 #define HEAP_XMAX_BITS	(HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID | \
@@ -292,12 +306,11 @@ struct HeapTupleHeaderData
 #define HEAP_TUPLE_HAS_MATCH	HEAP_ONLY_TUPLE /* tuple has a join match */
 
 /*
- * HeapTupleHeader accessor macros
- *
- * Note: beware of multiple evaluations of "tup" argument.  But the Set
- * macros evaluate their other argument only once.
+ * HeapTupleHeader accessor functions
  */
 
+static inline bool HeapTupleHeaderXminFrozen(const HeapTupleHeaderData *tup);
+
 /*
  * HeapTupleHeaderGetRawXmin returns the "raw" xmin field, which is the xid
  * originally used to insert the tuple.  However, the tuple might actually
@@ -306,56 +319,78 @@ struct HeapTupleHeaderData
  * the xmin to FrozenTransactionId, and that value may still be encountered
  * on disk.
  */
-#define HeapTupleHeaderGetRawXmin(tup) \
-( \
-	(tup)->t_choice.t_heap.t_xmin \
-)
+static inline TransactionId
+HeapTupleHeaderGetRawXmin(const HeapTupleHeaderData *tup)
+{
+	return tup->t_choice.t_heap.t_xmin;
+}
 
-#define HeapTupleHeaderGetXmin(tup) \
-( \
-	HeapTupleHeaderXminFrozen(tup) ? \
-		FrozenTransactionId : HeapTupleHeaderGetRawXmin(tup) \
-)
+static inline TransactionId
+HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
+{
+	return HeapTupleHeaderXminFrozen(tup) ?
+		FrozenTransactionId : HeapTupleHeaderGetRawXmin(tup);
+}
 
-#define HeapTupleHeaderSetXmin(tup, xid) \
-( \
-	(tup)->t_choice.t_heap.t_xmin = (xid) \
-)
+static inline void
+HeapTupleHeaderSetXmin(HeapTupleHeaderData *tup, TransactionId xid)
+{
+	tup->t_choice.t_heap.t_xmin = xid;
+}
 
-#define HeapTupleHeaderXminCommitted(tup) \
-( \
-	((tup)->t_infomask & HEAP_XMIN_COMMITTED) != 0 \
-)
+static inline bool
+HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
+{
+	return (tup->t_infomask & HEAP_XMIN_COMMITTED) != 0;
+}
 
-#define HeapTupleHeaderXminInvalid(tup) \
-( \
-	((tup)->t_infomask & (HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID)) == \
-		HEAP_XMIN_INVALID \
-)
+static inline bool
+HeapTupleHeaderXminInvalid(const HeapTupleHeaderData *tup)
+{
+	return (tup->t_infomask & (HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID)) ==
+		HEAP_XMIN_INVALID;
+}
 
-#define HeapTupleHeaderXminFrozen(tup) \
-( \
-	((tup)->t_infomask & (HEAP_XMIN_FROZEN)) == HEAP_XMIN_FROZEN \
-)
+static inline bool
+HeapTupleHeaderXminFrozen(const HeapTupleHeaderData *tup)
+{
+	return (tup->t_infomask & HEAP_XMIN_FROZEN) == HEAP_XMIN_FROZEN;
+}
 
-#define HeapTupleHeaderSetXminCommitted(tup) \
-( \
-	AssertMacro(!HeapTupleHeaderXminInvalid(tup)), \
-	((tup)->t_infomask |= HEAP_XMIN_COMMITTED) \
-)
+static inline void
+HeapTupleHeaderSetXminCommitted(HeapTupleHeaderData *tup)
+{
+	Assert(!HeapTupleHeaderXminInvalid(tup));
+	tup->t_infomask |= HEAP_XMIN_COMMITTED;
+}
 
-#define HeapTupleHeaderSetXminInvalid(tup) \
-( \
-	AssertMacro(!HeapTupleHeaderXminCommitted(tup)), \
-	((tup)->t_infomask |= HEAP_XMIN_INVALID) \
-)
+static inline void
+HeapTupleHeaderSetXminInvalid(HeapTupleHeaderData *tup)
+{
+	Assert(!HeapTupleHeaderXminCommitted(tup));
+	tup->t_infomask |= HEAP_XMIN_INVALID;
+}
 
-#define HeapTupleHeaderSetXminFrozen(tup) \
-( \
-	AssertMacro(!HeapTupleHeaderXminInvalid(tup)), \
-	((tup)->t_infomask |= HEAP_XMIN_FROZEN) \
-)
+static inline void
+HeapTupleHeaderSetXminFrozen(HeapTupleHeaderData *tup)
+{
+	Assert(!HeapTupleHeaderXminInvalid(tup));
+	tup->t_infomask |= HEAP_XMIN_FROZEN;
+}
+
+static inline TransactionId
+HeapTupleHeaderGetRawXmax(const HeapTupleHeaderData *tup)
+{
+	return tup->t_choice.t_heap.t_xmax;
+}
+
+static inline void
+HeapTupleHeaderSetXmax(HeapTupleHeaderData *tup, TransactionId xid)
+{
+	tup->t_choice.t_heap.t_xmax = xid;
+}
+
+#ifndef FRONTEND
 
 /*
  * HeapTupleHeaderGetRawXmax gets you the raw Xmax field.  To find out the Xid
  * that updated a tuple, you might need to resolve the MultiXactId if certain
@@ -363,25 +398,17 @@ struct HeapTupleHeaderData
  * bits are set.  HeapTupleHeaderGetUpdateXid checks those bits and takes care
  * to resolve the MultiXactId if necessary.  This might involve multixact I/O,
  * so it should only be used if absolutely necessary.
  */
-#define HeapTupleHeaderGetUpdateXid(tup) \
-( \
-	(!((tup)->t_infomask & HEAP_XMAX_INVALID) && \
-	 ((tup)->t_infomask & HEAP_XMAX_IS_MULTI) && \
-	 !((tup)->t_infomask & HEAP_XMAX_LOCK_ONLY)) ? \
-		HeapTupleGetUpdateXid(tup) \
-	: \
-		HeapTupleHeaderGetRawXmax(tup) \
-)
-
-#define HeapTupleHeaderGetRawXmax(tup) \
-( \
-	(tup)->t_choice.t_heap.t_xmax \
-)
-
-#define HeapTupleHeaderSetXmax(tup, xid) \
-( \
-	(tup)->t_choice.t_heap.t_xmax = (xid) \
-)
+static inline TransactionId
+HeapTupleHeaderGetUpdateXid(const HeapTupleHeaderData *tup)
+{
+	if (!((tup)->t_infomask & HEAP_XMAX_INVALID) &&
+		((tup)->t_infomask & HEAP_XMAX_IS_MULTI) &&
+		!((tup)->t_infomask & HEAP_XMAX_LOCK_ONLY))
+		return HeapTupleGetUpdateXid(tup);
+	else
+		return HeapTupleHeaderGetRawXmax(tup);
+}
+
+#endif							/* FRONTEND */
 
 /*
  * HeapTupleHeaderGetRawCommandId will give you what's in the header whether
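As a usage illustration (again not part of the commit), the sketch below contrasts the raw xmax with the resolved updater xid; it assumes backend code, since resolving a MultiXactId goes through HeapTupleGetUpdateXid(), and report_updater is a hypothetical name.

#include "postgres.h"
#include "access/htup_details.h"

/* Hypothetical helper: log who last updated (or merely locked) a tuple. */
static void
report_updater(HeapTupleHeader tuphdr)
{
    TransactionId rawxmax = HeapTupleHeaderGetRawXmax(tuphdr);
    TransactionId updater = HeapTupleHeaderGetUpdateXid(tuphdr);

    /* When xmax is a MultiXactId containing an update, the two differ. */
    if (rawxmax != updater)
        elog(DEBUG1, "multixact %u resolved to updating xid %u",
             rawxmax, updater);
    else
        elog(DEBUG1, "raw xmax %u", rawxmax);
}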
@@ -389,147 +416,168 @@ struct HeapTupleHeaderData
  * HeapTupleHeaderGetCmax instead, but note that those Assert that you can
  * get a legitimate result, ie you are in the originating transaction!
  */
-#define HeapTupleHeaderGetRawCommandId(tup) \
-( \
-	(tup)->t_choice.t_heap.t_field3.t_cid \
-)
+static inline CommandId
+HeapTupleHeaderGetRawCommandId(const HeapTupleHeaderData *tup)
+{
+	return tup->t_choice.t_heap.t_field3.t_cid;
+}
 
 /* SetCmin is reasonably simple since we never need a combo CID */
-#define HeapTupleHeaderSetCmin(tup, cid) \
-do { \
-	Assert(!((tup)->t_infomask & HEAP_MOVED)); \
-	(tup)->t_choice.t_heap.t_field3.t_cid = (cid); \
-	(tup)->t_infomask &= ~HEAP_COMBOCID; \
-} while (0)
+static inline void
+HeapTupleHeaderSetCmin(HeapTupleHeaderData *tup, CommandId cid)
+{
+	Assert(!(tup->t_infomask & HEAP_MOVED));
+	tup->t_choice.t_heap.t_field3.t_cid = cid;
+	tup->t_infomask &= ~HEAP_COMBOCID;
+}
 
 /* SetCmax must be used after HeapTupleHeaderAdjustCmax; see combocid.c */
-#define HeapTupleHeaderSetCmax(tup, cid, iscombo) \
-do { \
-	Assert(!((tup)->t_infomask & HEAP_MOVED)); \
-	(tup)->t_choice.t_heap.t_field3.t_cid = (cid); \
-	if (iscombo) \
-		(tup)->t_infomask |= HEAP_COMBOCID; \
-	else \
-		(tup)->t_infomask &= ~HEAP_COMBOCID; \
-} while (0)
+static inline void
+HeapTupleHeaderSetCmax(HeapTupleHeaderData *tup, CommandId cid, bool iscombo)
+{
+	Assert(!((tup)->t_infomask & HEAP_MOVED));
+	tup->t_choice.t_heap.t_field3.t_cid = cid;
+	if (iscombo)
+		tup->t_infomask |= HEAP_COMBOCID;
+	else
+		tup->t_infomask &= ~HEAP_COMBOCID;
+}
 
-#define HeapTupleHeaderGetXvac(tup) \
-( \
-	((tup)->t_infomask & HEAP_MOVED) ? \
-		(tup)->t_choice.t_heap.t_field3.t_xvac \
-	: \
-		InvalidTransactionId \
-)
+static inline TransactionId
+HeapTupleHeaderGetXvac(const HeapTupleHeaderData *tup)
+{
+	if (tup->t_infomask & HEAP_MOVED)
+		return tup->t_choice.t_heap.t_field3.t_xvac;
+	else
+		return InvalidTransactionId;
+}
 
-#define HeapTupleHeaderSetXvac(tup, xid) \
-do { \
-	Assert((tup)->t_infomask & HEAP_MOVED); \
-	(tup)->t_choice.t_heap.t_field3.t_xvac = (xid); \
-} while (0)
+static inline void
+HeapTupleHeaderSetXvac(HeapTupleHeaderData *tup, TransactionId xid)
+{
+	Assert(tup->t_infomask & HEAP_MOVED);
+	tup->t_choice.t_heap.t_field3.t_xvac = xid;
+}
 
 StaticAssertDecl(MaxOffsetNumber < SpecTokenOffsetNumber,
 				 "invalid speculative token constant");
 
-#define HeapTupleHeaderIsSpeculative(tup) \
-( \
-	(ItemPointerGetOffsetNumberNoCheck(&(tup)->t_ctid) == SpecTokenOffsetNumber) \
-)
+static inline bool
+HeapTupleHeaderIsSpeculative(const HeapTupleHeaderData *tup)
+{
+	return ItemPointerGetOffsetNumberNoCheck(&tup->t_ctid) == SpecTokenOffsetNumber;
+}
 
-#define HeapTupleHeaderGetSpeculativeToken(tup) \
-( \
-	AssertMacro(HeapTupleHeaderIsSpeculative(tup)), \
-	ItemPointerGetBlockNumber(&(tup)->t_ctid) \
-)
+static inline BlockNumber
+HeapTupleHeaderGetSpeculativeToken(const HeapTupleHeaderData *tup)
+{
+	Assert(HeapTupleHeaderIsSpeculative(tup));
+	return ItemPointerGetBlockNumber(&tup->t_ctid);
+}
 
-#define HeapTupleHeaderSetSpeculativeToken(tup, token) \
-( \
-	ItemPointerSet(&(tup)->t_ctid, token, SpecTokenOffsetNumber) \
-)
+static inline void
+HeapTupleHeaderSetSpeculativeToken(HeapTupleHeaderData *tup, BlockNumber token)
+{
+	ItemPointerSet(&tup->t_ctid, token, SpecTokenOffsetNumber);
+}
 
-#define HeapTupleHeaderIndicatesMovedPartitions(tup) \
-	ItemPointerIndicatesMovedPartitions(&(tup)->t_ctid)
+static inline bool
+HeapTupleHeaderIndicatesMovedPartitions(const HeapTupleHeaderData *tup)
+{
+	return ItemPointerIndicatesMovedPartitions(&tup->t_ctid);
+}
 
-#define HeapTupleHeaderSetMovedPartitions(tup) \
-	ItemPointerSetMovedPartitions(&(tup)->t_ctid)
+static inline void
+HeapTupleHeaderSetMovedPartitions(HeapTupleHeaderData *tup)
+{
+	ItemPointerSetMovedPartitions(&tup->t_ctid);
+}
 
-#define HeapTupleHeaderGetDatumLength(tup) \
-	VARSIZE(tup)
+static inline uint32
+HeapTupleHeaderGetDatumLength(const HeapTupleHeaderData *tup)
+{
+	return VARSIZE(tup);
+}
 
-#define HeapTupleHeaderSetDatumLength(tup, len) \
-	SET_VARSIZE(tup, len)
+static inline void
+HeapTupleHeaderSetDatumLength(HeapTupleHeaderData *tup, uint32 len)
+{
+	SET_VARSIZE(tup, len);
+}
 
-#define HeapTupleHeaderGetTypeId(tup) \
-( \
-	(tup)->t_choice.t_datum.datum_typeid \
-)
+static inline Oid
+HeapTupleHeaderGetTypeId(const HeapTupleHeaderData *tup)
+{
+	return tup->t_choice.t_datum.datum_typeid;
+}
 
-#define HeapTupleHeaderSetTypeId(tup, typeid) \
-( \
-	(tup)->t_choice.t_datum.datum_typeid = (typeid) \
-)
+static inline void
+HeapTupleHeaderSetTypeId(HeapTupleHeaderData *tup, Oid datum_typeid)
+{
+	tup->t_choice.t_datum.datum_typeid = datum_typeid;
+}
 
-#define HeapTupleHeaderGetTypMod(tup) \
-( \
-	(tup)->t_choice.t_datum.datum_typmod \
-)
+static inline int32
+HeapTupleHeaderGetTypMod(const HeapTupleHeaderData *tup)
+{
+	return tup->t_choice.t_datum.datum_typmod;
+}
 
-#define HeapTupleHeaderSetTypMod(tup, typmod) \
-( \
-	(tup)->t_choice.t_datum.datum_typmod = (typmod) \
-)
+static inline void
+HeapTupleHeaderSetTypMod(HeapTupleHeaderData *tup, int32 typmod)
+{
+	tup->t_choice.t_datum.datum_typmod = typmod;
+}
 
 /*
  * Note that we stop considering a tuple HOT-updated as soon as it is known
  * aborted or the would-be updating transaction is known aborted.  For best
- * efficiency, check tuple visibility before using this macro, so that the
+ * efficiency, check tuple visibility before using this function, so that the
  * INVALID bits will be as up to date as possible.
  */
-#define HeapTupleHeaderIsHotUpdated(tup) \
-( \
-	((tup)->t_infomask2 & HEAP_HOT_UPDATED) != 0 && \
-	((tup)->t_infomask & HEAP_XMAX_INVALID) == 0 && \
-	!HeapTupleHeaderXminInvalid(tup) \
-)
+static inline bool
+HeapTupleHeaderIsHotUpdated(const HeapTupleHeaderData *tup)
+{
+	return
+		(tup->t_infomask2 & HEAP_HOT_UPDATED) != 0 &&
+		(tup->t_infomask & HEAP_XMAX_INVALID) == 0 &&
+		!HeapTupleHeaderXminInvalid(tup);
+}
 
-#define HeapTupleHeaderSetHotUpdated(tup) \
-( \
-	(tup)->t_infomask2 |= HEAP_HOT_UPDATED \
-)
+static inline void
+HeapTupleHeaderSetHotUpdated(HeapTupleHeaderData *tup)
+{
+	tup->t_infomask2 |= HEAP_HOT_UPDATED;
+}
 
-#define HeapTupleHeaderClearHotUpdated(tup) \
-( \
-	(tup)->t_infomask2 &= ~HEAP_HOT_UPDATED \
-)
+static inline void
+HeapTupleHeaderClearHotUpdated(HeapTupleHeaderData *tup)
+{
+	tup->t_infomask2 &= ~HEAP_HOT_UPDATED;
+}
 
-#define HeapTupleHeaderIsHeapOnly(tup) \
-( \
-	((tup)->t_infomask2 & HEAP_ONLY_TUPLE) != 0 \
-)
+static inline bool
+HeapTupleHeaderIsHeapOnly(const HeapTupleHeaderData *tup)
+{
+	return (tup->t_infomask2 & HEAP_ONLY_TUPLE) != 0;
+}
 
-#define HeapTupleHeaderSetHeapOnly(tup) \
-( \
-	(tup)->t_infomask2 |= HEAP_ONLY_TUPLE \
-)
+static inline void
+HeapTupleHeaderSetHeapOnly(HeapTupleHeaderData *tup)
+{
+	tup->t_infomask2 |= HEAP_ONLY_TUPLE;
+}
 
-#define HeapTupleHeaderClearHeapOnly(tup) \
-( \
-	(tup)->t_infomask2 &= ~HEAP_ONLY_TUPLE \
-)
+static inline void
+HeapTupleHeaderClearHeapOnly(HeapTupleHeaderData *tup)
+{
+	tup->t_infomask2 &= ~HEAP_ONLY_TUPLE;
+}
 
-#define HeapTupleHeaderHasMatch(tup) \
-( \
-	((tup)->t_infomask2 & HEAP_TUPLE_HAS_MATCH) != 0 \
-)
-
-#define HeapTupleHeaderSetMatch(tup) \
-( \
-	(tup)->t_infomask2 |= HEAP_TUPLE_HAS_MATCH \
-)
-
-#define HeapTupleHeaderClearMatch(tup) \
-( \
-	(tup)->t_infomask2 &= ~HEAP_TUPLE_HAS_MATCH \
-)
+/*
+ * These are used with both HeapTuple and MinimalTuple, so they must be
+ * macros.
+ */
 
 #define HeapTupleHeaderGetNatts(tup) \
 	((tup)->t_infomask2 & HEAP_NATTS_MASK)
@@ -547,7 +595,11 @@ StaticAssertDecl(MaxOffsetNumber < SpecTokenOffsetNumber,
  *		BITMAPLEN(NATTS) -
  *			Computes size of null bitmap given number of data columns.
  */
-#define BITMAPLEN(NATTS)	(((int)(NATTS) + 7) / 8)
+static inline int
+BITMAPLEN(int NATTS)
+{
+	return (NATTS + 7) / 8;
+}
 
 /*
  * MaxHeapTupleSize is the maximum allowed size of a heap tuple, including
@@ -651,48 +703,107 @@ struct MinimalTupleData
 
 #define SizeofMinimalTupleHeader offsetof(MinimalTupleData, t_bits)
 
+/*
+ * MinimalTuple accessor functions
+ */
+
+static inline bool
+HeapTupleHeaderHasMatch(const MinimalTupleData *tup)
+{
+	return (tup->t_infomask2 & HEAP_TUPLE_HAS_MATCH) != 0;
+}
+
+static inline void
+HeapTupleHeaderSetMatch(MinimalTupleData *tup)
+{
+	tup->t_infomask2 |= HEAP_TUPLE_HAS_MATCH;
+}
+
+static inline void
+HeapTupleHeaderClearMatch(MinimalTupleData *tup)
+{
+	tup->t_infomask2 &= ~HEAP_TUPLE_HAS_MATCH;
+}
+
 /*
  * GETSTRUCT - given a HeapTuple pointer, return address of the user data
  */
-#define GETSTRUCT(TUP) ((char *) ((TUP)->t_data) + (TUP)->t_data->t_hoff)
+static inline void *
+GETSTRUCT(const HeapTupleData *tuple)
+{
+	return ((char *) (tuple->t_data) + tuple->t_data->t_hoff);
+}
 
 /*
- * Accessor macros to be used with HeapTuple pointers.
+ * Accessor functions to be used with HeapTuple pointers.
  */
-#define HeapTupleHasNulls(tuple) \
-	(((tuple)->t_data->t_infomask & HEAP_HASNULL) != 0)
+
+static inline bool
+HeapTupleHasNulls(const HeapTupleData *tuple)
+{
+	return (tuple->t_data->t_infomask & HEAP_HASNULL) != 0;
+}
 
-#define HeapTupleNoNulls(tuple) \
-	(!((tuple)->t_data->t_infomask & HEAP_HASNULL))
+static inline bool
+HeapTupleNoNulls(const HeapTupleData *tuple)
+{
+	return !HeapTupleHasNulls(tuple);
+}
 
-#define HeapTupleHasVarWidth(tuple) \
-	(((tuple)->t_data->t_infomask & HEAP_HASVARWIDTH) != 0)
+static inline bool
+HeapTupleHasVarWidth(const HeapTupleData *tuple)
+{
+	return (tuple->t_data->t_infomask & HEAP_HASVARWIDTH) != 0;
+}
 
-#define HeapTupleAllFixed(tuple) \
-	(!((tuple)->t_data->t_infomask & HEAP_HASVARWIDTH))
+static inline bool
+HeapTupleAllFixed(const HeapTupleData *tuple)
+{
+	return !HeapTupleHasVarWidth(tuple);
+}
 
-#define HeapTupleHasExternal(tuple) \
-	(((tuple)->t_data->t_infomask & HEAP_HASEXTERNAL) != 0)
+static inline bool
+HeapTupleHasExternal(const HeapTupleData *tuple)
+{
+	return (tuple->t_data->t_infomask & HEAP_HASEXTERNAL) != 0;
+}
 
-#define HeapTupleIsHotUpdated(tuple) \
-	HeapTupleHeaderIsHotUpdated((tuple)->t_data)
+static inline bool
+HeapTupleIsHotUpdated(const HeapTupleData *tuple)
+{
+	return HeapTupleHeaderIsHotUpdated(tuple->t_data);
+}
 
-#define HeapTupleSetHotUpdated(tuple) \
-	HeapTupleHeaderSetHotUpdated((tuple)->t_data)
+static inline void
+HeapTupleSetHotUpdated(const HeapTupleData *tuple)
+{
+	HeapTupleHeaderSetHotUpdated(tuple->t_data);
+}
 
-#define HeapTupleClearHotUpdated(tuple) \
-	HeapTupleHeaderClearHotUpdated((tuple)->t_data)
+static inline void
+HeapTupleClearHotUpdated(const HeapTupleData *tuple)
+{
+	HeapTupleHeaderClearHotUpdated(tuple->t_data);
+}
 
-#define HeapTupleIsHeapOnly(tuple) \
-	HeapTupleHeaderIsHeapOnly((tuple)->t_data)
+static inline bool
+HeapTupleIsHeapOnly(const HeapTupleData *tuple)
+{
+	return HeapTupleHeaderIsHeapOnly(tuple->t_data);
+}
 
-#define HeapTupleSetHeapOnly(tuple) \
-	HeapTupleHeaderSetHeapOnly((tuple)->t_data)
+static inline void
+HeapTupleSetHeapOnly(const HeapTupleData *tuple)
+{
+	HeapTupleHeaderSetHeapOnly(tuple->t_data);
+}
 
-#define HeapTupleClearHeapOnly(tuple) \
-	HeapTupleHeaderClearHeapOnly((tuple)->t_data)
+static inline void
+HeapTupleClearHeapOnly(const HeapTupleData *tuple)
+{
+	HeapTupleHeaderClearHeapOnly(tuple->t_data);
+}
 
 /* prototypes for functions in common/heaptuple.c */
 extern Size heap_compute_data_size(TupleDesc tupleDesc,
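For readers unfamiliar with GETSTRUCT, the calling pattern is unchanged by this commit; the sketch below is the conventional catalog-lookup idiom, with get_relkind as a hypothetical helper name, assuming backend code.

#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/pg_class.h"
#include "utils/syscache.h"

/* Hypothetical helper: fetch pg_class.relkind for a relation. */
static char
get_relkind(Oid relid)
{
    HeapTuple   tuple;
    Form_pg_class classform;
    char        relkind;

    tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
    if (!HeapTupleIsValid(tuple))
        elog(ERROR, "cache lookup failed for relation %u", relid);

    /* GETSTRUCT returns a pointer just past the tuple header */
    classform = (Form_pg_class) GETSTRUCT(tuple);
    relkind = classform->relkind;

    ReleaseSysCache(tuple);
    return relkind;
}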

View File (itup.h)

@@ -68,9 +68,23 @@ typedef IndexAttributeBitMapData * IndexAttributeBitMap;
 #define INDEX_VAR_MASK	0x4000
 #define INDEX_NULL_MASK 0x8000
 
-#define IndexTupleSize(itup)		((Size) ((itup)->t_info & INDEX_SIZE_MASK))
-#define IndexTupleHasNulls(itup)	((((IndexTuple) (itup))->t_info & INDEX_NULL_MASK))
-#define IndexTupleHasVarwidths(itup) ((((IndexTuple) (itup))->t_info & INDEX_VAR_MASK))
+static inline Size
+IndexTupleSize(const IndexTupleData *itup)
+{
+	return (itup->t_info & INDEX_SIZE_MASK);
+}
+
+static inline bool
+IndexTupleHasNulls(const IndexTupleData *itup)
+{
+	return itup->t_info & INDEX_NULL_MASK;
+}
+
+static inline bool
+IndexTupleHasVarwidths(const IndexTupleData *itup)
+{
+	return itup->t_info & INDEX_VAR_MASK;
+}
 
 /* routines in indextuple.c */
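A hedged usage sketch for the converted itup.h helpers (not part of the commit; index_page_tuple_space is a hypothetical name, and it assumes backend code holding a valid index Page).

#include "postgres.h"
#include "access/itup.h"
#include "storage/bufpage.h"
#include "storage/itemid.h"

/* Hypothetical helper: total bytes used by the index tuples on one page. */
static Size
index_page_tuple_space(Page page)
{
    Size        total = 0;
    OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
    OffsetNumber off;

    for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
    {
        ItemId      itemid = PageGetItemId(page, off);

        if (!ItemIdIsUsed(itemid))
            continue;

        total += IndexTupleSize((IndexTuple) PageGetItem(page, itemid));
    }

    return total;
}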