Mirror of https://github.com/postgres/postgres.git
pgindent run before PG 9.1 beta 1.
@@ -39,9 +39,9 @@
 typedef struct GinStatsData
 {
     BlockNumber nPendingPages;
-    BlockNumber nTotalPages;
-    BlockNumber nEntryPages;
-    BlockNumber nDataPages;
+    BlockNumber nTotalPages;
+    BlockNumber nEntryPages;
+    BlockNumber nDataPages;
     int64 nEntries;
     int32 ginVersion;
 } GinStatsData;
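
GinStatsData above is the statistics record that GIN exposes to the planner; the next hunk shows the matching fields inside GinMetaPageData, which VACUUM keeps up to date. As a rough, non-authoritative sketch (assuming the ginGetStats() declaration that gin.h of this era also provides, and an already opened and locked GIN index relation), backend code could read the struct like this:

    #include "postgres.h"
    #include "access/gin.h"
    #include "utils/rel.h"

    /* Hypothetical helper: log the statistics of an open GIN index relation. */
    static void
    report_gin_stats(Relation ginIndex)
    {
        GinStatsData stats;

        ginGetStats(ginIndex, &stats);  /* fills in the fields shown above */

        elog(INFO, "GIN \"%s\": pending=%u total=%u entry=%u data=%u entries=" INT64_FORMAT " version=%d",
             RelationGetRelationName(ginIndex),
             stats.nPendingPages, stats.nTotalPages,
             stats.nEntryPages, stats.nDataPages,
             stats.nEntries, stats.ginVersion);
    }
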
@@ -73,14 +73,14 @@ typedef struct GinMetaPageData
     /*
      * Statistics for planner use (accurate as of last VACUUM)
      */
-    BlockNumber nTotalPages;
-    BlockNumber nEntryPages;
-    BlockNumber nDataPages;
+    BlockNumber nTotalPages;
+    BlockNumber nEntryPages;
+    BlockNumber nDataPages;
     int64 nEntries;

     /*
-     * GIN version number (ideally this should have been at the front, but
-     * too late now. Don't move it!)
+     * GIN version number (ideally this should have been at the front, but too
+     * late now. Don't move it!)
      *
      * Currently 1 (for indexes initialized in 9.1 or later)
      *
@@ -207,7 +207,7 @@ typedef signed char GinNullCategory;
 #define GinGetPostingTree(itup) GinItemPointerGetBlockNumber(&(itup)->t_tid)

 #define GinGetPostingOffset(itup) GinItemPointerGetBlockNumber(&(itup)->t_tid)
-#define GinSetPostingOffset(itup,n) ItemPointerSetBlockNumber(&(itup)->t_tid,n)
+#define GinSetPostingOffset(itup,n) ItemPointerSetBlockNumber(&(itup)->t_tid,n)
 #define GinGetPosting(itup) ((ItemPointer) ((char*)(itup) + GinGetPostingOffset(itup)))

 #define GinMaxItemSize \
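
GinGetPosting() above is plain pointer arithmetic: the posting list is stored inside the index tuple itself, at a byte offset (GinGetPostingOffset) from the start of the tuple. A self-contained sketch of the same idiom, using simplified stand-in types rather than the real IndexTuple and ItemPointerData layouts:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Simplified stand-ins; the real IndexTuple/ItemPointerData differ. */
    typedef struct { uint32_t blk; uint16_t off; } FakeItemPointer;
    typedef struct { uint32_t posting_offset; } FakeTupleHeader;

    /* Same idiom as GinGetPosting(): tuple address plus stored byte offset. */
    #define FakeGetPosting(tup) \
        ((FakeItemPointer *) ((char *) (tup) + (tup)->posting_offset))

    int
    main(void)
    {
        /* One "tuple": a header followed immediately by two posting entries. */
        size_t size = sizeof(FakeTupleHeader) + 2 * sizeof(FakeItemPointer);
        FakeTupleHeader *tup = calloc(1, size);

        tup->posting_offset = sizeof(FakeTupleHeader);
        FakeGetPosting(tup)[0].off = 1;
        FakeGetPosting(tup)[1].off = 7;

        printf("second posting entry offset number = %u\n",
               (unsigned) FakeGetPosting(tup)[1].off);
        free(tup);
        return 0;
    }
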
@@ -427,12 +427,12 @@ extern Buffer GinNewBuffer(Relation index);
 extern void GinInitBuffer(Buffer b, uint32 f);
 extern void GinInitPage(Page page, uint32 f, Size pageSize);
 extern void GinInitMetabuffer(Buffer b);
-extern int ginCompareEntries(GinState *ginstate, OffsetNumber attnum,
-    Datum a, GinNullCategory categorya,
-    Datum b, GinNullCategory categoryb);
-extern int ginCompareAttEntries(GinState *ginstate,
+extern int ginCompareEntries(GinState *ginstate, OffsetNumber attnum,
+    Datum a, GinNullCategory categorya,
+    Datum b, GinNullCategory categoryb);
+extern int ginCompareAttEntries(GinState *ginstate,
     OffsetNumber attnuma, Datum a, GinNullCategory categorya,
-    OffsetNumber attnumb, Datum b, GinNullCategory categoryb);
+    OffsetNumber attnumb, Datum b, GinNullCategory categoryb);
 extern Datum *ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
     Datum value, bool isNull,
     int32 *nentries, GinNullCategory **categories);
@@ -508,7 +508,7 @@ extern GinBtreeStack *ginPrepareFindLeafPage(GinBtree btree, BlockNumber blkno);
 extern GinBtreeStack *ginFindLeafPage(GinBtree btree, GinBtreeStack *stack);
 extern void freeGinBtreeStack(GinBtreeStack *stack);
 extern void ginInsertValue(GinBtree btree, GinBtreeStack *stack,
-    GinStatsData *buildStats);
+    GinStatsData *buildStats);
 extern void ginFindParents(GinBtree btree, GinBtreeStack *stack, BlockNumber rootBlkno);

 /* ginentrypage.c */
@@ -525,8 +525,8 @@ extern IndexTuple ginPageGetLinkItup(Buffer buf);
 /* gindatapage.c */
 extern int ginCompareItemPointers(ItemPointer a, ItemPointer b);
 extern uint32 ginMergeItemPointers(ItemPointerData *dst,
-    ItemPointerData *a, uint32 na,
-    ItemPointerData *b, uint32 nb);
+    ItemPointerData *a, uint32 na,
+    ItemPointerData *b, uint32 nb);

 extern void GinDataPageAddItem(Page page, void *data, OffsetNumber offset);
 extern void GinPageDeletePostingItem(Page page, OffsetNumber offset);
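
ginMergeItemPointers() above takes two ItemPointerData arrays plus their lengths and writes the merged result into dst, returning the new count. Purely as an illustration of that shape (not the backend's implementation), here is a self-contained merge of two sorted TID-like arrays, with a (block, offset) comparison in the spirit of ginCompareItemPointers():

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-in for ItemPointerData: (block number, offset number). */
    typedef struct { uint32_t blk; uint16_t off; } Tid;

    static int
    tid_cmp(const Tid *a, const Tid *b)
    {
        if (a->blk != b->blk)
            return (a->blk < b->blk) ? -1 : 1;
        if (a->off != b->off)
            return (a->off < b->off) ? -1 : 1;
        return 0;
    }

    /* Merge two sorted arrays into dst, keeping one copy of duplicates. */
    static uint32_t
    merge_tids(Tid *dst, const Tid *a, uint32_t na, const Tid *b, uint32_t nb)
    {
        uint32_t i = 0, j = 0, n = 0;

        while (i < na && j < nb)
        {
            int c = tid_cmp(&a[i], &b[j]);

            if (c < 0)
                dst[n++] = a[i++];
            else if (c > 0)
                dst[n++] = b[j++];
            else
            {
                dst[n++] = a[i++];
                j++;
            }
        }
        while (i < na)
            dst[n++] = a[i++];
        while (j < nb)
            dst[n++] = b[j++];
        return n;
    }

    int
    main(void)
    {
        Tid a[] = {{1, 2}, {3, 1}};
        Tid b[] = {{1, 2}, {2, 5}};
        Tid out[4];
        uint32_t n = merge_tids(out, a, 2, b, 2);

        printf("%u merged TIDs, first = (%u,%u)\n",
               (unsigned) n, (unsigned) out[0].blk, (unsigned) out[0].off);
        return 0;
    }
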
@@ -538,10 +538,10 @@ typedef struct
 } GinPostingTreeScan;

 extern GinPostingTreeScan *ginPrepareScanPostingTree(Relation index,
-    BlockNumber rootBlkno, bool searchMode);
+    BlockNumber rootBlkno, bool searchMode);
 extern void ginInsertItemPointers(GinPostingTreeScan *gdi,
-    ItemPointerData *items, uint32 nitem,
-    GinStatsData *buildStats);
+    ItemPointerData *items, uint32 nitem,
+    GinStatsData *buildStats);
 extern Buffer ginScanBeginPostingTree(GinPostingTreeScan *gdi);
 extern void ginDataFillRoot(GinBtree btree, Buffer root, Buffer lbuf, Buffer rbuf);
 extern void ginPrepareDataScan(GinBtree btree, Relation index);
@@ -561,7 +561,7 @@ extern void ginPrepareDataScan(GinBtree btree, Relation index);
  *
  * In each GinScanKeyData, nentries is the true number of entries, while
  * nuserentries is the number that extractQueryFn returned (which is what
- * we report to consistentFn). The "user" entries must come first.
+ * we report to consistentFn). The "user" entries must come first.
  */
 typedef struct GinScanKeyData *GinScanKey;

@@ -591,17 +591,17 @@ typedef struct GinScanKeyData
     OffsetNumber attnum;

     /*
-     * Match status data. curItem is the TID most recently tested (could be
-     * a lossy-page pointer). curItemMatches is TRUE if it passes the
+     * Match status data. curItem is the TID most recently tested (could be a
+     * lossy-page pointer). curItemMatches is TRUE if it passes the
      * consistentFn test; if so, recheckCurItem is the recheck flag.
-     * isFinished means that all the input entry streams are finished, so
-     * this key cannot succeed for any later TIDs.
+     * isFinished means that all the input entry streams are finished, so this
+     * key cannot succeed for any later TIDs.
      */
     ItemPointerData curItem;
     bool curItemMatches;
     bool recheckCurItem;
     bool isFinished;
-} GinScanKeyData;
+} GinScanKeyData;

 typedef struct GinScanEntryData
 {
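
The comment in this hunk describes the per-key match state: curItem is the last TID tested, curItemMatches and recheckCurItem report the consistent-check outcome, and isFinished says the key can produce no further TIDs. A toy, self-contained loop over stand-in types (hypothetical helper, not the real GIN scan code) that consumes state shaped like that:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Simplified stand-ins for the fields described in GinScanKeyData. */
    typedef struct { uint32_t blk; uint16_t off; } Tid;
    typedef struct
    {
        Tid curItem;            /* TID most recently tested */
        bool curItemMatches;    /* passed the consistent check? */
        bool recheckCurItem;    /* must the heap tuple be rechecked? */
        bool isFinished;        /* no more input TIDs for this key */
    } ToyScanKey;

    /* Toy "entry stream": fixed candidate TIDs with precomputed match flags. */
    static const struct { Tid tid; bool match; bool recheck; } stream[] = {
        {{1, 1}, true, false},
        {{1, 5}, false, false},
        {{2, 3}, true, true},
    };

    /* Hypothetical advance helper, mirroring the commented semantics. */
    static void
    toy_advance(ToyScanKey *key, size_t *pos)
    {
        if (*pos >= sizeof(stream) / sizeof(stream[0]))
        {
            key->isFinished = true;     /* all input streams exhausted */
            return;
        }
        key->curItem = stream[*pos].tid;
        key->curItemMatches = stream[*pos].match;
        key->recheckCurItem = stream[*pos].recheck;
        (*pos)++;
    }

    int
    main(void)
    {
        ToyScanKey key = {{0, 0}, false, false, false};
        size_t pos = 0;

        for (;;)
        {
            toy_advance(&key, &pos);
            if (key.isFinished)
                break;
            if (key.curItemMatches)
                printf("return (%u,%u)%s\n",
                       (unsigned) key.curItem.blk, (unsigned) key.curItem.off,
                       key.recheckCurItem ? " with recheck" : "");
        }
        return 0;
    }
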
@@ -633,7 +633,7 @@ typedef struct GinScanEntryData
     bool isFinished;
     bool reduceResult;
     uint32 predictNumberResult;
-} GinScanEntryData;
+} GinScanEntryData;

 typedef struct GinScanOpaqueData
 {
@@ -53,7 +53,7 @@
 #define RTOverAboveStrategyNumber 12
 #define RTOldContainsStrategyNumber 13 /* for old spelling of @> */
 #define RTOldContainedByStrategyNumber 14 /* for old spelling of <@ */
-#define RTKNNSearchStrategyNumber 15
+#define RTKNNSearchStrategyNumber 15

 /*
  * Page opaque data in a GiST index page.
@@ -79,13 +79,13 @@ typedef struct GISTSearchItem
     BlockNumber blkno; /* index page number, or InvalidBlockNumber */
     union
     {
-        GistNSN parentlsn; /* parent page's LSN, if index page */
+        GistNSN parentlsn; /* parent page's LSN, if index page */
         /* we must store parentlsn to detect whether a split occurred */
         GISTSearchHeapItem heap; /* heap info, if heap tuple */
     } data;
-} GISTSearchItem;
+} GISTSearchItem;

-#define GISTSearchItemIsHeap(item) ((item).blkno == InvalidBlockNumber)
+#define GISTSearchItemIsHeap(item) ((item).blkno == InvalidBlockNumber)

 /*
  * Within a GISTSearchTreeItem's chain, heap items always appear before
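
GISTSearchItemIsHeap() above discriminates the union member by whether blkno is InvalidBlockNumber. The same tagged-union-by-sentinel pattern, as a self-contained sketch with simplified stand-in types:

    #include <stdio.h>
    #include <stdint.h>

    #define INVALID_BLOCK ((uint32_t) 0xFFFFFFFF)   /* stand-in for InvalidBlockNumber */

    /* Simplified stand-in for GISTSearchItem: blkno doubles as the union tag. */
    typedef struct
    {
        uint32_t blkno;             /* index page number, or INVALID_BLOCK */
        union
        {
            uint64_t parentlsn;     /* only meaningful for index-page items */
            struct
            {
                uint32_t heapblk;   /* heap info, only for heap-tuple items */
                uint16_t heapoff;
            } heap;
        } data;
    } ToySearchItem;

    /* Same idea as GISTSearchItemIsHeap(): the sentinel block number is the tag. */
    #define ToyItemIsHeap(item) ((item).blkno == INVALID_BLOCK)

    int
    main(void)
    {
        ToySearchItem page = { 42, { .parentlsn = 1234 } };
        ToySearchItem heap = { INVALID_BLOCK, { .heap = { 7, 3 } } };

        printf("page item is heap? %d; heap item is heap? %d\n",
               ToyItemIsHeap(page), ToyItemIsHeap(heap));
        printf("heap item points at (%u,%u)\n",
               (unsigned) heap.data.heap.heapblk, (unsigned) heap.data.heap.heapoff);
        return 0;
    }
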
@@ -132,9 +132,9 @@ typedef GISTScanOpaqueData *GISTScanOpaque;
 /* XLog stuff */

 #define XLOG_GIST_PAGE_UPDATE 0x00
-/* #define XLOG_GIST_NEW_ROOT 0x20 */ /* not used anymore */
+/* #define XLOG_GIST_NEW_ROOT 0x20 */ /* not used anymore */
 #define XLOG_GIST_PAGE_SPLIT 0x30
-/* #define XLOG_GIST_INSERT_COMPLETE 0x40 */ /* not used anymore */
+/* #define XLOG_GIST_INSERT_COMPLETE 0x40 */ /* not used anymore */
 #define XLOG_GIST_CREATE_INDEX 0x50
 #define XLOG_GIST_PAGE_DELETE 0x60

@@ -147,7 +147,7 @@ typedef struct gistxlogPageUpdate
     * If this operation completes a page split, by inserting a downlink for
     * the split page, leftchild points to the left half of the split.
     */
-    BlockNumber leftchild;
+    BlockNumber leftchild;

     /* number of deleted offsets */
     uint16 ntodelete;
@@ -161,7 +161,7 @@ typedef struct gistxlogPageSplit
 {
     RelFileNode node;
     BlockNumber origblkno; /* splitted page */
-    BlockNumber origrlink; /* rightlink of the page before split */
+    BlockNumber origrlink; /* rightlink of the page before split */
     GistNSN orignsn; /* NSN of the page before split */
     bool origleaf; /* was splitted page a leaf page? */

@@ -210,8 +210,8 @@ typedef struct GISTInsertStack
     Page page;

     /*
-     * log sequence number from page->lsn to recognize page update and
-     * compare it with page's nsn to recognize page split
+     * log sequence number from page->lsn to recognize page update and compare
+     * it with page's nsn to recognize page split
      */
     GistNSN lsn;

@@ -300,7 +300,7 @@ extern void gist_xlog_cleanup(void);

 extern XLogRecPtr gistXLogUpdate(RelFileNode node, Buffer buffer,
     OffsetNumber *todelete, int ntodelete,
-    IndexTuple *itup, int ntup,
+    IndexTuple *itup, int ntup,
     Buffer leftchild);

 extern XLogRecPtr gistXLogSplit(RelFileNode node,
@@ -31,13 +31,13 @@ typedef struct BulkInsertStateData
 {
     BufferAccessStrategy strategy; /* our BULKWRITE strategy object */
     Buffer current_buf; /* current insertion target page */
-} BulkInsertStateData;
+} BulkInsertStateData;


 extern void RelationPutHeapTuple(Relation relation, Buffer buffer,
     HeapTuple tuple);
 extern Buffer RelationGetBufferForTuple(Relation relation, Size len,
     Buffer otherBuffer, int options,
-    struct BulkInsertStateData *bistate);
+    struct BulkInsertStateData * bistate);

 #endif /* HIO_H */
@@ -201,7 +201,7 @@ typedef HeapTupleHeaderData *HeapTupleHeader;
  * any visibility information, so we can overlay it on a visibility flag
  * instead of using up a dedicated bit.
  */
-#define HEAP_TUPLE_HAS_MATCH HEAP_ONLY_TUPLE /* tuple has a join match */
+#define HEAP_TUPLE_HAS_MATCH HEAP_ONLY_TUPLE /* tuple has a join match */

 /*
  * HeapTupleHeader accessor macros
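
The comment explains why HEAP_TUPLE_HAS_MATCH can reuse the HEAP_ONLY_TUPLE bit: the "has a join match" meaning is only applied where the tuple copy's visibility flags are never consulted, so no dedicated bit is needed. A minimal stand-alone illustration of that overlay pattern, with toy flag values rather than the real infomask bits:

    #include <stdio.h>
    #include <stdint.h>

    /* Toy flag word: one bit means "heap-only tuple" in its normal context... */
    #define TOY_HEAP_ONLY_TUPLE 0x04
    /* ...and the very same bit is reused as "has a join match" in a context
     * where the visibility meaning of the bit is never looked at. */
    #define TOY_TUPLE_HAS_MATCH TOY_HEAP_ONLY_TUPLE

    int
    main(void)
    {
        uint16_t infomask = 0;

        infomask |= TOY_TUPLE_HAS_MATCH;    /* mark a join match */
        printf("has match: %d\n", (infomask & TOY_TUPLE_HAS_MATCH) != 0);
        return 0;
    }
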
@@ -55,7 +55,7 @@ typedef IndexTupleData *IndexTuple;
 typedef struct IndexAttributeBitMapData
 {
     bits8 bits[(INDEX_MAX_KEYS + 8 - 1) / 8];
-} IndexAttributeBitMapData;
+} IndexAttributeBitMapData;

 typedef IndexAttributeBitMapData *IndexAttributeBitMap;

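
The bits[] array above is sized with the usual ceiling division, (INDEX_MAX_KEYS + 8 - 1) / 8, so that one bit per possible index column fits into whole bytes. A self-contained sketch of the same sizing plus set/test helpers, with a stand-in value where the real INDEX_MAX_KEYS comes from the build configuration:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define TOY_INDEX_MAX_KEYS 32   /* stand-in; the real value is a build-time setting */

    typedef uint8_t bits8;

    typedef struct
    {
        /* ceil(TOY_INDEX_MAX_KEYS / 8) bytes: one bit per index column */
        bits8 bits[(TOY_INDEX_MAX_KEYS + 8 - 1) / 8];
    } ToyAttributeBitMap;

    static void
    set_bit(ToyAttributeBitMap *map, int attno)     /* attno is 0-based here */
    {
        map->bits[attno / 8] |= (bits8) (1 << (attno % 8));
    }

    static int
    test_bit(const ToyAttributeBitMap *map, int attno)
    {
        return (map->bits[attno / 8] & (bits8) (1 << (attno % 8))) != 0;
    }

    int
    main(void)
    {
        ToyAttributeBitMap map;

        memset(&map, 0, sizeof(map));
        set_bit(&map, 10);
        printf("bitmap bytes = %zu, bit 10 set = %d, bit 11 set = %d\n",
               sizeof(map.bits), test_bit(&map, 10), test_bit(&map, 11));
        return 0;
    }
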
@@ -50,7 +50,7 @@ typedef struct HeapScanDescData
     int rs_mindex; /* marked tuple's saved index */
     int rs_ntuples; /* number of visible tuples on page */
     OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]; /* their offsets */
-} HeapScanDescData;
+} HeapScanDescData;

 /*
  * We use the same IndexScanDescData structure for both amgettuple-based
@@ -64,9 +64,9 @@ typedef struct IndexScanDescData
     Relation indexRelation; /* index relation descriptor */
     Snapshot xs_snapshot; /* snapshot to see */
     int numberOfKeys; /* number of index qualifier conditions */
-    int numberOfOrderBys; /* number of ordering operators */
-    ScanKey keyData; /* array of index qualifier descriptors */
-    ScanKey orderByData; /* array of ordering op descriptors */
+    int numberOfOrderBys; /* number of ordering operators */
+    ScanKey keyData; /* array of index qualifier descriptors */
+    ScanKey orderByData; /* array of ordering op descriptors */

     /* signaling to index AM about killing index tuples */
     bool kill_prior_tuple; /* last-returned tuple is dead */
@@ -87,7 +87,7 @@ typedef struct IndexScanDescData
     bool xs_hot_dead; /* T if all members of HOT chain are dead */
     OffsetNumber xs_next_hot; /* next member of HOT chain, if any */
     TransactionId xs_prev_xmax; /* previous HOT chain member's XMAX, if any */
-} IndexScanDescData;
+} IndexScanDescData;

 /* Struct for heap-or-index scans of system tables */
 typedef struct SysScanDescData
@@ -96,6 +96,6 @@ typedef struct SysScanDescData
     Relation irel; /* NULL if doing heap scan */
     HeapScanDesc scan; /* only valid in heap-scan case */
     IndexScanDesc iscan; /* only valid in index-scan case */
-} SysScanDescData;
+} SysScanDescData;

 #endif /* RELSCAN_H */
@@ -115,8 +115,8 @@ extern void TupleDescInitEntry(TupleDesc desc,
     int attdim);

 extern void TupleDescInitEntryCollation(TupleDesc desc,
-    AttrNumber attributeNumber,
-    Oid collationid);
+    AttrNumber attributeNumber,
+    Oid collationid);

 extern TupleDesc BuildDescForRelation(List *schema);

@@ -54,13 +54,13 @@ extern bool XactDeferrable;

 typedef enum
 {
-    SYNCHRONOUS_COMMIT_OFF, /* asynchronous commit */
-    SYNCHRONOUS_COMMIT_LOCAL_FLUSH, /* wait for local flush only */
-    SYNCHRONOUS_COMMIT_REMOTE_FLUSH /* wait for local and remote flush */
-} SyncCommitLevel;
+    SYNCHRONOUS_COMMIT_OFF, /* asynchronous commit */
+    SYNCHRONOUS_COMMIT_LOCAL_FLUSH, /* wait for local flush only */
+    SYNCHRONOUS_COMMIT_REMOTE_FLUSH /* wait for local and remote flush */
+} SyncCommitLevel;

 /* Define the default setting for synchonous_commit */
-#define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH
+#define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH

 /* Synchronous commit level */
 extern int synchronous_commit;
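
The enum and the SYNCHRONOUS_COMMIT_ON define above spell out that plain "on" is taken as the strongest level, remote flush. A small stand-alone sketch of that mapping (a toy string-to-level function, not the server's GUC machinery):

    #include <stdio.h>
    #include <string.h>

    typedef enum
    {
        SYNCHRONOUS_COMMIT_OFF,             /* asynchronous commit */
        SYNCHRONOUS_COMMIT_LOCAL_FLUSH,     /* wait for local flush only */
        SYNCHRONOUS_COMMIT_REMOTE_FLUSH     /* wait for local and remote flush */
    } SyncCommitLevel;

    /* Same equivalence as the header: "on" means remote flush. */
    #define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH

    /* Toy mapping of a setting string to a level. */
    static SyncCommitLevel
    parse_level(const char *value)
    {
        if (strcmp(value, "off") == 0)
            return SYNCHRONOUS_COMMIT_OFF;
        if (strcmp(value, "local") == 0)
            return SYNCHRONOUS_COMMIT_LOCAL_FLUSH;
        return SYNCHRONOUS_COMMIT_ON;       /* "on" and anything else */
    }

    int
    main(void)
    {
        printf("on -> %d, local -> %d\n",
               (int) parse_level("on"), (int) parse_level("local"));
        return 0;
    }
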
@@ -259,12 +259,12 @@ typedef struct CheckpointStatsData
     int ckpt_segs_removed; /* # of xlog segments deleted */
     int ckpt_segs_recycled; /* # of xlog segments recycled */

-    int ckpt_sync_rels; /* # of relations synced */
-    uint64 ckpt_longest_sync; /* Longest sync for one relation */
-    uint64 ckpt_agg_sync_time; /* The sum of all the individual sync
-                                * times, which is not necessarily the
-                                * same as the total elapsed time for
-                                * the entire sync phase. */
+    int ckpt_sync_rels; /* # of relations synced */
+    uint64 ckpt_longest_sync; /* Longest sync for one relation */
+    uint64 ckpt_agg_sync_time; /* The sum of all the individual sync
+                                * times, which is not necessarily the
+                                * same as the total elapsed time for
+                                * the entire sync phase. */
 } CheckpointStatsData;

 extern CheckpointStatsData CheckpointStats;
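
The ckpt_agg_sync_time comment makes a point worth keeping in mind: the sum of the individual sync times need not equal the elapsed time of the whole sync phase, since other work happens between syncs. A tiny stand-alone illustration of tracking the three quantities (longest, aggregate, elapsed) with made-up numbers:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        /* Pretend per-relation sync durations, in microseconds. */
        uint64_t sync_times[] = {1200, 800, 4300};
        uint64_t longest = 0, aggregate = 0;
        /* Pretend wall-clock timestamps around the whole sync phase. */
        uint64_t phase_start = 1000000, phase_end = 1010000;
        size_t i;

        for (i = 0; i < sizeof(sync_times) / sizeof(sync_times[0]); i++)
        {
            aggregate += sync_times[i];         /* like ckpt_agg_sync_time */
            if (sync_times[i] > longest)
                longest = sync_times[i];        /* like ckpt_longest_sync */
        }

        printf("longest=%llu agg=%llu elapsed=%llu (agg need not equal elapsed)\n",
               (unsigned long long) longest,
               (unsigned long long) aggregate,
               (unsigned long long) (phase_end - phase_start));
        return 0;
    }
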
@@ -88,7 +88,7 @@ typedef uint32 TimeLineID;
  * read those buffers except during crash recovery or if wal_level != minimal,
  * it is a win to use it in all cases where we sync on each write(). We could
  * allow O_DIRECT with fsync(), but it is unclear if fsync() could process
- * writes not buffered in the kernel. Also, O_DIRECT is never enough to force
+ * writes not buffered in the kernel. Also, O_DIRECT is never enough to force
  * data to the drives, it merely tries to bypass the kernel cache, so we still
  * need O_SYNC/O_DSYNC.
  */