Standard pgindent run for 8.1.
@@ -64,7 +64,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.15 2004/12/31 22:02:52 pgsql Exp $
* $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.16 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -91,10 +91,9 @@
typedef struct IndirectBlock
{
int nextSlot; /* next pointer slot to write or read */
struct IndirectBlock *nextup; /* parent indirect level, or NULL
* if top */
long ptrs[BLOCKS_PER_INDIR_BLOCK]; /* indexes of contained
* blocks */
struct IndirectBlock *nextup; /* parent indirect level, or NULL if
* top */
long ptrs[BLOCKS_PER_INDIR_BLOCK]; /* indexes of contained blocks */
} IndirectBlock;

/*
@@ -107,24 +106,23 @@ typedef struct LogicalTape
{
IndirectBlock *indirect; /* bottom of my indirect-block hierarchy */
bool writing; /* T while in write phase */
bool frozen; /* T if blocks should not be freed when
* read */
bool frozen; /* T if blocks should not be freed when read */
bool dirty; /* does buffer need to be written? */

/*
* The total data volume in the logical tape is numFullBlocks * BLCKSZ
* + lastBlockBytes. BUT: we do not update lastBlockBytes during
* writing, only at completion of a write phase.
* The total data volume in the logical tape is numFullBlocks * BLCKSZ +
* lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
* only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
int lastBlockBytes; /* valid bytes in last (incomplete) block */

/*
* Buffer for current data block. Note we don't bother to store the
* actual file block number of the data block (during the write phase
* it hasn't been assigned yet, and during read we don't care
* anymore). But we do need the relative block number so we can detect
* end-of-tape while reading.
* actual file block number of the data block (during the write phase it
* hasn't been assigned yet, and during read we don't care anymore). But
* we do need the relative block number so we can detect end-of-tape while
* reading.
*/
long curBlockNumber; /* this block's logical blk# within tape */
int pos; /* next read/write position in buffer */
@@ -144,20 +142,18 @@ struct LogicalTapeSet
long nFileBlocks; /* # of blocks used in underlying file */

/*
* We store the numbers of recycled-and-available blocks in
* freeBlocks[]. When there are no such blocks, we extend the
* underlying file. Note that the block numbers in freeBlocks are
* always in *decreasing* order, so that removing the last entry gives
* us the lowest free block.
* We store the numbers of recycled-and-available blocks in freeBlocks[].
* When there are no such blocks, we extend the underlying file. Note
* that the block numbers in freeBlocks are always in *decreasing* order,
* so that removing the last entry gives us the lowest free block.
*/
long *freeBlocks; /* resizable array */
int nFreeBlocks; /* # of currently free blocks */
int freeBlocksLen; /* current allocated length of
* freeBlocks[] */
int freeBlocksLen; /* current allocated length of freeBlocks[] */

/*
* tapes[] is declared size 1 since C wants a fixed size, but actually
* it is of length nTapes.
* tapes[] is declared size 1 since C wants a fixed size, but actually it
* is of length nTapes.
*/
int nTapes; /* # of logical tapes in set */
LogicalTape *tapes[1]; /* must be last in struct! */
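The tapes[1] member uses the old C idiom of a one-element trailing array: the struct is over-allocated so the array really has nTapes entries, which is what the LogicalTapeSetCreate hunk further below alludes to ("First LogicalTape pointer is already counted in sizeof(LogicalTapeSet)"). A minimal standalone sketch of that allocation pattern, using malloc and hypothetical names rather than the backend's palloc and types:

#include <stdlib.h>

typedef struct TapeSetSketch
{
    int   nTapes;       /* # of logical tapes in set */
    void *tapes[1];     /* declared size 1, really nTapes long; must be last */
} TapeSetSketch;

static TapeSetSketch *
tapeset_alloc(int ntapes)
{
    /* the first tapes[] slot is already counted in sizeof(TapeSetSketch) */
    TapeSetSketch *ts = malloc(sizeof(TapeSetSketch) +
                               (ntapes - 1) * sizeof(void *));

    if (ts)
        ts->nTapes = ntapes;
    return ts;
}

C99 flexible array members later made the "- 1" adjustment unnecessary, but the over-allocation idea is the same.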
@@ -232,9 +228,9 @@ static long
ltsGetFreeBlock(LogicalTapeSet *lts)
{
/*
* If there are multiple free blocks, we select the one appearing last
* in freeBlocks[]. If there are none, assign the next block at the
* end of the file.
* If there are multiple free blocks, we select the one appearing last in
* freeBlocks[]. If there are none, assign the next block at the end of
* the file.
*/
if (lts->nFreeBlocks > 0)
return lts->freeBlocks[--lts->nFreeBlocks];
@@ -258,14 +254,14 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
{
lts->freeBlocksLen *= 2;
lts->freeBlocks = (long *) repalloc(lts->freeBlocks,
lts->freeBlocksLen * sizeof(long));
lts->freeBlocksLen * sizeof(long));
}

/*
* Insert blocknum into array, preserving decreasing order (so that
* ltsGetFreeBlock returns the lowest available block number). This
* could get fairly slow if there were many free blocks, but we don't
* expect there to be very many at one time.
* ltsGetFreeBlock returns the lowest available block number). This could
* get fairly slow if there were many free blocks, but we don't expect
* there to be very many at one time.
*/
ndx = lts->nFreeBlocks++;
ptr = lts->freeBlocks + ndx;
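The two hunks above describe the free-block bookkeeping: the array is kept in decreasing order so that popping the last element always yields the lowest block number, and releasing a block inserts it by shifting smaller entries up. A self-contained sketch of the same idea with a fixed-size array and hypothetical names (the real code grows the array with repalloc instead of refusing):

#define MAX_FREE 128

static long freeBlocks[MAX_FREE];   /* kept in *decreasing* order */
static int  nFreeBlocks = 0;
static long nFileBlocks = 0;        /* next never-used block at end of file */

/* Return the lowest available block, or extend the file. */
static long
get_free_block(void)
{
    if (nFreeBlocks > 0)
        return freeBlocks[--nFreeBlocks];   /* last entry is the smallest */
    return nFileBlocks++;
}

/* Put blocknum back, keeping the array in decreasing order. */
static void
release_block(long blocknum)
{
    int ndx;

    if (nFreeBlocks >= MAX_FREE)
        return;                     /* real code would enlarge the array here */

    /* walk from the small end, shifting smaller entries up one slot */
    ndx = nFreeBlocks++;
    while (ndx > 0 && freeBlocks[ndx - 1] < blocknum)
    {
        freeBlocks[ndx] = freeBlocks[ndx - 1];
        ndx--;
    }
    freeBlocks[ndx] = blocknum;
}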
@@ -293,8 +289,8 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect,
if (indirect->nextSlot >= BLOCKS_PER_INDIR_BLOCK)
{
/*
* This indirect block is full, so dump it out and recursively
* save its address in the next indirection level. Create a new
* This indirect block is full, so dump it out and recursively save
* its address in the next indirection level. Create a new
* indirection level if there wasn't one before.
*/
long indirblock = ltsGetFreeBlock(lts);
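The comment describes the general shape of the indirect-block hierarchy: when the bottom-level block fills, it is flushed and its address is recorded one level up, creating a new level if none exists yet. A simplified in-memory sketch of that recursion, with a tiny fan-out and a caller-supplied flush callback standing in for the real block I/O (hypothetical names, error handling omitted):

#include <stdlib.h>

#define PTRS_PER_INDIR 4            /* tiny for illustration */

typedef struct Indir
{
    int          nextSlot;          /* next ptrs[] slot to fill */
    struct Indir *parent;           /* next indirection level, or NULL if top */
    long         ptrs[PTRS_PER_INDIR];
} Indir;

/* Record blocknum at the bottom level, spilling upward when a level fills. */
static void
record_block(Indir *ib, long blocknum, long (*flush)(const long *ptrs, int n))
{
    if (ib->nextSlot >= PTRS_PER_INDIR)
    {
        /* dump the full block and remember where it went, one level up */
        long addr = flush(ib->ptrs, PTRS_PER_INDIR);

        if (ib->parent == NULL)
            ib->parent = calloc(1, sizeof(Indir));  /* allocation check omitted */
        record_block(ib->parent, addr, flush);
        ib->nextSlot = 0;
    }
    ib->ptrs[ib->nextSlot++] = blocknum;
}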
@@ -336,8 +332,8 @@ ltsRewindIndirectBlock(LogicalTapeSet *lts,
indirect->ptrs[indirect->nextSlot] = -1L;

/*
* If block is not topmost, write it out, and recurse to obtain
* address of first block in this hierarchy level. Read that one in.
* If block is not topmost, write it out, and recurse to obtain address of
* first block in this hierarchy level. Read that one in.
*/
if (indirect->nextup != NULL)
{
@@ -371,8 +367,8 @@ ltsRewindFrozenIndirectBlock(LogicalTapeSet *lts,
IndirectBlock *indirect)
{
/*
* If block is not topmost, recurse to obtain address of first block
* in this hierarchy level. Read that one in.
* If block is not topmost, recurse to obtain address of first block in
* this hierarchy level. Read that one in.
*/
if (indirect->nextup != NULL)
{
@@ -448,8 +444,8 @@ ltsRecallPrevBlockNum(LogicalTapeSet *lts,
ltsReadBlock(lts, indirblock, (void *) indirect->ptrs);

/*
* The previous block would only have been written out if full, so
* we need not search it for a -1 sentinel.
* The previous block would only have been written out if full, so we
* need not search it for a -1 sentinel.
*/
indirect->nextSlot = BLOCKS_PER_INDIR_BLOCK + 1;
}
@@ -471,8 +467,8 @@ LogicalTapeSetCreate(int ntapes)
int i;

/*
* Create top-level struct. First LogicalTape pointer is already
* counted in sizeof(LogicalTapeSet).
* Create top-level struct. First LogicalTape pointer is already counted
* in sizeof(LogicalTapeSet).
*/
Assert(ntapes > 0);
lts = (LogicalTapeSet *) palloc(sizeof(LogicalTapeSet) +
@@ -617,8 +613,8 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
if (lt->writing)
{
/*
* Completion of a write phase. Flush last partial data
* block, flush any partial indirect blocks, rewind for normal
* Completion of a write phase. Flush last partial data block,
* flush any partial indirect blocks, rewind for normal
* (destructive) read.
*/
if (lt->dirty)
@@ -630,8 +626,8 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
else
{
/*
* This is only OK if tape is frozen; we rewind for (another)
* read pass.
* This is only OK if tape is frozen; we rewind for (another) read
* pass.
*/
Assert(lt->frozen);
datablocknum = ltsRewindFrozenIndirectBlock(lts, lt->indirect);
@@ -656,8 +652,8 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
* could add more code to free any unread blocks, but in current
* usage of this module it'd be useless code.
* could add more code to free any unread blocks, but in current usage
* of this module it'd be useless code.
*/
IndirectBlock *ib,
*nextib;
@@ -757,8 +753,8 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum)
Assert(lt->writing);

/*
* Completion of a write phase. Flush last partial data block, flush
* any partial indirect blocks, rewind for nondestructive read.
* Completion of a write phase. Flush last partial data block, flush any
* partial indirect blocks, rewind for nondestructive read.
*/
if (lt->dirty)
ltsDumpBuffer(lts, lt);
@@ -826,9 +822,9 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
return false; /* a seek too far... */

/*
* OK, we need to back up nblocks blocks. This implementation would
* be pretty inefficient for long seeks, but we really aren't
* expecting that (a seek over one tuple is typical).
* OK, we need to back up nblocks blocks. This implementation would be
* pretty inefficient for long seeks, but we really aren't expecting that
* (a seek over one tuple is typical).
*/
while (nblocks-- > 0)
{
@@ -883,9 +879,9 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
return false;

/*
* OK, advance or back up to the target block. This implementation
* would be pretty inefficient for long seeks, but we really aren't
* expecting that (a seek over one tuple is typical).
* OK, advance or back up to the target block. This implementation would
* be pretty inefficient for long seeks, but we really aren't expecting
* that (a seek over one tuple is typical).
*/
while (lt->curBlockNumber > blocknum)
{
@@ -78,7 +78,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.51 2005/10/03 22:55:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.52 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,7 +102,7 @@

/* GUC variable */
#ifdef TRACE_SORT
bool trace_sort = false;
bool trace_sort = false;
#endif


@@ -112,8 +112,7 @@ bool trace_sort = false;
*/
typedef enum
{
TSS_INITIAL, /* Loading tuples; still within memory
* limit */
TSS_INITIAL, /* Loading tuples; still within memory limit */
TSS_BUILDRUNS, /* Loading tuples; writing to tape */
TSS_SORTEDINMEM, /* Sort completed entirely in memory */
TSS_SORTEDONTAPE, /* Sort completed, final run is on tape */
@@ -135,13 +134,12 @@ struct Tuplesortstate
TupSortStatus status; /* enumerated value as shown above */
bool randomAccess; /* did caller request random access? */
long availMem; /* remaining memory available, in bytes */
LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp
* file */
LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */

/*
* These function pointers decouple the routines that must know what
* kind of tuple we are sorting from the routines that don't need to
* know it. They are set up by the tuplesort_begin_xxx routines.
* These function pointers decouple the routines that must know what kind
* of tuple we are sorting from the routines that don't need to know it.
* They are set up by the tuplesort_begin_xxx routines.
*
* Function to compare two tuples; result is per qsort() convention, ie:
*
@@ -150,83 +148,78 @@ struct Tuplesortstate
int (*comparetup) (Tuplesortstate *state, const void *a, const void *b);

/*
* Function to copy a supplied input tuple into palloc'd space. (NB:
* we assume that a single pfree() is enough to release the tuple
* later, so the representation must be "flat" in one palloc chunk.)
* state->availMem must be decreased by the amount of space used.
* Function to copy a supplied input tuple into palloc'd space. (NB: we
* assume that a single pfree() is enough to release the tuple later, so
* the representation must be "flat" in one palloc chunk.) state->availMem
* must be decreased by the amount of space used.
*/
void *(*copytup) (Tuplesortstate *state, void *tup);

/*
* Function to write a stored tuple onto tape. The representation of
* the tuple on tape need not be the same as it is in memory;
* requirements on the tape representation are given below. After
* writing the tuple, pfree() it, and increase state->availMem by the
* amount of memory space thereby released.
* Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() it, and increase state->availMem by the amount of memory space
* thereby released.
*/
void (*writetup) (Tuplesortstate *state, int tapenum, void *tup);

/*
* Function to read a stored tuple from tape back into memory. 'len'
* is the already-read length of the stored tuple. Create and return
* a palloc'd copy, and decrease state->availMem by the amount of
* memory space consumed.
* Function to read a stored tuple from tape back into memory. 'len' is
* the already-read length of the stored tuple. Create and return a
* palloc'd copy, and decrease state->availMem by the amount of memory
* space consumed.
*/
void *(*readtup) (Tuplesortstate *state, int tapenum, unsigned int len);

/*
* This array holds pointers to tuples in sort memory. If we are in
* state INITIAL, the tuples are in no particular order; if we are in
* state SORTEDINMEM, the tuples are in final sorted order; in states
* BUILDRUNS and FINALMERGE, the tuples are organized in "heap" order
* per Algorithm H. (Note that memtupcount only counts the tuples
* that are part of the heap --- during merge passes, memtuples[]
* entries beyond TAPERANGE are never in the heap and are used to hold
* pre-read tuples.) In state SORTEDONTAPE, the array is not used.
* This array holds pointers to tuples in sort memory. If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
* SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
* and FINALMERGE, the tuples are organized in "heap" order per Algorithm
* H. (Note that memtupcount only counts the tuples that are part of the
* heap --- during merge passes, memtuples[] entries beyond TAPERANGE are
* never in the heap and are used to hold pre-read tuples.) In state
* SORTEDONTAPE, the array is not used.
*/
void **memtuples; /* array of pointers to palloc'd tuples */
int memtupcount; /* number of tuples currently present */
int memtupsize; /* allocated length of memtuples array */

/*
* While building initial runs, this array holds the run number for
* each tuple in memtuples[]. During merge passes, we re-use it to
* hold the input tape number that each tuple in the heap was read
* from, or to hold the index of the next tuple pre-read from the same
* tape in the case of pre-read entries. This array is never
* allocated unless we need to use tapes. Whenever it is allocated,
* it has the same length as memtuples[].
* While building initial runs, this array holds the run number for each
* tuple in memtuples[]. During merge passes, we re-use it to hold the
* input tape number that each tuple in the heap was read from, or to hold
* the index of the next tuple pre-read from the same tape in the case of
* pre-read entries. This array is never allocated unless we need to use
* tapes. Whenever it is allocated, it has the same length as
* memtuples[].
*/
int *memtupindex; /* index value associated with
* memtuples[i] */
int *memtupindex; /* index value associated with memtuples[i] */

/*
* While building initial runs, this is the current output run number
* (starting at 0). Afterwards, it is the number of initial runs we
* made.
* (starting at 0). Afterwards, it is the number of initial runs we made.
*/
int currentRun;

/*
* These variables are only used during merge passes. mergeactive[i]
* is true if we are reading an input run from (actual) tape number i
* and have not yet exhausted that run. mergenext[i] is the memtuples
* index of the next pre-read tuple (next to be loaded into the heap)
* for tape i, or 0 if we are out of pre-read tuples. mergelast[i]
* similarly points to the last pre-read tuple from each tape.
* mergeavailmem[i] is the amount of unused space allocated for tape
* i. mergefreelist and mergefirstfree keep track of unused locations
* in the memtuples[] array. memtupindex[] links together pre-read
* tuples for each tape as well as recycled locations in
* mergefreelist. It is OK to use 0 as a null link in these lists,
* because memtuples[0] is part of the merge heap and is never a
* pre-read tuple.
* These variables are only used during merge passes. mergeactive[i] is
* true if we are reading an input run from (actual) tape number i and
* have not yet exhausted that run. mergenext[i] is the memtuples index
* of the next pre-read tuple (next to be loaded into the heap) for tape
* i, or 0 if we are out of pre-read tuples. mergelast[i] similarly
* points to the last pre-read tuple from each tape. mergeavailmem[i] is
* the amount of unused space allocated for tape i. mergefreelist and
* mergefirstfree keep track of unused locations in the memtuples[] array.
* memtupindex[] links together pre-read tuples for each tape as well as
* recycled locations in mergefreelist. It is OK to use 0 as a null link
* in these lists, because memtuples[0] is part of the merge heap and is
* never a pre-read tuple.
*/
bool mergeactive[MAXTAPES]; /* Active input run source? */
int mergenext[MAXTAPES]; /* first preread tuple for each
* source */
int mergelast[MAXTAPES]; /* last preread tuple for each
* source */
int mergenext[MAXTAPES]; /* first preread tuple for each source */
int mergelast[MAXTAPES]; /* last preread tuple for each source */
long mergeavailmem[MAXTAPES]; /* availMem for prereading
* tapes */
long spacePerTape; /* actual per-tape target usage */
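The comparetup/copytup/writetup/readtup pointers above are a plain-C virtual method table: the sort machinery calls through them without knowing the tuple layout, and each tuplesort_begin_xxx variant fills in its own implementations. A stripped-down sketch of the same pattern with a hypothetical ops struct and one toy tuple kind (not the real Tuplesortstate):

#include <stdlib.h>
#include <string.h>

typedef struct SortOps
{
    /* result follows qsort() convention: <0, 0, >0 */
    int   (*comparetup) (const void *a, const void *b);
    void *(*copytup) (const void *tup);     /* one flat chunk, one free() */
    void  (*writetup) (int tapenum, void *tup);
    void *(*readtup) (int tapenum, unsigned int len);
} SortOps;

/* One concrete tuple kind: NUL-terminated strings. */
static int
cmp_str(const void *a, const void *b)
{
    return strcmp(a, b);
}

static void *
copy_str(const void *tup)
{
    size_t len = strlen(tup) + 1;
    void  *copy = malloc(len);

    if (copy)
        memcpy(copy, tup, len);
    return copy;
}

/* A hypothetical begin_xxx routine just fills in the table. */
static void
begin_string_sort(SortOps *ops)
{
    ops->comparetup = cmp_str;
    ops->copytup = copy_str;
    ops->writetup = NULL;       /* tape path omitted in this sketch */
    ops->readtup = NULL;
}

The generic sorting code only ever touches the ops table, which is exactly the decoupling the comment describes.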
@@ -240,17 +233,15 @@ struct Tuplesortstate
*/
int Level; /* Knuth's l */
int destTape; /* current output tape (Knuth's j, less 1) */
int tp_fib[MAXTAPES]; /* Target Fibonacci run counts
* (A[]) */
int tp_fib[MAXTAPES]; /* Target Fibonacci run counts (A[]) */
int tp_runs[MAXTAPES]; /* # of real runs on each tape */
int tp_dummy[MAXTAPES]; /* # of dummy runs for each tape
* (D[]) */
int tp_dummy[MAXTAPES]; /* # of dummy runs for each tape (D[]) */
int tp_tapenum[MAXTAPES]; /* Actual tape numbers (TAPE[]) */

/*
* These variables are used after completion of sorting to keep track
* of the next tuple to return. (In the tape case, the tape's current
* read position is also critical state.)
* These variables are used after completion of sorting to keep track of
* the next tuple to return. (In the tape case, the tape's current read
* position is also critical state.)
*/
int result_tape; /* actual tape number of finished output */
int current; /* array index (only used if SORTEDINMEM) */
@@ -258,8 +249,7 @@ struct Tuplesortstate

/* markpos_xxx holds marked position for mark and restore */
long markpos_block; /* tape block# (only used if SORTEDONTAPE) */
int markpos_offset; /* saved "current", or offset in tape
* block */
int markpos_offset; /* saved "current", or offset in tape block */
bool markpos_eof; /* saved "eof_reached" */

/*
@@ -272,8 +262,8 @@ struct Tuplesortstate
SortFunctionKind *sortFnKinds;

/*
* These variables are specific to the IndexTuple case; they are set
* by tuplesort_begin_index and used only by the IndexTuple routines.
* These variables are specific to the IndexTuple case; they are set by
* tuplesort_begin_index and used only by the IndexTuple routines.
*/
Relation indexRel;
ScanKey indexScanKey;
@@ -458,8 +448,7 @@ tuplesort_begin_common(int workMem, bool randomAccess)

/* Algorithm D variables will be initialized by inittapes, if needed */

state->result_tape = -1; /* flag that result tape has not been
* formed */
state->result_tape = -1; /* flag that result tape has not been formed */

return state;
}
@@ -505,8 +494,8 @@ tuplesort_begin_heap(TupleDesc tupDesc,
&state->sortFnKinds[i]);

/*
* We needn't fill in sk_strategy or sk_subtype since these
* scankeys will never be passed to an index.
* We needn't fill in sk_strategy or sk_subtype since these scankeys
* will never be passed to an index.
*/
ScanKeyInit(&state->scanKeys[i],
attNums[i],
@@ -606,8 +595,8 @@ tuplesort_end(Tuplesortstate *state)
pfree(state->memtupindex);

/*
* this stuff might better belong in a variant-specific shutdown
* routine
* this stuff might better belong in a variant-specific shutdown routine
*/
if (state->scanKeys)
pfree(state->scanKeys);
@@ -724,16 +712,16 @@ puttuple_common(Tuplesortstate *state, void *tuple)

/*
* Insert the copied tuple into the heap, with run number
* currentRun if it can go into the current run, else run
* number currentRun+1. The tuple can go into the current run
* if it is >= the first not-yet-output tuple. (Actually, it
* could go into the current run if it is >= the most recently
* output tuple ... but that would require keeping around the
* tuple we last output, and it's simplest to let writetup
* free each tuple as soon as it's written.)
* currentRun if it can go into the current run, else run number
* currentRun+1. The tuple can go into the current run if it is
* >= the first not-yet-output tuple. (Actually, it could go into
* the current run if it is >= the most recently output tuple ...
* but that would require keeping around the tuple we last output,
* and it's simplest to let writetup free each tuple as soon as
* it's written.)
*
* Note there will always be at least one tuple in the heap at
* this point; see dumptuples.
* Note there will always be at least one tuple in the heap at this
* point; see dumptuples.
*/
Assert(state->memtupcount > 0);
if (COMPARETUP(state, tuple, state->memtuples[0]) >= 0)
@@ -742,8 +730,7 @@ puttuple_common(Tuplesortstate *state, void *tuple)
tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);

/*
* If we are over the memory limit, dump tuples till we're
* under.
* If we are over the memory limit, dump tuples till we're under.
*/
dumptuples(state, false);
break;
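The long comment in puttuple_common is the replacement-selection rule for building initial runs: an incoming tuple may join the current run only if it is not smaller than the smallest tuple still waiting to be written out; otherwise it is tagged for the next run. A toy version of just that decision on integers, with a single variable standing in for the top of the heap (hypothetical names, not the backend code):

#include <limits.h>

/*
 * Smallest key of the current run that has not yet been written out.
 * In the real algorithm this is the top of a min-heap; a single variable
 * is enough to show the run-assignment rule.
 */
static int currentRun = 0;
static int pending_min = INT_MIN;

/* Returns the run number assigned to the incoming key. */
static int
assign_run(int key)
{
    if (key >= pending_min)
        return currentRun;      /* can still go out in sorted order this run */
    return currentRun + 1;      /* too small; must wait for the next run */
}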
@@ -770,8 +757,8 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_INITIAL:

/*
* We were able to accumulate all the tuples within the
* allowed amount of memory. Just qsort 'em and we're done.
* We were able to accumulate all the tuples within the allowed
* amount of memory. Just qsort 'em and we're done.
*/
if (state->memtupcount > 1)
{
@@ -788,10 +775,10 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_BUILDRUNS:

/*
* Finish tape-based sort. First, flush all tuples remaining
* in memory out to tape; then merge until we have a single
* remaining run (or, if !randomAccess, one run per tape).
* Note that mergeruns sets the correct state->status.
* Finish tape-based sort. First, flush all tuples remaining in
* memory out to tape; then merge until we have a single remaining
* run (or, if !randomAccess, one run per tape). Note that
* mergeruns sets the correct state->status.
*/
dumptuples(state, true);
mergeruns(state);
@@ -880,16 +867,15 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
/*
* Backward.
*
* if all tuples are fetched already then we return last tuple,
* else - tuple before last returned.
* if all tuples are fetched already then we return last tuple, else
* - tuple before last returned.
*/
if (state->eof_reached)
{
/*
* Seek position is pointing just past the zero tuplen at
* the end of file; back up to fetch last tuple's ending
* length word. If seek fails we must have a completely
* empty file.
* Seek position is pointing just past the zero tuplen at the
* end of file; back up to fetch last tuple's ending length
* word. If seek fails we must have a completely empty file.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
@@ -900,9 +886,8 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
else
{
/*
* Back up and fetch previously-returned tuple's ending
* length word. If seek fails, assume we are at start of
* file.
* Back up and fetch previously-returned tuple's ending length
* word. If seek fails, assume we are at start of file.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
@@ -915,17 +900,17 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
tuplen + 2 * sizeof(unsigned int)))
tuplen + 2 * sizeof(unsigned int)))
{
/*
* If that fails, presumably the prev tuple is the
* first in the file. Back up so that it becomes next
* to read in forward direction (not obviously right,
* but that is what in-memory case does).
* If that fails, presumably the prev tuple is the first
* in the file. Back up so that it becomes next to read
* in forward direction (not obviously right, but that is
* what in-memory case does).
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
tuplen + sizeof(unsigned int)))
tuplen + sizeof(unsigned int)))
elog(ERROR, "bogus tuple length in backward scan");
return NULL;
}
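This backward-scan logic works because each tuple on tape is framed by its length both before and after the data (plus a zero length word at end of tape), so the reader can back up over length + sizeof(length word) bytes and land on the previous tuple. A small stdio sketch of fetching the last record of a file framed as [len][data][len] records (no terminating zero word in this simplified layout; hypothetical names):

#include <stdio.h>
#include <stdlib.h>

/*
 * Read the last record of a file whose records are laid out as
 * [unsigned int len][len bytes of data][unsigned int len].
 */
static void *
read_last_record(FILE *fp, unsigned int *len_out)
{
    unsigned int len;
    void *buf;

    /* back up over the trailing length word at end of file */
    if (fseek(fp, -(long) sizeof(unsigned int), SEEK_END) != 0)
        return NULL;                        /* completely empty file */
    if (fread(&len, sizeof(len), 1, fp) != 1)
        return NULL;

    /* back up over the data plus both length words ... */
    if (fseek(fp, -(long) (len + 2 * sizeof(unsigned int)), SEEK_CUR) != 0)
        return NULL;
    /* ... then skip the leading length word, as READTUP expects */
    if (fseek(fp, (long) sizeof(unsigned int), SEEK_CUR) != 0)
        return NULL;

    buf = malloc(len);
    if (buf && fread(buf, 1, len, fp) != len)
    {
        free(buf);
        return NULL;
    }
    *len_out = len;
    return buf;
}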
@@ -934,9 +919,9 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
tuplen = getlen(state, state->result_tape, false);

/*
* Now we have the length of the prior tuple, back up and read
* it. Note: READTUP expects we are positioned after the
* initial length word of the tuple, so back up to that point.
* Now we have the length of the prior tuple, back up and read it.
* Note: READTUP expects we are positioned after the initial
* length word of the tuple, so back up to that point.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
@@ -968,14 +953,12 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
if ((tupIndex = state->mergenext[srcTape]) == 0)
{
/*
* out of preloaded data on this tape, try to read
* more
* out of preloaded data on this tape, try to read more
*/
mergepreread(state);

/*
* if still no data, we've reached end of run on this
* tape
* if still no data, we've reached end of run on this tape
*/
if ((tupIndex = state->mergenext[srcTape]) == 0)
return tup;
@@ -1062,12 +1045,12 @@ inittapes(Tuplesortstate *state)
USEMEM(state, GetMemoryChunkSpace(state->memtupindex));

/*
* Convert the unsorted contents of memtuples[] into a heap. Each
* tuple is marked as belonging to run number zero.
* Convert the unsorted contents of memtuples[] into a heap. Each tuple is
* marked as belonging to run number zero.
*
* NOTE: we pass false for checkIndex since there's no point in comparing
* indexes in this step, even though we do intend the indexes to be
* part of the sort key...
* indexes in this step, even though we do intend the indexes to be part
* of the sort key...
*/
ntuples = state->memtupcount;
state->memtupcount = 0; /* make the heap empty */
@@ -1150,8 +1133,8 @@ mergeruns(Tuplesortstate *state)

/*
* If we produced only one initial run (quite likely if the total data
* volume is between 1X and 2X workMem), we can just use that tape as
* the finished output, rather than doing a useless merge.
* volume is between 1X and 2X workMem), we can just use that tape as the
* finished output, rather than doing a useless merge.
*/
if (state->currentRun == 1)
{
@@ -1183,8 +1166,8 @@ mergeruns(Tuplesortstate *state)
}

/*
* If we don't have to produce a materialized sorted tape,
* quit as soon as we're down to one real/dummy run per tape.
* If we don't have to produce a materialized sorted tape, quit as
* soon as we're down to one real/dummy run per tape.
*/
if (!state->randomAccess && allOneRun)
{
@@ -1215,8 +1198,7 @@ mergeruns(Tuplesortstate *state)
state->tp_runs[TAPERANGE - 1] = 0;

/*
* reassign tape units per step D6; note we no longer care about
* A[]
* reassign tape units per step D6; note we no longer care about A[]
*/
svTape = state->tp_tapenum[TAPERANGE];
svDummy = state->tp_dummy[TAPERANGE];
@@ -1233,12 +1215,12 @@ mergeruns(Tuplesortstate *state)
}

/*
* Done. Knuth says that the result is on TAPE[1], but since we
* exited the loop without performing the last iteration of step D6,
* we have not rearranged the tape unit assignment, and therefore the
* result is on TAPE[T]. We need to do it this way so that we can
* freeze the final output tape while rewinding it. The last
* iteration of step D6 would be a waste of cycles anyway...
* Done. Knuth says that the result is on TAPE[1], but since we exited
* the loop without performing the last iteration of step D6, we have not
* rearranged the tape unit assignment, and therefore the result is on
* TAPE[T]. We need to do it this way so that we can freeze the final
* output tape while rewinding it. The last iteration of step D6 would be
* a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[TAPERANGE];
LogicalTapeFreeze(state->tapeset, state->result_tape);
@@ -1262,16 +1244,15 @@ mergeonerun(Tuplesortstate *state)
spaceFreed;

/*
* Start the merge by loading one tuple from each active source tape
* into the heap. We can also decrease the input run/dummy run
* counts.
* Start the merge by loading one tuple from each active source tape into
* the heap. We can also decrease the input run/dummy run counts.
*/
beginmerge(state);

/*
* Execute merge by repeatedly extracting lowest tuple in heap,
* writing it out, and replacing it with next tuple from same tape (if
* there is another one).
* Execute merge by repeatedly extracting lowest tuple in heap, writing it
* out, and replacing it with next tuple from same tape (if there is
* another one).
*/
while (state->memtupcount > 0)
{
@@ -1304,8 +1285,8 @@ mergeonerun(Tuplesortstate *state)
}

/*
* When the heap empties, we're done. Write an end-of-run marker on
* the output tape, and increment its count of real runs.
* When the heap empties, we're done. Write an end-of-run marker on the
* output tape, and increment its count of real runs.
*/
markrunend(state, destTape);
state->tp_runs[TAPERANGE]++;
@@ -1341,8 +1322,7 @@ beginmerge(Tuplesortstate *state)
memset(state->mergelast, 0, sizeof(state->mergelast));
memset(state->mergeavailmem, 0, sizeof(state->mergeavailmem));
state->mergefreelist = 0; /* nothing in the freelist */
state->mergefirstfree = MAXTAPES; /* first slot available for
* preread */
state->mergefirstfree = MAXTAPES; /* first slot available for preread */

/* Adjust run counts and mark the active tapes */
activeTapes = 0;
@@ -1361,8 +1341,8 @@ beginmerge(Tuplesortstate *state)
}

/*
* Initialize space allocation to let each active input tape have an
* equal share of preread space.
* Initialize space allocation to let each active input tape have an equal
* share of preread space.
*/
Assert(activeTapes > 0);
state->spacePerTape = state->availMem / activeTapes;
@@ -1373,8 +1353,8 @@ beginmerge(Tuplesortstate *state)
}

/*
* Preread as many tuples as possible (and at least one) from each
* active tape
* Preread as many tuples as possible (and at least one) from each active
* tape
*/
mergepreread(state);

@@ -1432,8 +1412,8 @@ mergepreread(Tuplesortstate *state)
continue;

/*
* Read tuples from this tape until it has used up its free
* memory, but ensure that we have at least one.
* Read tuples from this tape until it has used up its free memory,
* but ensure that we have at least one.
*/
priorAvail = state->availMem;
state->availMem = state->mergeavailmem[srcTape];
@@ -1508,8 +1488,8 @@ dumptuples(Tuplesortstate *state, bool alltuples)
(LACKMEM(state) && state->memtupcount > 1))
{
/*
* Dump the heap's frontmost entry, and sift up to remove it from
* the heap.
* Dump the heap's frontmost entry, and sift up to remove it from the
* heap.
*/
Assert(state->memtupcount > 0);
WRITETUP(state, state->tp_tapenum[state->destTape],
@@ -1680,8 +1660,8 @@ tuplesort_heap_insert(Tuplesortstate *state, void *tuple,
memtupindex = state->memtupindex;

/*
* Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth
* is using 1-based array indexes, not 0-based.
* Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
* using 1-based array indexes, not 0-based.
*/
j = state->memtupcount++;
while (j > 0)
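The sift-up loop that follows relies on 0-based parent arithmetic, i.e. the parent of slot j is (j - 1) / 2, which is the only adjustment needed when translating Knuth's 1-based presentation. A self-contained integer version of such an insertion (standalone names, min-heap on plain ints, not the tuplesort routine itself):

/* Insert val into a 0-based binary min-heap of current size *n. */
static void
heap_sift_up(int *heap, int *n, int val)
{
    int j = (*n)++;             /* new entry starts at the end */

    while (j > 0)
    {
        int parent = (j - 1) / 2;

        if (heap[parent] <= val)
            break;              /* heap property already holds */
        heap[j] = heap[parent]; /* pull the larger parent down */
        j = parent;
    }
    heap[j] = val;
}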
@@ -1805,12 +1785,12 @@ SelectSortFunction(Oid sortOperator,
Oid opclass = InvalidOid;

/*
* Search pg_amop to see if the target operator is registered as the
* "<" or ">" operator of any btree opclass. It's possible that it
* might be registered both ways (eg, if someone were to build a
* "reverse sort" opclass for some reason); prefer the "<" case if so.
* If the operator is registered the same way in multiple opclasses,
* assume we can use the associated comparator function from any one.
* Search pg_amop to see if the target operator is registered as the "<"
* or ">" operator of any btree opclass. It's possible that it might be
* registered both ways (eg, if someone were to build a "reverse sort"
* opclass for some reason); prefer the "<" case if so. If the operator is
* registered the same way in multiple opclasses, assume we can use the
* associated comparator function from any one.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(sortOperator),
@@ -1854,11 +1834,11 @@ SelectSortFunction(Oid sortOperator,
}

/*
* Can't find a comparator, so use the operator as-is. Decide whether
* it is forward or reverse sort by looking at its name (grotty, but
* this only matters for deciding which end NULLs should get sorted
* to). XXX possibly better idea: see whether its selectivity
* function is scalargtcmp?
* Can't find a comparator, so use the operator as-is. Decide whether it
* is forward or reverse sort by looking at its name (grotty, but this
* only matters for deciding which end NULLs should get sorted to). XXX
* possibly better idea: see whether its selectivity function is
* scalargtcmp?
*/
tuple = SearchSysCache(OPEROID,
ObjectIdGetDatum(sortOperator),
@@ -2142,15 +2122,15 @@ comparetup_index(Tuplesortstate *state, const void *a, const void *b)
* If btree has asked us to enforce uniqueness, complain if two equal
* tuples are detected (unless there was at least one NULL field).
*
* It is sufficient to make the test here, because if two tuples are
* equal they *must* get compared at some stage of the sort ---
* otherwise the sort algorithm wouldn't have checked whether one must
* appear before the other.
* It is sufficient to make the test here, because if two tuples are equal
* they *must* get compared at some stage of the sort --- otherwise the
* sort algorithm wouldn't have checked whether one must appear before the
* other.
*
* Some rather brain-dead implementations of qsort will sometimes call
* the comparison routine to compare a value to itself. (At this
* writing only QNX 4 is known to do such silly things.) Don't raise
* a bogus error in that case.
* Some rather brain-dead implementations of qsort will sometimes call the
* comparison routine to compare a value to itself. (At this writing only
* QNX 4 is known to do such silly things.) Don't raise a bogus error in
* that case.
*/
if (state->enforceUnique && !equal_hasnull && tuple1 != tuple2)
ereport(ERROR,
@@ -2159,10 +2139,10 @@ comparetup_index(Tuplesortstate *state, const void *a, const void *b)
errdetail("Table contains duplicated values.")));

/*
* If key values are equal, we sort on ItemPointer. This does not
* affect validity of the finished index, but it offers cheap
* insurance against performance problems with bad qsort
* implementations that have trouble with large numbers of equal keys.
* If key values are equal, we sort on ItemPointer. This does not affect
* validity of the finished index, but it offers cheap insurance against
* performance problems with bad qsort implementations that have trouble
* with large numbers of equal keys.
*/
{
BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
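The tie-break described here is the usual pattern of falling back to a secondary key once the primary keys compare equal, which keeps the ordering total and protects weak qsort implementations from long runs of equal keys. A generic qsort-style comparator showing the shape of that fallback (hypothetical row type, not the IndexTuple code):

typedef struct Row
{
    int  key;       /* primary sort key */
    long tid;       /* stand-in for the ItemPointer used as tie-breaker */
} Row;

static int
compare_rows(const void *a, const void *b)
{
    const Row *ra = a;
    const Row *rb = b;

    if (ra->key != rb->key)
        return (ra->key < rb->key) ? -1 : 1;

    /* keys equal: fall back to the physical position so the order is total */
    if (ra->tid != rb->tid)
        return (ra->tid < rb->tid) ? -1 : 1;
    return 0;
}

Used with qsort(rows, n, sizeof(Row), compare_rows), equal primary keys no longer sort into whatever arbitrary order the library implementation happens to produce.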
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.22 2005/05/06 17:24:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.23 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -72,41 +72,41 @@ struct Tuplestorestate
BufFile *myfile; /* underlying file, or NULL if none */

/*
* These function pointers decouple the routines that must know what
* kind of tuple we are handling from the routines that don't need to
* know it. They are set up by the tuplestore_begin_xxx routines.
* These function pointers decouple the routines that must know what kind
* of tuple we are handling from the routines that don't need to know it.
* They are set up by the tuplestore_begin_xxx routines.
*
* (Although tuplestore.c currently only supports heap tuples, I've
* copied this part of tuplesort.c so that extension to other kinds of
* objects will be easy if it's ever needed.)
* (Although tuplestore.c currently only supports heap tuples, I've copied
* this part of tuplesort.c so that extension to other kinds of objects
* will be easy if it's ever needed.)
*
* Function to copy a supplied input tuple into palloc'd space. (NB: we
* assume that a single pfree() is enough to release the tuple later,
* so the representation must be "flat" in one palloc chunk.)
* state->availMem must be decreased by the amount of space used.
* assume that a single pfree() is enough to release the tuple later, so
* the representation must be "flat" in one palloc chunk.) state->availMem
* must be decreased by the amount of space used.
*/
void *(*copytup) (Tuplestorestate *state, void *tup);

/*
* Function to write a stored tuple onto tape. The representation of
* the tuple on tape need not be the same as it is in memory;
* requirements on the tape representation are given below. After
* writing the tuple, pfree() it, and increase state->availMem by the
* amount of memory space thereby released.
* Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() it, and increase state->availMem by the amount of memory space
* thereby released.
*/
void (*writetup) (Tuplestorestate *state, void *tup);

/*
* Function to read a stored tuple from tape back into memory. 'len'
* is the already-read length of the stored tuple. Create and return
* a palloc'd copy, and decrease state->availMem by the amount of
* memory space consumed.
* Function to read a stored tuple from tape back into memory. 'len' is
* the already-read length of the stored tuple. Create and return a
* palloc'd copy, and decrease state->availMem by the amount of memory
* space consumed.
*/
void *(*readtup) (Tuplestorestate *state, unsigned int len);

/*
* This array holds pointers to tuples in memory if we are in state
* INMEM. In states WRITEFILE and READFILE it's not used.
* This array holds pointers to tuples in memory if we are in state INMEM.
* In states WRITEFILE and READFILE it's not used.
*/
void **memtuples; /* array of pointers to palloc'd tuples */
int memtupcount; /* number of tuples currently present */
@@ -115,17 +115,17 @@ struct Tuplestorestate
/*
* These variables are used to keep track of the current position.
*
* In state WRITEFILE, the current file seek position is the write point,
* and the read position is remembered in readpos_xxx; in state
* READFILE, the current file seek position is the read point, and the
* write position is remembered in writepos_xxx. (The write position
* is the same as EOF, but since BufFileSeek doesn't currently
* implement SEEK_END, we have to remember it explicitly.)
* In state WRITEFILE, the current file seek position is the write point, and
* the read position is remembered in readpos_xxx; in state READFILE, the
* current file seek position is the read point, and the write position is
* remembered in writepos_xxx. (The write position is the same as EOF,
* but since BufFileSeek doesn't currently implement SEEK_END, we have to
* remember it explicitly.)
*
* Special case: if we are in WRITEFILE state and eof_reached is true,
* then the read position is implicitly equal to the write position
* (and hence to the file seek position); this way we need not update
* the readpos_xxx variables on each write.
* Special case: if we are in WRITEFILE state and eof_reached is true, then
* the read position is implicitly equal to the write position (and hence
* to the file seek position); this way we need not update the readpos_xxx
* variables on each write.
*/
bool eof_reached; /* read reached EOF (always valid) */
int current; /* next array index (valid if INMEM) */
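Because a tuplestore interleaves reads and writes on one seekable file, whichever position the file is not currently sitting at must be remembered and restored when the mode flips, exactly as the readpos_xxx/writepos_xxx comment above describes. A minimal stdio sketch of that bookkeeping (hypothetical struct, ignoring the eof_reached shortcut and BufFile's segmented file/offset pairs):

#include <stdio.h>

typedef struct PosTrack
{
    FILE *fp;
    int   writing;      /* nonzero: file position is the write point */
    long  readpos;      /* remembered read point while writing */
    long  writepos;     /* remembered write point while reading */
} PosTrack;

/* Switch to reading: save the write point, restore the read point. */
static void
switch_to_read(PosTrack *pt)
{
    if (pt->writing)
    {
        pt->writepos = ftell(pt->fp);
        fseek(pt->fp, pt->readpos, SEEK_SET);
        pt->writing = 0;
    }
}

/* Switch to writing: save the read point, go back to the write point. */
static void
switch_to_write(PosTrack *pt)
{
    if (!pt->writing)
    {
        pt->readpos = ftell(pt->fp);
        fseek(pt->fp, pt->writepos, SEEK_SET);
        pt->writing = 1;
    }
}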
@@ -429,7 +429,7 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
&state->writepos_file, &state->writepos_offset);
if (!state->eof_reached)
if (BufFileSeek(state->myfile,
state->readpos_file, state->readpos_offset,
state->readpos_file, state->readpos_offset,
SEEK_SET) != 0)
elog(ERROR, "seek failed");
state->status = TSS_READFILE;
@@ -454,11 +454,11 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
/*
* Backward.
*
* if all tuples are fetched already then we return last tuple,
* else - tuple before last returned.
* if all tuples are fetched already then we return last tuple, else
* - tuple before last returned.
*
* Back up to fetch previously-returned tuple's ending length
* word. If seek fails, assume we are at start of file.
* Back up to fetch previously-returned tuple's ending length word.
* If seek fails, assume we are at start of file.
*/
if (BufFileSeek(state->myfile, 0, -(long) sizeof(unsigned int),
SEEK_CUR) != 0)
@@ -476,17 +476,17 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
* Back up to get ending length word of tuple before it.
*/
if (BufFileSeek(state->myfile, 0,
-(long) (tuplen + 2 * sizeof(unsigned int)),
-(long) (tuplen + 2 * sizeof(unsigned int)),
SEEK_CUR) != 0)
{
/*
* If that fails, presumably the prev tuple is the
* first in the file. Back up so that it becomes next
* to read in forward direction (not obviously right,
* but that is what in-memory case does).
* If that fails, presumably the prev tuple is the first
* in the file. Back up so that it becomes next to read
* in forward direction (not obviously right, but that is
* what in-memory case does).
*/
if (BufFileSeek(state->myfile, 0,
-(long) (tuplen + sizeof(unsigned int)),
-(long) (tuplen + sizeof(unsigned int)),
SEEK_CUR) != 0)
elog(ERROR, "bogus tuple length in backward scan");
return NULL;
@@ -495,9 +495,9 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
}

/*
* Now we have the length of the prior tuple, back up and read
* it. Note: READTUP expects we are positioned after the
* initial length word of the tuple, so back up to that point.
* Now we have the length of the prior tuple, back up and read it.
* Note: READTUP expects we are positioned after the initial
* length word of the tuple, so back up to that point.
*/
if (BufFileSeek(state->myfile, 0,
-(long) tuplen,