
Change CRCs in WAL records from 64bit to 32bit for performance reasons.

Instead of a separate CRC on each backup block, include backup blocks
in their parent WAL record's CRC; this is important to ensure that the
backup block really goes with the WAL record, ie there was not a page
tear right at the start of the backup block.  Implement a simple form
of compression of backup blocks: drop any run of zeroes starting at
pd_lower, so as not to store the unused 'hole' that commonly exists in
PG heap and index pages.  Tweak PageRepairFragmentation and related
routines to ensure they keep the unused space zeroed, so that the above
compression method remains effective.  All per recent discussions.
Tom Lane
2005-06-02 05:55:29 +00:00
parent c196c7ae8b
commit 21fda22ec4
11 changed files with 516 additions and 212 deletions
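
For illustration, the backup-block compression described in the commit message can be sketched as follows. This is not the actual xlog.c code: BLCKSZ is assumed to be the usual 8192-byte page size, and MiniPageHeader, BkpBlockSketch and copy_backup_block are made-up stand-ins for the real WAL structures. Following the description above, the writer finds the run of zero bytes starting at pd_lower and stores only the bytes on either side of it:

#include <stdint.h>
#include <string.h>

#define BLCKSZ 8192				/* assumed page size */

/* simplified stand-in for PageHeaderData; the real header has more fields */
typedef struct MiniPageHeader
{
	uint16_t	pd_lower;		/* end of the line-pointer array */
	uint16_t	pd_upper;		/* start of the tuple data */
} MiniPageHeader;

/* hypothetical per-backup-block header recording the dropped "hole" */
typedef struct BkpBlockSketch
{
	uint16_t	hole_offset;	/* bytes stored before the hole */
	uint16_t	hole_length;	/* bytes dropped from the image */
} BkpBlockSketch;

/*
 * Copy a full-page image into 'dest' (which must have room for BLCKSZ
 * bytes), dropping the run of zero bytes that starts at pd_lower.
 * Returns the number of image bytes actually stored.
 */
static size_t
copy_backup_block(const char *page, char *dest, BkpBlockSketch *bkpb)
{
	const MiniPageHeader *ph = (const MiniPageHeader *) page;
	uint16_t	hole_off = 0;
	uint16_t	hole_len = 0;

	if (ph->pd_lower >= sizeof(MiniPageHeader) && ph->pd_lower < BLCKSZ)
	{
		/* count the consecutive zero bytes beginning at pd_lower */
		hole_off = ph->pd_lower;
		while (hole_off + hole_len < BLCKSZ && page[hole_off + hole_len] == 0)
			hole_len++;
	}

	bkpb->hole_offset = hole_off;
	bkpb->hole_length = hole_len;

	/* store the bytes before and after the hole; the hole itself is omitted */
	memcpy(dest, page, hole_off);
	memcpy(dest + hole_off,
		   page + hole_off + hole_len,
		   BLCKSZ - (hole_off + hole_len));

	return BLCKSZ - hole_len;
}

The point of the bufpage.c changes below is to keep the whole region between pd_lower and pd_upper zeroed, so that this zero run normally covers the entire unused 'hole' and a mostly-empty page shrinks to little more than its header and line pointers.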

src/backend/storage/page/bufpage.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.63 2005/03/22 06:17:03 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.64 2005/06/02 05:55:28 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -357,7 +357,7 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
 			lp = PageGetItemId(page, i + 1);
 			lp->lp_len = 0;		/* indicate unused & deallocated */
 		}
-		((PageHeader) page)->pd_upper = pd_special;
+		((PageHeader) page)->pd_upper = pd_upper = pd_special;
 	}
 	else
 	{							/* nused != 0 */
@@ -411,11 +411,17 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
 			lp->lp_off = upper;
 		}
 
-		((PageHeader) page)->pd_upper = upper;
+		((PageHeader) page)->pd_upper = pd_upper = upper;
 
 		pfree(itemidbase);
 	}
 
+	/*
+	 * Zero out the now-free space. This is not essential, but it allows
+	 * xlog.c to compress WAL data better.
+	 */
+	MemSet((char *) page + pd_lower, 0, pd_upper - pd_lower);
+
 	return (nline - nused);
 }
@@ -525,6 +531,13 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
 	phdr->pd_upper += size;
 	phdr->pd_lower -= sizeof(ItemIdData);
 
+	/*
+	 * Zero out the just-freed space. This is not essential, but it allows
+	 * xlog.c to compress WAL data better.
+	 */
+	MemSet((char *) page + phdr->pd_lower, 0, sizeof(ItemIdData));
+	MemSet(addr, 0, size);
+
 	/*
 	 * Finally, we need to adjust the linp entries that remain.
 	 *
@@ -672,8 +685,14 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
 		lp->lp_off = upper;
 	}
 
-	phdr->pd_lower = SizeOfPageHeaderData + nused * sizeof(ItemIdData);
-	phdr->pd_upper = upper;
+	phdr->pd_lower = pd_lower = SizeOfPageHeaderData + nused * sizeof(ItemIdData);
+	phdr->pd_upper = pd_upper = upper;
+
+	/*
+	 * Zero out the now-free space. This is not essential, but it allows
+	 * xlog.c to compress WAL data better.
+	 */
+	MemSet((char *) page + pd_lower, 0, pd_upper - pd_lower);
 
 	pfree(itemidbase);
 }
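
The zeroing added above is what makes the compression effective: keeping every byte between pd_lower and pd_upper at zero lets the zero run cover the whole unused hole, and restoring zeroes there reproduces the page exactly. The recovery-side step can be sketched like this (again with made-up names and the same assumptions as the earlier example, not the actual xlog.c restore code):

#include <stdint.h>
#include <string.h>

#define BLCKSZ 8192				/* assumed page size, as in the earlier sketch */

/*
 * Rebuild a full page from an image stored without its hole.  'src' holds
 * BLCKSZ - hole_length bytes; 'page' receives the complete BLCKSZ bytes.
 */
static void
restore_backup_block(const char *src, uint16_t hole_offset,
					 uint16_t hole_length, char *page)
{
	/* bytes before the hole were stored verbatim */
	memcpy(page, src, hole_offset);
	/* the dropped hole was all zeroes, so refill it with zeroes */
	memset(page + hole_offset, 0, hole_length);
	/* bytes after the hole follow immediately in the stored image */
	memcpy(page + hole_offset + hole_length,
		   src + hole_offset,
		   BLCKSZ - (hole_offset + hole_length));
}

Because the dropped bytes were all zeroes to begin with, the rebuilt page is byte-for-byte identical to the page as it stood when the backup block was taken.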