Mirror of https://github.com/postgres/postgres.git, synced 2025-11-03 09:13:20 +03:00

Add parallel pg_dump option.

New infrastructure is added which creates a set number of workers
(threads on Windows, forked processes on Unix). Jobs are then
handed out to these workers by the master process as needed.
pg_restore is adjusted to use this new infrastructure in place of the
old setup which created a new worker for each step on the fly. Parallel
dumps acquire a snapshot clone in order to stay consistent, if
available.

The parallel option is selected by the -j / --jobs command line
parameter of pg_dump.

Joachim Wieland, lightly editorialized by Andrew Dunstan.
Committed by Andrew Dunstan on 2013-03-24 11:27:20 -04:00.
parent 3b91fe185a
commit 9e257a181c
22 changed files with 2776 additions and 830 deletions

View File

@@ -54,6 +54,7 @@
#include "compress_io.h"
#include "dumputils.h"
#include "parallel.h"
/*----------------------
* Compressor API
@@ -182,6 +183,9 @@ size_t
WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs,
const void *data, size_t dLen)
{
/* Are we aborting? */
checkAborting(AH);
switch (cs->comprAlg)
{
case COMPR_ALG_LIBZ:
@@ -351,6 +355,9 @@ ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF)
/* no minimal chunk size for zlib */
while ((cnt = readF(AH, &buf, &buflen)))
{
/* Are we aborting? */
checkAborting(AH);
zp->next_in = (void *) buf;
zp->avail_in = cnt;
@@ -411,6 +418,9 @@ ReadDataFromArchiveNone(ArchiveHandle *AH, ReadFunc readF)
while ((cnt = readF(AH, &buf, &buflen)))
{
/* Are we aborting? */
checkAborting(AH);
ahwrite(buf, 1, cnt, AH);
}