
- Support for BLOB output from pg_dump and input via pg_restore

- Support for direct DB connection in pg_restore
- Fixes in support for --insert flag
- pg_dump now outputs in modified OID order
- various other bug fixes
Philip Warner 2000-07-21 11:40:08 +00:00
parent 0143d391c6
commit e8f69be054
9 changed files with 2922 additions and 1772 deletions

View File

@ -4,7 +4,7 @@
#
# Copyright (c) 1994, Regents of the University of California
#
# $Header: /cvsroot/pgsql/src/bin/pg_dump/Makefile,v 1.19 2000/07/04 19:52:00 petere Exp $
# $Header: /cvsroot/pgsql/src/bin/pg_dump/Makefile,v 1.20 2000/07/21 11:40:08 pjw Exp $
#
#-------------------------------------------------------------------------
@ -12,8 +12,8 @@ subdir = src/bin/pg_dump
top_builddir = ../../..
include ../../Makefile.global
OBJS= pg_backup_archiver.o pg_backup_custom.o pg_backup_files.o \
pg_backup_plain_text.o $(STRDUP)
OBJS= pg_backup_archiver.o pg_backup_db.o pg_backup_custom.o pg_backup_files.o \
pg_backup_null.o pg_backup_tar.o $(STRDUP)
CFLAGS+= -I$(LIBPQDIR)
LIBS+= -lz

View File

@ -1,17 +1,23 @@
Notes on pg_dump
================
pg_dump, by default, still outputs text files.
1. pg_dump, by default, still outputs text files.
pg_dumpall forces all pg_dump output to be text, since it also outputs text into the same output stream.
2. pg_dumpall forces all pg_dump output to be text, since it also outputs text into the same output stream.
The plain text output format can not be used as input into pg_restore.
3. The plain text output format can not be used as input into pg_restore.
4. pg_dump now dumps the items in a modified OID order to try to improve reliability of default restores.
To dump a database into the new custom format, type:
pg_dump <db-name> -Fc > <backup-file>
or, in TAR format
pg_dump <db-name> -Ft > <backup-file>
To restore, try
To list contents:
@ -53,7 +59,37 @@ or, simply:
pg_restore backup.bck --use=toc.lis | psql newdbname
Philip Warner, 3-Jul-2000
BLOBs
=====
To dump blobs you must use the custom archive format (-Fc) or TAR format (-Ft), and specify the
--blobs qualifier to the pg_dump command.
To restore blobs you must use a direct database connection (--db=db-to-restore-to).
eg.
pg_dump --blobs -Fc db-to-backup -f backup.bck
pg_restore backup.bck --db=db-to-restore-into
TAR
===
The TAR archive that pg_dump creates currently has a blank username & group for the files,
but should be otherwise valid. It also includes a 'restore.sql' script which is there for
the benefit of humans. It is never used by pg_restore.
Note: the TAR format archive can only be used as input to pg_restore if it is in TAR form
(i.e. you should not extract the files and then expect pg_restore to work).
You can extract, edit, and tar the files again, and it should work, but the 'toc'
file must go at the start, the data files must be in the order they are used, and
the BLOB files must be at the end.
Philip Warner, 16-Jul-2000
pjw@rhyme.com.au
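
A note on what --db restore implies: BLOBs are recreated over a live libpq connection rather than
written out as SQL. The fragment below is only an illustrative sketch (not code from this commit)
of how one BLOB could be recreated with the standard large-object calls; the connection handling
and the in-memory data buffer are assumptions.

#include <stdio.h>
#include "libpq-fe.h"
#include "libpq/libpq-fs.h"        /* INV_READ / INV_WRITE */

/* Recreate one BLOB from an in-memory buffer; returns the new OID, or 0 on failure. */
static Oid restore_one_blob(PGconn *conn, const char *data, size_t len)
{
    Oid loid;
    int fd;

    /* Large-object operations must run inside a transaction. */
    PQclear(PQexec(conn, "BEGIN"));

    loid = lo_creat(conn, INV_READ | INV_WRITE);
    if (loid == 0)
        return 0;

    fd = lo_open(conn, loid, INV_WRITE);
    if (fd < 0 || lo_write(conn, fd, (char *) data, len) != (int) len)
    {
        PQclear(PQexec(conn, "ROLLBACK"));
        return 0;
    }

    lo_close(conn, fd);
    PQclear(PQexec(conn, "COMMIT"));
    return loid;
}

The real restore path must also remember the mapping from each archived OID to the newly created
one so that references can be fixed up (hence the createdBlobXref flag in the archiver handle);
the sketch above ignores that.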

View File

@ -30,12 +30,15 @@
#define PG_BACKUP__
#include "postgres.h"
#include "libpq-fe.h"
typedef enum _archiveFormat {
archUnknown = 0,
archCustom = 1,
archFiles = 2,
archTar = 3,
archPlainText = 4
archNull = 4
} ArchiveFormat;
/*
@ -43,7 +46,8 @@ typedef enum _archiveFormat {
* time this gives us some abstraction and type checking.
*/
typedef struct _Archive {
/* Nothing here */
int verbose;
/* The rest is private */
} Archive;
typedef int (*DataDumperPtr)(Archive* AH, char* oid, void* userArg);
@ -73,6 +77,13 @@ typedef struct _restoreOptions {
char *tableNames;
char *triggerNames;
int useDB;
char *dbname;
char *pgport;
char *pghost;
int ignoreVersion;
int requirePassword;
int *idWanted;
int limitToList;
int compression;
@ -83,24 +94,42 @@ typedef struct _restoreOptions {
* Main archiver interface.
*/
extern void exit_horribly(Archive *AH, const char *fmt, ...);
/* Lets the archive know we have a DB connection to shut down if it dies */
PGconn* ConnectDatabase(Archive *AH,
const char* dbname,
const char* pghost,
const char* pgport,
const int reqPwd,
const int ignoreVersion);
/* Called to add a TOC entry */
extern void ArchiveEntry(Archive* AH, const char* oid, const char* name,
const char* desc, const char* (deps[]), const char* defn,
const char* dropStmt, const char* owner,
const char* dropStmt, const char* copyStmt, const char* owner,
DataDumperPtr dumpFn, void* dumpArg);
/* Called to write *data* to the archive */
extern int WriteData(Archive* AH, const void* data, int dLen);
//extern int StartBlobs(Archive* AH);
//extern int EndBlobs(Archive* AH);
extern int StartBlob(Archive* AH, int oid);
extern int EndBlob(Archive* AH, int oid);
extern void CloseArchive(Archive* AH);
extern void RestoreArchive(Archive* AH, RestoreOptions *ropt);
/* Open an existing archive */
extern Archive* OpenArchive(const char* FileSpec, ArchiveFormat fmt);
extern Archive* OpenArchive(const char* FileSpec, const ArchiveFormat fmt);
/* Create a new archive */
extern Archive* CreateArchive(const char* FileSpec, ArchiveFormat fmt, int compression);
extern Archive* CreateArchive(const char* FileSpec, const ArchiveFormat fmt,
const int compression);
/* The --list option */
extern void PrintTOCSummary(Archive* AH, RestoreOptions *ropt);
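
Taken together, the declarations above are the whole surface a dumper needs: create an archive,
register TOC entries (each with an optional data-dumper callback), and close it. The following is
a minimal, hypothetical caller of that interface; 'demo.bck', 'mytable', the OID string and
dump_my_table are invented for illustration and are not part of pg_dump.

#include <string.h>
#include "pg_backup.h"

/* Hypothetical data dumper: the archiver calls this back when the entry's data is written. */
static int dump_my_table(Archive *AH, char *oid, void *userArg)
{
    const char *row = "1\tone\n";
    WriteData(AH, row, strlen(row));
    return 1;
}

static void write_demo_archive(void)
{
    Archive *AH = CreateArchive("demo.bck", archCustom, 0 /* no compression */);

    ArchiveEntry(AH, "12345", "mytable", "TABLE DATA",
                 NULL,                           /* deps     */
                 "",                             /* defn     */
                 "",                             /* dropStmt */
                 "COPY mytable FROM stdin;\n",   /* copyStmt */
                 "postgres",                     /* owner    */
                 dump_my_table, NULL);

    CloseArchive(AH);    /* TOC and data are actually written here */
}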

File diff suppressed because it is too large

View File

@ -29,6 +29,10 @@
#define __PG_BACKUP_ARCHIVE__
#include <stdio.h>
#include <time.h>
#include "postgres.h"
#include "pqexpbuffer.h"
#ifdef HAVE_LIBZ
#include <zlib.h>
@ -51,15 +55,23 @@ typedef z_stream *z_streamp;
#endif
#include "pg_backup.h"
#include "libpq-fe.h"
#define K_VERS_MAJOR 1
#define K_VERS_MINOR 2
#define K_VERS_REV 2
#define K_VERS_MINOR 4
#define K_VERS_REV 3
/* Data block types */
#define BLK_DATA 1
#define BLK_BLOB 2
#define BLK_BLOBS 3
/* Some important version numbers (checked in code) */
#define K_VERS_1_0 (( (1 * 256 + 0) * 256 + 0) * 256 + 0)
#define K_VERS_1_2 (( (1 * 256 + 2) * 256 + 0) * 256 + 0)
#define K_VERS_MAX (( (1 * 256 + 2) * 256 + 255) * 256 + 0)
#define K_VERS_1_2 (( (1 * 256 + 2) * 256 + 0) * 256 + 0) /* Allow No ZLIB */
#define K_VERS_1_3 (( (1 * 256 + 3) * 256 + 0) * 256 + 0) /* BLOBs */
#define K_VERS_1_4 (( (1 * 256 + 4) * 256 + 0) * 256 + 0) /* Date & name in header */
#define K_VERS_MAX (( (1 * 256 + 4) * 256 + 255) * 256 + 0)
struct _archiveHandle;
struct _tocEntry;
@ -72,6 +84,11 @@ typedef void (*StartDataPtr) (struct _archiveHandle* AH, struct _tocEntry* te);
typedef int (*WriteDataPtr) (struct _archiveHandle* AH, const void* data, int dLen);
typedef void (*EndDataPtr) (struct _archiveHandle* AH, struct _tocEntry* te);
typedef void (*StartBlobsPtr) (struct _archiveHandle* AH, struct _tocEntry* te);
typedef void (*StartBlobPtr) (struct _archiveHandle* AH, struct _tocEntry* te, int oid);
typedef void (*EndBlobPtr) (struct _archiveHandle* AH, struct _tocEntry* te, int oid);
typedef void (*EndBlobsPtr) (struct _archiveHandle* AH, struct _tocEntry* te);
typedef int (*WriteBytePtr) (struct _archiveHandle* AH, const int i);
typedef int (*ReadBytePtr) (struct _archiveHandle* AH);
typedef int (*WriteBufPtr) (struct _archiveHandle* AH, const void* c, int len);
@ -83,6 +100,8 @@ typedef void (*PrintExtraTocPtr) (struct _archiveHandle* AH, struct _tocEntry* t
typedef void (*PrintTocDataPtr) (struct _archiveHandle* AH, struct _tocEntry* te,
RestoreOptions *ropt);
typedef int (*CustomOutPtr) (struct _archiveHandle* AH, const void* buf, int len);
typedef int (*TocSortCompareFn) (const void* te1, const void *te2);
typedef enum _archiveMode {
@ -95,16 +114,44 @@ typedef struct _outputContext {
int gzOut;
} OutputContext;
typedef enum {
SQL_SCAN = 0,
SQL_IN_SQL_COMMENT,
SQL_IN_EXT_COMMENT,
SQL_IN_QUOTE} sqlparseState;
typedef struct {
int backSlash;
sqlparseState state;
char lastChar;
char quoteChar;
} sqlparseInfo;
typedef struct _archiveHandle {
Archive public; /* Public part of archive */
char vmaj; /* Version of file */
char vmin;
char vrev;
int version; /* Conveniently formatted version */
int debugLevel; /* Not used. Intended for logging */
int intSize; /* Size of an integer in the archive */
ArchiveFormat format; /* Archive format */
sqlparseInfo sqlparse;
PQExpBuffer sqlBuf;
time_t createDate; /* Date archive created */
/*
* Fields used when discovering header.
* A format can always get the previous read bytes from here...
*/
int readHeader; /* Used if file header has been read already */
char *lookahead; /* Buffer used when reading header to discover format */
int lookaheadSize; /* Size of allocated buffer */
int lookaheadLen; /* Length of data in lookahead */
int lookaheadPos; /* Current read position in lookahead buffer */
ArchiveEntryPtr ArchiveEntryPtr; /* Called for each metadata object */
StartDataPtr StartDataPtr; /* Called when table data is about to be dumped */
@ -121,6 +168,28 @@ typedef struct _archiveHandle {
PrintExtraTocPtr PrintExtraTocPtr; /* Extra TOC info for format */
PrintTocDataPtr PrintTocDataPtr;
StartBlobsPtr StartBlobsPtr;
EndBlobsPtr EndBlobsPtr;
StartBlobPtr StartBlobPtr;
EndBlobPtr EndBlobPtr;
CustomOutPtr CustomOutPtr; /* Alternate script output routine */
/* Stuff for direct DB connection */
char username[100];
char *dbname; /* Name of db for connection */
char *archdbname; /* DB name *read* from archive */
char *pghost;
char *pgport;
PGconn *connection;
int connectToDB; /* Flag to indicate if direct DB connection is required */
int pgCopyIn; /* Currently in libpq 'COPY IN' mode. */
PQExpBuffer pgCopyBuf; /* Left-over data from incomplete lines in COPY IN */
int loFd; /* BLOB fd */
int writingBlob; /* Flag */
int createdBlobXref; /* Flag */
int lastID; /* Last internal ID for a TOC entry */
char* fSpec; /* Archive File Spec */
FILE *FH; /* General purpose file handle */
@ -135,6 +204,7 @@ typedef struct _archiveHandle {
ArchiveMode mode; /* File mode - r or w */
void* formatData; /* Header data specific to file format */
RestoreOptions *ropt; /* Used to check restore options in ahwrite etc */
} ArchiveHandle;
typedef struct _tocEntry {
@ -148,6 +218,7 @@ typedef struct _tocEntry {
char* desc;
char* defn;
char* dropStmt;
char* copyStmt;
char* owner;
char** depOid;
int printed; /* Indicates if entry defn has been dumped */
@ -159,7 +230,8 @@ typedef struct _tocEntry {
} TocEntry;
extern void die_horribly(const char *fmt, ...);
/* Used everywhere */
extern void die_horribly(ArchiveHandle *AH, const char *fmt, ...);
extern void WriteTOC(ArchiveHandle* AH);
extern void ReadTOC(ArchiveHandle* AH);
@ -180,9 +252,15 @@ extern int ReadInt(ArchiveHandle* AH);
extern char* ReadStr(ArchiveHandle* AH);
extern int WriteStr(ArchiveHandle* AH, char* s);
extern void StartRestoreBlob(ArchiveHandle* AH, int oid);
extern void EndRestoreBlob(ArchiveHandle* AH, int oid);
extern void InitArchiveFmt_Custom(ArchiveHandle* AH);
extern void InitArchiveFmt_Files(ArchiveHandle* AH);
extern void InitArchiveFmt_PlainText(ArchiveHandle* AH);
extern void InitArchiveFmt_Null(ArchiveHandle* AH);
extern void InitArchiveFmt_Tar(ArchiveHandle* AH);
extern int isValidTarHeader(char *header);
extern OutputContext SetOutput(ArchiveHandle* AH, char *filename, int compression);
extern void ResetOutput(ArchiveHandle* AH, OutputContext savedContext);
@ -190,4 +268,6 @@ extern void ResetOutput(ArchiveHandle* AH, OutputContext savedContext);
int ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle* AH);
int ahprintf(ArchiveHandle* AH, const char *fmt, ...);
void ahlog(ArchiveHandle* AH, int level, const char *fmt, ...);
#endif
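
The K_VERS_* constants pack major/minor/rev into a single integer so that plain comparisons work;
ArchiveHandle keeps the same packed value in its 'version' field. A short worked example of that
packing (the helper names are mine, not part of this header):

#include "pg_backup_archiver.h"

/* Same formula as the K_VERS_* macros: (((major*256 + minor)*256 + rev)*256 + 0).
 * For example, 1.3.0 packs to ((1*256 + 3)*256 + 0)*256 = 16973824, i.e. K_VERS_1_3. */
static int pack_vers(int vmaj, int vmin, int vrev)
{
    return (((vmaj * 256 + vmin) * 256 + vrev) * 256 + 0);
}

/* BLOB data blocks only appear in archives written at format 1.3 or later. */
static int archive_can_have_blobs(int vmaj, int vmin, int vrev)
{
    return pack_vers(vmaj, vmin, vrev) >= K_VERS_1_3;
}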

File diff suppressed because it is too large

View File

@ -47,10 +47,17 @@ static void _WriteExtraToc(ArchiveHandle* AH, TocEntry* te);
static void _ReadExtraToc(ArchiveHandle* AH, TocEntry* te);
static void _PrintExtraToc(ArchiveHandle* AH, TocEntry* te);
static void _StartBlobs(ArchiveHandle* AH, TocEntry* te);
static void _StartBlob(ArchiveHandle* AH, TocEntry* te, int oid);
static void _EndBlob(ArchiveHandle* AH, TocEntry* te, int oid);
static void _EndBlobs(ArchiveHandle* AH, TocEntry* te);
#define K_STD_BUF_SIZE 1024
typedef struct {
int hasSeek;
int filePos;
FILE *blobToc;
} lclContext;
typedef struct {
@ -62,6 +69,10 @@ typedef struct {
char *filename;
} lclTocEntry;
static char* progname = "Archiver(files)";
static void _LoadBlobs(ArchiveHandle* AH, RestoreOptions *ropt);
static void _getBlobTocEntry(ArchiveHandle* AH, int *oid, char *fname);
/*
* Initializer
*/
@ -84,6 +95,11 @@ void InitArchiveFmt_Files(ArchiveHandle* AH)
AH->WriteExtraTocPtr = _WriteExtraToc;
AH->PrintExtraTocPtr = _PrintExtraToc;
AH->StartBlobsPtr = _StartBlobs;
AH->StartBlobPtr = _StartBlob;
AH->EndBlobPtr = _EndBlob;
AH->EndBlobsPtr = _EndBlobs;
/*
* Set up some special context used in compressing data.
*/
@ -95,6 +111,13 @@ void InitArchiveFmt_Files(ArchiveHandle* AH)
* Now open the TOC file
*/
if (AH->mode == archModeWrite) {
fprintf(stderr, "\n*************************************************************\n"
"* WARNING: This format is for demonstration purposes. It is *\n"
"* not intended for general use. Files will be dumped *\n"
"* into the current working directory. *\n"
"***************************************************************\n\n");
if (AH->fSpec && strcmp(AH->fSpec,"") != 0) {
AH->FH = fopen(AH->fSpec, PG_BINARY_W);
} else {
@ -107,7 +130,8 @@ void InitArchiveFmt_Files(ArchiveHandle* AH)
}
} else {
} else { /* Read Mode */
if (AH->fSpec && strcmp(AH->fSpec,"") != 0) {
AH->FH = fopen(AH->fSpec, PG_BINARY_R);
} else {
@ -129,7 +153,7 @@ void InitArchiveFmt_Files(ArchiveHandle* AH)
static void _ArchiveEntry(ArchiveHandle* AH, TocEntry* te)
{
lclTocEntry* ctx;
char fn[1024];
char fn[K_STD_BUF_SIZE];
ctx = (lclTocEntry*)malloc(sizeof(lclTocEntry));
if (te->dataDumper) {
@ -217,36 +241,99 @@ static void _EndData(ArchiveHandle* AH, TocEntry* te)
tctx->FH = NULL;
}
/*
* Print data for a given file
*/
static void _PrintFileData(ArchiveHandle* AH, char *filename, RestoreOptions *ropt)
{
char buf[4096];
int cnt;
if (!filename)
return;
#ifdef HAVE_LIBZ
AH->FH = gzopen(filename,"rb");
#else
AH->FH = fopen(filename,PG_BINARY_R);
#endif
while ( (cnt = GZREAD(buf, 1, 4095, AH->FH)) > 0) {
buf[cnt] = '\0';
ahwrite(buf, 1, cnt, AH);
}
GZCLOSE(AH->FH);
}
/*
* Print data for a given TOC entry
*/
static void _PrintTocData(ArchiveHandle* AH, TocEntry* te, RestoreOptions *ropt)
{
lclTocEntry* tctx = (lclTocEntry*) te->formatData;
char buf[4096];
int cnt;
if (!tctx->filename)
return;
#ifdef HAVE_LIBZ
AH->FH = gzopen(tctx->filename,"rb");
#else
AH->FH = fopen(tctx->filename,PG_BINARY_R);
#endif
if (strcmp(te->desc, "BLOBS") == 0)
_LoadBlobs(AH, ropt);
else
{
_PrintFileData(AH, tctx->filename, ropt);
}
}
ahprintf(AH, "--\n-- Data for TOC Entry ID %d (OID %s) %s %s\n--\n\n",
te->id, te->oid, te->desc, te->name);
static void _getBlobTocEntry(ArchiveHandle* AH, int *oid, char fname[K_STD_BUF_SIZE])
{
lclContext* ctx = (lclContext*)AH->formatData;
char blobTe[K_STD_BUF_SIZE];
int fpos;
int eos;
while ( (cnt = GZREAD(buf, 1, 4096, AH->FH)) > 0) {
ahwrite(buf, 1, cnt, AH);
if (fgets(&blobTe[0], K_STD_BUF_SIZE - 1, ctx->blobToc) != NULL)
{
*oid = atoi(blobTe);
fpos = strcspn(blobTe, " ");
strncpy(fname, &blobTe[fpos+1], K_STD_BUF_SIZE - 1);
eos = strlen(fname)-1;
if (fname[eos] == '\n')
fname[eos] = '\0';
} else {
*oid = 0;
fname[0] = '\0';
}
}
static void _LoadBlobs(ArchiveHandle* AH, RestoreOptions *ropt)
{
int oid;
lclContext* ctx = (lclContext*)AH->formatData;
char fname[K_STD_BUF_SIZE];
ctx->blobToc = fopen("blobs.toc", PG_BINARY_R);
_getBlobTocEntry(AH, &oid, fname);
while(oid != 0)
{
StartRestoreBlob(AH, oid);
_PrintFileData(AH, fname, ropt);
EndRestoreBlob(AH, oid);
_getBlobTocEntry(AH, &oid, fname);
}
GZCLOSE(AH->FH);
ahprintf(AH, "\n\n");
fclose(ctx->blobToc);
}
static int _WriteByte(ArchiveHandle* AH, const int i)
{
lclContext* ctx = (lclContext*)AH->formatData;
@ -284,6 +371,7 @@ static int _ReadBuf(ArchiveHandle* AH, void* buf, int len)
{
lclContext* ctx = (lclContext*)AH->formatData;
int res;
res = fread(buf, 1, len, AH->FH);
ctx->filePos += res;
return res;
@ -301,3 +389,95 @@ static void _CloseArchive(ArchiveHandle* AH)
AH->FH = NULL;
}
/*
* BLOB support
*/
/*
* Called by the archiver when starting to save all BLOB DATA (not schema).
* This routine should save whatever format-specific information is needed
* to read the BLOBs back into memory.
*
* It is called just prior to the dumper's DataDumper routine.
*
* Optional, but strongly recommended.
*
*/
static void _StartBlobs(ArchiveHandle* AH, TocEntry* te)
{
lclContext* ctx = (lclContext*)AH->formatData;
char fname[K_STD_BUF_SIZE];
sprintf(fname, "blobs.toc");
ctx->blobToc = fopen(fname, PG_BINARY_W);
}
/*
* Called by the archiver when the dumper calls StartBlob.
*
* Mandatory.
*
* Must save the passed OID for retrieval at restore-time.
*/
static void _StartBlob(ArchiveHandle* AH, TocEntry* te, int oid)
{
lclContext* ctx = (lclContext*)AH->formatData;
lclTocEntry* tctx = (lclTocEntry*)te->formatData;
char fmode[10];
char fname[255];
char *sfx;
if (oid == 0)
die_horribly(AH, "%s: illegal OID for BLOB (%d)\n", progname, oid);
if (AH->compression != 0)
sfx = ".gz";
else
sfx = "";
sprintf(fmode, "wb%d", AH->compression);
sprintf(fname, "blob_%d.dat%s", oid, sfx);
fprintf(ctx->blobToc, "%d %s\n", oid, fname);
#ifdef HAVE_LIBZ
tctx->FH = gzopen(fname, fmode);
#else
tctx->FH = fopen(fname, PG_BINARY_W);
#endif
}
/*
* Called by the archiver when the dumper calls EndBlob.
*
* Optional.
*
*/
static void _EndBlob(ArchiveHandle* AH, TocEntry* te, int oid)
{
lclTocEntry* tctx = (lclTocEntry*)te->formatData;
GZCLOSE(tctx->FH);
}
/*
* Called by the archiver when finishing saving all BLOB DATA.
*
* Optional.
*
*/
static void _EndBlobs(ArchiveHandle* AH, TocEntry* te)
{
lclContext* ctx = (lclContext*)AH->formatData;
/* Write out a fake zero OID to mark end-of-blobs. */
/* WriteInt(AH, 0); */
fclose(ctx->blobToc);
}
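
_StartBlob writes one line per large object into blobs.toc — the OID, a space, then the data file
name — and _getBlobTocEntry reads the same format back at restore time. Below is a standalone
sketch of a reader for that one-line-per-blob layout; the function name and buffer sizes are
assumptions, not the archiver's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read one "<oid> <filename>" line from a blobs.toc-style file.
 * Returns 1 and fills oid/fname on success, 0 at end of file. */
static int read_blob_toc_line(FILE *toc, int *oid, char *fname, size_t fnamelen)
{
    char line[1024];
    char *sp;

    if (fgets(line, sizeof(line), toc) == NULL)
        return 0;

    *oid = atoi(line);                      /* leading OID */
    sp = strchr(line, ' ');                 /* filename follows the first space */
    if (sp == NULL)
        return 0;

    strncpy(fname, sp + 1, fnamelen - 1);
    fname[fnamelen - 1] = '\0';
    fname[strcspn(fname, "\n")] = '\0';     /* strip the trailing newline */
    return 1;
}

The archiver's own reader signals end-of-file by returning a zero OID instead of a status code,
which is why _LoadBlobs loops until oid == 0.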

View File

@ -22,7 +22,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/bin/pg_dump/pg_dump.c,v 1.159 2000/07/17 03:05:20 tgl Exp $
* $Header: /cvsroot/pgsql/src/bin/pg_dump/pg_dump.c,v 1.160 2000/07/21 11:40:08 pjw Exp $
*
* Modifications - 6/10/96 - dave@bensoft.com - version 1.13.dhb
*
@ -61,12 +61,19 @@
* - Added a -Z option for compression level on compressed formats
* - Restored '-f' in usage output
*
*-------------------------------------------------------------------------
*
* Modifications - 17-Jul-2000 - Philip Warner pjw@rhyme.com.au
* - Support for BLOB output.
* - Sort archive by OID, put some items at end (out of OID order)
*
*-------------------------------------------------------------------------
*/
#include <unistd.h> /* for getopt() */
#include <ctype.h>
#include "pg_backup.h"
#include "postgres.h"
#ifdef HAVE_GETOPT_H
@ -84,6 +91,7 @@
#include "catalog/pg_type.h"
#include "libpq-fe.h"
#include <libpq/libpq-fs.h>
#ifndef HAVE_STRDUP
#include "strdup.h"
#endif
@ -109,6 +117,9 @@ static void setMaxOid(Archive *fout);
static void AddAcl(char *aclbuf, const char *keyword);
static char *GetPrivileges(const char *s);
static int dumpBlobs(Archive *AH, char*, void*);
extern char *optarg;
extern int optind,
opterr;
@ -268,14 +279,26 @@ dumpClasses_nodumpData(Archive *fout, char* oid, void *dctxv)
if (oids == true)
{
archprintf(fout, "COPY %s WITH OIDS FROM stdin;\n",
fmtId(classname, force_quotes));
/*
* archprintf(fout, "COPY %s WITH OIDS FROM stdin;\n",
* fmtId(classname, force_quotes));
*
* - Not used as of V1.3 (needs to be in ArchiveEntry call)
*
*/
sprintf(query, "COPY %s WITH OIDS TO stdout;\n",
fmtId(classname, force_quotes));
}
else
{
archprintf(fout, "COPY %s FROM stdin;\n", fmtId(classname, force_quotes));
/*
*archprintf(fout, "COPY %s FROM stdin;\n", fmtId(classname, force_quotes));
*
* - Not used as of V1.3 (needs to be in ArchiveEntry call)
*
*/
sprintf(query, "COPY %s TO stdout;\n", fmtId(classname, force_quotes));
}
res = PQexec(g_conn, query);
@ -452,19 +475,28 @@ dumpClasses_dumpData(Archive *fout, char* oid, void *dctxv)
*/
static void
dumpClasses(const TableInfo *tblinfo, const int numTables, Archive *fout,
const char *onlytable, const bool oids)
const char *onlytable, const bool oids, const bool force_quotes)
{
int i;
char *all_only;
DataDumperPtr dumpFn;
DumpContext *dumpCtx;
char *oidsPart;
char copyBuf[512];
char *copyStmt;
if (onlytable == NULL)
all_only = "all";
else
all_only = "only";
if (oids == true)
oidsPart = "WITH OIDS ";
else
oidsPart = "";
if (g_verbose)
fprintf(stderr, "%s dumping out the contents of %s %d table%s/sequence%s %s\n",
g_comment_start, all_only,
@ -514,112 +546,28 @@ dumpClasses(const TableInfo *tblinfo, const int numTables, Archive *fout,
dumpCtx->tblidx = i;
dumpCtx->oids = oids;
if (!dumpData)
if (!dumpData) /* Dump/restore using COPY */
{
dumpFn = dumpClasses_nodumpData;
/* dumpClasses_nodumpData(fout, classname, oids); */
else
sprintf(copyBuf, "COPY %s %s FROM stdin;\n", fmtId(tblinfo[i].relname, force_quotes),
oidsPart);
copyStmt = copyBuf;
}
else /* Restore using INSERT */
{
dumpFn = dumpClasses_dumpData;
/* dumpClasses_dumpData(fout, classname); */
copyStmt = NULL;
}
ArchiveEntry(fout, tblinfo[i].oid, fmtId(tblinfo[i].relname, false),
"TABLE DATA", NULL, "", "", tblinfo[i].usename,
"TABLE DATA", NULL, "", "", copyStmt, tblinfo[i].usename,
dumpFn, dumpCtx);
}
}
}
static void
prompt_for_password(char *username, char *password)
{
char buf[512];
int length;
#ifdef HAVE_TERMIOS_H
struct termios t_orig,
t;
#endif
fprintf(stderr, "Username: ");
fflush(stderr);
fgets(username, 100, stdin);
length = strlen(username);
/* skip rest of the line */
if (length > 0 && username[length - 1] != '\n')
{
do
{
fgets(buf, 512, stdin);
} while (buf[strlen(buf) - 1] != '\n');
}
if (length > 0 && username[length - 1] == '\n')
username[length - 1] = '\0';
#ifdef HAVE_TERMIOS_H
tcgetattr(0, &t);
t_orig = t;
t.c_lflag &= ~ECHO;
tcsetattr(0, TCSADRAIN, &t);
#endif
fprintf(stderr, "Password: ");
fflush(stderr);
fgets(password, 100, stdin);
#ifdef HAVE_TERMIOS_H
tcsetattr(0, TCSADRAIN, &t_orig);
#endif
length = strlen(password);
/* skip rest of the line */
if (length > 0 && password[length - 1] != '\n')
{
do
{
fgets(buf, 512, stdin);
} while (buf[strlen(buf) - 1] != '\n');
}
if (length > 0 && password[length - 1] == '\n')
password[length - 1] = '\0';
fprintf(stderr, "\n\n");
}
static void
check_database_version(bool ignoreVersion)
{
PGresult *res;
double myversion;
const char *remoteversion_str;
double remoteversion;
myversion = strtod(PG_VERSION, NULL);
res = PQexec(g_conn, "SELECT version()");
if (!res ||
PQresultStatus(res) != PGRES_TUPLES_OK ||
PQntuples(res) != 1)
{
fprintf(stderr, "check_database_version(): command failed. Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
remoteversion_str = PQgetvalue(res, 0, 0);
remoteversion = strtod(remoteversion_str + 11, NULL);
if (myversion != remoteversion)
{
fprintf(stderr, "Database version: %s\npg_dump version: %s\n",
remoteversion_str, PG_VERSION);
if (ignoreVersion)
fprintf(stderr, "Proceeding despite version mismatch.\n");
else
{
fprintf(stderr, "Aborting because of version mismatch.\n"
"Use --ignore-version if you think it's safe to proceed anyway.\n");
exit_nicely(g_conn);
}
}
PQclear(res);
}
int
main(int argc, char **argv)
{
@ -634,20 +582,19 @@ main(int argc, char **argv)
bool oids = false;
TableInfo *tblinfo;
int numTables;
char connect_string[512] = "";
char tmp_string[128];
char username[100];
char password[100];
bool use_password = false;
int compressLevel = -1;
bool ignore_version = false;
int plainText = 0;
int outputClean = 0;
int outputBlobs = 0;
RestoreOptions *ropt;
#ifdef HAVE_GETOPT_LONG
static struct option long_options[] = {
{"data-only", no_argument, NULL, 'a'},
{"blobs", no_argument, NULL, 'b' },
{"clean", no_argument, NULL, 'c'},
{"file", required_argument, NULL, 'f'},
{"format", required_argument, NULL, 'F'},
@ -686,6 +633,12 @@ main(int argc, char **argv)
else
progname = strrchr(argv[0], SEP_CHAR) + 1;
/* Set default options based on progname */
if (strcmp(progname, "pg_backup") == 0)
{
format = "c";
outputBlobs = 1;
}
#ifdef HAVE_GETOPT_LONG
while ((c = getopt_long(argc, argv, "acdDf:F:h:inNop:st:uvxzZ:V?", long_options, &optindex)) != -1)
@ -698,6 +651,10 @@ main(int argc, char **argv)
case 'a': /* Dump data only */
dataOnly = true;
break;
case 'b': /* Dump blobs */
outputBlobs = true;
break;
case 'c': /* clean (i.e., drop) schema prior to
* create */
outputClean = 1;
@ -843,7 +800,12 @@ main(int argc, char **argv)
case 'p':
case 'P':
plainText = 1;
g_fout = CreateArchive(filename, archPlainText, 0);
g_fout = CreateArchive(filename, archNull, 0);
break;
case 't':
case 'T':
g_fout = CreateArchive(filename, archTar, compressLevel);
break;
default:
@ -860,53 +822,13 @@ main(int argc, char **argv)
exit(1);
}
/* find database */
if (!(dbname = argv[optind]) &&
!(dbname = getenv("PGDATABASE")))
{
fprintf(stderr, "%s: no database name specified\n", progname);
exit(1);
}
/* Let the archiver know how noisy to be */
g_fout->verbose = g_verbose;
/* g_conn = PQsetdb(pghost, pgport, NULL, NULL, dbname); */
if (pghost != NULL)
{
sprintf(tmp_string, "host=%s ", pghost);
strcat(connect_string, tmp_string);
}
if (pgport != NULL)
{
sprintf(tmp_string, "port=%s ", pgport);
strcat(connect_string, tmp_string);
}
if (dbname != NULL)
{
sprintf(tmp_string, "dbname=%s ", dbname);
strcat(connect_string, tmp_string);
}
if (use_password)
{
prompt_for_password(username, password);
strcat(connect_string, "authtype=password ");
sprintf(tmp_string, "user=%s ", username);
strcat(connect_string, tmp_string);
sprintf(tmp_string, "password=%s ", password);
strcat(connect_string, tmp_string);
MemSet(tmp_string, 0, sizeof(tmp_string));
MemSet(password, 0, sizeof(password));
}
g_conn = PQconnectdb(connect_string);
MemSet(connect_string, 0, sizeof(connect_string));
/* check to see that the backend connection was successfully made */
if (PQstatus(g_conn) == CONNECTION_BAD)
{
fprintf(stderr, "Connection to database '%s' failed.\n", dbname);
fprintf(stderr, "%s\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
dbname = argv[optind];
/* check for version mismatch */
check_database_version(ignore_version);
/* Open the database using the Archiver, so it knows about it. Errors mean death */
g_conn = ConnectDatabase(g_fout, dbname, pghost, pgport, use_password, ignore_version);
/*
* Start serializable transaction to dump consistent data
@ -916,17 +838,15 @@ main(int argc, char **argv)
res = PQexec(g_conn, "begin");
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
{
fprintf(stderr, "BEGIN command failed. Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
exit_horribly(g_fout, "BEGIN command failed. Explanation from backend: '%s'.\n",
PQerrorMessage(g_conn));
PQclear(res);
res = PQexec(g_conn, "set transaction isolation level serializable");
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
{
fprintf(stderr, "SET TRANSACTION command failed. Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
exit_horribly(g_fout, "SET TRANSACTION command failed. Explanation from backend: '%s'.\n",
PQerrorMessage(g_conn));
PQclear(res);
}
@ -941,7 +861,12 @@ main(int argc, char **argv)
tblinfo = dumpSchema(g_fout, &numTables, tablename, aclsSkip, oids, schemaOnly, dataOnly);
if (!schemaOnly)
dumpClasses(tblinfo, numTables, g_fout, tablename, oids);
{
dumpClasses(tblinfo, numTables, g_fout, tablename, oids, force_quotes);
}
if (outputBlobs)
ArchiveEntry(g_fout, "0", "BLOBS", "BLOBS", NULL, "", "", "", "", dumpBlobs, 0);
if (!dataOnly) /* dump indexes and triggers at the end
* for performance */
@ -951,6 +876,15 @@ main(int argc, char **argv)
dumpRules(g_fout, tablename, tblinfo, numTables);
}
/* Now sort the output nicely */
SortTocByOID(g_fout);
MoveToEnd(g_fout, "TABLE DATA");
MoveToEnd(g_fout, "BLOBS");
MoveToEnd(g_fout, "INDEX");
MoveToEnd(g_fout, "TRIGGER");
MoveToEnd(g_fout, "RULE");
MoveToEnd(g_fout, "ACL");
if (plainText)
{
ropt = NewRestoreOptions();
@ -973,6 +907,92 @@ main(int argc, char **argv)
exit(0);
}
/*
* dumpBlobs:
* dump all blobs
*
*/
#define loBufSize 16384
#define loFetchSize 1000
static int
dumpBlobs(Archive *AH, char* junkOid, void *junkVal)
{
PQExpBuffer oidQry = createPQExpBuffer();
PQExpBuffer oidFetchQry = createPQExpBuffer();
PGresult *res;
int i;
int loFd;
char buf[loBufSize];
int cnt;
int blobOid;
if (g_verbose)
fprintf(stderr, "%s saving BLOBs\n", g_comment_start);
/* Cursor to get all BLOB tables */
appendPQExpBuffer(oidQry, "Declare blobOid Cursor for SELECT oid from pg_class where relkind = 'l'");
res = PQexec(g_conn, oidQry->data);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
{
fprintf(stderr, "dumpBlobs(): Declare Cursor failed. Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
/* Fetch for cursor */
appendPQExpBuffer(oidFetchQry, "Fetch %d in blobOid", loFetchSize);
do {
/* Do a fetch */
PQclear(res);
res = PQexec(g_conn, oidFetchQry->data);
if (!res || PQresultStatus(res) != PGRES_TUPLES_OK)
{
fprintf(stderr, "dumpBlobs(): Fetch Cursor failed. Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
/* Process the tuples, if any */
for (i = 0; i < PQntuples(res); i++)
{
blobOid = atoi(PQgetvalue(res, i, 0));
/* Open the BLOB */
loFd = lo_open(g_conn, blobOid, INV_READ);
if (loFd == -1)
{
fprintf(stderr, "dumpBlobs(): Could not open large object. "
"Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
StartBlob(AH, blobOid);
/* Now read it in chunks, sending data to archive */
do {
cnt = lo_read(g_conn, loFd, buf, loBufSize);
if (cnt < 0) {
fprintf(stderr, "dumpBlobs(): Error reading large object. "
" Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
WriteData(AH, buf, cnt);
} while (cnt > 0);
lo_close(g_conn, loFd);
EndBlob(AH, blobOid);
}
} while (PQntuples(res) > 0);
return 1;
}
/*
* getTypes:
* read all base types in the system catalogs and return them in the
@ -2409,7 +2429,7 @@ dumpComment(Archive *fout, const char *target, const char *oid)
target, checkForQuote(PQgetvalue(res, 0, i_description)));
ArchiveEntry(fout, oid, target, "COMMENT", NULL, query->data, "" /*Del*/,
"" /*Owner*/, NULL, NULL);
"" /* Copy */, "" /*Owner*/, NULL, NULL);
}
@ -2542,7 +2562,7 @@ dumpTypes(Archive *fout, FuncInfo *finfo, int numFuncs,
appendPQExpBuffer(q, ");\n");
ArchiveEntry(fout, tinfo[i].oid, fmtId(tinfo[i].typname, force_quotes), "TYPE", NULL,
q->data, delq->data, tinfo[i].usename, NULL, NULL);
q->data, delq->data, "", tinfo[i].usename, NULL, NULL);
/*** Dump Type Comments ***/
@ -2629,7 +2649,7 @@ dumpProcLangs(Archive *fout, FuncInfo *finfo, int numFuncs,
lancompiler);
ArchiveEntry(fout, PQgetvalue(res, i, i_oid), lanname, "PROCEDURAL LANGUAGE",
NULL, defqry->data, delqry->data, "", NULL, NULL);
NULL, defqry->data, delqry->data, "", "", NULL, NULL);
free(lanname);
free(lancompiler);
@ -2669,8 +2689,8 @@ dumpOneFunc(Archive *fout, FuncInfo *finfo, int i,
PQExpBuffer fn = createPQExpBuffer();
PQExpBuffer delqry = createPQExpBuffer();
PQExpBuffer fnlist = createPQExpBuffer();
PQExpBuffer asPart = createPQExpBuffer();
int j;
PQExpBuffer asPart = createPQExpBuffer();
char func_lang[NAMEDATALEN + 1];
PGresult *res;
int nlangs;
@ -2751,7 +2771,7 @@ dumpOneFunc(Archive *fout, FuncInfo *finfo, int i,
asPart->data, func_lang);
ArchiveEntry(fout, finfo[i].oid, fn->data, "FUNCTION", NULL, q->data, delqry->data,
finfo[i].usename, NULL, NULL);
"", finfo[i].usename, NULL, NULL);
/*** Dump Function Comments ***/
@ -2870,7 +2890,7 @@ dumpOprs(Archive *fout, OprInfo *oprinfo, int numOperators,
sort2->data);
ArchiveEntry(fout, oprinfo[i].oid, oprinfo[i].oprname, "OPERATOR", NULL,
q->data, delq->data, oprinfo[i].usename, NULL, NULL);
q->data, delq->data, "", oprinfo[i].usename, NULL, NULL);
}
}
@ -2927,7 +2947,7 @@ dumpAggs(Archive *fout, AggInfo *agginfo, int numAggs,
details->data);
ArchiveEntry(fout, agginfo[i].oid, aggSig->data, "AGGREGATE", NULL,
q->data, delq->data, agginfo[i].usename, NULL, NULL);
q->data, delq->data, "", agginfo[i].usename, NULL, NULL);
/*** Dump Aggregate Comments ***/
@ -3096,7 +3116,7 @@ dumpACL(Archive *fout, TableInfo tbinfo)
free(aclbuf);
ArchiveEntry(fout, tbinfo.oid, tbinfo.relname, "ACL", NULL, sql, "", "", NULL, NULL);
ArchiveEntry(fout, tbinfo.oid, tbinfo.relname, "ACL", NULL, sql, "", "", "", NULL, NULL);
}
@ -3274,7 +3294,7 @@ dumpTables(Archive *fout, TableInfo *tblinfo, int numTables,
if (!dataOnly) {
ArchiveEntry(fout, tblinfo[i].oid, fmtId(tblinfo[i].relname, false),
"TABLE", NULL, q->data, delq->data, tblinfo[i].usename,
"TABLE", NULL, q->data, delq->data, "", tblinfo[i].usename,
NULL, NULL);
}
@ -3468,7 +3488,7 @@ dumpIndices(Archive *fout, IndInfo *indinfo, int numIndices,
/* Dump Index Comments */
ArchiveEntry(fout, tblinfo[tableInd].oid, id1->data, "INDEX", NULL, q->data, delq->data,
tblinfo[tableInd].usename, NULL, NULL);
"", tblinfo[tableInd].usename, NULL, NULL);
resetPQExpBuffer(q);
appendPQExpBuffer(q, "INDEX %s", id1->data);
@ -3599,7 +3619,7 @@ setMaxOid(Archive *fout)
pos = pos + snprintf(sql+pos, 1024-pos, "\\.\n");
pos = pos + snprintf(sql+pos, 1024-pos, "DROP TABLE pg_dump_oid;\n");
ArchiveEntry(fout, "0", "Max OID", "<Init>", NULL, sql, "","", NULL, NULL);
ArchiveEntry(fout, "0", "Max OID", "<Init>", NULL, sql, "", "", "", NULL, NULL);
}
/*
@ -3750,7 +3770,7 @@ dumpSequence(Archive *fout, TableInfo tbinfo)
}
ArchiveEntry(fout, tbinfo.oid, fmtId(tbinfo.relname, force_quotes), "SEQUENCE", NULL,
query->data, delqry->data, tbinfo.usename, NULL, NULL);
query->data, delqry->data, "", tbinfo.usename, NULL, NULL);
/* Dump Sequence Comments */
@ -3779,7 +3799,7 @@ dumpTriggers(Archive *fout, const char *tablename,
for (j = 0; j < tblinfo[i].ntrig; j++)
{
ArchiveEntry(fout, tblinfo[i].triggers[j].oid, tblinfo[i].triggers[j].tgname,
"TRIGGER", NULL, tblinfo[i].triggers[j].tgsrc, "",
"TRIGGER", NULL, tblinfo[i].triggers[j].tgsrc, "", "",
tblinfo[i].usename, NULL, NULL);
dumpComment(fout, tblinfo[i].triggers[j].tgcomment, tblinfo[i].triggers[j].oid);
}
@ -3846,7 +3866,7 @@ dumpRules(Archive *fout, const char *tablename,
ArchiveEntry(fout, PQgetvalue(res, i, i_oid), PQgetvalue(res, i, i_rulename),
"RULE", NULL, PQgetvalue(res, i, i_definition),
"", "", NULL, NULL);
"", "", "", NULL, NULL);
/* Dump rule comments */
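
dumpClasses now stores the restore-side COPY command (copyStmt) with each TABLE DATA entry, so a
direct-database restore can replay table data through libpq's COPY IN protocol instead of writing
SQL. The fragment below is not the archiver's code, just an illustration of that protocol with
plain libpq calls; copy_stmt and line are placeholders.

#include <stdio.h>
#include "libpq-fe.h"

/* Replay one data row through COPY IN; returns 0 on success. */
static int copy_one_line(PGconn *conn, const char *copy_stmt, const char *line)
{
    PGresult *res = PQexec(conn, copy_stmt);   /* e.g. "COPY mytable FROM stdin;" */

    if (PQresultStatus(res) != PGRES_COPY_IN)
    {
        fprintf(stderr, "COPY failed: %s", PQerrorMessage(conn));
        PQclear(res);
        return -1;
    }
    PQclear(res);

    PQputline(conn, line);       /* one tab-separated, newline-terminated data row */
    PQputline(conn, "\\.\n");    /* end-of-data marker */
    return PQendcopy(conn);      /* 0 if the COPY completed cleanly */
}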

View File

@ -84,19 +84,24 @@ typedef struct option optType;
struct option cmdopts[] = {
{ "clean", 0, NULL, 'c' },
{ "data-only", 0, NULL, 'a' },
{ "dbname", 1, NULL, 'd' },
{ "file", 1, NULL, 'f' },
{ "format", 1, NULL, 'F' },
{ "function", 2, NULL, 'p' },
{ "index", 2, NULL, 'i'},
{ "function", 2, NULL, 'P' },
{ "host", 1, NULL, 'h' },
{ "ignore-version", 0, NULL, 'i'},
{ "index", 2, NULL, 'I'},
{ "list", 0, NULL, 'l'},
{ "no-acl", 0, NULL, 'x' },
{ "port", 1, NULL, 'p' },
{ "oid-order", 0, NULL, 'o'},
{ "orig-order", 0, NULL, 'O' },
{ "password", 0, NULL, 'u' },
{ "rearrange", 0, NULL, 'r'},
{ "schema-only", 0, NULL, 's' },
{ "table", 2, NULL, 't'},
{ "trigger", 2, NULL, 'T' },
{ "use-list", 1, NULL, 'u'},
{ "use-list", 1, NULL, 'U'},
{ "verbose", 0, NULL, 'v' },
{ NULL, 0, NULL, 0}
};
@ -115,9 +120,9 @@ int main(int argc, char **argv)
progname = *argv;
#ifdef HAVE_GETOPT_LONG
while ((c = getopt_long(argc, argv, "acf:F:i:loOp:st:T:u:vx", cmdopts, NULL)) != EOF)
while ((c = getopt_long(argc, argv, "acd:f:F:h:i:loOp:st:T:u:U:vx", cmdopts, NULL)) != EOF)
#else
while ((c = getopt(argc, argv, "acf:F:i:loOp:st:T:u:vx")) != -1)
while ((c = getopt(argc, argv, "acd:f:F:h:i:loOp:st:T:u:U:vx")) != -1)
#endif
{
switch (c)
@ -129,6 +134,13 @@ int main(int argc, char **argv)
* create */
opts->dropSchema = 1;
break;
case 'd':
if (strlen(optarg) != 0)
{
opts->dbname = strdup(optarg);
opts->useDB = 1;
}
break;
case 'f': /* output file name */
opts->filename = strdup(optarg);
break;
@ -136,22 +148,32 @@ int main(int argc, char **argv)
if (strlen(optarg) != 0)
opts->formatName = strdup(optarg);
break;
case 'h':
if (strlen(optarg) != 0)
opts->pghost = strdup(optarg);
break;
case 'i':
opts->ignoreVersion = 1;
break;
case 'o':
opts->oidOrder = 1;
break;
case 'O':
opts->origOrder = 1;
break;
case 'p':
if (strlen(optarg) != 0)
opts->pgport = strdup(optarg);
break;
case 'r':
opts->rearrange = 1;
break;
case 'p': /* Function */
case 'P': /* Function */
opts->selTypes = 1;
opts->selFunction = 1;
opts->functionNames = _cleanupName(optarg);
break;
case 'i': /* Index */
case 'I': /* Index */
opts->selTypes = 1;
opts->selIndex = 1;
opts->indexNames = _cleanupName(optarg);
@ -173,7 +195,11 @@ int main(int argc, char **argv)
opts->tocSummary = 1;
break;
case 'u': /* input TOC summary file name */
case 'u':
opts->requirePassword = 1;
break;
case 'U': /* input TOC summary file name */
opts->tocFile = strdup(optarg);
break;
@ -209,14 +235,23 @@ int main(int argc, char **argv)
opts->format = archFiles;
break;
case 't':
case 'T':
opts->format = archTar;
break;
default:
fprintf(stderr, "%s: Unknown archive format '%s', please specify 'f' or 'c'\n", progname, opts->formatName);
fprintf(stderr, "%s: Unknown archive format '%s', please specify 't' or 'c'\n",
progname, opts->formatName);
exit (1);
}
}
AH = OpenArchive(fileSpec, opts->format);
/* Let the archiver know how noisy to be */
AH->verbose = opts->verbose;
if (opts->tocFile)
SortTocFromFile(AH, opts);
@ -226,7 +261,9 @@ int main(int argc, char **argv)
SortTocByID(AH);
if (opts->rearrange) {
MoveToStart(AH, "<Init>");
MoveToEnd(AH, "TABLE DATA");
MoveToEnd(AH, "BLOBS");
MoveToEnd(AH, "INDEX");
MoveToEnd(AH, "TRIGGER");
MoveToEnd(AH, "RULE");
@ -250,39 +287,48 @@ static void usage(const char *progname)
fprintf(stderr,
"usage: %s [options] [backup file]\n"
" -a, --data-only \t dump out only the data, no schema\n"
" -d, --dbname <name> \t specify database name\n"
" -c, --clean \t clean(drop) schema prior to create\n"
" -f filename \t script output filename\n"
" -F, --format {c|f} \t specify backup file format\n"
" -p, --function[=name] \t dump functions or named function\n"
" -h, --host <hostname> \t server host name\n"
" -i, --index[=name] \t dump indexes or named index\n"
" -l, --list \t dump summarized TOC for this file\n"
" -o, --oid-order \t dump in oid order\n"
" -O, --orig-order \t dump in original dump order\n"
" -p, --port <port> \t server port number\n"
" -P, --function[=name] \t dump functions or named function\n"
" -r, --rearrange \t rearrange output to put indexes etc at end\n"
" -s, --schema-only \t dump out only the schema, no data\n"
" -t [table], --table[=table] \t dump for this table only\n"
" -T, --trigger[=name] \t dump triggers or named trigger\n"
" -u, --use-list filename \t use specified TOC for ordering output from this file\n"
" -v \t verbose\n"
" -u, --password \t use password authentication\n"
" -U, --use-list filename \t use specified TOC for ordering output from this file\n"
" -v, --verbose \t verbose\n"
" -x, --no-acl \t skip dumping of ACLs (grant/revoke)\n"
, progname);
#else
fprintf(stderr,
"usage: %s [options] [backup file]\n"
" -a \t dump out only the data, no schema\n"
" -d, <name> \t specify database name\n"
" -c \t clean(drop) schema prior to create\n"
" -f filename NOT IMPLEMENTED \t script output filename\n"
" -F {c|f} \t specify backup file format\n"
" -p name \t dump functions or named function\n"
" -h, <hostname> \t server host name\n"
" -i name \t dump indexes or named index\n"
" -l \t dump summarized TOC for this file\n"
" -o \t dump in oid order\n"
" -O \t dump in original dump order\n"
" -p <port> \t server port number\n"
" -P name \t dump functions or named function\n"
" -r \t rearrange output to put indexes etc at end\n"
" -s \t dump out only the schema, no data\n"
" -t name \t dump for this table only\n"
" -T name \t dump triggers or named trigger\n"
" -u filename \t use specified TOC for ordering output from this file\n"
" -u \t use password authentication\n"
" -U filename \t use specified TOC for ordering output from this file\n"
" -v \t verbose\n"
" -x \t skip dumping of ACLs (grant/revoke)\n"
, progname);