
Have \copy go through SendQuery

This enables a bunch of features, notably ON_ERROR_ROLLBACK.  It also
makes COPY failure (either in the server or psql) as a whole behave more
sanely in psql.

Additionally, having more commands in the same command line as COPY
works better (though since psql splits lines at semicolons, this doesn't
matter much unless you're using -c).

Also tighten a couple of switches on PQresultStatus() to add
PGRES_COPY_BOTH support and stop assuming that unknown statuses received
are errors; have those print diagnostics where warranted.

Author: Noah Misch
Committed by Alvaro Herrera on 2012-01-25 18:06:00 -03:00
parent 6eb71ac552
commit 08146775ac
5 changed files with 251 additions and 127 deletions
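
The last paragraph of the message refers to the switch statements on PQresultStatus() in psql's query handling. As a rough standalone illustration of that shape of handling (a sketch built only on libpq's documented statuses, not the committed code; check_result_status() is a hypothetical name), such a switch might look like this:

#include <stdbool.h>
#include <stdio.h>
#include <libpq-fe.h>

/*
 * Illustrative sketch, not the committed psql code: examine a result status,
 * covering both COPY directions explicitly and naming any status we do not
 * recognize instead of lumping it silently with the error cases.  Returns
 * false when the status should be treated as a failure.
 */
bool
check_result_status(PGconn *conn, const PGresult *res)
{
	switch (PQresultStatus(res))
	{
		case PGRES_COMMAND_OK:
		case PGRES_TUPLES_OK:
			return true;

		case PGRES_COPY_OUT:
		case PGRES_COPY_IN:
			/* the caller must now transfer the COPY data before anything else */
			return true;

		case PGRES_COPY_BOTH:
			/* used by streaming replication; a regular session should not see it */
			fprintf(stderr, "unexpected COPY BOTH response\n");
			return false;

		case PGRES_NONFATAL_ERROR:
		case PGRES_FATAL_ERROR:
		case PGRES_BAD_RESPONSE:
			fprintf(stderr, "%s", PQerrorMessage(conn));
			return false;

		default:
			/* unknown status: print a diagnostic naming it by number */
			fprintf(stderr, "unexpected response (%d)\n",
					(int) PQresultStatus(res));
			return false;
	}
}

In psql itself, the COPY_OUT and COPY_IN cases hand off to handleCopyOut() and handleCopyIn(), which the diff below reworks.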


@@ -244,8 +244,9 @@ do_copy(const char *args)
{
PQExpBufferData query;
FILE *copystream;
FILE *save_file;
FILE **override_file;
struct copy_options *options;
PGresult *result;
bool success;
struct stat st;
@@ -261,6 +262,8 @@ do_copy(const char *args)
if (options->from)
{
override_file = &pset.cur_cmd_source;
if (options->file)
copystream = fopen(options->file, PG_BINARY_R);
else if (!options->psql_inout)
@@ -270,6 +273,8 @@ do_copy(const char *args)
}
else
{
override_file = &pset.queryFout;
if (options->file)
copystream = fopen(options->file, PG_BINARY_W);
else if (!options->psql_inout)
@@ -308,52 +313,13 @@ do_copy(const char *args)
if (options->after_tofrom)
appendPQExpBufferStr(&query, options->after_tofrom);
result = PSQLexec(query.data, true);
/* Run it like a user command, interposing the data source or sink. */
save_file = *override_file;
*override_file = copystream;
success = SendQuery(query.data);
*override_file = save_file;
termPQExpBuffer(&query);
switch (PQresultStatus(result))
{
case PGRES_COPY_OUT:
SetCancelConn();
success = handleCopyOut(pset.db, copystream);
ResetCancelConn();
break;
case PGRES_COPY_IN:
SetCancelConn();
success = handleCopyIn(pset.db, copystream,
PQbinaryTuples(result));
ResetCancelConn();
break;
case PGRES_NONFATAL_ERROR:
case PGRES_FATAL_ERROR:
case PGRES_BAD_RESPONSE:
success = false;
psql_error("\\copy: %s", PQerrorMessage(pset.db));
break;
default:
success = false;
psql_error("\\copy: unexpected response (%d)\n",
PQresultStatus(result));
break;
}
PQclear(result);
/*
* Make sure we have pumped libpq dry of results; else it may still be in
* ASYNC_BUSY state, leading to false readings in, eg, get_prompt().
*/
while ((result = PQgetResult(pset.db)) != NULL)
{
success = false;
psql_error("\\copy: unexpected response (%d)\n",
PQresultStatus(result));
/* if still in COPY IN state, try to get out of it */
if (PQresultStatus(result) == PGRES_COPY_IN)
PQputCopyEnd(pset.db, _("trying to exit copy mode"));
PQclear(result);
}
if (options->file != NULL)
{
if (fclose(copystream) != 0)
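
The core of the do_copy() change above is an interposition pattern: save the relevant stream pointer, point it at the copy stream, run the query through SendQuery(), then restore it. A minimal self-contained sketch of that pattern (cur_cmd_source, query_fout, send_query() and run_with_stream() are hypothetical stand-ins for pset.cur_cmd_source, pset.queryFout, SendQuery() and the surrounding do_copy() logic):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for psql's pset.cur_cmd_source and pset.queryFout. */
static FILE *cur_cmd_source;
static FILE *query_fout;

/*
 * Hypothetical stand-in for SendQuery(): here it only announces the query,
 * but a real implementation would read COPY IN data from cur_cmd_source and
 * write COPY OUT data to query_fout while running it.
 */
static bool
send_query(const char *query)
{
	printf("running: %s\n", query);
	return true;
}

/*
 * The interposition pattern: point the relevant stream pointer at the copy
 * stream for the duration of one query, then put it back, so send_query()
 * needs no knowledge of \copy at all.
 */
static bool
run_with_stream(const char *query, FILE *copystream, bool copy_from)
{
	FILE **override_file = copy_from ? &cur_cmd_source : &query_fout;
	FILE *save_file = *override_file;
	bool success;

	*override_file = copystream;
	success = send_query(query);
	*override_file = save_file;

	return success;
}

int
main(void)
{
	cur_cmd_source = stdin;
	query_fout = stdout;
	return run_with_stream("COPY t FROM STDIN;", stdin, true) ? 0 : 1;
}

Because the override lasts only for the one SendQuery() call, features layered on SendQuery(), such as ON_ERROR_ROLLBACK, apply to \copy with no extra plumbing.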
@@ -425,8 +391,30 @@ handleCopyOut(PGconn *conn, FILE *copystream)
OK = false;
}
/* Check command status and return to normal libpq state */
res = PQgetResult(conn);
/*
* Check command status and return to normal libpq state. After a
* client-side error, the server will remain ready to deliver data. The
* cleanest thing is to fully drain and discard that data. If the
* client-side error happened early in a large file, this takes a long
* time. Instead, take advantage of the fact that PQexec() will silently
* end any ongoing PGRES_COPY_OUT state. This does cause us to lose the
* results of any commands following the COPY in a single command string.
* It also only works for protocol version 3. XXX should we clean up
* using the slow way when the connection is using protocol version 2?
*
* We must not ever return with the status still PGRES_COPY_OUT. Our
* caller is unable to distinguish that situation from reaching the next
* COPY in a command string that happened to contain two consecutive COPY
* TO STDOUT commands. We trust that no condition can make PQexec() fail
* indefinitely while retaining status PGRES_COPY_OUT.
*/
while (res = PQgetResult(conn), PQresultStatus(res) == PGRES_COPY_OUT)
{
OK = false;
PQclear(res);
PQexec(conn, "-- clear PGRES_COPY_OUT state");
}
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
psql_error("%s", PQerrorMessage(conn));
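
The comment added to handleCopyOut() above describes the trick of using a throwaway PQexec() call to escape a lingering COPY OUT state. A minimal sketch of that trick against a bare libpq connection (abandon_copy_out() is a hypothetical helper, not part of psql, and the success path that actually writes the rows out is omitted):

#include <stdbool.h>
#include <stdio.h>
#include <libpq-fe.h>

/*
 * Sketch only: after deciding to give up on a COPY TO STDOUT, keep fetching
 * results; while libpq is still in PGRES_COPY_OUT, let a dummy PQexec() end
 * that state (protocol 3) instead of reading and discarding the remaining
 * data ourselves.  Returns true if the COPY ultimately reported success.
 */
bool
abandon_copy_out(PGconn *conn)
{
	PGresult   *res;
	bool		ok;

	while (res = PQgetResult(conn), PQresultStatus(res) == PGRES_COPY_OUT)
	{
		PQclear(res);
		PQclear(PQexec(conn, "-- clear PGRES_COPY_OUT state"));
	}

	ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
	if (!ok)
		fprintf(stderr, "%s", PQerrorMessage(conn));
	PQclear(res);

	return ok;
}

As the comment notes, the shortcut also discards the results of any later commands in the same command string, which is the trade-off accepted here.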
@@ -471,13 +459,8 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary)
/* Terminate data transfer */
PQputCopyEnd(conn, _("canceled by user"));
/* Check command status and return to normal libpq state */
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
psql_error("%s", PQerrorMessage(conn));
PQclear(res);
return false;
OK = false;
goto copyin_cleanup;
}
/* Prompt if interactive input */
@@ -600,8 +583,24 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary)
OK ? NULL : _("aborted because of read failure")) <= 0)
OK = false;
/* Check command status and return to normal libpq state */
res = PQgetResult(conn);
copyin_cleanup:
/*
* Check command status and return to normal libpq state
*
* We must not ever return with the status still PGRES_COPY_IN. Our
* caller is unable to distinguish that situation from reaching the next
* COPY in a command string that happened to contain two consecutive COPY
* FROM STDIN commands. XXX if something makes PQputCopyEnd() fail
* indefinitely while retaining status PGRES_COPY_IN, we get an infinite
* loop. This is more realistic than handleCopyOut()'s counterpart risk.
*/
while (res = PQgetResult(conn), PQresultStatus(res) == PGRES_COPY_IN)
{
OK = false;
PQclear(res);
PQputCopyEnd(pset.db, _("trying to exit copy mode"));
}
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
psql_error("%s", PQerrorMessage(conn));
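
For the COPY FROM STDIN direction, the counterpart escape hatch is PQputCopyEnd(). A matching standalone sketch (abandon_copy_in() is again a hypothetical helper), subject to the same caveat as the comment above: if PQputCopyEnd() were to fail indefinitely while the status stayed PGRES_COPY_IN, this loop would never terminate:

#include <stdbool.h>
#include <stdio.h>
#include <libpq-fe.h>

/*
 * Sketch only: force the way out of a lingering PGRES_COPY_IN state by
 * sending PQputCopyEnd() with an error message until the server hands back
 * a regular result, then report that result's status.
 */
bool
abandon_copy_in(PGconn *conn)
{
	PGresult   *res;
	bool		ok;

	while (res = PQgetResult(conn), PQresultStatus(res) == PGRES_COPY_IN)
	{
		PQclear(res);
		/* a non-NULL message makes the server fail the COPY instead of completing it */
		PQputCopyEnd(conn, "aborting COPY FROM STDIN");
	}

	ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
	if (!ok)
		fprintf(stderr, "%s", PQerrorMessage(conn));
	PQclear(res);

	return ok;
}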