Add a "row processor" API to libpq for better handling of large results.
Traditionally libpq has collected an entire query result before passing it back to the application. That provides a simple and transactional API, but it's pretty inefficient for large result sets. This patch allows the application to process each row on-the-fly instead of accumulating the rows into the PGresult. Error recovery becomes a bit more complex, but often that tradeoff is well worth making.

Kyotaro Horiguchi, reviewed by Marko Kreen and Tom Lane
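For orientation, a minimal sketch of what row-at-a-time consumption could look like from application code follows. Only the standard connection and query calls (PQconnectdb, PQstatus, PQexec, PQresultStatus, PQnfields, PQclear, PQfinish) are established libpq API; the PQsetRowProcessor installer, the PQrowProcessor callback shape, and the PGdataValue column descriptor are assumptions inferred from the commit summary and do not appear in the hunk below, so treat this as illustrative rather than as the committed interface.

/* Illustrative only: row-at-a-time processing via an application callback.
 * PQsetRowProcessor, PQrowProcessor, and PGdataValue are ASSUMED names
 * based on the commit summary; they are not shown in the hunk below. */
#include <stdio.h>
#include <libpq-fe.h>

/* Assumed callback shape: invoked once per incoming data row with the raw
 * column values; returning nonzero means "row consumed, keep going", so
 * rows never accumulate inside the PGresult. */
static int
print_first_column(PGresult *res, const PGdataValue *columns,
				   const char **errmsgp, void *param)
{
	(void) errmsgp;
	(void) param;
	if (PQnfields(res) > 0 && columns[0].len >= 0)	/* negative len => NULL */
		printf("%.*s\n", columns[0].len, columns[0].value);
	return 1;
}

int
main(void)
{
	PGconn	   *conn = PQconnectdb("");		/* connection options from environment */

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		return 1;
	}

	/* Install the per-row callback (assumed installer function). */
	PQsetRowProcessor(conn, print_first_column, NULL);

	/* The PGresult now carries status and row description, not the rows. */
	PGresult   *res = PQexec(conn, "SELECT relname FROM pg_class");

	if (PQresultStatus(res) != PGRES_TUPLES_OK)
		fprintf(stderr, "query failed: %s", PQerrorMessage(conn));

	PQclear(res);
	PQfinish(conn);
	return 0;
}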
@@ -218,6 +218,32 @@ pqGetnchar(char *s, size_t len, PGconn *conn)
 	return 0;
 }
 
+/*
+ * pqSkipnchar:
+ *	skip over len bytes in input buffer.
+ *
+ * Note: this is primarily useful for its debug output, which should
+ * be exactly the same as for pqGetnchar.  We assume the data in question
+ * will actually be used, but just isn't getting copied anywhere as yet.
+ */
+int
+pqSkipnchar(size_t len, PGconn *conn)
+{
+	if (len > (size_t) (conn->inEnd - conn->inCursor))
+		return EOF;
+
+	if (conn->Pfdebug)
+	{
+		fprintf(conn->Pfdebug, "From backend (%lu)> ", (unsigned long) len);
+		fputnbytes(conn->Pfdebug, conn->inBuffer + conn->inCursor, len);
+		fprintf(conn->Pfdebug, "\n");
+	}
+
+	conn->inCursor += len;
+
+	return 0;
+}
+
 /*
  * pqPutnchar:
  *	write exactly len bytes to the current message
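As the comment in the hunk notes, pqSkipnchar mirrors pqGetnchar except that nothing is copied out of the input buffer; both fail with EOF when fewer than len bytes are buffered, and both advance inCursor on success. A hypothetical caller inside the protocol reader could pick between the two as sketched here; read_or_skip_field, want_value, and vlen are invented for illustration, while pqGetnchar and pqSkipnchar are the functions shown in this file.

/* Hypothetical helper (not from this commit): consume a field of vlen bytes
 * from conn's input buffer, either copying it into buf via pqGetnchar or
 * merely advancing the cursor past it via pqSkipnchar. */
static int
read_or_skip_field(PGconn *conn, char *buf, size_t vlen, int want_value)
{
	if (want_value)
		return pqGetnchar(buf, vlen, conn);
	return pqSkipnchar(vlen, conn);
}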