
Solve the problem of OID collisions by probing for duplicate OIDs

whenever we generate a new OID.  This prevents occasional duplicate-OID
errors that can otherwise occur once the OID counter has wrapped around.
Duplicate relfilenode values are also checked for when creating new
physical files.  Per my recent proposal.
Author: Tom Lane
Date: 2005-08-12 01:36:05 +00:00
parent 9e4a2de844
commit 721e53785d
20 changed files with 416 additions and 268 deletions
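The diff below replaces an ad-hoc retry loop with the index-probing allocator this commit introduces: draw a candidate from the OID counter, look it up in a unique index on the target catalog, and retry on a hit. As a rough illustration of that idea, a loop of the shape implied by the GetNewOidWithIndex(lo_heap_r, lo_index_r) call in the diff might look like the sketch below; the dirty-snapshot handling, variable names, and omitted error checks are assumptions, not the committed source.

```c
/*
 * Sketch only: one plausible shape for an index-probing OID allocator
 * along the lines of GetNewOidWithIndex(), as called in the diff below.
 * Assumes the usual backend headers for catalog index scans
 * (access/genam.h, access/sysattr.h, utils/fmgroids.h, ...).
 */
Oid
GetNewOidWithIndex_sketch(Relation relation, Relation indexrel)
{
	Oid			newOid;
	SnapshotData SnapshotDirty;
	SysScanDesc scan;
	ScanKeyData key;
	bool		collides;

	InitDirtySnapshot(SnapshotDirty);

	do
	{
		/* draw the next candidate from the cluster-wide OID counter */
		newOid = GetNewObjectId();

		/* probe the OID index; any visible or in-progress match means "taken" */
		ScanKeyInit(&key,
					ObjectIdAttributeNumber,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(newOid));

		scan = systable_beginscan(relation, RelationGetRelid(indexrel), true,
								  &SnapshotDirty, 1, &key);
		collides = HeapTupleIsValid(systable_getnext(scan));
		systable_endscan(scan);
	} while (collides);

	return newOid;
}
```

The probe uses the existing unique index, so no new catalog structure is needed; once the OID counter has wrapped, the loop simply skips over values that are still in use.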


@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.111 2005/06/13 02:26:49 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.112 2005/08/12 01:35:58 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -177,14 +177,14 @@ inv_create(Oid lobjId)
 {
 	/*
 	 * Allocate an OID to be the LO's identifier, unless we were told
-	 * what to use.  In event of collision with an existing ID, loop
-	 * to find a free one.
+	 * what to use.  We can use the index on pg_largeobject for checking
+	 * OID uniqueness, even though it has additional columns besides OID.
 	 */
 	if (!OidIsValid(lobjId))
 	{
-		do {
-			lobjId = newoid();
-		} while (LargeObjectExists(lobjId));
+		open_lo_relation();
+
+		lobjId = GetNewOidWithIndex(lo_heap_r, lo_index_r);
 	}
 
 	/*
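The commit message also mentions checking for duplicate relfilenode values when creating new physical files; that code is not part of this hunk. As a hedged sketch of the idea only (relfilenode_path() is a hypothetical stand-in for the backend's path-building code, and the committed function likely differs in detail), the probe amounts to retrying until no file with the candidate name already exists on disk:

```c
#include <stdbool.h>
#include <unistd.h>

typedef unsigned int Oid;	/* matches the backend's Oid typedef */

extern Oid	GetNewObjectId(void);				/* backend OID counter */
extern char *relfilenode_path(Oid relfilenode);	/* hypothetical helper */

/*
 * Sketch only: keep drawing candidate relfilenode values until no physical
 * file of that name exists.  String lifetime and error handling are ignored
 * for brevity.
 */
static Oid
choose_new_relfilenode_sketch(void)
{
	Oid			rnode;
	bool		collides;

	do
	{
		rnode = GetNewObjectId();

		/* a file already present under this name means the value is taken */
		collides = (access(relfilenode_path(rnode), F_OK) == 0);
	} while (collides);

	return rnode;
}
```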