mirror of https://github.com/postgres/postgres.git
pgindent run for 9.4
This includes removing tabs after periods in C comments, which was applied to back branches, so this change should not affect backpatching.
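For context, pgindent is PostgreSQL's source-formatting tool: it aligns variable names in declarations, rewraps block comments to the project's line width, and (as the message above notes) replaces tabs after sentence-ending periods in C comments with two spaces. A minimal sketch of the kind of whitespace-only change involved, using hypothetical code rather than a hunk from this commit:

    /* Items seen so far.	Reset at postmaster startup. */
    int count;

becomes

    /* Items seen so far.  Reset at postmaster startup. */
    int         count;

The hunks below are therefore almost entirely whitespace and comment-reflow churn; the run changes no behavior.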
src/backend/storage/ipc/dsm.c

@@ -59,29 +59,29 @@
 /* Backend-local tracking for on-detach callbacks. */
 typedef struct dsm_segment_detach_callback
 {
-    on_dsm_detach_callback function;
-    Datum arg;
-    slist_node node;
+    on_dsm_detach_callback function;
+    Datum       arg;
+    slist_node  node;
 } dsm_segment_detach_callback;
 
 /* Backend-local state for a dynamic shared memory segment. */
 struct dsm_segment
 {
-    dlist_node node;            /* List link in dsm_segment_list. */
-    ResourceOwner resowner;     /* Resource owner. */
-    dsm_handle handle;          /* Segment name. */
-    uint32 control_slot;        /* Slot in control segment. */
-    void *impl_private;         /* Implementation-specific private data. */
-    void *mapped_address;       /* Mapping address, or NULL if unmapped. */
-    Size mapped_size;           /* Size of our mapping. */
-    slist_head on_detach;       /* On-detach callbacks. */
+    dlist_node  node;           /* List link in dsm_segment_list. */
+    ResourceOwner resowner;     /* Resource owner. */
+    dsm_handle  handle;         /* Segment name. */
+    uint32      control_slot;   /* Slot in control segment. */
+    void       *impl_private;   /* Implementation-specific private data. */
+    void       *mapped_address; /* Mapping address, or NULL if unmapped. */
+    Size        mapped_size;    /* Size of our mapping. */
+    slist_head  on_detach;      /* On-detach callbacks. */
 };
 
 /* Shared-memory state for a dynamic shared memory segment. */
 typedef struct dsm_control_item
 {
     dsm_handle  handle;
-    uint32 refcnt;              /* 2+ = active, 1 = moribund, 0 = gone */
+    uint32      refcnt;         /* 2+ = active, 1 = moribund, 0 = gone */
 } dsm_control_item;
 
 /* Layout of the dynamic shared memory control segment. */
@@ -90,7 +90,7 @@ typedef struct dsm_control_header
     uint32      magic;
     uint32      nitems;
     uint32      maxitems;
-    dsm_control_item item[FLEXIBLE_ARRAY_MEMBER];
+    dsm_control_item item[FLEXIBLE_ARRAY_MEMBER];
 } dsm_control_header;
 
 static void dsm_cleanup_for_mmap(void);
@@ -132,7 +132,7 @@ static dlist_head dsm_segment_list = DLIST_STATIC_INIT(dsm_segment_list);
 static dsm_handle dsm_control_handle;
 static dsm_control_header *dsm_control;
 static Size dsm_control_mapped_size = 0;
-static void *dsm_control_impl_private = NULL;
+static void *dsm_control_impl_private = NULL;
 
 /*
  * Start up the dynamic shared memory system.
@@ -166,14 +166,14 @@ dsm_postmaster_startup(PGShmemHeader *shim)
     maxitems = PG_DYNSHMEM_FIXED_SLOTS
         + PG_DYNSHMEM_SLOTS_PER_BACKEND * MaxBackends;
     elog(DEBUG2, "dynamic shared memory system will support %u segments",
-        maxitems);
+         maxitems);
     segsize = dsm_control_bytes_needed(maxitems);
 
     /*
-     * Loop until we find an unused identifier for the new control segment.
-     * We sometimes use 0 as a sentinel value indicating that no control
-     * segment is known to exist, so avoid using that value for a real
-     * control segment.
+     * Loop until we find an unused identifier for the new control segment. We
+     * sometimes use 0 as a sentinel value indicating that no control segment
+     * is known to exist, so avoid using that value for a real control
+     * segment.
      */
     for (;;)
     {
@@ -224,17 +224,17 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
 
     /*
     * Try to attach the segment.  If this fails, it probably just means that
-    * the operating system has been rebooted and the segment no longer exists,
-    * or an unrelated proces has used the same shm ID.  So just fall out
-    * quietly.
+    * the operating system has been rebooted and the segment no longer
+    * exists, or an unrelated proces has used the same shm ID.  So just fall
+    * out quietly.
     */
     if (!dsm_impl_op(DSM_OP_ATTACH, old_control_handle, 0, &impl_private,
                      &mapped_address, &mapped_size, DEBUG1))
         return;
 
     /*
-     * We've managed to reattach it, but the contents might not be sane.
-     * If they aren't, we disregard the segment after all.
+     * We've managed to reattach it, but the contents might not be sane.  If
+     * they aren't, we disregard the segment after all.
      */
     old_control = (dsm_control_header *) mapped_address;
     if (!dsm_control_segment_sane(old_control, mapped_size))
@@ -245,14 +245,14 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
     }
 
     /*
-     * OK, the control segment looks basically valid, so we can get use
-     * it to get a list of segments that need to be removed.
+     * OK, the control segment looks basically valid, so we can get use it to
+     * get a list of segments that need to be removed.
      */
     nitems = old_control->nitems;
     for (i = 0; i < nitems; ++i)
     {
-        dsm_handle handle;
-        uint32 refcnt;
+        dsm_handle  handle;
+        uint32      refcnt;
 
         /* If the reference count is 0, the slot is actually unused. */
         refcnt = old_control->item[i].refcnt;
@@ -262,7 +262,7 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
         /* Log debugging information. */
         handle = old_control->item[i].handle;
         elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u (reference count %u)",
-            handle, refcnt);
+             handle, refcnt);
 
         /* Destroy the referenced segment. */
         dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
@@ -290,7 +290,7 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
 static void
 dsm_cleanup_for_mmap(void)
 {
-    DIR *dir;
+    DIR        *dir;
     struct dirent *dent;
 
     /* Open the directory; can't use AllocateDir in postmaster. */
@@ -298,15 +298,16 @@ dsm_cleanup_for_mmap(void)
         ereport(ERROR,
                 (errcode_for_file_access(),
                  errmsg("could not open directory \"%s\": %m",
-                       PG_DYNSHMEM_DIR)));
+                        PG_DYNSHMEM_DIR)));
 
     /* Scan for something with a name of the correct format. */
     while ((dent = ReadDir(dir, PG_DYNSHMEM_DIR)) != NULL)
     {
         if (strncmp(dent->d_name, PG_DYNSHMEM_MMAP_FILE_PREFIX,
-                   strlen(PG_DYNSHMEM_MMAP_FILE_PREFIX)) == 0)
+                    strlen(PG_DYNSHMEM_MMAP_FILE_PREFIX)) == 0)
         {
-            char buf[MAXPGPATH];
+            char        buf[MAXPGPATH];
+
             snprintf(buf, MAXPGPATH, PG_DYNSHMEM_DIR "/%s", dent->d_name);
 
             elog(DEBUG2, "removing file \"%s\"", buf);
@@ -314,7 +315,7 @@ dsm_cleanup_for_mmap(void)
             /* We found a matching file; so remove it. */
             if (unlink(buf) != 0)
             {
-                int save_errno;
+                int         save_errno;
 
                 save_errno = errno;
                 closedir(dir);
@@ -352,8 +353,8 @@ dsm_postmaster_shutdown(int code, Datum arg)
     * If some other backend exited uncleanly, it might have corrupted the
     * control segment while it was dying.  In that case, we warn and ignore
     * the contents of the control segment.  This may end up leaving behind
-    * stray shared memory segments, but there's not much we can do about
-    * that if the metadata is gone.
+    * stray shared memory segments, but there's not much we can do about that
+    * if the metadata is gone.
     */
     nitems = dsm_control->nitems;
     if (!dsm_control_segment_sane(dsm_control, dsm_control_mapped_size))
@@ -375,7 +376,7 @@ dsm_postmaster_shutdown(int code, Datum arg)
         /* Log debugging information. */
         handle = dsm_control->item[i].handle;
         elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u",
-            handle);
+             handle);
 
         /* Destroy the segment. */
         dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private,
@@ -427,7 +428,7 @@ dsm_backend_startup(void)
                         &dsm_control_mapped_size, WARNING);
             ereport(FATAL,
                     (errcode(ERRCODE_INTERNAL_ERROR),
-                     errmsg("dynamic shared memory control segment is not valid")));
+             errmsg("dynamic shared memory control segment is not valid")));
         }
     }
 #endif
@@ -455,9 +456,9 @@ dsm_set_control_handle(dsm_handle h)
 dsm_segment *
 dsm_create(Size size)
 {
-    dsm_segment *seg = dsm_create_descriptor();
-    uint32 i;
-    uint32 nitems;
+    dsm_segment *seg = dsm_create_descriptor();
+    uint32      i;
+    uint32      nitems;
 
     /* Unsafe in postmaster (and pointless in a stand-alone backend). */
     Assert(IsUnderPostmaster);
@@ -524,10 +525,10 @@ dsm_create(Size size)
 dsm_segment *
 dsm_attach(dsm_handle h)
 {
-    dsm_segment *seg;
-    dlist_iter iter;
-    uint32 i;
-    uint32 nitems;
+    dsm_segment *seg;
+    dlist_iter  iter;
+    uint32      i;
+    uint32      nitems;
 
     /* Unsafe in postmaster (and pointless in a stand-alone backend). */
     Assert(IsUnderPostmaster);
@@ -537,13 +538,13 @@ dsm_attach(dsm_handle h)
 
     /*
     * Since this is just a debugging cross-check, we could leave it out
-    * altogether, or include it only in assert-enabled builds.  But since
-    * the list of attached segments should normally be very short, let's
-    * include it always for right now.
+    * altogether, or include it only in assert-enabled builds.  But since the
+    * list of attached segments should normally be very short, let's include
+    * it always for right now.
     *
-    * If you're hitting this error, you probably want to attempt to
-    * find an existing mapping via dsm_find_mapping() before calling
-    * dsm_attach() to create a new one.
+    * If you're hitting this error, you probably want to attempt to find an
+    * existing mapping via dsm_find_mapping() before calling dsm_attach() to
+    * create a new one.
     */
     dlist_foreach(iter, &dsm_segment_list)
     {
@@ -584,10 +585,10 @@ dsm_attach(dsm_handle h)
     LWLockRelease(DynamicSharedMemoryControlLock);
 
     /*
-     * If we didn't find the handle we're looking for in the control
-     * segment, it probably means that everyone else who had it mapped,
-     * including the original creator, died before we got to this point.
-     * It's up to the caller to decide what to do about that.
+     * If we didn't find the handle we're looking for in the control segment,
+     * it probably means that everyone else who had it mapped, including the
+     * original creator, died before we got to this point.  It's up to the
+     * caller to decide what to do about that.
      */
     if (seg->control_slot == INVALID_CONTROL_SLOT)
     {
@@ -612,7 +613,7 @@ dsm_backend_shutdown(void)
 {
     while (!dlist_is_empty(&dsm_segment_list))
     {
-        dsm_segment *seg;
+        dsm_segment *seg;
 
         seg = dlist_head_element(dsm_segment, node, &dsm_segment_list);
         dsm_detach(seg);
@@ -628,11 +629,11 @@ dsm_backend_shutdown(void)
 void
 dsm_detach_all(void)
 {
-    void *control_address = dsm_control;
+    void       *control_address = dsm_control;
 
     while (!dlist_is_empty(&dsm_segment_list))
     {
-        dsm_segment *seg;
+        dsm_segment *seg;
 
         seg = dlist_head_element(dsm_segment, node, &dsm_segment_list);
         dsm_detach(seg);
@@ -697,7 +698,7 @@ dsm_detach(dsm_segment *seg)
     {
         slist_node *node;
         dsm_segment_detach_callback *cb;
-        on_dsm_detach_callback function;
+        on_dsm_detach_callback function;
         Datum       arg;
 
         node = slist_pop_head_node(&seg->on_detach);
@@ -710,13 +711,12 @@ dsm_detach(dsm_segment *seg)
     }
 
     /*
-     * Try to remove the mapping, if one exists.  Normally, there will be,
-     * but maybe not, if we failed partway through a create or attach
-     * operation.  We remove the mapping before decrementing the reference
-     * count so that the process that sees a zero reference count can be
-     * certain that no remaining mappings exist.  Even if this fails, we
-     * pretend that it works, because retrying is likely to fail in the
-     * same way.
+     * Try to remove the mapping, if one exists.  Normally, there will be, but
+     * maybe not, if we failed partway through a create or attach operation.
+     * We remove the mapping before decrementing the reference count so that
+     * the process that sees a zero reference count can be certain that no
+     * remaining mappings exist.  Even if this fails, we pretend that it
+     * works, because retrying is likely to fail in the same way.
      */
     if (seg->mapped_address != NULL)
     {
@@ -730,8 +730,8 @@ dsm_detach(dsm_segment *seg)
     /* Reduce reference count, if we previously increased it. */
     if (seg->control_slot != INVALID_CONTROL_SLOT)
     {
-        uint32 refcnt;
-        uint32 control_slot = seg->control_slot;
+        uint32      refcnt;
+        uint32      control_slot = seg->control_slot;
 
         LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE);
         Assert(dsm_control->item[control_slot].handle == seg->handle);
@@ -744,15 +744,15 @@ dsm_detach(dsm_segment *seg)
         if (refcnt == 1)
         {
             /*
-             * If we fail to destroy the segment here, or are killed before
-             * we finish doing so, the reference count will remain at 1, which
+             * If we fail to destroy the segment here, or are killed before we
+             * finish doing so, the reference count will remain at 1, which
             * will mean that nobody else can attach to the segment.  At
             * postmaster shutdown time, or when a new postmaster is started
             * after a hard kill, another attempt will be made to remove the
             * segment.
             *
-            * The main case we're worried about here is being killed by
-            * a signal before we can finish removing the segment.  In that
+            * The main case we're worried about here is being killed by a
+            * signal before we can finish removing the segment.  In that
             * case, it's important to be sure that the segment still gets
             * removed.  If we actually fail to remove the segment for some
             * other reason, the postmaster may not have any better luck than
@@ -827,8 +827,8 @@ dsm_keep_segment(dsm_segment *seg)
 dsm_segment *
 dsm_find_mapping(dsm_handle h)
 {
-    dlist_iter iter;
-    dsm_segment *seg;
+    dlist_iter  iter;
+    dsm_segment *seg;
 
     dlist_foreach(iter, &dsm_segment_list)
     {
@@ -899,7 +899,7 @@ void
 cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function,
                      Datum arg)
 {
-    slist_mutable_iter iter;
+    slist_mutable_iter iter;
 
     slist_foreach_modify(iter, &seg->on_detach)
     {
@@ -921,7 +921,7 @@ cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function,
 void
 reset_on_dsm_detach(void)
 {
-    dlist_iter iter;
+    dlist_iter  iter;
 
     dlist_foreach(iter, &dsm_segment_list)
     {
@@ -952,7 +952,7 @@ reset_on_dsm_detach(void)
 static dsm_segment *
 dsm_create_descriptor(void)
 {
-    dsm_segment *seg;
+    dsm_segment *seg;
 
     ResourceOwnerEnlargeDSMs(CurrentResourceOwner);
 
@@ -1005,5 +1005,5 @@ static uint64
 dsm_control_bytes_needed(uint32 nitems)
 {
     return offsetof(dsm_control_header, item)
-        + sizeof(dsm_control_item) * (uint64) nitems;
+        +sizeof(dsm_control_item) * (uint64) nitems;
 }
src/backend/storage/ipc/dsm_impl.c

@@ -76,40 +76,40 @@ static bool dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
 #endif
 #ifdef USE_DSM_SYSV
 static bool dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
-              void **impl_private, void **mapped_address,
-              Size *mapped_size, int elevel);
+              void **impl_private, void **mapped_address,
+              Size *mapped_size, int elevel);
 #endif
 #ifdef USE_DSM_WINDOWS
 static bool dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
-                 void **impl_private, void **mapped_address,
-                 Size *mapped_size, int elevel);
+                 void **impl_private, void **mapped_address,
+                 Size *mapped_size, int elevel);
 #endif
 #ifdef USE_DSM_MMAP
 static bool dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
              void **impl_private, void **mapped_address,
              Size *mapped_size, int elevel);
 #endif
-static int errcode_for_dynamic_shared_memory(void);
+static int  errcode_for_dynamic_shared_memory(void);
 
 const struct config_enum_entry dynamic_shared_memory_options[] = {
 #ifdef USE_DSM_POSIX
-    { "posix", DSM_IMPL_POSIX, false},
+    {"posix", DSM_IMPL_POSIX, false},
 #endif
 #ifdef USE_DSM_SYSV
-    { "sysv", DSM_IMPL_SYSV, false},
+    {"sysv", DSM_IMPL_SYSV, false},
 #endif
 #ifdef USE_DSM_WINDOWS
-    { "windows", DSM_IMPL_WINDOWS, false},
+    {"windows", DSM_IMPL_WINDOWS, false},
 #endif
 #ifdef USE_DSM_MMAP
-    { "mmap", DSM_IMPL_MMAP, false},
+    {"mmap", DSM_IMPL_MMAP, false},
 #endif
-    { "none", DSM_IMPL_NONE, false},
+    {"none", DSM_IMPL_NONE, false},
     {NULL, 0, false}
 };
 
 /* Implementation selector. */
-int dynamic_shared_memory_type;
+int         dynamic_shared_memory_type;
 
 /* Size of buffer to be used for zero-filling. */
 #define ZBUFFER_SIZE 8192
@@ -137,20 +137,20 @@ int dynamic_shared_memory_type;
  * segment.
  *
  * Arguments:
- * op: The operation to be performed.
- * handle: The handle of an existing object, or for DSM_OP_CREATE, the
- * a new handle the caller wants created.
- * request_size: For DSM_OP_CREATE, the requested size.  For DSM_OP_RESIZE,
- * the new size.  Otherwise, 0.
- * impl_private: Private, implementation-specific data.  Will be a pointer
- * to NULL for the first operation on a shared memory segment within this
- * backend; thereafter, it will point to the value to which it was set
- * on the previous call.
- * mapped_address: Pointer to start of current mapping; pointer to NULL
- * if none.  Updated with new mapping address.
- * mapped_size: Pointer to size of current mapping; pointer to 0 if none.
- * Updated with new mapped size.
- * elevel: Level at which to log errors.
+ *  op: The operation to be performed.
+ *  handle: The handle of an existing object, or for DSM_OP_CREATE, the
+ *  a new handle the caller wants created.
+ *  request_size: For DSM_OP_CREATE, the requested size.  For DSM_OP_RESIZE,
+ *  the new size.  Otherwise, 0.
+ *  impl_private: Private, implementation-specific data.  Will be a pointer
+ *  to NULL for the first operation on a shared memory segment within this
+ *  backend; thereafter, it will point to the value to which it was set
+ *  on the previous call.
+ *  mapped_address: Pointer to start of current mapping; pointer to NULL
+ *  if none.  Updated with new mapping address.
+ *  mapped_size: Pointer to size of current mapping; pointer to 0 if none.
+ *  Updated with new mapped size.
+ *  elevel: Level at which to log errors.
 *
 * Return value: true on success, false on failure.  When false is returned,
 * a message should first be logged at the specified elevel, except in the
@@ -165,7 +165,7 @@ dsm_impl_op(dsm_op op, dsm_handle handle, Size request_size,
 {
     Assert(op == DSM_OP_CREATE || op == DSM_OP_RESIZE || request_size == 0);
     Assert((op != DSM_OP_CREATE && op != DSM_OP_ATTACH) ||
-          (*mapped_address == NULL && *mapped_size == 0));
+           (*mapped_address == NULL && *mapped_size == 0));
 
     switch (dynamic_shared_memory_type)
     {
@@ -243,10 +243,10 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
               void **impl_private, void **mapped_address, Size *mapped_size,
               int elevel)
 {
-    char name[64];
-    int flags;
-    int fd;
-    char *address;
+    char        name[64];
+    int         flags;
+    int         fd;
+    char       *address;
 
     snprintf(name, 64, "/PostgreSQL.%u", handle);
 
@@ -258,8 +258,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
         {
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
-                     errmsg("could not unmap shared memory segment \"%s\": %m",
-                            name)));
+                  errmsg("could not unmap shared memory segment \"%s\": %m",
+                         name)));
             return false;
         }
         *mapped_address = NULL;
@@ -268,8 +268,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
         {
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
-                     errmsg("could not remove shared memory segment \"%s\": %m",
-                            name)));
+                 errmsg("could not remove shared memory segment \"%s\": %m",
+                        name)));
             return false;
         }
         return true;
@@ -290,7 +290,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
         ereport(elevel,
                 (errcode_for_dynamic_shared_memory(),
                  errmsg("could not open shared memory segment \"%s\": %m",
-                       name)));
+                        name)));
         return false;
     }
 
@@ -304,7 +304,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
 
         if (fstat(fd, &st) != 0)
         {
-            int save_errno;
+            int         save_errno;
 
             /* Back out what's already been done. */
             save_errno = errno;
@@ -314,14 +314,14 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
                      errmsg("could not stat shared memory segment \"%s\": %m",
-                           name)));
+                            name)));
             return false;
         }
         request_size = st.st_size;
     }
     else if (*mapped_size != request_size && ftruncate(fd, request_size))
     {
-        int save_errno;
+        int         save_errno;
 
         /* Back out what's already been done. */
         save_errno = errno;
@@ -332,8 +332,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
 
         ereport(elevel,
                 (errcode_for_dynamic_shared_memory(),
-                 errmsg("could not resize shared memory segment %s to %zu bytes: %m",
-                        name, request_size)));
+          errmsg("could not resize shared memory segment %s to %zu bytes: %m",
+                 name, request_size)));
         return false;
     }
 
@@ -347,7 +347,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
         return true;
     if (munmap(*mapped_address, *mapped_size) != 0)
     {
-        int save_errno;
+        int         save_errno;
 
         /* Back out what's already been done. */
         save_errno = errno;
@@ -358,8 +358,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
 
         ereport(elevel,
                 (errcode_for_dynamic_shared_memory(),
-                 errmsg("could not unmap shared memory segment \"%s\": %m",
-                        name)));
+              errmsg("could not unmap shared memory segment \"%s\": %m",
+                     name)));
         return false;
     }
     *mapped_address = NULL;
@@ -367,11 +367,11 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
     }
 
     /* Map it. */
-    address = mmap(NULL, request_size, PROT_READ|PROT_WRITE,
-                   MAP_SHARED|MAP_HASSEMAPHORE, fd, 0);
+    address = mmap(NULL, request_size, PROT_READ | PROT_WRITE,
+                   MAP_SHARED | MAP_HASSEMAPHORE, fd, 0);
     if (address == MAP_FAILED)
     {
-        int save_errno;
+        int         save_errno;
 
         /* Back out what's already been done. */
         save_errno = errno;
@@ -409,11 +409,11 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
              void **impl_private, void **mapped_address, Size *mapped_size,
              int elevel)
 {
-    key_t key;
-    int ident;
-    char *address;
-    char name[64];
-    int *ident_cache;
+    key_t       key;
+    int         ident;
+    char       *address;
+    char        name[64];
+    int        *ident_cache;
 
     /* Resize is not supported for System V shared memory. */
     if (op == DSM_OP_RESIZE)
@@ -427,38 +427,38 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
         return true;
 
     /*
-     * POSIX shared memory and mmap-based shared memory identify segments
-     * with names.  To avoid needless error message variation, we use the
-     * handle as the name.
+     * POSIX shared memory and mmap-based shared memory identify segments with
+     * names.  To avoid needless error message variation, we use the handle as
+     * the name.
      */
     snprintf(name, 64, "%u", handle);
 
     /*
-     * The System V shared memory namespace is very restricted; names are
-     * of type key_t, which is expected to be some sort of integer data type,
-     * but not necessarily the same one as dsm_handle.  Since we use
-     * dsm_handle to identify shared memory segments across processes, this
-     * might seem like a problem, but it's really not.  If dsm_handle is
-     * bigger than key_t, the cast below might truncate away some bits from
-     * the handle the user-provided, but it'll truncate exactly the same bits
-     * away in exactly the same fashion every time we use that handle, which
-     * is all that really matters.  Conversely, if dsm_handle is smaller than
-     * key_t, we won't use the full range of available key space, but that's
-     * no big deal either.
+     * The System V shared memory namespace is very restricted; names are of
+     * type key_t, which is expected to be some sort of integer data type, but
+     * not necessarily the same one as dsm_handle.  Since we use dsm_handle to
+     * identify shared memory segments across processes, this might seem like
+     * a problem, but it's really not.  If dsm_handle is bigger than key_t,
+     * the cast below might truncate away some bits from the handle the
+     * user-provided, but it'll truncate exactly the same bits away in exactly
+     * the same fashion every time we use that handle, which is all that
+     * really matters.  Conversely, if dsm_handle is smaller than key_t, we
+     * won't use the full range of available key space, but that's no big deal
+     * either.
     *
-    * We do make sure that the key isn't negative, because that might not
-    * be portable.
+    * We do make sure that the key isn't negative, because that might not be
+    * portable.
     */
     key = (key_t) handle;
-    if (key < 1)                /* avoid compiler warning if type is unsigned */
+    if (key < 1)        /* avoid compiler warning if type is unsigned */
         key = -key;
 
     /*
     * There's one special key, IPC_PRIVATE, which can't be used.  If we end
-    * up with that value by chance during a create operation, just pretend
-    * it already exists, so that caller will retry.  If we run into it
-    * anywhere else, the caller has passed a handle that doesn't correspond
-    * to anything we ever created, which should not happen.
+    * up with that value by chance during a create operation, just pretend it
+    * already exists, so that caller will retry.  If we run into it anywhere
+    * else, the caller has passed a handle that doesn't correspond to
+    * anything we ever created, which should not happen.
     */
     if (key == IPC_PRIVATE)
     {
@@ -469,9 +469,9 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
     }
 
     /*
-     * Before we can do anything with a shared memory segment, we have to
-     * map the shared memory key to a shared memory identifier using shmget().
-     * To avoid repeated lookups, we store the key using impl_private.
+     * Before we can do anything with a shared memory segment, we have to map
+     * the shared memory key to a shared memory identifier using shmget().  To
+     * avoid repeated lookups, we store the key using impl_private.
     */
     if (*impl_private != NULL)
     {
@@ -480,8 +480,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
     }
     else
     {
-        int flags = IPCProtection;
-        size_t segsize;
+        int         flags = IPCProtection;
+        size_t      segsize;
 
         /*
         * Allocate the memory BEFORE acquiring the resource, so that we don't
@@ -506,7 +506,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
         {
             if (errno != EEXIST)
             {
-                int save_errno = errno;
+                int         save_errno = errno;
+
                 pfree(ident_cache);
                 errno = save_errno;
                 ereport(elevel,
@@ -529,8 +530,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
         {
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
-                     errmsg("could not unmap shared memory segment \"%s\": %m",
-                            name)));
+                  errmsg("could not unmap shared memory segment \"%s\": %m",
+                         name)));
             return false;
         }
         *mapped_address = NULL;
@@ -539,8 +540,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
         {
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
-                     errmsg("could not remove shared memory segment \"%s\": %m",
-                            name)));
+                 errmsg("could not remove shared memory segment \"%s\": %m",
+                        name)));
             return false;
         }
         return true;
@@ -553,7 +554,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
 
         if (shmctl(ident, IPC_STAT, &shm) != 0)
         {
-            int save_errno;
+            int         save_errno;
 
             /* Back out what's already been done. */
             save_errno = errno;
@@ -564,7 +565,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
                      errmsg("could not stat shared memory segment \"%s\": %m",
-                           name)));
+                            name)));
             return false;
         }
         request_size = shm.shm_segsz;
@@ -574,7 +575,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size,
     address = shmat(ident, NULL, PG_SHMAT_FLAGS);
     if (address == (void *) -1)
     {
-        int save_errno;
+        int         save_errno;
 
         /* Back out what's already been done. */
         save_errno = errno;
@@ -614,9 +615,9 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
                 void **impl_private, void **mapped_address,
                 Size *mapped_size, int elevel)
 {
-    char *address;
+    char       *address;
     HANDLE      hmap;
-    char name[64];
+    char        name[64];
     MEMORY_BASIC_INFORMATION info;
 
     /* Resize is not supported for Windows shared memory. */
@@ -631,12 +632,12 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
         return true;
 
     /*
-     * Storing the shared memory segment in the Global\ namespace, can
-     * allow any process running in any session to access that file
-     * mapping object provided that the caller has the required access rights.
-     * But to avoid issues faced in main shared memory, we are using the naming
-     * convention similar to main shared memory.  We can change here once
-     * issue mentioned in GetSharedMemName is resolved.
+     * Storing the shared memory segment in the Global\ namespace, can allow
+     * any process running in any session to access that file mapping object
+     * provided that the caller has the required access rights.  But to avoid
+     * issues faced in main shared memory, we are using the naming convention
+     * similar to main shared memory.  We can change here once issue mentioned
+     * in GetSharedMemName is resolved.
     */
     snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle);
 
@@ -652,8 +653,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
             _dosmaperr(GetLastError());
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
-                     errmsg("could not unmap shared memory segment \"%s\": %m",
-                            name)));
+                  errmsg("could not unmap shared memory segment \"%s\": %m",
+                         name)));
             return false;
         }
         if (*impl_private != NULL
@@ -662,8 +663,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
             _dosmaperr(GetLastError());
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
-                     errmsg("could not remove shared memory segment \"%s\": %m",
-                            name)));
+                 errmsg("could not remove shared memory segment \"%s\": %m",
+                        name)));
             return false;
         }
 
@@ -688,9 +689,9 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
         size_low = (DWORD) request_size;
 
         hmap = CreateFileMapping(INVALID_HANDLE_VALUE,  /* Use the pagefile */
-                                 NULL,          /* Default security attrs */
-                                 PAGE_READWRITE, /* Memory is read/write */
-                                 size_high,     /* Upper 32 bits of size */
+                                 NULL,  /* Default security attrs */
+                                 PAGE_READWRITE,        /* Memory is read/write */
+                                 size_high,     /* Upper 32 bits of size */
                                  size_low,      /* Lower 32 bits of size */
                                  name);
         if (!hmap)
@@ -698,8 +699,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
             _dosmaperr(GetLastError());
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
-                     errmsg("could not create shared memory segment \"%s\": %m",
-                            name)));
+                 errmsg("could not create shared memory segment \"%s\": %m",
+                        name)));
             return false;
         }
         _dosmaperr(GetLastError());
@@ -718,8 +719,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
     else
     {
         hmap = OpenFileMapping(FILE_MAP_WRITE | FILE_MAP_READ,
-                               FALSE,   /* do not inherit the name */
-                               name);   /* name of mapping object */
+                               FALSE,   /* do not inherit the name */
+                               name);   /* name of mapping object */
         if (!hmap)
         {
             _dosmaperr(GetLastError());
@@ -736,7 +737,7 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
                             0, 0, 0);
     if (!address)
     {
-        int save_errno;
+        int         save_errno;
 
         _dosmaperr(GetLastError());
         /* Back out what's already been done. */
@@ -752,14 +753,14 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
     }
 
     /*
-     * VirtualQuery gives size in page_size units, which is 4K for Windows.
-     * We need size only when we are attaching, but it's better to get the
-     * size when creating new segment to keep size consistent both for
+     * VirtualQuery gives size in page_size units, which is 4K for Windows. We
+     * need size only when we are attaching, but it's better to get the size
+     * when creating new segment to keep size consistent both for
     * DSM_OP_CREATE and DSM_OP_ATTACH.
     */
     if (VirtualQuery(address, &info, sizeof(info)) == 0)
     {
-        int save_errno;
+        int         save_errno;
 
         _dosmaperr(GetLastError());
         /* Back out what's already been done. */
@@ -770,8 +771,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size,
 
         ereport(elevel,
                 (errcode_for_dynamic_shared_memory(),
-                 errmsg("could not stat shared memory segment \"%s\": %m",
-                        name)));
+               errmsg("could not stat shared memory segment \"%s\": %m",
+                      name)));
         return false;
     }
 
@@ -799,13 +800,13 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
              void **impl_private, void **mapped_address, Size *mapped_size,
              int elevel)
 {
-    char name[64];
-    int flags;
-    int fd;
-    char *address;
+    char        name[64];
+    int         flags;
+    int         fd;
+    char       *address;
 
     snprintf(name, 64, PG_DYNSHMEM_DIR "/" PG_DYNSHMEM_MMAP_FILE_PREFIX "%u",
-            handle);
+             handle);
 
     /* Handle teardown cases. */
     if (op == DSM_OP_DETACH || op == DSM_OP_DESTROY)
@@ -815,8 +816,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
         {
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
-                     errmsg("could not unmap shared memory segment \"%s\": %m",
-                            name)));
+                  errmsg("could not unmap shared memory segment \"%s\": %m",
+                         name)));
             return false;
         }
         *mapped_address = NULL;
@@ -825,8 +826,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
         {
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
-                     errmsg("could not remove shared memory segment \"%s\": %m",
-                            name)));
+                 errmsg("could not remove shared memory segment \"%s\": %m",
+                        name)));
             return false;
         }
         return true;
@@ -840,7 +841,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
         ereport(elevel,
                 (errcode_for_dynamic_shared_memory(),
                  errmsg("could not open shared memory segment \"%s\": %m",
-                       name)));
+                        name)));
         return false;
     }
 
@@ -854,7 +855,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
 
         if (fstat(fd, &st) != 0)
         {
-            int save_errno;
+            int         save_errno;
 
             /* Back out what's already been done. */
             save_errno = errno;
@@ -864,14 +865,14 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
                      errmsg("could not stat shared memory segment \"%s\": %m",
-                           name)));
+                            name)));
             return false;
         }
         request_size = st.st_size;
     }
     else if (*mapped_size > request_size && ftruncate(fd, request_size))
     {
-        int save_errno;
+        int         save_errno;
 
         /* Back out what's already been done. */
         save_errno = errno;
@@ -882,8 +883,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
 
         ereport(elevel,
                 (errcode_for_dynamic_shared_memory(),
-                 errmsg("could not resize shared memory segment %s to %zu bytes: %m",
-                        name, request_size)));
+          errmsg("could not resize shared memory segment %s to %zu bytes: %m",
+                 name, request_size)));
         return false;
     }
     else if (*mapped_size < request_size)
@@ -891,23 +892,23 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
         /*
         * Allocate a buffer full of zeros.
         *
-        * Note: palloc zbuffer, instead of just using a local char array,
-        * to ensure it is reasonably well-aligned; this may save a few
-        * cycles transferring data to the kernel.
+        * Note: palloc zbuffer, instead of just using a local char array, to
+        * ensure it is reasonably well-aligned; this may save a few cycles
+        * transferring data to the kernel.
         */
-        char *zbuffer = (char *) palloc0(ZBUFFER_SIZE);
-        uint32 remaining = request_size;
-        bool success = true;
+        char       *zbuffer = (char *) palloc0(ZBUFFER_SIZE);
+        uint32      remaining = request_size;
+        bool        success = true;
 
         /*
-         * Zero-fill the file.  We have to do this the hard way to ensure
-         * that all the file space has really been allocated, so that we
-         * don't later seg fault when accessing the memory mapping.  This
-         * is pretty pessimal.
+         * Zero-fill the file.  We have to do this the hard way to ensure that
+         * all the file space has really been allocated, so that we don't
+         * later seg fault when accessing the memory mapping.  This is pretty
+         * pessimal.
         */
        while (success && remaining > 0)
        {
-            Size goal = remaining;
+            Size        goal = remaining;
 
             if (goal > ZBUFFER_SIZE)
                 goal = ZBUFFER_SIZE;
@@ -919,7 +920,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
 
         if (!success)
         {
-            int save_errno;
+            int         save_errno;
 
             /* Back out what's already been done. */
             save_errno = errno;
@@ -931,7 +932,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
             ereport(elevel,
                     (errcode_for_dynamic_shared_memory(),
                      errmsg("could not resize shared memory segment %s to %zu bytes: %m",
-                           name, request_size)));
+                            name, request_size)));
             return false;
         }
     }
@@ -946,7 +947,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
         return true;
     if (munmap(*mapped_address, *mapped_size) != 0)
     {
-        int save_errno;
+        int         save_errno;
 
         /* Back out what's already been done. */
         save_errno = errno;
@@ -957,8 +958,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
 
         ereport(elevel,
                 (errcode_for_dynamic_shared_memory(),
-                 errmsg("could not unmap shared memory segment \"%s\": %m",
-                        name)));
+              errmsg("could not unmap shared memory segment \"%s\": %m",
+                     name)));
         return false;
     }
     *mapped_address = NULL;
@@ -966,11 +967,11 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
     }
 
     /* Map it. */
-    address = mmap(NULL, request_size, PROT_READ|PROT_WRITE,
-                   MAP_SHARED|MAP_HASSEMAPHORE, fd, 0);
+    address = mmap(NULL, request_size, PROT_READ | PROT_WRITE,
+                   MAP_SHARED | MAP_HASSEMAPHORE, fd, 0);
     if (address == MAP_FAILED)
     {
-        int save_errno;
+        int         save_errno;
 
         /* Back out what's already been done. */
         save_errno = errno;
@@ -1009,24 +1010,24 @@ dsm_impl_keep_segment(dsm_handle handle, void *impl_private)
     {
 #ifdef USE_DSM_WINDOWS
         case DSM_IMPL_WINDOWS:
-        {
-            HANDLE hmap;
-
-            if (!DuplicateHandle(GetCurrentProcess(), impl_private,
-                                 PostmasterHandle, &hmap, 0, FALSE,
-                                 DUPLICATE_SAME_ACCESS))
-            {
-                char name[64];
-
-                snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle);
-                _dosmaperr(GetLastError());
-                ereport(ERROR,
-                        (errcode_for_dynamic_shared_memory(),
-                         errmsg("could not duplicate handle for \"%s\": %m",
-                                name)));
-            }
-            break;
-        }
+            {
+                HANDLE      hmap;
+
+                if (!DuplicateHandle(GetCurrentProcess(), impl_private,
+                                     PostmasterHandle, &hmap, 0, FALSE,
+                                     DUPLICATE_SAME_ACCESS))
+                {
+                    char        name[64];
+
+                    snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle);
+                    _dosmaperr(GetLastError());
+                    ereport(ERROR,
+                            (errcode_for_dynamic_shared_memory(),
+                       errmsg("could not duplicate handle for \"%s\": %m",
+                              name)));
+                }
+                break;
+            }
 #endif
         default:
             break;
src/backend/storage/ipc/ipc.c

@@ -4,7 +4,7 @@
 * POSTGRES inter-process communication definitions.
 *
 * This file is misnamed, as it no longer has much of anything directly
- * to do with IPC.	The functionality here is concerned with managing
+ * to do with IPC.  The functionality here is concerned with managing
 * exit-time cleanup for either a postmaster or a backend.
 *
 *
@@ -90,7 +90,7 @@ static int on_proc_exit_index,
 *      -cim 2/6/90
 *
 *      Unfortunately, we can't really guarantee that add-on code
- *      obeys the rule of not calling exit() directly.	So, while
+ *      obeys the rule of not calling exit() directly.  So, while
 *      this is the preferred way out of the system, we also register
 *      an atexit callback that will make sure cleanup happens.
 * ----------------------------------------------------------------
@@ -109,7 +109,7 @@ proc_exit(int code)
     * fixed file name, each backend will overwrite earlier profiles. To
     * fix that, we create a separate subdirectory for each backend
     * (./gprof/pid) and 'cd' to that subdirectory before we exit() - that
-    * forces mcleanup() to write each profile into its own directory.	We
+    * forces mcleanup() to write each profile into its own directory.  We
     * end up with something like: $PGDATA/gprof/8829/gmon.out
     * $PGDATA/gprof/8845/gmon.out ...
     *
@@ -219,16 +219,16 @@ shmem_exit(int code)
     /*
     * Call before_shmem_exit callbacks.
     *
-    * These should be things that need most of the system to still be
-    * up and working, such as cleanup of temp relations, which requires
-    * catalog access; or things that need to be completed because later
-    * cleanup steps depend on them, such as releasing lwlocks.
+    * These should be things that need most of the system to still be up and
+    * working, such as cleanup of temp relations, which requires catalog
+    * access; or things that need to be completed because later cleanup steps
+    * depend on them, such as releasing lwlocks.
     */
     elog(DEBUG3, "shmem_exit(%d): %d before_shmem_exit callbacks to make",
         code, before_shmem_exit_index);
     while (--before_shmem_exit_index >= 0)
         (*before_shmem_exit_list[before_shmem_exit_index].function) (code,
-                    before_shmem_exit_list[before_shmem_exit_index].arg);
+                   before_shmem_exit_list[before_shmem_exit_index].arg);
     before_shmem_exit_index = 0;
 
     /*
@@ -241,9 +241,9 @@ shmem_exit(int code)
     * callback before invoking it, so that we don't get stuck in an infinite
     * loop if one of those callbacks itself throws an ERROR or FATAL.
     *
-    * Note that explicitly calling this function here is quite different
-    * from registering it as an on_shmem_exit callback for precisely this
-    * reason: if one dynamic shared memory callback errors out, the remaining
+    * Note that explicitly calling this function here is quite different from
+    * registering it as an on_shmem_exit callback for precisely this reason:
+    * if one dynamic shared memory callback errors out, the remaining
     * callbacks will still be invoked.  Thus, hard-coding this call puts it
     * equal footing with callbacks for the main shared memory segment.
     */
@@ -261,7 +261,7 @@ shmem_exit(int code)
         code, on_shmem_exit_index);
     while (--on_shmem_exit_index >= 0)
         (*on_shmem_exit_list[on_shmem_exit_index].function) (code,
-                    on_shmem_exit_list[on_shmem_exit_index].arg);
+                   on_shmem_exit_list[on_shmem_exit_index].arg);
     on_shmem_exit_index = 0;
 }
 
@@ -287,7 +287,7 @@ atexit_callback(void)
 *      on_proc_exit
 *
 *      this function adds a callback function to the list of
- *      functions invoked by proc_exit().	-cim 2/6/90
+ *      functions invoked by proc_exit().  -cim 2/6/90
 * ----------------------------------------------------------------
 */
 void
@@ -380,7 +380,7 @@ cancel_before_shmem_exit(pg_on_exit_callback function, Datum arg)
 {
     if (before_shmem_exit_index > 0 &&
         before_shmem_exit_list[before_shmem_exit_index - 1].function
-            == function &&
+        == function &&
         before_shmem_exit_list[before_shmem_exit_index - 1].arg == arg)
         --before_shmem_exit_index;
 }
src/backend/storage/ipc/ipci.c

@@ -55,7 +55,7 @@ static bool addin_request_allowed = true;
 * a loadable module.
 *
 * This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries.	Once
+ * is loaded into the postmaster via shared_preload_libraries.  Once
 * shared memory has been allocated, calls will be ignored.  (We could
 * raise an error, but it seems better to make it a no-op, so that
 * libraries containing such calls can be reloaded if needed.)
@@ -85,7 +85,7 @@ RequestAddinShmemSpace(Size size)
 * This is a bit code-wasteful and could be cleaned up.)
 *
 * If "makePrivate" is true then we only need private memory, not shared
- * memory.	This is true for a standalone backend, false for a postmaster.
+ * memory.  This is true for a standalone backend, false for a postmaster.
 */
 void
 CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
src/backend/storage/ipc/pmsignal.c

@@ -26,9 +26,9 @@
 
 /*
 * The postmaster is signaled by its children by sending SIGUSR1.  The
- * specific reason is communicated via flags in shared memory.	We keep
+ * specific reason is communicated via flags in shared memory.  We keep
 * a boolean flag for each possible "reason", so that different reasons
- * can be signaled by different backends at the same time.	(However,
+ * can be signaled by different backends at the same time.  (However,
 * if the same reason is signaled more than once simultaneously, the
 * postmaster will observe it only once.)
 *
@@ -42,7 +42,7 @@
 * have three possible states: UNUSED, ASSIGNED, ACTIVE.  An UNUSED slot is
 * available for assignment.  An ASSIGNED slot is associated with a postmaster
 * child process, but either the process has not touched shared memory yet,
- * or it has successfully cleaned up after itself.	A ACTIVE slot means the
+ * or it has successfully cleaned up after itself.  A ACTIVE slot means the
 * process is actively using shared memory.  The slots are assigned to
 * child processes at random, and postmaster.c is responsible for tracking
 * which one goes with which PID.
@@ -19,11 +19,11 @@
|
||||
*
|
||||
* During hot standby, we also keep a list of XIDs representing transactions
|
||||
* that are known to be running in the master (or more precisely, were running
|
||||
* as of the current point in the WAL stream). This list is kept in the
|
||||
* as of the current point in the WAL stream). This list is kept in the
|
||||
* KnownAssignedXids array, and is updated by watching the sequence of
|
||||
* arriving XIDs. This is necessary because if we leave those XIDs out of
|
||||
* snapshots taken for standby queries, then they will appear to be already
|
||||
* complete, leading to MVCC failures. Note that in hot standby, the PGPROC
|
||||
* complete, leading to MVCC failures. Note that in hot standby, the PGPROC
|
||||
* array represents standby processes, which by definition are not running
|
||||
* transactions that have XIDs.
|
||||
*
|
||||
@@ -276,7 +276,7 @@ ProcArrayAdd(PGPROC *proc)
|
||||
if (arrayP->numProcs >= arrayP->maxProcs)
|
||||
{
|
||||
/*
|
||||
* Ooops, no room. (This really shouldn't happen, since there is a
|
||||
* Ooops, no room. (This really shouldn't happen, since there is a
|
||||
* fixed supply of PGPROC structs too, and so we should have failed
|
||||
* earlier.)
|
||||
*/
|
||||
@@ -686,7 +686,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
|
||||
ExtendSUBTRANS(latestObservedXid);
|
||||
TransactionIdAdvance(latestObservedXid);
|
||||
}
|
||||
TransactionIdRetreat(latestObservedXid); /* = running->nextXid - 1 */
|
||||
TransactionIdRetreat(latestObservedXid); /* = running->nextXid - 1 */
|
||||
|
||||
/* ----------
|
||||
* Now we've got the running xids we need to set the global values that
|
||||
@@ -733,7 +733,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
|
||||
* ShmemVariableCache->nextXid must be beyond any observed xid.
|
||||
*
|
||||
* We don't expect anyone else to modify nextXid, hence we don't need to
|
||||
* hold a lock while examining it. We still acquire the lock to modify
|
||||
* hold a lock while examining it. We still acquire the lock to modify
|
||||
* it, though.
|
||||
*/
|
||||
nextXid = latestObservedXid;
|
||||
@@ -1485,7 +1485,7 @@ GetSnapshotData(Snapshot snapshot)
|
||||
* do that much work while holding the ProcArrayLock.
|
||||
*
|
||||
* The other backend can add more subxids concurrently, but cannot
|
||||
* remove any. Hence it's important to fetch nxids just once.
|
||||
* remove any. Hence it's important to fetch nxids just once.
|
||||
* Should be safe to use memcpy, though. (We needn't worry about
|
||||
* missing any xids added concurrently, because they must postdate
|
||||
* xmax.)
|
||||
@@ -2153,7 +2153,7 @@ BackendPidGetProc(int pid)
|
||||
* Only main transaction Ids are considered. This function is mainly
|
||||
* useful for determining what backend owns a lock.
|
||||
*
|
||||
* Beware that not every xact has an XID assigned. However, as long as you
* Beware that not every xact has an XID assigned. However, as long as you
* only call this using an XID found on disk, you're safe.
*/
int
@@ -2217,7 +2217,7 @@ IsBackendPid(int pid)
* some snapshot we have. Since we examine the procarray with only shared
* lock, there are race conditions: a backend could set its xmin just after
* we look. Indeed, on multiprocessors with weak memory ordering, the
* other backend could have set its xmin *before* we look. We know however
* other backend could have set its xmin *before* we look. We know however
* that such a backend must have held shared ProcArrayLock overlapping our
* own hold of ProcArrayLock, else we would see its xmin update. Therefore,
* any snapshot the other backend is taking concurrently with our scan cannot
@@ -2723,7 +2723,7 @@ ProcArrayGetReplicationSlotXmin(TransactionId *xmin,
* XidCacheRemoveRunningXids
*
* Remove a bunch of TransactionIds from the list of known-running
* subtransactions for my backend. Both the specified xid and those in
* subtransactions for my backend. Both the specified xid and those in
* the xids[] array (of length nxids) are removed from the subxids cache.
* latestXid must be the latest XID among the group.
*/
@@ -2829,7 +2829,7 @@ DisplayXidCache(void)
* treated as running by standby transactions, even though they are not in
* the standby server's PGXACT array.
*
* We record all XIDs that we know have been assigned. That includes all the
* We record all XIDs that we know have been assigned. That includes all the
* XIDs seen in WAL records, plus all unobserved XIDs that we can deduce have
* been assigned. We can deduce the existence of unobserved XIDs because we
* know XIDs are assigned in sequence, with no gaps. The KnownAssignedXids
@@ -2838,7 +2838,7 @@ DisplayXidCache(void)
*
* During hot standby we do not fret too much about the distinction between
* top-level XIDs and subtransaction XIDs. We store both together in the
* KnownAssignedXids list. In backends, this is copied into snapshots in
* KnownAssignedXids list. In backends, this is copied into snapshots in
* GetSnapshotData(), taking advantage of the fact that XidInMVCCSnapshot()
* doesn't care about the distinction either. Subtransaction XIDs are
* effectively treated as top-level XIDs and in the typical case pg_subtrans
@@ -3053,14 +3053,14 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid)
* must hold shared ProcArrayLock to examine the array. To remove XIDs from
* the array, the startup process must hold ProcArrayLock exclusively, for
* the usual transactional reasons (compare commit/abort of a transaction
* during normal running). Compressing unused entries out of the array
* during normal running). Compressing unused entries out of the array
* likewise requires exclusive lock. To add XIDs to the array, we just insert
* them into slots to the right of the head pointer and then advance the head
* pointer. This wouldn't require any lock at all, except that on machines
* with weak memory ordering we need to be careful that other processors
* see the array element changes before they see the head pointer change.
* We handle this by using a spinlock to protect reads and writes of the
* head/tail pointers. (We could dispense with the spinlock if we were to
* head/tail pointers. (We could dispense with the spinlock if we were to
* create suitable memory access barrier primitives and use those instead.)
* The spinlock must be taken to read or write the head/tail pointers unless
* the caller holds ProcArrayLock exclusively.
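
The append protocol described in the comment above — fill array slots beyond the head without any lock, then publish the new head under a spinlock so that readers on weakly ordered machines cannot see the pointer move before the data — can be shown in miniature. A minimal standalone sketch, assuming a pthread spinlock as a stand-in for slock_t and using invented names (xid_ring, ring_append); it omits the wraparound and overflow checks the real array needs:

#include <pthread.h>
#include <stdint.h>

/* Hypothetical stand-in for the KnownAssignedXids array machinery. */
typedef struct xid_ring
{
    pthread_spinlock_t mutex;   /* plays the role of the head/tail spinlock;
                                 * initialize with pthread_spin_init() */
    int         head;           /* next free slot; published under the lock */
    int         tail;           /* oldest valid slot */
    uint32_t    xids[1024];
} xid_ring;

/*
 * Append nxids entries.  The array slots are written first, without any
 * lock; only the head-pointer publication is protected, so a reader that
 * takes the spinlock and sees the new head is guaranteed (by the lock's
 * memory ordering) to see the slot contents as well.
 */
static void
ring_append(xid_ring *r, const uint32_t *xids, int nxids)
{
    for (int i = 0; i < nxids; i++)
        r->xids[r->head + i] = xids[i]; /* fill slots beyond head */

    pthread_spin_lock(&r->mutex);
    r->head += nxids;                   /* publish */
    pthread_spin_unlock(&r->mutex);
}
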
@@ -3157,7 +3157,7 @@ KnownAssignedXidsCompress(bool force)
* If exclusive_lock is true then caller already holds ProcArrayLock in
* exclusive mode, so we need no extra locking here. Else caller holds no
* lock, so we need to be sure we maintain sufficient interlocks against
* concurrent readers. (Only the startup process ever calls this, so no need
* concurrent readers. (Only the startup process ever calls this, so no need
* to worry about concurrent writers.)
*/
static void
@@ -3203,7 +3203,7 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
Assert(tail >= 0 && tail < pArray->maxKnownAssignedXids);

/*
* Verify that insertions occur in TransactionId sequence. Note that even
* Verify that insertions occur in TransactionId sequence. Note that even
* if the last existing element is marked invalid, it must still have a
* correctly sequenced XID value.
*/
@@ -3306,7 +3306,7 @@ KnownAssignedXidsSearch(TransactionId xid, bool remove)
}

/*
* Standard binary search. Note we can ignore the KnownAssignedXidsValid
* Standard binary search. Note we can ignore the KnownAssignedXidsValid
* array here, since even invalid entries will contain sorted XIDs.
*/
first = tail;

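The search strategy in that last hunk — binary search over a region whose entries stay sorted even when logically deleted — is easy to demonstrate in isolation. A rough sketch under invented names (entries, valid, find_slot), not the server's implementation:

#include <stdbool.h>
#include <stdint.h>

/*
 * Binary search for `key` between slots [tail, head).  The valid[] flags
 * can be ignored during the search itself, because deleted entries keep
 * their (still correctly ordered) key values; validity matters only when
 * deciding whether the found slot is a live match.
 */
static int
find_slot(const uint32_t *entries, const bool *valid,
          int tail, int head, uint32_t key)
{
    int first = tail;
    int last = head - 1;

    while (first <= last)
    {
        int mid = first + (last - first) / 2;

        if (entries[mid] == key)
            return valid[mid] ? mid : -1;   /* dead entry: no live match */
        else if (entries[mid] < key)
            first = mid + 1;
        else
            last = mid - 1;
    }
    return -1;
}
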
@@ -64,7 +64,7 @@ typedef struct
* Spurious wakeups must be expected. Make sure that the flag is cleared
* in the error path.
*/
bool set_latch_on_sigusr1;
bool set_latch_on_sigusr1;

static ProcSignalSlot *ProcSignalSlots = NULL;
static volatile ProcSignalSlot *MyProcSignalSlot = NULL;

@@ -142,7 +142,7 @@ static shm_mq_result shm_mq_send_bytes(shm_mq_handle *mq, Size nbytes,
void *data, bool nowait, Size *bytes_written);
static shm_mq_result shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed,
bool nowait, Size *nbytesp, void **datap);
static bool shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr,
static bool shm_mq_wait_internal(volatile shm_mq *mq, PGPROC *volatile * ptr,
BackgroundWorkerHandle *handle);
static uint64 shm_mq_get_bytes_read(volatile shm_mq *mq, bool *detached);
static void shm_mq_inc_bytes_read(volatile shm_mq *mq, Size n);
@@ -152,8 +152,8 @@ static shm_mq_result shm_mq_notify_receiver(volatile shm_mq *mq);
static void shm_mq_detach_callback(dsm_segment *seg, Datum arg);

/* Minimum queue size is enough for header and at least one chunk of data. */
const Size shm_mq_minimum_size =
MAXALIGN(offsetof(shm_mq, mq_ring)) + MAXIMUM_ALIGNOF;
const Size shm_mq_minimum_size =
MAXALIGN(offsetof(shm_mq, mq_ring)) + MAXIMUM_ALIGNOF;

#define MQH_INITIAL_BUFSIZE 8192

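The minimum-size rule above is: the header rounded up to maximal alignment, plus room for one maximally aligned chunk of payload. The same arithmetic outside the server, under a hypothetical layout (my_mq, ALIGN_UP, and MY_ALIGNOF are all made up):

#include <stddef.h>
#include <stdio.h>

#define MY_ALIGNOF 8                        /* stand-in for MAXIMUM_ALIGNOF */
#define ALIGN_UP(x) (((x) + MY_ALIGNOF - 1) & ~(size_t) (MY_ALIGNOF - 1))

typedef struct my_mq
{
    size_t bytes_read;
    size_t bytes_written;
    char   ring[];                          /* ring buffer starts here */
} my_mq;

int
main(void)
{
    /* Header rounded to alignment, plus one aligned chunk of payload. */
    size_t minimum = ALIGN_UP(offsetof(my_mq, ring)) + MY_ALIGNOF;

    printf("minimum queue size: %zu bytes\n", minimum);
    return 0;
}
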
@@ -193,7 +193,7 @@ void
shm_mq_set_receiver(shm_mq *mq, PGPROC *proc)
{
volatile shm_mq *vmq = mq;
PGPROC *sender;
PGPROC *sender;

SpinLockAcquire(&mq->mq_mutex);
Assert(vmq->mq_receiver == NULL);
@@ -212,7 +212,7 @@ void
shm_mq_set_sender(shm_mq *mq, PGPROC *proc)
{
volatile shm_mq *vmq = mq;
PGPROC *receiver;
PGPROC *receiver;

SpinLockAcquire(&mq->mq_mutex);
Assert(vmq->mq_sender == NULL);
@@ -231,7 +231,7 @@ PGPROC *
shm_mq_get_receiver(shm_mq *mq)
{
volatile shm_mq *vmq = mq;
PGPROC *receiver;
PGPROC *receiver;

SpinLockAcquire(&mq->mq_mutex);
receiver = vmq->mq_receiver;
@@ -247,7 +247,7 @@ PGPROC *
shm_mq_get_sender(shm_mq *mq)
{
volatile shm_mq *vmq = mq;
PGPROC *sender;
PGPROC *sender;

SpinLockAcquire(&mq->mq_mutex);
sender = vmq->mq_sender;
@@ -280,7 +280,7 @@ shm_mq_get_sender(shm_mq *mq)
shm_mq_handle *
shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle)
{
shm_mq_handle *mqh = palloc(sizeof(shm_mq_handle));
shm_mq_handle *mqh = palloc(sizeof(shm_mq_handle));

Assert(mq->mq_receiver == MyProc || mq->mq_sender == MyProc);
mqh->mqh_queue = mq;
@@ -317,9 +317,9 @@ shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle)
shm_mq_result
shm_mq_send(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait)
{
shm_mq_result res;
shm_mq *mq = mqh->mqh_queue;
Size bytes_written;
shm_mq_result res;
shm_mq *mq = mqh->mqh_queue;
Size bytes_written;

Assert(mq->mq_sender == MyProc);

@@ -328,7 +328,7 @@ shm_mq_send(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait)
{
Assert(mqh->mqh_partial_bytes < sizeof(Size));
res = shm_mq_send_bytes(mqh, sizeof(Size) - mqh->mqh_partial_bytes,
((char *) &nbytes) + mqh->mqh_partial_bytes,
((char *) &nbytes) +mqh->mqh_partial_bytes,
nowait, &bytes_written);
mqh->mqh_partial_bytes += bytes_written;
if (res != SHM_MQ_SUCCESS)
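
The bookkeeping in this hunk — remember how many bytes of the length word have already gone out and resume from that offset on the next call — is the usual pattern for restartable writes. A toy version with invented names (partial_send, write_some_fn), shown only for the offset arithmetic:

#include <stddef.h>

/* Returns how many bytes it accepted; may be short, as a full queue would be. */
typedef size_t (*write_some_fn) (const void *buf, size_t len);

/*
 * Send the remaining bytes of a length word, resuming at *partial.
 * Returns 1 once the full word has been transmitted, 0 if more calls
 * are needed (the counterpart of a would-block result above).
 */
static int
partial_send(size_t msglen, size_t *partial, write_some_fn write_some)
{
    size_t written = write_some(((char *) &msglen) + *partial,
                                sizeof(size_t) - *partial);

    *partial += written;        /* remember progress across calls */
    return *partial == sizeof(size_t);
}
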
@@ -390,11 +390,11 @@ shm_mq_send(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait)
shm_mq_result
shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
{
shm_mq *mq = mqh->mqh_queue;
shm_mq_result res;
Size rb = 0;
Size nbytes;
void *rawdata;
shm_mq *mq = mqh->mqh_queue;
shm_mq_result res;
Size rb = 0;
Size nbytes;
void *rawdata;

Assert(mq->mq_receiver == MyProc);

@@ -439,18 +439,19 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
*/
if (mqh->mqh_partial_bytes == 0 && rb >= sizeof(Size))
{
Size needed;
Size needed;

nbytes = * (Size *) rawdata;
nbytes = *(Size *) rawdata;

/* If we've already got the whole message, we're done. */
needed = MAXALIGN(sizeof(Size)) + MAXALIGN(nbytes);
if (rb >= needed)
{
/*
* Technically, we could consume the message length information
* at this point, but the extra write to shared memory wouldn't
* be free and in most cases we would reap no benefit.
* Technically, we could consume the message length
* information at this point, but the extra write to shared
* memory wouldn't be free and in most cases we would reap no
* benefit.
*/
mqh->mqh_consume_pending = needed;
*nbytesp = nbytes;
@@ -469,7 +470,7 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
}
else
{
Size lengthbytes;
Size lengthbytes;

/* Can't be split unless bigger than required alignment. */
Assert(sizeof(Size) > MAXIMUM_ALIGNOF);
@@ -498,7 +499,7 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
if (mqh->mqh_partial_bytes >= sizeof(Size))
{
Assert(mqh->mqh_partial_bytes == sizeof(Size));
mqh->mqh_expected_bytes = * (Size *) mqh->mqh_buffer;
mqh->mqh_expected_bytes = *(Size *) mqh->mqh_buffer;
mqh->mqh_length_word_complete = true;
mqh->mqh_partial_bytes = 0;
}
@@ -527,12 +528,12 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)

/*
* The message has wrapped the buffer. We'll need to copy it in order
* to return it to the client in one chunk. First, make sure we have a
* large enough buffer available.
* to return it to the client in one chunk. First, make sure we have
* a large enough buffer available.
*/
if (mqh->mqh_buflen < nbytes)
{
Size newbuflen = Max(mqh->mqh_buflen, MQH_INITIAL_BUFSIZE);
Size newbuflen = Max(mqh->mqh_buflen, MQH_INITIAL_BUFSIZE);

while (newbuflen < nbytes)
newbuflen *= 2;
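
That doubling loop is plain geometric buffer growth: start from a floor, double until the message fits, reallocate once. A self-contained approximation (grow_buffer and INITIAL_BUFSIZE are made-up names):

#include <stdlib.h>

#define INITIAL_BUFSIZE 8192    /* floor, mirroring MQH_INITIAL_BUFSIZE */

/*
 * Ensure *buf can hold at least nbytes, doubling from the current size
 * (or the floor) so that repeated growth costs amortized O(1) per byte.
 */
static char *
grow_buffer(char *buf, size_t *buflen, size_t nbytes)
{
    if (*buflen < nbytes)
    {
        size_t newbuflen = (*buflen > INITIAL_BUFSIZE) ? *buflen
                                                       : INITIAL_BUFSIZE;

        while (newbuflen < nbytes)
            newbuflen *= 2;

        buf = realloc(buf, newbuflen);  /* a real caller must check NULL */
        *buflen = newbuflen;
    }
    return buf;
}
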
@@ -551,7 +552,7 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
/* Loop until we've copied the entire message. */
for (;;)
{
Size still_needed;
Size still_needed;

/* Copy as much as we can. */
Assert(mqh->mqh_partial_bytes + rb <= nbytes);
@@ -559,10 +560,10 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
mqh->mqh_partial_bytes += rb;

/*
* Update count of bytes read, with alignment padding. Note
* that this will never actually insert any padding except at the
* end of a message, because the buffer size is a multiple of
* MAXIMUM_ALIGNOF, and each read and write is as well.
* Update count of bytes read, with alignment padding. Note that this
* will never actually insert any padding except at the end of a
* message, because the buffer size is a multiple of MAXIMUM_ALIGNOF,
* and each read and write is as well.
*/
Assert(mqh->mqh_partial_bytes == nbytes || rb == MAXALIGN(rb));
shm_mq_inc_bytes_read(mq, MAXALIGN(rb));
@@ -601,7 +602,7 @@ shm_mq_result
shm_mq_wait_for_attach(shm_mq_handle *mqh)
{
shm_mq *mq = mqh->mqh_queue;
PGPROC **victim;
PGPROC **victim;

if (shm_mq_get_receiver(mq) == MyProc)
victim = &mq->mq_sender;
@@ -663,8 +664,8 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,

while (sent < nbytes)
{
bool detached;
uint64 rb;
bool detached;
uint64 rb;

/* Compute number of ring buffer bytes used and available. */
rb = shm_mq_get_bytes_read(mq, &detached);
@@ -679,7 +680,7 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,

if (available == 0)
{
shm_mq_result res;
shm_mq_result res;

/*
* The queue is full, so if the receiver isn't yet known to be
@@ -717,11 +718,11 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,
}

/*
* Wait for our latch to be set. It might already be set for
* some unrelated reason, but that'll just result in one extra
* trip through the loop. It's worth it to avoid resetting the
* latch at top of loop, because setting an already-set latch is
* much cheaper than setting one that has been reset.
* Wait for our latch to be set. It might already be set for some
* unrelated reason, but that'll just result in one extra trip
* through the loop. It's worth it to avoid resetting the latch
* at top of loop, because setting an already-set latch is much
* cheaper than setting one that has been reset.
*/
WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);

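The ordering argued for in that comment — wait, wake, do the work, and only then reset — keeps the common already-set case cheap. PostgreSQL latches are not condition variables, but the trade-off can be mirrored with one; a rough pthread analog, all names invented (initialize the mutex and condvar with their static initializers or *_init before use):

#include <pthread.h>
#include <stdbool.h>

typedef struct latch
{
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool            is_set;
} latch;

/* Setting an already-set latch is nearly free: one flag test. */
static void
latch_set(latch *l)
{
    pthread_mutex_lock(&l->lock);
    if (!l->is_set)
    {
        l->is_set = true;
        pthread_cond_signal(&l->cond);
    }
    pthread_mutex_unlock(&l->lock);
}

/*
 * Wait, then clear.  Resetting only after a wakeup (instead of at the
 * top of every retry loop) keeps the common already-set case cheap,
 * which is the trade-off the comment above describes.
 */
static void
latch_wait_and_reset(latch *l)
{
    pthread_mutex_lock(&l->lock);
    while (!l->is_set)
        pthread_cond_wait(&l->cond, &l->lock);
    l->is_set = false;
    pthread_mutex_unlock(&l->lock);
}
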
@@ -733,8 +734,8 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,
}
else
{
Size offset = mq->mq_bytes_written % (uint64) ringsize;
Size sendnow = Min(available, ringsize - offset);
Size offset = mq->mq_bytes_written % (uint64) ringsize;
Size sendnow = Min(available, ringsize - offset);

/* Write as much data as we can via a single memcpy(). */
memcpy(&mq->mq_ring[mq->mq_ring_offset + offset],
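
The two lines computing offset and sendnow are the heart of a wrap-aware ring write: a monotonically increasing byte counter reduced modulo the ring size gives the physical position, and each memcpy is clamped so it never crosses the physical end of the buffer. A standalone illustration (ring_write is invented, and unlike the real queue it ignores the reader's position, so it would happily overwrite unread data):

#include <stdint.h>
#include <string.h>

#define RINGSIZE 4096           /* physical size of the ring buffer */

/*
 * Copy len bytes into the ring at logical position *bytes_written,
 * splitting the copy where the ring wraps.  The counter only grows;
 * the modulo produces the physical offset.
 */
static void
ring_write(char ring[RINGSIZE], uint64_t *bytes_written,
           const char *src, size_t len)
{
    while (len > 0)
    {
        size_t offset = (size_t) (*bytes_written % RINGSIZE);
        size_t chunk = RINGSIZE - offset;   /* bytes before the wrap */

        if (chunk > len)
            chunk = len;
        memcpy(&ring[offset], src, chunk);
        *bytes_written += chunk;
        src += chunk;
        len -= chunk;
    }
}
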
@@ -751,9 +752,9 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait,
shm_mq_inc_bytes_written(mq, MAXALIGN(sendnow));

/*
* For efficiency, we don't set the reader's latch here. We'll
* do that only when the buffer fills up or after writing an
* entire message.
* For efficiency, we don't set the reader's latch here. We'll do
* that only when the buffer fills up or after writing an entire
* message.
*/
}
}
@@ -801,10 +802,10 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait,
/*
* Fall out before waiting if the queue has been detached.
*
* Note that we don't check for this until *after* considering
* whether the data already available is enough, since the
* receiver can finish receiving a message stored in the buffer
* even after the sender has detached.
* Note that we don't check for this until *after* considering whether
* the data already available is enough, since the receiver can finish
* receiving a message stored in the buffer even after the sender has
* detached.
*/
if (detached)
return SHM_MQ_DETACHED;
@@ -814,11 +815,11 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait,
return SHM_MQ_WOULD_BLOCK;

/*
* Wait for our latch to be set. It might already be set for
* some unrelated reason, but that'll just result in one extra
* trip through the loop. It's worth it to avoid resetting the
* latch at top of loop, because setting an already-set latch is
* much cheaper than setting one that has been reset.
* Wait for our latch to be set. It might already be set for some
* unrelated reason, but that'll just result in one extra trip through
* the loop. It's worth it to avoid resetting the latch at top of
* loop, because setting an already-set latch is much cheaper than
* setting one that has been reset.
*/
WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);

@@ -842,11 +843,11 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait,
* non-NULL when our counterpart attaches to the queue.
*/
static bool
shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr,
shm_mq_wait_internal(volatile shm_mq *mq, PGPROC *volatile * ptr,
BackgroundWorkerHandle *handle)
{
bool save_set_latch_on_sigusr1;
bool result = false;
bool save_set_latch_on_sigusr1;
bool result = false;

save_set_latch_on_sigusr1 = set_latch_on_sigusr1;
if (handle != NULL)
@@ -856,9 +857,9 @@ shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr,
{
for (;;)
{
BgwHandleStatus status;
pid_t pid;
bool detached;
BgwHandleStatus status;
pid_t pid;
bool detached;

/* Acquire the lock just long enough to check the pointer. */
SpinLockAcquire(&mq->mq_mutex);
@@ -913,7 +914,7 @@ shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr,
static uint64
shm_mq_get_bytes_read(volatile shm_mq *mq, bool *detached)
{
uint64 v;
uint64 v;

SpinLockAcquire(&mq->mq_mutex);
v = mq->mq_bytes_read;
@@ -948,7 +949,7 @@ shm_mq_inc_bytes_read(volatile shm_mq *mq, Size n)
static uint64
shm_mq_get_bytes_written(volatile shm_mq *mq, bool *detached)
{
uint64 v;
uint64 v;

SpinLockAcquire(&mq->mq_mutex);
v = mq->mq_bytes_written;
@@ -975,8 +976,8 @@ shm_mq_inc_bytes_written(volatile shm_mq *mq, Size n)
static shm_mq_result
shm_mq_notify_receiver(volatile shm_mq *mq)
{
PGPROC *receiver;
bool detached;
PGPROC *receiver;
bool detached;

SpinLockAcquire(&mq->mq_mutex);
detached = mq->mq_detached;

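These getters take a spinlock just to load a single uint64 because, on 32-bit platforms, an unlocked 64-bit load can tear; the lock guarantees each side sees a consistent value. A minimal analog (the counter type is hypothetical; modern code would likely reach for C11 atomics instead):

#include <pthread.h>
#include <stdint.h>

typedef struct counter
{
    pthread_spinlock_t mutex;   /* initialize with pthread_spin_init() */
    uint64_t    value;          /* may tear if read unlocked on 32-bit */
} counter;

/* Read under the lock so the two 32-bit halves are always consistent. */
static uint64_t
counter_read(counter *c)
{
    uint64_t v;

    pthread_spin_lock(&c->mutex);
    v = c->value;
    pthread_spin_unlock(&c->mutex);
    return v;
}
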
@@ -19,17 +19,17 @@

typedef struct shm_toc_entry
{
uint64 key; /* Arbitrary identifier */
uint64 offset; /* Bytes offset */
uint64 key; /* Arbitrary identifier */
uint64 offset; /* Bytes offset */
} shm_toc_entry;

struct shm_toc
{
uint64 toc_magic; /* Magic number for this TOC */
slock_t toc_mutex; /* Spinlock for mutual exclusion */
Size toc_total_bytes; /* Bytes managed by this TOC */
uint64 toc_magic; /* Magic number for this TOC */
slock_t toc_mutex; /* Spinlock for mutual exclusion */
Size toc_total_bytes; /* Bytes managed by this TOC */
Size toc_allocated_bytes; /* Bytes allocated of those managed */
Size toc_nentry; /* Number of entries in TOC */
Size toc_nentry; /* Number of entries in TOC */
shm_toc_entry toc_entry[FLEXIBLE_ARRAY_MEMBER];
};

@@ -39,7 +39,7 @@ struct shm_toc
shm_toc *
shm_toc_create(uint64 magic, void *address, Size nbytes)
{
shm_toc *toc = (shm_toc *) address;
shm_toc *toc = (shm_toc *) address;

Assert(nbytes > offsetof(shm_toc, toc_entry));
toc->toc_magic = magic;
@@ -58,7 +58,7 @@ shm_toc_create(uint64 magic, void *address, Size nbytes)
extern shm_toc *
shm_toc_attach(uint64 magic, void *address)
{
shm_toc *toc = (shm_toc *) address;
shm_toc *toc = (shm_toc *) address;

if (toc->toc_magic != magic)
return NULL;
@@ -96,7 +96,7 @@ shm_toc_allocate(shm_toc *toc, Size nbytes)
total_bytes = vtoc->toc_total_bytes;
allocated_bytes = vtoc->toc_allocated_bytes;
nentry = vtoc->toc_nentry;
toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry)
toc_bytes = offsetof(shm_toc, toc_entry) +nentry * sizeof(shm_toc_entry)
+ allocated_bytes;

/* Check for memory exhaustion and overflow. */
@@ -132,7 +132,7 @@ shm_toc_freespace(shm_toc *toc)
nentry = vtoc->toc_nentry;
SpinLockRelease(&toc->toc_mutex);

toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry);
toc_bytes = offsetof(shm_toc, toc_entry) +nentry * sizeof(shm_toc_entry);
Assert(allocated_bytes + BUFFERALIGN(toc_bytes) <= total_bytes);
return total_bytes - (allocated_bytes + BUFFERALIGN(toc_bytes));
}
@@ -176,7 +176,7 @@ shm_toc_insert(shm_toc *toc, uint64 key, void *address)
total_bytes = vtoc->toc_total_bytes;
allocated_bytes = vtoc->toc_allocated_bytes;
nentry = vtoc->toc_nentry;
toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry)
toc_bytes = offsetof(shm_toc, toc_entry) +nentry * sizeof(shm_toc_entry)
+ allocated_bytes;

/* Check for memory exhaustion and overflow. */
@@ -241,6 +241,6 @@ Size
shm_toc_estimate(shm_toc_estimator *e)
{
return add_size(offsetof(shm_toc, toc_entry),
add_size(mul_size(e->number_of_keys, sizeof(shm_toc_entry)),
e->space_for_chunks));
add_size(mul_size(e->number_of_keys, sizeof(shm_toc_entry)),
e->space_for_chunks));
}

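A shm_toc lays out directory entries from the front of the segment and data chunks from the back, so space runs out when header + nentry * sizeof(entry) + allocated_bytes meets total_bytes. The freespace arithmetic distilled into a standalone form (the toy_toc names are invented; the real code additionally guards the arithmetic with add_size/mul_size overflow checks, as the estimate hunk above shows):

#include <stddef.h>
#include <stdint.h>

typedef struct toy_toc_entry
{
    uint64_t key;
    uint64_t offset;
} toy_toc_entry;

typedef struct toy_toc
{
    uint64_t total_bytes;       /* segment bytes managed by the TOC */
    uint64_t allocated_bytes;   /* chunk bytes handed out from the end */
    uint64_t nentry;            /* directory entries used at the front */
    toy_toc_entry entry[];
} toy_toc;

/* Bytes still free between the entry directory and the allocated chunks. */
static uint64_t
toy_toc_freespace(const toy_toc *toc)
{
    uint64_t used = offsetof(toy_toc, entry)
        + toc->nentry * sizeof(toy_toc_entry)
        + toc->allocated_bytes;

    return toc->total_bytes - used;     /* caller must know used <= total */
}
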
@@ -26,7 +26,7 @@
* for a module and should never be allocated after the shared memory
* initialization phase. Hash tables have a fixed maximum size, but
* their actual size can vary dynamically. When entries are added
* to the table, more space is allocated. Queues link data structures
* to the table, more space is allocated. Queues link data structures
* that have been allocated either within fixed-size structures or as hash
* buckets. Each shared data structure has a string name to identify
* it (assigned in the module that declares it).
@@ -40,7 +40,7 @@
* The shmem index has two purposes: first, it gives us
* a simple model of how the world looks when a backend process
* initializes. If something is present in the shmem index,
* it is initialized. If it is not, it is uninitialized. Second,
* it is initialized. If it is not, it is uninitialized. Second,
* the shmem index allows us to allocate shared memory on demand
* instead of trying to preallocate structures and hard-wire the
* sizes and locations in header files. If you are using a lot
@@ -55,8 +55,8 @@
* pointers using the method described in (b) above.
*
* (d) memory allocation model: shared memory can never be
* freed, once allocated. Each hash table has its own free list,
* so hash buckets can be reused when an item is deleted. However,
* freed, once allocated. Each hash table has its own free list,
* so hash buckets can be reused when an item is deleted. However,
* if one hash table grows very large and then shrinks, its space
* cannot be redistributed to other tables. We could build a simple
* hash bucket garbage collector if need be. Right now, it seems
@@ -232,7 +232,7 @@ InitShmemIndex(void)
*
* Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
* hashtable to exist already, we have a bit of a circularity problem in
* initializing the ShmemIndex itself. The special "ShmemIndex" hash
* initializing the ShmemIndex itself. The special "ShmemIndex" hash
* table name will tell ShmemInitStruct to fake it.
*/
info.keysize = SHMEM_INDEX_KEYSIZE;
@@ -309,7 +309,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* ShmemInitStruct -- Create/attach to a structure in shared memory.
*
* This is called during initialization to find or allocate
* a data structure in shared memory. If no other process
* a data structure in shared memory. If no other process
* has created the structure, this routine allocates space
* for it. If it exists already, a pointer to the existing
* structure is returned.
@@ -318,7 +318,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
* cases. Now, it always throws error instead, so callers need not check
* cases. Now, it always throws error instead, so callers need not check
* for NULL.
*/
void *
@@ -350,7 +350,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
* index has been initialized. This should be OK because no other
* index has been initialized. This should be OK because no other
* process can be accessing shared memory yet.
*/
Assert(shmemseghdr->index == NULL);

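The find-or-allocate contract described above makes initialization idempotent across backends: the first caller allocates and initializes, later callers just attach. A typical usage sketch as it might appear in server-side code — the struct, name, and function are invented, only ShmemInitStruct itself is real:

#include "postgres.h"
#include "storage/shmem.h"

typedef struct MySharedState
{
    int         counter;
} MySharedState;

static MySharedState *my_state = NULL;

void
MySharedStateInit(void)
{
    bool        found;

    /* First caller allocates and must initialize; others just attach. */
    my_state = (MySharedState *)
        ShmemInitStruct("My Shared State", sizeof(MySharedState), &found);

    if (!found)
        my_state->counter = 0;
}
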
@@ -14,7 +14,7 @@
*
* Package for managing doubly-linked lists in shared memory.
* The only tricky thing is that SHM_QUEUE will usually be a field
* in a larger record. SHMQueueNext has to return a pointer
* in a larger record. SHMQueueNext has to return a pointer
* to the record itself instead of a pointer to the SHMQueue field
* of the record. It takes an extra parameter and does some extra
* pointer arithmetic to do this correctly.

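The extra parameter and pointer arithmetic that SHMQueueNext needs is the classic trick of recovering a containing record from a pointer to an embedded link field. A freestanding illustration with invented names (widget, link, widget_next):

#include <stddef.h>

typedef struct link
{
    struct link *next;
} link;

typedef struct widget
{
    int     value;
    link    node;               /* embedded list link, not the first member */
} widget;

/* Step from one link to the next, then back out to the enclosing record. */
static widget *
widget_next(link *cur, size_t link_offset)
{
    link *nxt = cur->next;

    if (nxt == NULL)
        return NULL;
    return (widget *) ((char *) nxt - link_offset);
}

/* Typical call: widget_next(&w->node, offsetof(widget, node)) */
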
@@ -29,7 +29,7 @@ uint64 SharedInvalidMessageCounter;
* Because backends sitting idle will not be reading sinval events, we
* need a way to give an idle backend a swift kick in the rear and make
* it catch up before the sinval queue overflows and forces it to go
* through a cache reset exercise. This is done by sending
* through a cache reset exercise. This is done by sending
* PROCSIG_CATCHUP_INTERRUPT to any backend that gets too far behind.
*
* State for catchup events consists of two flags: one saying whether
@@ -68,7 +68,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
* NOTE: it is entirely possible for this routine to be invoked recursively
* as a consequence of processing inside the invalFunction or resetFunction.
* Furthermore, such a recursive call must guarantee that all outstanding
* inval messages have been processed before it exits. This is the reason
* inval messages have been processed before it exits. This is the reason
* for the strange-looking choice to use a statically allocated buffer array
* and counters; it's so that a recursive call can process messages already
* sucked out of sinvaladt.c.
@@ -137,7 +137,7 @@ ReceiveSharedInvalidMessages(
* We are now caught up. If we received a catchup signal, reset that
* flag, and call SICleanupQueue(). This is not so much because we need
* to flush dead messages right now, as that we want to pass on the
* catchup signal to the next slowest backend. "Daisy chaining" the
* catchup signal to the next slowest backend. "Daisy chaining" the
* catchup signal this way avoids creating spikes in system load for what
* should be just a background maintenance activity.
*/
@@ -157,7 +157,7 @@ ReceiveSharedInvalidMessages(
*
* If we are idle (catchupInterruptEnabled is set), we can safely
* invoke ProcessCatchupEvent directly. Otherwise, just set a flag
* to do it later. (Note that it's quite possible for normal processing
* to do it later. (Note that it's quite possible for normal processing
* of the current transaction to cause ReceiveSharedInvalidMessages()
* to be run later on; in that case the flag will get cleared again,
* since there's no longer any reason to do anything.)
@@ -233,7 +233,7 @@ HandleCatchupInterrupt(void)
* EnableCatchupInterrupt
*
* This is called by the PostgresMain main loop just before waiting
* for a frontend command. We process any pending catchup events,
* for a frontend command. We process any pending catchup events,
* and enable the signal handler to process future events directly.
*
* NOTE: the signal handler starts out disabled, and stays so until
@@ -278,7 +278,7 @@ EnableCatchupInterrupt(void)
* DisableCatchupInterrupt
*
* This is called by the PostgresMain main loop just after receiving
* a frontend command. Signal handler execution of catchup events
* a frontend command. Signal handler execution of catchup events
* is disabled until the next EnableCatchupInterrupt call.
*
* The PROCSIG_NOTIFY_INTERRUPT signal handler also needs to call this,

@@ -46,7 +46,7 @@
* In reality, the messages are stored in a circular buffer of MAXNUMMESSAGES
* entries. We translate MsgNum values into circular-buffer indexes by
* computing MsgNum % MAXNUMMESSAGES (this should be fast as long as
* MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
* MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
* doesn't exceed minMsgNum by more than MAXNUMMESSAGES, we have enough space
* in the buffer. If the buffer does overflow, we recover by setting the
* "reset" flag for each backend that has fallen too far behind. A backend
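
The parenthetical about MAXNUMMESSAGES being a power of 2 is what lets the compiler turn the modulo into a bit mask. A quick self-checking demonstration (MAXNUMMSGS and msg_index are invented; the real constant's value does not appear in this diff):

#include <assert.h>
#include <stdint.h>

#define MAXNUMMSGS 4096         /* must be a power of 2 for the mask trick */

/* With a power-of-2 size, x % size equals x & (size - 1). */
static inline uint32_t
msg_index(uint32_t msgnum)
{
    return msgnum & (MAXNUMMSGS - 1);
}

int
main(void)
{
    for (uint32_t n = 0; n < 3 * MAXNUMMSGS; n += 1234)
        assert(msg_index(n) == n % MAXNUMMSGS);
    return 0;
}
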
@@ -59,7 +59,7 @@
* normal behavior is that at most one such interrupt is in flight at a time;
* when a backend completes processing a catchup interrupt, it executes
* SICleanupQueue, which will signal the next-furthest-behind backend if
* needed. This avoids undue contention from multiple backends all trying
* needed. This avoids undue contention from multiple backends all trying
* to catch up at once. However, the furthest-back backend might be stuck
* in a state where it can't catch up. Eventually it will get reset, so it
* won't cause any more problems for anyone but itself. But we don't want
@@ -90,7 +90,7 @@
* the writer wants to change maxMsgNum while readers need to read it.
* We deal with that by having a spinlock that readers must take for just
* long enough to read maxMsgNum, while writers take it for just long enough
* to write maxMsgNum. (The exact rule is that you need the spinlock to
* to write maxMsgNum. (The exact rule is that you need the spinlock to
* read maxMsgNum if you are not holding SInvalWriteLock, and you need the
* spinlock to write maxMsgNum unless you are holding both locks.)
*
@@ -442,7 +442,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
SISeg *segP = shmInvalBuffer;

/*
* N can be arbitrarily large. We divide the work into groups of no more
* N can be arbitrarily large. We divide the work into groups of no more
* than WRITE_QUANTUM messages, to be sure that we don't hold the lock for
* an unreasonably long time. (This is not so much because we care about
* letting in other writers, as that some just-caught-up backend might be
@@ -465,7 +465,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* If the buffer is full, we *must* acquire some space. Clean the
* queue and reset anyone who is preventing space from being freed.
* Otherwise, clean the queue only when it's exceeded the next
* fullness threshold. We have to loop and recheck the buffer state
* fullness threshold. We have to loop and recheck the buffer state
* after any call of SICleanupQueue.
*/
for (;;)
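
Bounding lock hold time by processing at most a fixed quantum per acquisition looks like this in miniature (enqueue_all and QUANTUM are stand-ins; WRITE_QUANTUM's actual value does not appear in this diff):

#include <pthread.h>
#include <stddef.h>

#define QUANTUM 64              /* max items moved per lock acquisition */

/*
 * Insert n items, releasing and re-taking the lock between groups so
 * that readers (or a just-caught-up consumer) can slip in between.
 */
static void
enqueue_all(pthread_mutex_t *lock, int *queue, size_t *tail,
            const int *items, size_t n)
{
    while (n > 0)
    {
        size_t batch = (n > QUANTUM) ? QUANTUM : n;

        pthread_mutex_lock(lock);
        for (size_t i = 0; i < batch; i++)
            queue[(*tail)++] = *items++;
        pthread_mutex_unlock(lock);

        n -= batch;
    }
}
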
@@ -533,11 +533,11 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* executing on behalf of other backends, since each instance will modify only
* fields of its own backend's ProcState, and no instance will look at fields
* of other backends' ProcStates. We express this by grabbing SInvalReadLock
* in shared mode. Note that this is not exactly the normal (read-only)
* in shared mode. Note that this is not exactly the normal (read-only)
* interpretation of a shared lock! Look closely at the interactions before
* allowing SInvalReadLock to be grabbed in shared mode for any other reason!
*
* NB: this can also run in parallel with SIInsertDataEntries. It is not
* NB: this can also run in parallel with SIInsertDataEntries. It is not
* guaranteed that we will return any messages added after the routine is
* entered.
*
@@ -557,10 +557,10 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)

/*
* Before starting to take locks, do a quick, unlocked test to see whether
* there can possibly be anything to read. On a multiprocessor system,
* there can possibly be anything to read. On a multiprocessor system,
* it's possible that this load could migrate backwards and occur before
* we actually enter this function, so we might miss a sinval message that
* was just added by some other processor. But they can't migrate
* was just added by some other processor. But they can't migrate
* backwards over a preceding lock acquisition, so it should be OK. If we
* haven't acquired a lock preventing against further relevant
* invalidations, any such occurrence is not much different than if the
@@ -651,7 +651,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
*
* Caution: because we transiently release write lock when we have to signal
* some other backend, it is NOT guaranteed that there are still minFree
* free message slots at exit. Caller must recheck and perhaps retry.
* free message slots at exit. Caller must recheck and perhaps retry.
*/
void
SICleanupQueue(bool callerHasWriteLock, int minFree)
@@ -672,7 +672,7 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
/*
* Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the
* furthest-back backend that needs signaling (if any), and reset any
* backends that are too far back. Note that because we ignore sendOnly
* backends that are too far back. Note that because we ignore sendOnly
* backends here it is possible for them to keep sending messages without
* a problem even when they are the only active backend.
*/

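Recomputing the queue's low-water mark is a single pass over per-backend read positions that skips send-only participants, exactly as the comment says. A distilled version, all names invented:

#include <stdbool.h>
#include <stdint.h>

typedef struct proc_state
{
    bool     active;
    bool     send_only;     /* sends messages but never reads them */
    uint32_t next_msg_num;  /* next message this backend will read */
} proc_state;

/*
 * The minimum nextMsgNum over all reading backends; messages below it
 * can be discarded.  Send-only backends are ignored, so they never
 * hold the tail back.
 */
static uint32_t
recompute_min(const proc_state *procs, int nprocs, uint32_t max_msg_num)
{
    uint32_t min = max_msg_num;     /* empty-queue default */

    for (int i = 0; i < nprocs; i++)
        if (procs[i].active && !procs[i].send_only &&
            procs[i].next_msg_num < min)
            min = procs[i].next_msg_num;
    return min;
}
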
@@ -130,7 +130,7 @@ GetStandbyLimitTime(void)

/*
* The cutoff time is the last WAL data receipt time plus the appropriate
* delay variable. Delay of -1 means wait forever.
* delay variable. Delay of -1 means wait forever.
*/
GetXLogReceiptTime(&rtime, &fromStream);
if (fromStream)
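
Computing such a cutoff — receipt time plus a configurable delay, where -1 means no limit — reduces to a couple of lines. A sketch with invented names (cutoff_time; millisecond units chosen arbitrarily):

#include <stdbool.h>
#include <stdint.h>

/*
 * Returns true and sets *deadline when a finite limit applies;
 * a delay of -1 means "wait forever", i.e. no deadline at all.
 */
static bool
cutoff_time(int64_t receipt_time_ms, int delay_ms, int64_t *deadline)
{
    if (delay_ms < 0)
        return false;               /* -1: wait forever */
    *deadline = receipt_time_ms + delay_ms;
    return true;
}
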
@@ -475,7 +475,7 @@ SendRecoveryConflictWithBufferPin(ProcSignalReason reason)
* determine whether an actual deadlock condition is present: the lock we
* need to wait for might be unrelated to any held by the Startup process.
* Sooner or later, this mechanism should get ripped out in favor of somehow
* accounting for buffer locks in DeadLockCheck(). However, errors here
* accounting for buffer locks in DeadLockCheck(). However, errors here
* seem to be very low-probability in practice, so for now it's not worth
* the trouble.
*/
@@ -867,7 +867,7 @@ standby_redo(XLogRecPtr lsn, XLogRecord *record)
XLogRecPtr
LogStandbySnapshot(void)
{
XLogRecPtr recptr;
XLogRecPtr recptr;
RunningTransactions running;
xl_standby_lock *locks;
int nlocks;
@@ -889,8 +889,8 @@ LogStandbySnapshot(void)
running = GetRunningTransactionData();

/*
* GetRunningTransactionData() acquired ProcArrayLock, we must release
* it. For Hot Standby this can be done before inserting the WAL record
* GetRunningTransactionData() acquired ProcArrayLock, we must release it.
* For Hot Standby this can be done before inserting the WAL record
* because ProcArrayApplyRecoveryInfo() rechecks the commit status using
* the clog. For logical decoding, though, the lock can't be released
* early because the clog might be "in the future" from the POV of the
@@ -977,9 +977,9 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
/*
* Ensure running_xacts information is synced to disk not too far in the
* future. We don't want to stall anything though (i.e. use XLogFlush()),
* so we let the wal writer do it during normal
* operation. XLogSetAsyncXactLSN() conveniently will mark the LSN as
* to-be-synced and nudge the WALWriter into action if sleeping. Check
* so we let the wal writer do it during normal operation.
* XLogSetAsyncXactLSN() conveniently will mark the LSN as to-be-synced
* and nudge the WALWriter into action if sleeping. Check
* XLogBackgroundFlush() for details why a record might not be flushed
* without it.
*/