
pgindent run for 9.4

This includes removing tabs after periods in C comments, which was
applied to back branches, so this change should not affect backpatching.
Bruce Momjian
2014-05-06 12:12:18 -04:00
parent fb85cd4320
commit 0a78320057
854 changed files with 7848 additions and 7368 deletions

contrib/test_shm_mq/setup.c

@@ -25,7 +25,7 @@
 typedef struct
 {
-	int		nworkers;
+	int			nworkers;
 	BackgroundWorkerHandle *handle[FLEXIBLE_ARRAY_MEMBER];
 } worker_state;
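
Because handle[] is declared with FLEXIBLE_ARRAY_MEMBER, the struct itself carries no space for the handles; callers must size the allocation explicitly. A minimal sketch of that allocation (alloc_worker_state is a hypothetical helper name, not part of the module; the real code also switches memory contexts first):

#include "postgres.h"
#include "postmaster/bgworker.h"

/* Hypothetical helper: allocate a worker_state sized for nworkers handles. */
static worker_state *
alloc_worker_state(int nworkers)
{
	worker_state *ws;

	ws = palloc0(offsetof(worker_state, handle) +
				 sizeof(BackgroundWorkerHandle *) * nworkers);
	ws->nworkers = 0;			/* incremented as each worker is registered */
	return ws;
}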
@@ -34,7 +34,7 @@ static void setup_dynamic_shared_memory(int64 queue_size, int nworkers,
 							test_shm_mq_header **hdrp,
 							shm_mq **outp, shm_mq **inp);
 static worker_state *setup_background_workers(int nworkers,
-							dsm_segment *seg);
+						 dsm_segment *seg);
 static void cleanup_background_workers(dsm_segment *seg, Datum arg);
 static void wait_for_workers_to_become_ready(worker_state *wstate,
 								 volatile test_shm_mq_header *hdr);
@@ -50,9 +50,9 @@ test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp,
 {
 	dsm_segment *seg;
 	test_shm_mq_header *hdr;
-	shm_mq	*outq = NULL;		/* placate compiler */
-	shm_mq	*inq = NULL;		/* placate compiler */
-	worker_state	*wstate;
+	shm_mq	   *outq = NULL;	/* placate compiler */
+	shm_mq	   *inq = NULL;		/* placate compiler */
+	worker_state *wstate;
 
 	/* Set up a dynamic shared memory segment. */
 	setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
@@ -69,8 +69,8 @@ test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp,
 	wait_for_workers_to_become_ready(wstate, hdr);
 
 	/*
-	 * Once we reach this point, all workers are ready.  We no longer need
-	 * to kill them if we die; they'll die on their own as the message queues
+	 * Once we reach this point, all workers are ready.  We no longer need to
+	 * kill them if we die; they'll die on their own as the message queues
 	 * shut down.
 	 */
 	cancel_on_dsm_detach(seg, cleanup_background_workers,
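
The idiom here pairs on_dsm_detach, registered before the risky window inside setup_background_workers, with cancel_on_dsm_detach once every worker is known to be up. A condensed sketch of that pairing, using the names from the hunks above:

/* While workers may still need killing, keep the cleanup callback armed. */
on_dsm_detach(seg, cleanup_background_workers, PointerGetDatum(wstate));

wait_for_workers_to_become_ready(wstate, hdr);

/* All workers are up; message-queue shutdown now does the cleanup instead. */
cancel_on_dsm_detach(seg, cleanup_background_workers, PointerGetDatum(wstate));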
@@ -90,11 +90,11 @@ setup_dynamic_shared_memory(int64 queue_size, int nworkers,
 							dsm_segment **segp, test_shm_mq_header **hdrp,
 							shm_mq **outp, shm_mq **inp)
 {
-	shm_toc_estimator	e;
-	int		i;
-	Size	segsize;
-	dsm_segment	*seg;
-	shm_toc	*toc;
+	shm_toc_estimator e;
+	int			i;
+	Size		segsize;
+	dsm_segment *seg;
+	shm_toc    *toc;
 	test_shm_mq_header *hdr;
 
 	/* Ensure a valid queue size. */
@@ -140,7 +140,7 @@ setup_dynamic_shared_memory(int64 queue_size, int nworkers,
 	/* Set up one message queue per worker, plus one. */
 	for (i = 0; i <= nworkers; ++i)
 	{
-		shm_mq	*mq;
+		shm_mq	   *mq;
 
 		mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size),
 						   (Size) queue_size);
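
Before any queue can be carved out of the segment, setup_dynamic_shared_memory sizes it with the shm_toc estimator API. A hedged sketch of that estimate/create/insert sequence for a single queue, using the 9.4 signatures (dsm_create took only a size then); create_one_queue_segment and MY_MAGIC are illustrative names, not the module's:

#include "postgres.h"
#include "storage/dsm.h"
#include "storage/shm_mq.h"
#include "storage/shm_toc.h"

#define MY_MAGIC	0x79fb2447	/* arbitrary illustrative constant */

static dsm_segment *
create_one_queue_segment(Size queue_size, shm_mq **mqp)
{
	shm_toc_estimator e;
	Size		segsize;
	dsm_segment *seg;
	shm_toc    *toc;

	/* Estimate how much space one queue plus one toc key will need. */
	shm_toc_initialize_estimator(&e);
	shm_toc_estimate_chunk(&e, queue_size);
	shm_toc_estimate_keys(&e, 1);
	segsize = shm_toc_estimate(&e);

	/* Create the segment and a table of contents at its base address. */
	seg = dsm_create(segsize);
	toc = shm_toc_create(MY_MAGIC, dsm_segment_address(seg), segsize);

	/* Allocate the queue from the segment and make it findable under key 0. */
	*mqp = shm_mq_create(shm_toc_allocate(toc, queue_size), queue_size);
	shm_toc_insert(toc, 0, *mqp);
	return seg;
}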
@@ -171,10 +171,10 @@ setup_dynamic_shared_memory(int64 queue_size, int nworkers,
 static worker_state *
 setup_background_workers(int nworkers, dsm_segment *seg)
 {
-	MemoryContext	oldcontext;
+	MemoryContext oldcontext;
 	BackgroundWorker worker;
-	worker_state	*wstate;
-	int		i;
+	worker_state *wstate;
+	int			i;
 
 	/*
 	 * We need the worker_state object and the background worker handles to
@@ -194,16 +194,16 @@ setup_background_workers(int nworkers, dsm_segment *seg)
 	 * Arrange to kill all the workers if we abort before all workers are
 	 * finished hooking themselves up to the dynamic shared memory segment.
 	 *
-	 * If we die after all the workers have finished hooking themselves up
-	 * to the dynamic shared memory segment, we'll mark the two queues to
-	 * which we're directly connected as detached, and the worker(s)
-	 * connected to those queues will exit, marking any other queues to
-	 * which they are connected as detached.  This will cause any
-	 * as-yet-unaware workers connected to those queues to exit in their
-	 * turn, and so on, until everybody exits.
+	 * If we die after all the workers have finished hooking themselves up to
+	 * the dynamic shared memory segment, we'll mark the two queues to which
+	 * we're directly connected as detached, and the worker(s) connected to
+	 * those queues will exit, marking any other queues to which they are
+	 * connected as detached.  This will cause any as-yet-unaware workers
+	 * connected to those queues to exit in their turn, and so on, until
+	 * everybody exits.
 	 *
-	 * But suppose the workers which are supposed to connect to the queues
-	 * to which we're directly attached exit due to some error before they
+	 * But suppose the workers which are supposed to connect to the queues to
+	 * which we're directly attached exit due to some error before they
 	 * actually attach the queues.  The remaining workers will have no way of
 	 * knowing this.  From their perspective, they're still waiting for those
 	 * workers to start, when in fact they've already died.
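
The registration this comment guards fills in one BackgroundWorker struct and registers it nworkers times. A hedged sketch of the 9.4-era fields involved, inside setup_background_workers; values mirror what the module plausibly passes, and error handling is trimmed to the failure ereport:

BackgroundWorker worker;
BackgroundWorkerHandle *handle;

MemSet(&worker, 0, sizeof(worker));
worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
worker.bgw_start_time = BgWorkerStart_ConsistentState;
worker.bgw_restart_time = BGW_NEVER_RESTART;
sprintf(worker.bgw_library_name, "test_shm_mq");
sprintf(worker.bgw_function_name, "test_shm_mq_main");
snprintf(worker.bgw_name, BGW_MAXLEN, "test_shm_mq");
worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
worker.bgw_notify_pid = MyProcPid;	/* lets us wait for startup and detect death */

if (!RegisterDynamicBackgroundWorker(&worker, &handle))
	ereport(ERROR,
			(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
			 errmsg("could not register background process")));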
@@ -255,8 +255,8 @@ static void
 wait_for_workers_to_become_ready(worker_state *wstate,
 								 volatile test_shm_mq_header *hdr)
 {
-	bool	save_set_latch_on_sigusr1;
-	bool	result = false;
+	bool		save_set_latch_on_sigusr1;
+	bool		result = false;
 
 	save_set_latch_on_sigusr1 = set_latch_on_sigusr1;
 	set_latch_on_sigusr1 = true;
@@ -265,7 +265,7 @@ wait_for_workers_to_become_ready(worker_state *wstate,
 	{
 		for (;;)
 		{
-			int		workers_ready;
+			int			workers_ready;
 
 			/* If all the workers are ready, we have succeeded. */
 			SpinLockAcquire(&hdr->mutex);
@@ -310,13 +310,13 @@ wait_for_workers_to_become_ready(worker_state *wstate,
 static bool
 check_worker_status(worker_state *wstate)
 {
-	int		n;
+	int			n;
 
 	/* If any workers (or the postmaster) have died, we have failed. */
 	for (n = 0; n < wstate->nworkers; ++n)
 	{
 		BgwHandleStatus status;
-		pid_t	pid;
+		pid_t		pid;
 
 		status = GetBackgroundWorkerPid(wstate->handle[n], &pid);
 		if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED)
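
cleanup_background_workers itself is not shown in these hunks. A hedged reconstruction of its likely shape, inferred from how it is registered above: a loop terminating whatever workers were registered before the failure.

static void
cleanup_background_workers(dsm_segment *seg, Datum arg)
{
	worker_state *wstate = (worker_state *) DatumGetPointer(arg);

	/* Kill every worker registered so far; later ones were never started. */
	while (wstate->nworkers > 0)
	{
		--wstate->nworkers;
		TerminateBackgroundWorker(wstate->handle[wstate->nworkers]);
	}
}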

contrib/test_shm_mq/test.c

@@ -18,8 +18,7 @@
 #include "test_shm_mq.h"
 
-PG_MODULE_MAGIC;
-PG_FUNCTION_INFO_V1(test_shm_mq);
+PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(test_shm_mq);
 PG_FUNCTION_INFO_V1(test_shm_mq_pipelined);
 
 void		_PG_init(void);
@@ -47,7 +46,7 @@ test_shm_mq(PG_FUNCTION_ARGS)
 	dsm_segment *seg;
 	shm_mq_handle *outqh;
 	shm_mq_handle *inqh;
-	shm_mq_result	res;
+	shm_mq_result res;
 	Size		len;
 	void	   *data;
@@ -59,8 +58,8 @@ test_shm_mq(PG_FUNCTION_ARGS)
 	/*
 	 * Since this test sends data using the blocking interfaces, it cannot
-	 * send data to itself.  Therefore, a minimum of 1 worker is required.
-	 * Of course, a negative worker count is nonsensical.
+	 * send data to itself.  Therefore, a minimum of 1 worker is required.  Of
+	 * course, a negative worker count is nonsensical.
 	 */
 	if (nworkers < 1)
 		ereport(ERROR,
@@ -139,7 +138,7 @@ test_shm_mq_pipelined(PG_FUNCTION_ARGS)
 	dsm_segment *seg;
 	shm_mq_handle *outqh;
 	shm_mq_handle *inqh;
-	shm_mq_result	res;
+	shm_mq_result res;
 	Size		len;
 	void	   *data;
@@ -204,8 +203,8 @@ test_shm_mq_pipelined(PG_FUNCTION_ARGS)
 		}
 		else if (res == SHM_MQ_DETACHED)
 			ereport(ERROR,
-					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-					 errmsg("could not receive message")));
+				  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+				   errmsg("could not receive message")));
 	}
 	else
 	{
@@ -216,18 +215,18 @@ test_shm_mq_pipelined(PG_FUNCTION_ARGS)
 			if (send_count != receive_count)
 				ereport(ERROR,
 						(errcode(ERRCODE_INTERNAL_ERROR),
-						 errmsg("message sent %d times, but received %d times",
-								send_count, receive_count)));
+					errmsg("message sent %d times, but received %d times",
+						   send_count, receive_count)));
 			break;
 		}
 
 		if (wait)
 		{
 			/*
-			 * If we made no progress, wait for one of the other processes
-			 * to which we are connected to set our latch, indicating that
-			 * they have read or written data and therefore there may now be
-			 * work for us to do.
+			 * If we made no progress, wait for one of the other processes to
+			 * which we are connected to set our latch, indicating that they
+			 * have read or written data and therefore there may now be work
+			 * for us to do.
 			 */
 			WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
 			CHECK_FOR_INTERRUPTS();
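
The wait here follows the standard latch protocol: try to make progress, sleep on the latch, let any pending die() fire via CHECK_FOR_INTERRUPTS(), reset, and re-check. In 9.4 WaitLatch took just the latch, an event mask, and a timeout. A condensed sketch of the pattern; pump_queues is a hypothetical stand-in for the send/receive bookkeeping above:

#include "postgres.h"
#include "miscadmin.h"
#include "storage/latch.h"
#include "storage/proc.h"

extern bool pump_queues(void);	/* hypothetical: one non-blocking work pass */

static void
wait_until_progress(void)
{
	for (;;)
	{
		if (pump_queues())
			break;				/* made progress; caller re-enters its loop */

		/* No progress: sleep until a connected process sets our latch. */
		WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
		CHECK_FOR_INTERRUPTS();
		ResetLatch(&MyProc->procLatch);
	}
}

Re-checking for work only after ResetLatch avoids the race where a SetLatch arriving between the work check and the sleep would otherwise be lost.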
@@ -247,13 +246,13 @@ test_shm_mq_pipelined(PG_FUNCTION_ARGS)
 static void
 verify_message(Size origlen, char *origdata, Size newlen, char *newdata)
 {
-	Size	i;
+	Size		i;
 
 	if (origlen != newlen)
 		ereport(ERROR,
 				(errmsg("message corrupted"),
 				 errdetail("The original message was %zu bytes but the final message is %zu bytes.",
-					origlen, newlen)));
+						   origlen, newlen)));
 	for (i = 0; i < origlen; ++i)
 		if (origdata[i] != newdata[i])
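
For reference, the non-blocking queue calls driving the pipelined loop look like this in 9.4; SHM_MQ_WOULD_BLOCK in both directions is what makes the code above decide to wait. A hedged sketch (pump_once is an illustrative helper; both calls can also return SHM_MQ_DETACHED, which the real loop turns into the ereport paths shown earlier):

/* One non-blocking pass: push a copy out, try to pull one back. */
static bool
pump_once(shm_mq_handle *outqh, shm_mq_handle *inqh,
		  char *message, Size message_len)
{
	bool		progress = false;
	shm_mq_result res;
	Size		len;
	void	   *data;

	/* Try to push one copy of the message without blocking. */
	res = shm_mq_send(outqh, message_len, message, true);
	if (res == SHM_MQ_SUCCESS)
		progress = true;

	/* Try to pull one message back without blocking. */
	res = shm_mq_receive(inqh, &len, &data, true);
	if (res == SHM_MQ_SUCCESS)
	{
		verify_message(message_len, message, len, data);
		progress = true;
	}

	/* No progress in either direction means: wait on the latch. */
	return progress;
}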

contrib/test_shm_mq/test_shm_mq.h

@@ -28,18 +28,18 @@
  */
 typedef struct
 {
-	slock_t	mutex;
-	int		workers_total;
-	int		workers_attached;
-	int		workers_ready;
+	slock_t		mutex;
+	int			workers_total;
+	int			workers_attached;
+	int			workers_ready;
 } test_shm_mq_header;
 
 /* Set up dynamic shared memory and background workers for test run. */
 extern void test_shm_mq_setup(int64 queue_size, int32 nworkers,
-				dsm_segment **seg, shm_mq_handle **output,
-				shm_mq_handle **input);
+				  dsm_segment **seg, shm_mq_handle **output,
+				  shm_mq_handle **input);
 
 /* Main entrypoint for a worker. */
-extern void	test_shm_mq_main(Datum);
+extern void test_shm_mq_main(Datum);
 
 #endif
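
All three counters are guarded by the slock_t, so every reader and writer follows the usual acquire/modify/release discipline. A hedged sketch of a worker bumping workers_attached (mark_attached is an illustrative helper, not part of the module):

#include "postgres.h"
#include "storage/spin.h"
#include "test_shm_mq.h"

/* Illustrative: record our attachment and learn the expected worker count. */
static int
mark_attached(volatile test_shm_mq_header *hdr)
{
	int			workers_total;

	SpinLockAcquire(&hdr->mutex);
	++hdr->workers_attached;
	workers_total = hdr->workers_total;
	SpinLockRelease(&hdr->mutex);
	return workers_total;
}

A spinlock suits this because the critical section is a few instructions; anything that could block or error must not run while the lock is held.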

contrib/test_shm_mq/worker.c

@@ -30,8 +30,8 @@
 static void handle_sigterm(SIGNAL_ARGS);
 static void attach_to_queues(dsm_segment *seg, shm_toc *toc,
-					int myworkernumber, shm_mq_handle **inqhp,
-					shm_mq_handle **outqhp);
+				 int myworkernumber, shm_mq_handle **inqhp,
+				 shm_mq_handle **outqhp);
 static void copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh);
/*
@@ -48,7 +48,7 @@ void
 test_shm_mq_main(Datum main_arg)
 {
 	dsm_segment *seg;
-	shm_toc	*toc;
+	shm_toc    *toc;
 	shm_mq_handle *inqh;
 	shm_mq_handle *outqh;
 	volatile test_shm_mq_header *hdr;
@@ -58,12 +58,12 @@ test_shm_mq_main(Datum main_arg)
 	/*
 	 * Establish signal handlers.
 	 *
-	 * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just
-	 * as it would a normal user backend.  To make that happen, we establish
-	 * a signal handler that is a stripped-down version of die().  We don't
-	 * have any equivalent of the backend's command-read loop, where interrupts
-	 * can be processed immediately, so make sure ImmediateInterruptOK is
-	 * turned off.
+	 * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just as
+	 * it would a normal user backend.  To make that happen, we establish a
+	 * signal handler that is a stripped-down version of die().  We don't have
+	 * any equivalent of the backend's command-read loop, where interrupts can
+	 * be processed immediately, so make sure ImmediateInterruptOK is turned
+	 * off.
 	 */
 	pqsignal(SIGTERM, handle_sigterm);
 	ImmediateInterruptOK = false;
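
The handler body is not shown in these hunks. A hedged reconstruction of what a stripped-down die() amounts to: set the flags that CHECK_FOR_INTERRUPTS() inspects, and set our latch so any WaitLatch() sleep ends promptly.

#include "postgres.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/latch.h"
#include "storage/proc.h"

static void
handle_sigterm(SIGNAL_ARGS)
{
	int			save_errno = errno;

	/* Wake ourselves if blocked in WaitLatch(). */
	if (MyProc)
		SetLatch(&MyProc->procLatch);

	/* Make the next CHECK_FOR_INTERRUPTS() report a die(). */
	if (!proc_exit_inprogress)
	{
		InterruptPending = true;
		ProcDiePending = true;
	}

	errno = save_errno;
}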
@@ -76,8 +76,8 @@ test_shm_mq_main(Datum main_arg)
 	 * memory segment to which we must attach for further instructions.  In
 	 * order to attach to dynamic shared memory, we need a resource owner.
 	 * Once we've mapped the segment in our address space, attach to the table
-	 * of contents so we can locate the various data structures we'll need
-	 * to find within the segment.
+	 * of contents so we can locate the various data structures we'll need to
+	 * find within the segment.
 	 */
 	CurrentResourceOwner = ResourceOwnerCreate(NULL, "test_shm_mq worker");
 	seg = dsm_attach(DatumGetInt32(main_arg));
@@ -89,7 +89,7 @@ test_shm_mq_main(Datum main_arg)
 	if (toc == NULL)
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-				 errmsg("bad magic number in dynamic shared memory segment")));
+			   errmsg("bad magic number in dynamic shared memory segment")));
 
 	/*
 	 * Acquire a worker number.
@@ -114,8 +114,8 @@ test_shm_mq_main(Datum main_arg)
 	attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh);
 
 	/*
-	 * Indicate that we're fully initialized and ready to begin the main
-	 * part of the parallel operation.
+	 * Indicate that we're fully initialized and ready to begin the main part
+	 * of the parallel operation.
 	 *
 	 * Once we signal that we're ready, the user backend is entitled to assume
 	 * that our on_dsm_detach callbacks will fire before we disconnect from