
mpm_event, mod_status: Separate processing and write completion queues.

As a follow-up to r1918022, which handled the new CONN_STATE_PROCESS(ing) and
the existing CONN_STATE_WRITE_COMPLETION states in the same async queue, let's
now use two separate queues, which allows for more relevant async accounting in
mod_status.
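
To illustrate the split, here is a minimal, self-contained sketch (plain C with
simplified stand-in types, not httpd's internals) of the idea: each connection
state gets its own timeout queue, so the per-state totals can be reported
independently instead of being lumped together.

    /* Minimal sketch with stand-in types; the real MPM uses its own
     * timeout_queue machinery and atomics. */
    #include <stdio.h>

    typedef enum {
        STATE_PROCESSING,       /* waiting for POLLIN/POLLOUT (vhost Timeout) */
        STATE_WRITE_COMPLETION, /* flushing pending output (vhost Timeout) */
        STATE_KEEPALIVE         /* idle between requests (KeepAliveTimeout) */
    } state_e;

    typedef struct { const char *name; unsigned total; } queue_t;

    static queue_t processing_q = { "processing", 0 };
    static queue_t write_completion_q = { "write-completion", 0 };
    static queue_t keepalive_q = { "keep-alive", 0 };

    /* With one queue per state, each count can be reported on its own. */
    static queue_t *queue_for_state(state_e state)
    {
        switch (state) {
        case STATE_PROCESSING:       return &processing_q;
        case STATE_WRITE_COMPLETION: return &write_completion_q;
        case STATE_KEEPALIVE:        return &keepalive_q;
        }
        return NULL;
    }

    int main(void)
    {
        queue_for_state(STATE_PROCESSING)->total++;
        queue_for_state(STATE_WRITE_COMPLETION)->total++;
        printf("processing=%u writing=%u keep-alive=%u\n",
               processing_q.total, write_completion_q.total,
               keepalive_q.total);
        return 0;
    }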

Rename CONN_STATE_PROCESS to CONN_STATE_PROCESSING, since that is how the state
will be labelled in mod_status.

* include/ap_mmn.h:
  MMN minor bump for process_score->processing counter.

* include/httpd.h:
  Rename CONN_STATE_PROCESS to CONN_STATE_PROCESSING.

* include/scoreboard.h:
  Add process_score->processing field.

* include/httpd.h, modules/http/http_core.c, modules/http2/h2_c1.c,
    server/mpm/event/event.c, server/mpm/motorz/motorz.c,
    server/mpm/simple/simple_io.c:
  Rename CONN_STATE_PROCESS to CONN_STATE_PROCESSING.

* server/mpm/event/event.c:
  Restore write_completion_q to handle connections in CONN_STATE_WRITE_COMPLETION.
  Use processing_q (renamed from process_q) solely for CONN_STATE_PROCESSING.
  Update process_score->processing according to the length of processing_q.
  
* modules/generators/mod_status.c:
  Show the value of process_score->processing in the stats (see the sketch below).
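
For code that wants to consume the new counter, here is a hypothetical
module-side sketch: it assumes httpd's existing scoreboard API
(ap_mpm_query(), ap_get_scoreboard_process()) plus the process_score->processing
field added by this change, and sums the counter across processes the same way
mod_status does.

    /* Hypothetical helper, not part of this commit: total async connections
     * currently in CONN_STATE_PROCESSING, summed over all server processes. */
    #include "httpd.h"
    #include "ap_mpm.h"
    #include "scoreboard.h"

    static int count_async_processing(void)
    {
        int i, limit = 0, processing = 0;

        /* Upper bound on the number of scoreboard process slots. */
        ap_mpm_query(AP_MPMQ_HARD_LIMIT_DAEMONS, &limit);

        for (i = 0; i < limit; ++i) {
            process_score *ps = ap_get_scoreboard_process(i);
            if (ps->pid) {                  /* slot in use */
                processing += ps->processing;
            }
        }
        return processing;
    }

In the machine-readable output (mod_status with "?auto"), the same number shows
up as the new "ConnsAsyncProcessing" line added above.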



git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1918098 13f79535-47bb-0310-9956-ffa450edef68
Author: Yann Ylavic
Date:   2024-06-01 15:08:46 +00:00
Parent: 3cf40d93a9
Commit: d821182d76

10 changed files with 98 additions and 58 deletions

include/ap_mmn.h

@@ -723,7 +723,8 @@
  * 20211221.17 (2.5.1-dev) Add ap_proxy_worker_get_name()
  * 20211221.18 (2.5.1-dev) Add ap_regexec_ex()
  * 20211221.19 (2.5.1-dev) Add AP_REG_NOTEMPTY_ATSTART
- * 20211221.20 (2.5.1-dev) Add CONN_STATE_KEEPALIVE and CONN_STATE_PROCESS
+ * 20211221.20 (2.5.1-dev) Add CONN_STATE_KEEPALIVE and CONN_STATE_PROCESSING
+ * 20211221.21 (2.5.1-dev) Add processing field struct process_score
  */
 
 #define MODULE_MAGIC_COOKIE 0x41503235UL /* "AP25" */

include/httpd.h

@@ -1319,7 +1319,7 @@ struct conn_slave_rec {
  */
 typedef enum {
     CONN_STATE_KEEPALIVE,           /* Kept alive in the MPM (using KeepAliveTimeout) */
-    CONN_STATE_PROCESS,             /* Handled by process_connection() hooks, may be returned
+    CONN_STATE_PROCESSING,          /* Handled by process_connection() hooks, may be returned
                                        to the MPM for POLLIN/POLLOUT (using Timeout) */
     CONN_STATE_HANDLER,             /* Processed by the modules handlers */
     CONN_STATE_WRITE_COMPLETION,    /* Flushed by the MPM before entering CONN_STATE_KEEPALIVE */
@@ -1332,7 +1332,7 @@ typedef enum {
 
     /* Aliases (legacy) */
     CONN_STATE_CHECK_REQUEST_LINE_READABLE  = CONN_STATE_KEEPALIVE,
-    CONN_STATE_READ_REQUEST_LINE            = CONN_STATE_PROCESS,
+    CONN_STATE_READ_REQUEST_LINE            = CONN_STATE_PROCESSING,
 } conn_state_e;
 
 typedef enum {

include/scoreboard.h

@@ -144,10 +144,12 @@ struct process_score {
      *  connections (for async MPMs)
      */
     apr_uint32_t connections;       /* total connections (for async MPMs) */
-    apr_uint32_t write_completion;  /* async connections in write completion or POLLIN/POLLOUT */
+    apr_uint32_t write_completion;  /* async connections in write completion */
     apr_uint32_t lingering_close;   /* async connections in lingering close */
     apr_uint32_t keep_alive;        /* async connections in keep alive */
     apr_uint32_t suspended;         /* connections suspended by some module */
+    apr_uint32_t processing;        /* async connections in processing (returned
+                                       to the MPM for POLLIN/POLLOUT) */
 };
 
 /* Scoreboard is now in 'local' memory, since it isn't updated once created,

modules/generators/mod_status.c

@@ -564,7 +564,7 @@ static int status_handler(request_rec *r)
     ap_rputs("</dl>", r);
 
     if (is_async) {
-        int write_completion = 0, lingering_close = 0, keep_alive = 0,
+        int processing = 0, write_completion = 0, lingering_close = 0, keep_alive = 0,
             connections = 0, stopping = 0, procs = 0;
         if (!short_report)
             ap_rputs("\n\n<table rules=\"all\" cellpadding=\"1%\">\n"
@@ -576,11 +576,13 @@ static int status_handler(request_rec *r)
                      "<th colspan=\"3\">Async connections</th></tr>\n"
                      "<tr><th>total</th><th>accepting</th>"
                      "<th>busy</th><th>graceful</th><th>idle</th>"
-                     "<th>writing</th><th>keep-alive</th><th>closing</th></tr>\n", r);
+                     "<th>processing</th><th>writing</th><th>keep-alive</th>"
+                     "<th>closing</th></tr>\n", r);
         for (i = 0; i < server_limit; ++i) {
             ps_record = ap_get_scoreboard_process(i);
             if (ps_record->pid) {
                 connections += ps_record->connections;
+                processing += ps_record->processing;
                 write_completion += ps_record->write_completion;
                 keep_alive += ps_record->keep_alive;
                 lingering_close += ps_record->lingering_close;
@@ -600,7 +602,7 @@ static int status_handler(request_rec *r)
                            "<td>%s%s</td>"
                            "<td>%u</td><td>%s</td>"
                            "<td>%u</td><td>%u</td><td>%u</td>"
-                           "<td>%u</td><td>%u</td><td>%u</td>"
+                           "<td>%u</td><td>%u</td><td>%u</td><td>%u</td>"
                            "</tr>\n",
                            i, ps_record->pid,
                            dying, old,
@@ -609,6 +611,7 @@ static int status_handler(request_rec *r)
                            thread_busy_buffer[i],
                            thread_graceful_buffer[i],
                            thread_idle_buffer[i],
+                           ps_record->processing,
                            ps_record->write_completion,
                            ps_record->keep_alive,
                            ps_record->lingering_close);
@@ -620,23 +623,26 @@ static int status_handler(request_rec *r)
                        "<td>%d</td><td>%d</td>"
                        "<td>%d</td><td>&nbsp;</td>"
                        "<td>%d</td><td>%d</td><td>%d</td>"
-                       "<td>%d</td><td>%d</td><td>%d</td>"
+                       "<td>%d</td><td>%d</td><td>%d</td><td>%d</td>"
                        "</tr>\n</table>\n",
                        procs, stopping,
                        connections,
                        busy, graceful, idle,
-                       write_completion, keep_alive, lingering_close);
+                       processing, write_completion, keep_alive,
+                       lingering_close);
         }
         else {
             ap_rprintf(r, "Processes: %d\n"
                           "Stopping: %d\n"
                           "ConnsTotal: %d\n"
+                          "ConnsAsyncProcessing: %d\n"
                           "ConnsAsyncWriting: %d\n"
                           "ConnsAsyncKeepAlive: %d\n"
                           "ConnsAsyncClosing: %d\n",
                        procs, stopping,
                        connections,
-                       write_completion, keep_alive, lingering_close);
+                       processing, write_completion, keep_alive,
+                       lingering_close);
         }
     }

modules/http/http_core.c

@@ -142,9 +142,9 @@ static int ap_process_http_async_connection(conn_rec *c)
     conn_state_t *cs = c->cs;
 
     AP_DEBUG_ASSERT(cs != NULL);
-    AP_DEBUG_ASSERT(cs->state == CONN_STATE_PROCESS);
-    if (cs->state == CONN_STATE_PROCESS) {
+    AP_DEBUG_ASSERT(cs->state == CONN_STATE_PROCESSING);
+    if (cs->state == CONN_STATE_PROCESSING) {
         ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
 
         if (ap_extended_status) {
             ap_set_conn_count(c->sbh, r, c->keepalives);

modules/http2/h2_c1.c

@@ -161,8 +161,8 @@ apr_status_t h2_c1_run(conn_rec *c)
              * the Timeout behaviour instead of a KeepAliveTimeout
              * See PR 63534.
              */
-#if H2_USE_STATE_PROCESS
-            c->cs->state = CONN_STATE_PROCESS;
+#if H2_USE_STATE_PROCESSING
+            c->cs->state = CONN_STATE_PROCESSING;
 #else
             c->cs->state = CONN_STATE_WRITE_COMPLETION;
 #endif

modules/lua/lua_request.c

@@ -1264,6 +1264,10 @@ static int lua_ap_scoreboard_process(lua_State *L)
         lua_pushnumber(L, ps_record->suspended);
         lua_settable(L, -3);
 
+        lua_pushstring(L, "processing");
+        lua_pushnumber(L, ps_record->processing);
+        lua_settable(L, -3);
+
         lua_pushstring(L, "write_completion");
         lua_pushnumber(L, ps_record->write_completion);
         lua_settable(L, -3);

server/mpm/event/event.c

@@ -268,12 +268,14 @@ struct timeout_queue {
 /*
  * Several timeout queues that use different timeouts, so that we always can
  * simply append to the end.
- *     process_q          uses vhost's TimeOut
+ *     processing_q       uses vhost's TimeOut
+ *     write_completion_q uses vhost's TimeOut
  *     keepalive_q        uses vhost's KeepAliveTimeOut
  *     linger_q           uses MAX_SECS_TO_LINGER
  *     short_linger_q     uses SECONDS_TO_LINGER
  */
-static struct timeout_queue *process_q,
+static struct timeout_queue *processing_q,
+                            *write_completion_q,
                             *keepalive_q,
                             *linger_q,
                             *short_linger_q;
@@ -447,6 +449,7 @@ static int max_spawn_rate_per_bucket = MAX_SPAWN_RATE / 1;
 
 struct event_srv_cfg_s {
     struct timeout_queue *ps_q,
+                         *wc_q,
                          *ka_q;
 };
@@ -1094,7 +1097,7 @@ static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * soc
          * When the accept filter is active, sockets are kept in the
          * kernel until a HTTP request is received.
          */
-        cs->pub.state = CONN_STATE_PROCESS;
+        cs->pub.state = CONN_STATE_PROCESSING;
         cs->pub.sense = CONN_SENSE_DEFAULT;
         rc = OK;
     }
@@ -1115,7 +1118,7 @@ static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * soc
         /* fall through */
     }
     else {
-        if (cs->pub.state == CONN_STATE_PROCESS
+        if (cs->pub.state == CONN_STATE_PROCESSING
             /* If we have an input filter which 'clogs' the input stream,
              * like mod_ssl used to, lets just do the normal read from input
              * filters, like the Worker MPM does. Filters that need to write
@@ -1132,8 +1135,8 @@ process_connection:
         if (clogging) {
             apr_atomic_dec32(&clogged_count);
         }
-        /* The sense can be set for CONN_STATE_PROCESS only */
-        if (cs->pub.state != CONN_STATE_PROCESS) {
+        /* The sense can be set in CONN_STATE_PROCESSING only */
+        if (cs->pub.state != CONN_STATE_PROCESSING) {
             cs->pub.sense = CONN_SENSE_DEFAULT;
         }
         if (rc == DONE) {
@@ -1148,7 +1151,7 @@ process_connection:
      * The process_connection hooks above should set the connection state
      * appropriately upon return, for event MPM to either:
      * - CONN_STATE_LINGER: do lingering close;
-     * - CONN_STATE_PROCESS: wait for read/write-ability of the underlying
+     * - CONN_STATE_PROCESSING: wait for read/write-ability of the underlying
      *   socket with respect to its Timeout and come back to process_connection()
      *   hooks when ready;
      * - CONN_STATE_WRITE_COMPLETION: flush pending outputs using Timeout and
@@ -1162,13 +1165,13 @@ process_connection:
      * to one of the above expected value, we forcibly close the connection w/
      * CONN_STATE_LINGER.  This covers the cases where no process_connection
      * hook executes (DECLINED), or one returns OK w/o touching the state (i.e.
-     * CONN_STATE_PROCESS remains after the call) which can happen with
+     * CONN_STATE_PROCESSING remains after the call) which can happen with
      * third-party modules not updated to work specifically with event MPM
      * while this was expected to do lingering close unconditionally with
      * worker or prefork MPMs for instance.
      */
     if (rc != OK || (cs->pub.state != CONN_STATE_LINGER
-                     && cs->pub.state != CONN_STATE_PROCESS
+                     && cs->pub.state != CONN_STATE_PROCESSING
                      && cs->pub.state != CONN_STATE_WRITE_COMPLETION
                      && cs->pub.state != CONN_STATE_SUSPENDED)) {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10111)
@@ -1179,7 +1182,7 @@ process_connection:
         cs->pub.state = CONN_STATE_LINGER;
     }
 
-    if (cs->pub.state == CONN_STATE_PROCESS) {
+    if (cs->pub.state == CONN_STATE_PROCESSING) {
         /* Set a read/write timeout for this connection, and let the
          * event thread poll for read/writeability.
          */
@@ -1201,7 +1204,7 @@ process_connection:
             apr_thread_mutex_unlock(timeout_mutex);
             ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(10503)
                          "process_socket: apr_pollset_add failure in "
-                         "CONN_STATE_PROCESS");
+                         "CONN_STATE_PROCESSING");
             close_connection(cs);
             signal_threads(ST_GRACEFUL);
         }
@@ -1215,7 +1218,7 @@ process_connection:
         int pending = DECLINED;
 
         /* Flush all pending outputs before going to CONN_STATE_KEEPALIVE or
-         * straight to CONN_STATE_PROCESS if inputs are pending already.
+         * straight to CONN_STATE_PROCESSING if inputs are pending already.
          */
         ap_update_child_status(cs->sbh, SERVER_BUSY_WRITE, NULL);
@@ -1235,11 +1238,11 @@ process_connection:
             /* Add work to pollset. */
             update_reqevents_from_sense(cs, CONN_SENSE_WANT_WRITE);
             apr_thread_mutex_lock(timeout_mutex);
-            TO_QUEUE_APPEND(cs->sc->ps_q, cs);
+            TO_QUEUE_APPEND(cs->sc->wc_q, cs);
             rv = apr_pollset_add(event_pollset, &cs->pfd);
             if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
                 AP_DEBUG_ASSERT(0);
-                TO_QUEUE_REMOVE(cs->sc->ps_q, cs);
+                TO_QUEUE_REMOVE(cs->sc->wc_q, cs);
                 apr_thread_mutex_unlock(timeout_mutex);
                 ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03465)
                              "process_socket: apr_pollset_add failure in "
@@ -1256,7 +1259,7 @@ process_connection:
             cs->pub.state = CONN_STATE_LINGER;
         }
         else if (ap_run_input_pending(c) == OK) {
-            cs->pub.state = CONN_STATE_PROCESS;
+            cs->pub.state = CONN_STATE_PROCESSING;
             goto process_connection;
         }
         else if (!listener_may_exit) {
@@ -1336,7 +1339,7 @@ static apr_status_t event_resume_suspended (conn_rec *c)
         cs->pub.state = CONN_STATE_WRITE_COMPLETION;
         update_reqevents_from_sense(cs, CONN_SENSE_WANT_WRITE);
         apr_thread_mutex_lock(timeout_mutex);
-        TO_QUEUE_APPEND(cs->sc->ps_q, cs);
+        TO_QUEUE_APPEND(cs->sc->wc_q, cs);
         apr_pollset_add(event_pollset, &cs->pfd);
         apr_thread_mutex_unlock(timeout_mutex);
     }
@@ -1966,11 +1969,12 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         /* trace log status every second */
         if (now - last_log > apr_time_from_sec(1)) {
             ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
-                         "connections: %u (process:%d keep-alive:%d "
-                         "lingering:%d suspended:%u clogged:%u), "
+                         "connections: %u (processing:%d write-completion:%d "
+                         "keep-alive:%d lingering:%d suspended:%u clogged:%u), "
                          "workers: %u/%u shutdown",
                          apr_atomic_read32(&connection_count),
-                         apr_atomic_read32(process_q->total),
+                         apr_atomic_read32(processing_q->total),
+                         apr_atomic_read32(write_completion_q->total),
                          apr_atomic_read32(keepalive_q->total),
                          apr_atomic_read32(&lingering_count),
                          apr_atomic_read32(&suspended_count),
@@ -2099,14 +2103,18 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                 int blocking = 0;
 
                 switch (cs->pub.state) {
-                case CONN_STATE_PROCESS:
-                case CONN_STATE_WRITE_COMPLETION:
+                case CONN_STATE_PROCESSING:
                     remove_from_q = cs->sc->ps_q;
                     blocking = 1;
                     break;
 
+                case CONN_STATE_WRITE_COMPLETION:
+                    remove_from_q = cs->sc->wc_q;
+                    blocking = 1;
+                    break;
+
                 case CONN_STATE_KEEPALIVE:
-                    cs->pub.state = CONN_STATE_PROCESS;
+                    cs->pub.state = CONN_STATE_PROCESSING;
                     remove_from_q = cs->sc->ka_q;
                     break;
@@ -2307,23 +2315,28 @@ do_maintenance:
             /* Steps below will recompute this. */
             queues_next_expiry = 0;
 
-            /* Step 1: keepalive timeouts */
+            /* Step 1: keepalive queue timeouts are closed */
            if (workers_were_busy || dying) {
                 process_keepalive_queue(0); /* kill'em all \m/ */
             }
             else {
                 process_keepalive_queue(now);
             }
-            /* Step 2: process timeouts */
-            process_timeout_queue(process_q, now,
-                                  defer_lingering_close);
-            /* Step 3: (normal) lingering close completion timeouts */
+
+            /* Step 2: processing queue timeouts are flushed */
+            process_timeout_queue(processing_q, now, defer_lingering_close);
+
+            /* Step 3: write completion queue timeouts are flushed */
+            process_timeout_queue(write_completion_q, now, defer_lingering_close);
+
+            /* Step 4: normal lingering close queue timeouts are closed */
             if (dying && linger_q->timeout > short_linger_q->timeout) {
                 /* Dying, force short timeout for normal lingering close */
                 linger_q->timeout = short_linger_q->timeout;
             }
             process_timeout_queue(linger_q, now, shutdown_connection);
-            /* Step 4: (short) lingering close completion timeouts */
+
+            /* Step 5: short lingering close queue timeouts are closed */
             process_timeout_queue(short_linger_q, now, shutdown_connection);
 
             apr_thread_mutex_unlock(timeout_mutex);
@@ -2332,11 +2345,12 @@ do_maintenance:
                              queues_next_expiry > now ? queues_next_expiry - now
                                                       : -1);
 
+            ps->processing = apr_atomic_read32(processing_q->total);
+            ps->write_completion = apr_atomic_read32(write_completion_q->total);
             ps->keep_alive = apr_atomic_read32(keepalive_q->total);
-            ps->write_completion = apr_atomic_read32(process_q->total);
-            ps->connections = apr_atomic_read32(&connection_count);
-            ps->suspended = apr_atomic_read32(&suspended_count);
             ps->lingering_close = apr_atomic_read32(&lingering_count);
+            ps->suspended = apr_atomic_read32(&suspended_count);
+            ps->connections = apr_atomic_read32(&connection_count);
         }
         else if ((workers_were_busy || dying)
                  && apr_atomic_read32(keepalive_q->total)) {
@@ -3839,7 +3853,7 @@ static void setup_slave_conn(conn_rec *c, void *csd)
     cs->bucket_alloc = c->bucket_alloc;
     cs->pfd = mcs->pfd;
     cs->pub = mcs->pub;
-    cs->pub.state = CONN_STATE_PROCESS;
+    cs->pub.state = CONN_STATE_PROCESSING;
     cs->pub.sense = CONN_SENSE_DEFAULT;
 
     c->cs = &(cs->pub);
@@ -4005,16 +4019,17 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
     struct {
         struct timeout_queue *tail, *q;
         apr_hash_t *hash;
-    } ps, ka;
+    } ps, wc, ka;
 
     /* Not needed in pre_config stage */
     if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) {
         return OK;
     }
 
-    ps.tail = ka.tail = NULL;
     ps.hash = apr_hash_make(ptemp);
+    wc.hash = apr_hash_make(ptemp);
     ka.hash = apr_hash_make(ptemp);
+    ps.tail = wc.tail = ka.tail = NULL;
 
     linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(MAX_SECS_TO_LINGER),
                              NULL);
@@ -4029,7 +4044,11 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
             /* The main server uses the global queues */
             ps.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL);
             apr_hash_set(ps.hash, &s->timeout, sizeof s->timeout, ps.q);
-            ps.tail = process_q = ps.q;
+            ps.tail = processing_q = ps.q;
+
+            wc.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL);
+            apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
+            wc.tail = write_completion_q = wc.q;
 
             ka.q = TO_QUEUE_MAKE(pconf, s->keep_alive_timeout, NULL);
             apr_hash_set(ka.hash, &s->keep_alive_timeout,
@@ -4046,6 +4065,13 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                 ps.tail = ps.tail->next = ps.q;
             }
 
+            wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout);
+            if (!wc.q) {
+                wc.q = TO_QUEUE_MAKE(pconf, s->timeout, wc.tail);
+                apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
+                wc.tail = wc.tail->next = wc.q;
+            }
+
             ka.q = apr_hash_get(ka.hash, &s->keep_alive_timeout,
                                 sizeof s->keep_alive_timeout);
             if (!ka.q) {
@@ -4056,6 +4082,7 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
             }
         }
         sc->ps_q = ps.q;
+        sc->wc_q = wc.q;
         sc->ka_q = ka.q;
     }

server/mpm/motorz/motorz.c

@@ -160,7 +160,7 @@ static void *motorz_io_setup_conn(apr_thread_t *thread, void *baton)
                      "motorz_io_setup_conn: connection aborted");
     }
 
-    scon->cs.state = CONN_STATE_PROCESS;
+    scon->cs.state = CONN_STATE_PROCESSING;
     scon->cs.sense = CONN_SENSE_DEFAULT;
 
     status = motorz_io_process(scon);
@@ -376,14 +376,14 @@ static apr_status_t motorz_io_process(motorz_conn_t *scon)
 
         if (scon->cs.state == CONN_STATE_KEEPALIVE) {
             ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03327)
-                         "motorz_io_process(): Set to CONN_STATE_PROCESS");
-            scon->cs.state = CONN_STATE_PROCESS;
+                         "motorz_io_process(): Set to CONN_STATE_PROCESSING");
+            scon->cs.state = CONN_STATE_PROCESSING;
         }
 
 read_request:
-        if (scon->cs.state == CONN_STATE_PROCESS) {
+        if (scon->cs.state == CONN_STATE_PROCESSING) {
             ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03328)
-                         "motorz_io_process(): CONN_STATE_PROCESS");
+                         "motorz_io_process(): CONN_STATE_PROCESSING");
             if (!c->aborted) {
                 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03329)
                              "motorz_io_process(): !aborted");
@@ -438,7 +438,7 @@ read_request:
             scon->cs.state = CONN_STATE_LINGER;
         }
         else if (ap_run_input_pending(c) == OK) {
-            scon->cs.state = CONN_STATE_PROCESS;
+            scon->cs.state = CONN_STATE_PROCESSING;
             goto read_request;
         }
         else {

server/mpm/simple/simple_io.c

@@ -79,7 +79,7 @@ static apr_status_t simple_io_process(simple_conn_t * scon)
         scon->pfd.reqevents = 0;
     }
 
-    if (scon->cs.state == CONN_STATE_PROCESS) {
+    if (scon->cs.state == CONN_STATE_PROCESSING) {
         if (!c->aborted) {
             ap_run_process_connection(c);
             /* state will be updated upon return
@@ -132,7 +132,7 @@ static apr_status_t simple_io_process(simple_conn_t * scon)
             scon->cs.state = CONN_STATE_LINGER;
         }
         else if (ap_run_input_pending(c) == OK) {
-            scon->cs.state = CONN_STATE_PROCESS;
+            scon->cs.state = CONN_STATE_PROCESSING;
         }
         else {
             scon->cs.state = CONN_STATE_KEEPALIVE;
@@ -233,7 +233,7 @@ static void *simple_io_setup_conn(apr_thread_t * thread, void *baton)
                      "simple_io_setup_conn: connection aborted");
     }
 
-    scon->cs.state = CONN_STATE_PROCESS;
+    scon->cs.state = CONN_STATE_PROCESSING;
     scon->cs.sense = CONN_SENSE_DEFAULT;
 
     rv = simple_io_process(scon);