mpm_event,mod_http2,mod_status: Follow up to r1918257: CONN_STATE_ASYNC_WAITIO.
Per discussion on PR #449, have a separate state for returning the connection to
the MPM to wait for an IO (namely CONN_STATE_ASYNC_WAITIO), rather than (ab)using
CONN_STATE_PROCESSING. This removes the need for AGAIN added in r1918257 (for now),
and AP_MPMQ_CAN_AGAIN is renamed to AP_MPMQ_CAN_WAITIO.

This is also the state that mod_status accounts for, so rename ->processing to
->wait_io in process_score (shows as "wait-io" in mod_status and mod_lua).

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1918482 13f79535-47bb-0310-9956-ffa450edef68
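For module authors, the intended usage pattern is the one the mod_http2 hunks below adopt: query the MPM once at child init, then hand a blocked connection back to the MPM instead of sleeping on the socket. The following is a minimal sketch only, assuming a hypothetical mod_example; the hook wiring and names are illustrative and not part of this commit:

    #include "httpd.h"
    #include "http_config.h"
    #include "http_connection.h"
    #include "ap_mpm.h"

    static int mpm_can_waitio; /* probed once per child process */

    static void example_child_init(apr_pool_t *pchild, server_rec *s)
    {
        int async_mpm = 0;

        if (ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm)) {
            async_mpm = 0; /* some MPMs do not implement this query */
        }
    #ifdef AP_MPMQ_CAN_WAITIO
        if (!async_mpm || ap_mpm_query(AP_MPMQ_CAN_WAITIO, &mpm_can_waitio)) {
            mpm_can_waitio = 0;
        }
    #endif
    }

    static int example_process_connection(conn_rec *c)
    {
        /* ... speak the protocol until the socket would block ... */

    #ifdef AP_MPMQ_CAN_WAITIO
        if (mpm_can_waitio) {
            /* Park the connection in the MPM: it polls the socket according
             * to c->cs->sense under the configured Timeout and re-enters the
             * process_connection hooks when the socket is ready. */
            c->cs->sense = CONN_SENSE_WANT_READ;
            c->cs->state = CONN_STATE_ASYNC_WAITIO;
            return OK;
        }
    #endif
        /* Fallback for MPMs without WAITIO support: keep blocking here. */
        return OK;
    }

    static void example_register_hooks(apr_pool_t *pool)
    {
        ap_hook_child_init(example_child_init, NULL, NULL, APR_HOOK_MIDDLE);
        ap_hook_process_connection(example_process_connection,
                                   NULL, NULL, APR_HOOK_MIDDLE);
    }

    AP_DECLARE_MODULE(example) = {
        STANDARD20_MODULE_STUFF,
        NULL, NULL, NULL, NULL, NULL,
        example_register_hooks
    };

Note that a hook which parks the connection must return OK with the state set: per the process_socket comment updated below, the event MPM now treats OK with an untouched CONN_STATE_PROCESSING as an error and does a lingering close.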
@@ -723,9 +723,10 @@
  * 20211221.17 (2.5.1-dev) Add ap_proxy_worker_get_name()
  * 20211221.18 (2.5.1-dev) Add ap_regexec_ex()
  * 20211221.19 (2.5.1-dev) Add AP_REG_NOTEMPTY_ATSTART
- * 20211221.20 (2.5.1-dev) Add CONN_STATE_KEEPALIVE and CONN_STATE_PROCESSING
- * 20211221.21 (2.5.1-dev) Add processing field struct process_score
- * 20211221.22 (2.5.1-dev) Add AGAIN, AP_MPMQ_CAN_AGAIN.
+ * 20211221.20 (2.5.1-dev) Add CONN_STATE_ASYNC_WAITIO, CONN_STATE_KEEPALIVE
+ *                         and CONN_STATE_PROCESSING
+ * 20211221.21 (2.5.1-dev) Add wait_io field to struct process_score
+ * 20211221.22 (2.5.1-dev) Add AP_MPMQ_CAN_WAITIO
  */

 #define MODULE_MAGIC_COOKIE 0x41503235UL /* "AP25" */
@@ -182,8 +182,8 @@ AP_DECLARE(apr_status_t) ap_os_create_privileged_process(
 #define AP_MPMQ_CAN_SUSPEND          17
 /** MPM supports additional pollfds */
 #define AP_MPMQ_CAN_POLL             18
-/** MPM reacts to AGAIN response */
-#define AP_MPMQ_CAN_AGAIN            19
+/** MPM supports CONN_STATE_ASYNC_WAITIO */
+#define AP_MPMQ_CAN_WAITIO           19
 /** @} */

 /**
@@ -465,9 +465,6 @@ AP_DECLARE(const char *) ap_get_server_built(void);
  */
 #define SUSPENDED -3 /**< Module will handle the remainder of the request.
                       * The core will never invoke the request again */
-#define AGAIN -4     /**< Module wants to be called again when more
-                      * data is available.
-                      */

 /** Returned by the bottom-most filter if no data was written.
  * @see ap_pass_brigade(). */
@@ -1322,11 +1319,7 @@ struct conn_slave_rec {
  */
 typedef enum {
     CONN_STATE_KEEPALIVE,           /* Kept alive in the MPM (using KeepAliveTimeout) */
-    CONN_STATE_PROCESSING,          /* Processed by process_connection() hooks, returning
-                                     * AGAIN to the MPM in this state will make it wait for
-                                     * the connection to be readable or writable according to
-                                     * CONN_SENSE_WANT_READ or CONN_SENSE_WANT_WRITE respectively,
-                                     * where Timeout applies */
+    CONN_STATE_PROCESSING,          /* Processed by process_connection hooks */
     CONN_STATE_HANDLER,             /* Processed by the modules handlers */
     CONN_STATE_WRITE_COMPLETION,    /* Flushed by the MPM before entering CONN_STATE_KEEPALIVE */
     CONN_STATE_SUSPENDED,           /* Suspended in the MPM until ap_run_resume_suspended() */
@@ -1334,6 +1327,11 @@ typedef enum {
     CONN_STATE_LINGER_NORMAL,       /* MPM has started lingering close with normal timeout */
     CONN_STATE_LINGER_SHORT,        /* MPM has started lingering close with short timeout */

+    CONN_STATE_ASYNC_WAITIO,        /* Returning this state to the MPM will make it wait for
+                                     * the connection to be readable or writable according to
+                                     * c->cs->sense (resp. CONN_SENSE_WANT_READ or _WRITE),
+                                     * using the configured Timeout */
+
     CONN_STATE_NUM,                 /* Number of states (keep here before aliases) */

     /* Aliases (legacy) */
@@ -148,8 +148,7 @@ struct process_score {
     apr_uint32_t lingering_close;   /* async connections in lingering close */
     apr_uint32_t keep_alive;        /* async connections in keep alive */
     apr_uint32_t suspended;         /* connections suspended by some module */
-    apr_uint32_t processing;        /* async connections in processing (returned
-                                       to the MPM for POLLIN/POLLOUT) */
+    apr_uint32_t wait_io;           /* async connections waiting an IO in the MPM */
 };

 /* Scoreboard is now in 'local' memory, since it isn't updated once created,
@@ -564,7 +564,7 @@ static int status_handler(request_rec *r)
         ap_rputs("</dl>", r);

     if (is_async) {
-        int processing = 0, write_completion = 0, lingering_close = 0, keep_alive = 0,
+        int wait_io = 0, write_completion = 0, lingering_close = 0, keep_alive = 0,
             connections = 0, stopping = 0, procs = 0;
         if (!short_report)
             ap_rputs("\n\n<table rules=\"all\" cellpadding=\"1%\">\n"
@@ -576,13 +576,13 @@ static int status_handler(request_rec *r)
                      "<th colspan=\"3\">Async connections</th></tr>\n"
                      "<tr><th>total</th><th>accepting</th>"
                      "<th>busy</th><th>graceful</th><th>idle</th>"
-                     "<th>processing</th><th>writing</th><th>keep-alive</th>"
+                     "<th>wait-io</th><th>writing</th><th>keep-alive</th>"
                      "<th>closing</th></tr>\n", r);
         for (i = 0; i < server_limit; ++i) {
             ps_record = ap_get_scoreboard_process(i);
             if (ps_record->pid) {
                 connections += ps_record->connections;
-                processing += ps_record->processing;
+                wait_io += ps_record->wait_io;
                 write_completion += ps_record->write_completion;
                 keep_alive += ps_record->keep_alive;
                 lingering_close += ps_record->lingering_close;
@@ -611,7 +611,7 @@ static int status_handler(request_rec *r)
                                thread_busy_buffer[i],
                                thread_graceful_buffer[i],
                                thread_idle_buffer[i],
-                               ps_record->processing,
+                               ps_record->wait_io,
                                ps_record->write_completion,
                                ps_record->keep_alive,
                                ps_record->lingering_close);
@@ -628,20 +628,20 @@ static int status_handler(request_rec *r)
                        procs, stopping,
                        connections,
                        busy, graceful, idle,
-                       processing, write_completion, keep_alive,
+                       wait_io, write_completion, keep_alive,
                        lingering_close);
         }
         else {
             ap_rprintf(r, "Processes: %d\n"
                           "Stopping: %d\n"
                           "ConnsTotal: %d\n"
-                          "ConnsAsyncProcessing: %d\n"
+                          "ConnsAsyncWaitIO: %d\n"
                           "ConnsAsyncWriting: %d\n"
                           "ConnsAsyncKeepAlive: %d\n"
                           "ConnsAsyncClosing: %d\n",
                        procs, stopping,
                        connections,
-                       processing, write_completion, keep_alive,
+                       wait_io, write_completion, keep_alive,
                        lingering_close);
         }
     }
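For consumers of the machine-readable report (mod_status with the ?auto qualifier), the ConnsAsyncProcessing line is replaced by ConnsAsyncWaitIO, so monitoring scripts keying on the old name need updating. A hypothetical excerpt of the new output (values invented for illustration):

    Processes: 2
    Stopping: 0
    ConnsTotal: 44
    ConnsAsyncWaitIO: 3
    ConnsAsyncWriting: 2
    ConnsAsyncKeepAlive: 31
    ConnsAsyncClosing: 1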
@@ -47,7 +47,7 @@

 static struct h2_workers *workers;

-static int async_mpm, mpm_can_again;
+static int async_mpm, mpm_can_waitio;

 APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_c_logio_add_bytes_in;
 APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_c_logio_add_bytes_out;
@@ -61,9 +61,9 @@ apr_status_t h2_c1_child_init(apr_pool_t *pool, server_rec *s)
         /* some MPMs do not implemnent this */
         async_mpm = 0;
     }
-#ifdef AP_MPMQ_CAN_AGAIN
-    if (!async_mpm || ap_mpm_query(AP_MPMQ_CAN_AGAIN, &mpm_can_again)) {
-        mpm_can_again = 0;
+#ifdef AP_MPMQ_CAN_WAITIO
+    if (!async_mpm || ap_mpm_query(AP_MPMQ_CAN_WAITIO, &mpm_can_waitio)) {
+        mpm_can_waitio = 0;
     }
 #endif

@@ -117,7 +117,6 @@ cleanup:

 int h2_c1_run(conn_rec *c)
 {
-    int rc = OK;
     apr_status_t status;
     int mpm_state = 0, keepalive = 0;
     h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
@@ -165,15 +164,14 @@ int h2_c1_run(conn_rec *c)
              * See PR 63534.
              */
             c->cs->sense = CONN_SENSE_WANT_READ;
-#ifdef AP_MPMQ_CAN_AGAIN
-            if (mpm_can_again) {
+#ifdef AP_MPMQ_CAN_WAITIO
+            if (mpm_can_waitio) {
                 /* This tells the MPM to wait for the connection to be
                  * readable (CONN_SENSE_WANT_READ) within the configured
                  * Timeout and then come back to the process_connection()
                  * hooks again when ready.
                  */
-                c->cs->state = CONN_STATE_PROCESSING;
-                rc = AGAIN;
+                c->cs->state = CONN_STATE_ASYNC_WAITIO;
             }
             else
 #endif
@@ -199,7 +197,7 @@ int h2_c1_run(conn_rec *c)
         }
     }

-    return rc;
+    return OK;
 }

 apr_status_t h2_c1_pre_close(struct h2_conn_ctx_t *ctx, conn_rec *c)
@@ -1264,8 +1264,8 @@ static int lua_ap_scoreboard_process(lua_State *L)
     lua_pushnumber(L, ps_record->suspended);
     lua_settable(L, -3);

-    lua_pushstring(L, "processing");
-    lua_pushnumber(L, ps_record->processing);
+    lua_pushstring(L, "wait_io");
+    lua_pushnumber(L, ps_record->wait_io);
     lua_settable(L, -3);

     lua_pushstring(L, "write_completion");
@@ -268,13 +268,13 @@ struct timeout_queue {
 /*
  * Several timeout queues that use different timeouts, so that we always can
  * simply append to the end.
- *     processing_q        uses vhost's TimeOut
+ *     waitio_q            uses vhost's TimeOut
  *     write_completion_q  uses vhost's TimeOut
  *     keepalive_q         uses vhost's KeepAliveTimeOut
  *     linger_q            uses MAX_SECS_TO_LINGER
  *     short_linger_q      uses SECONDS_TO_LINGER
  */
-static struct timeout_queue *processing_q,
+static struct timeout_queue *waitio_q,
                             *write_completion_q,
                             *keepalive_q,
                             *linger_q,
@@ -448,7 +448,7 @@ static event_retained_data *retained;
 static int max_spawn_rate_per_bucket = MAX_SPAWN_RATE / 1;

 struct event_srv_cfg_s {
-    struct timeout_queue *ps_q,
+    struct timeout_queue *io_q,
                          *wc_q,
                          *ka_q;
 };
@@ -734,7 +734,7 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
     case AP_MPMQ_CAN_POLL:
         *result = 1;
         break;
-    case AP_MPMQ_CAN_AGAIN:
+    case AP_MPMQ_CAN_WAITIO:
         *result = 1;
         break;
     default:
@@ -1148,17 +1148,19 @@ static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * soc
      * interacts with the MPM through suspend/resume_connection() hooks,
      * and/or registered poll callbacks (PT_USER), and/or registered
      * timed callbacks triggered by timer events;
-     * - CONN_STATE_PROCESSING: wait for read/write-ability of the underlying
+     * - CONN_STATE_ASYNC_WAITIO: wait for read/write-ability of the underlying
      *   socket using Timeout and come back to process_connection() hooks when
-     *   ready (the return value should be AGAIN in this case to not break old
-     *   or third-party modules which might return OK w/o touching the state and
-     *   expect lingering close, like with worker or prefork MPMs);
+     *   ready;
      * - CONN_STATE_KEEPALIVE: now handled by CONN_STATE_WRITE_COMPLETION
      *   to flush before waiting for next data (that might depend on it).
      * If a process_connection hook returns an error or no hook sets the state
      * to one of the above expected value, forcibly close the connection w/
      * CONN_STATE_LINGER. This covers the cases where no process_connection
-     * hook executes (DECLINED).
+     * hook executes (DECLINED), or one returns OK w/o touching the state (i.e.
+     * CONN_STATE_PROCESSING remains after the call) which can happen with
+     * third-party modules not updated to work specifically with event MPM
+     * while this was expected to do lingering close unconditionally with
+     * worker or prefork MPMs for instance.
      */
     switch (rc) {
     case DONE:
@@ -1171,14 +1173,9 @@ static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * soc
             cs->pub.state = CONN_STATE_WRITE_COMPLETION;
         }
         break;
-    case AGAIN:
-        if (cs->pub.state == CONN_STATE_PROCESSING) {
-            rc = OK;
-        }
-        break;
     }
     if (rc != OK || (cs->pub.state != CONN_STATE_LINGER
-                     && cs->pub.state != CONN_STATE_PROCESSING
+                     && cs->pub.state != CONN_STATE_ASYNC_WAITIO
                      && cs->pub.state != CONN_STATE_WRITE_COMPLETION
                      && cs->pub.state != CONN_STATE_SUSPENDED)) {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(10111)
@@ -1198,7 +1195,7 @@ static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * soc
         from_wc_q = 1;
     }

-    if (cs->pub.state == CONN_STATE_PROCESSING) {
+    if (cs->pub.state == CONN_STATE_ASYNC_WAITIO) {
         /* Set a read/write timeout for this connection, and let the
          * event thread poll for read/writeability.
          */
@@ -1212,15 +1209,15 @@ static void process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * soc
          */
         update_reqevents_from_sense(cs, CONN_SENSE_WANT_READ);
         apr_thread_mutex_lock(timeout_mutex);
-        TO_QUEUE_APPEND(cs->sc->ps_q, cs);
+        TO_QUEUE_APPEND(cs->sc->io_q, cs);
         rv = apr_pollset_add(event_pollset, &cs->pfd);
         if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
             AP_DEBUG_ASSERT(0);
-            TO_QUEUE_REMOVE(cs->sc->ps_q, cs);
+            TO_QUEUE_REMOVE(cs->sc->io_q, cs);
             apr_thread_mutex_unlock(timeout_mutex);
             ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(10503)
                          "process_socket: apr_pollset_add failure in "
-                         "CONN_STATE_PROCESSING");
+                         "CONN_STATE_ASYNC_WAITIO");
             close_connection(cs);
             signal_threads(ST_GRACEFUL);
         }
@@ -1997,11 +1994,11 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         /* trace log status every second */
         if (now - last_log > apr_time_from_sec(1)) {
             ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
-                         "connections: %u (processing:%d write-completion:%d"
-                         "keep-alive:%d lingering:%d suspended:%u clogged:%u), "
+                         "connections: %u (waitio:%u write-completion:%u"
+                         "keep-alive:%u lingering:%u suspended:%u clogged:%u), "
                          "workers: %u/%u shutdown",
                          apr_atomic_read32(&connection_count),
-                         apr_atomic_read32(processing_q->total),
+                         apr_atomic_read32(waitio_q->total),
                          apr_atomic_read32(write_completion_q->total),
                          apr_atomic_read32(keepalive_q->total),
                          apr_atomic_read32(&lingering_count),
@@ -2131,13 +2128,14 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
             int blocking = 0;

             switch (cs->pub.state) {
-            case CONN_STATE_PROCESSING:
-                remove_from_q = cs->sc->ps_q;
+            case CONN_STATE_WRITE_COMPLETION:
+                remove_from_q = cs->sc->wc_q;
                 blocking = 1;
                 break;

-            case CONN_STATE_WRITE_COMPLETION:
-                remove_from_q = cs->sc->wc_q;
+            case CONN_STATE_ASYNC_WAITIO:
+                cs->pub.state = CONN_STATE_PROCESSING;
+                remove_from_q = cs->sc->io_q;
                 blocking = 1;
                 break;

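The listener change above closes the loop: a worker parks a connection by returning CONN_STATE_ASYNC_WAITIO, and when the pollset fires, the listener flips it back to CONN_STATE_PROCESSING before requeuing it to a worker, so process_connection hooks always resume from PROCESSING. A self-contained toy model of that lifecycle (states only, no real polling; purely illustrative):

    #include <stdio.h>

    typedef enum {
        TOY_PROCESSING,       /* worker runs the process_connection hooks */
        TOY_ASYNC_WAITIO,     /* parked in the MPM, polling for IO */
        TOY_WRITE_COMPLETION, /* MPM flushes pending output */
        TOY_KEEPALIVE         /* waiting for the next request */
    } toy_state;

    static const char *toy_name(toy_state s)
    {
        static const char *names[] = {
            "PROCESSING", "ASYNC_WAITIO", "WRITE_COMPLETION", "KEEPALIVE"
        };
        return names[s];
    }

    int main(void)
    {
        /* One request that blocks once on input: the hooks park the
         * connection, the listener wakes it when readable and re-enters
         * the hooks in PROCESSING, then output drains into keep-alive. */
        toy_state trace[] = {
            TOY_PROCESSING, TOY_ASYNC_WAITIO, TOY_PROCESSING,
            TOY_WRITE_COMPLETION, TOY_KEEPALIVE
        };
        unsigned i;
        for (i = 0; i < sizeof(trace) / sizeof(trace[0]); ++i)
            printf("step %u: %s\n", i, toy_name(trace[i]));
        return 0;
    }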
@@ -2351,8 +2349,8 @@ do_maintenance:
             process_keepalive_queue(now);
         }

-        /* Step 2: processing queue timeouts are flushed */
-        process_timeout_queue(processing_q, now, defer_lingering_close);
+        /* Step 2: waitio queue timeouts are flushed */
+        process_timeout_queue(waitio_q, now, defer_lingering_close);

         /* Step 3: write completion queue timeouts are flushed */
         process_timeout_queue(write_completion_q, now, defer_lingering_close);
@@ -2373,7 +2371,7 @@ do_maintenance:
                      queues_next_expiry > now ? queues_next_expiry - now
                                               : -1);

-        ps->processing = apr_atomic_read32(processing_q->total);
+        ps->wait_io = apr_atomic_read32(waitio_q->total);
         ps->write_completion = apr_atomic_read32(write_completion_q->total);
         ps->keep_alive = apr_atomic_read32(keepalive_q->total);
         ps->lingering_close = apr_atomic_read32(&lingering_count);
@@ -4047,17 +4045,17 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
     struct {
         struct timeout_queue *tail, *q;
         apr_hash_t *hash;
-    } ps, wc, ka;
+    } io, wc, ka;

     /* Not needed in pre_config stage */
     if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG) {
         return OK;
     }

-    ps.hash = apr_hash_make(ptemp);
+    io.hash = apr_hash_make(ptemp);
     wc.hash = apr_hash_make(ptemp);
     ka.hash = apr_hash_make(ptemp);
-    ps.tail = wc.tail = ka.tail = NULL;
+    io.tail = wc.tail = ka.tail = NULL;

     linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(MAX_SECS_TO_LINGER),
                              NULL);
@@ -4068,11 +4066,11 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
         event_srv_cfg *sc = apr_pcalloc(pconf, sizeof *sc);

         ap_set_module_config(s->module_config, &mpm_event_module, sc);
-        if (!ps.tail) {
+        if (!io.tail) {
             /* The main server uses the global queues */
-            ps.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL);
-            apr_hash_set(ps.hash, &s->timeout, sizeof s->timeout, ps.q);
-            ps.tail = processing_q = ps.q;
+            io.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL);
+            apr_hash_set(io.hash, &s->timeout, sizeof s->timeout, io.q);
+            io.tail = waitio_q = io.q;

             wc.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL);
             apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
@@ -4086,11 +4084,11 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
         else {
             /* The vhosts use any existing queue with the same timeout,
              * or their own queue(s) if there isn't */
-            ps.q = apr_hash_get(ps.hash, &s->timeout, sizeof s->timeout);
-            if (!ps.q) {
-                ps.q = TO_QUEUE_MAKE(pconf, s->timeout, ps.tail);
-                apr_hash_set(ps.hash, &s->timeout, sizeof s->timeout, ps.q);
-                ps.tail = ps.tail->next = ps.q;
+            io.q = apr_hash_get(io.hash, &s->timeout, sizeof s->timeout);
+            if (!io.q) {
+                io.q = TO_QUEUE_MAKE(pconf, s->timeout, io.tail);
+                apr_hash_set(io.hash, &s->timeout, sizeof s->timeout, io.q);
+                io.tail = io.tail->next = io.q;
             }

             wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout);
@@ -4109,7 +4107,7 @@ static int event_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                 ka.tail = ka.tail->next = ka.q;
             }
         }
-        sc->ps_q = ps.q;
+        sc->io_q = io.q;
         sc->wc_q = wc.q;
         sc->ka_q = ka.q;
     }