
core: follow up to r1839997: recycle request filters to a delayed ring first.

We want not only ap_filter_output_pending() to be able to access each pending
filter's *f after the EOR is destroyed, but also each request filter to be
able to do the same until it returns.

So request filters are now always cleaned up into a dead_filters ring, which
is merged into spare_filters only when ap_filter_recycle() is called
explicitly, that is in ap_process_request_after_handler() and
ap_filter_output_pending().

The former takes care of recycling at the end of each request, with any MPM,
while the latter keeps recycling during the event MPM's write completion.


git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1840002 13f79535-47bb-0310-9956-ffa450edef68
Author: Yann Ylavic
Date:   2018-09-04 02:40:49 +0000
Parent: c486973584
Commit: 5262e7e73a

5 changed files with 50 additions and 28 deletions
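
The scheme described in the commit message reduces to a two-stage free list:
a pool cleanup can fire while the filter it releases is still executing, so
the cleanup only queues the memory on a "dead" list, and an explicit recycle
pass later zeroes it and makes it reusable. A minimal standalone C sketch of
that pattern (illustrative names only, not the httpd implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Two-stage free list: "dead" entries may still be referenced by code
 * that has not returned yet, "spare" entries are safe to hand out. */
struct filter {
    struct filter *next;
    int in_use;
};

static struct filter *dead_filters;   /* freed, but maybe still referenced */
static struct filter *spare_filters;  /* zeroed and reusable */

/* Stage 1: called from the cleanup (possibly while *f is still on the
 * call stack), so only queue the memory, do not clobber or reuse it. */
static void filter_cleanup(struct filter *f)
{
    f->next = dead_filters;
    dead_filters = f;
}

/* Stage 2: called once every filter has returned; now zeroing and
 * reuse are safe. */
static void filter_recycle(void)
{
    while (dead_filters) {
        struct filter *f = dead_filters;
        dead_filters = f->next;
        memset(f, 0, sizeof(*f));
        f->next = spare_filters;
        spare_filters = f;
    }
}

int main(void)
{
    struct filter *f = calloc(1, sizeof(*f));

    filter_cleanup(f);            /* f is dead, but still addressable */
    printf("dead? %d\n", dead_filters == f);
    filter_recycle();             /* now f is spare and reusable */
    printf("spare? %d\n", spare_filters == f);
    free(f);
    return 0;
}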


@@ -601,6 +601,7 @@
  * 20180903.1 (2.5.1-dev) Replace conn_rec pending_{in,out}put_filters by
  *                        filter_conn_ctx, remove argument pool from
  *                        ap_filter_prepare_brigade()
+ * 20180903.2 (2.5.1-dev) Add ap_filter_recycle()
  */
 
 #define MODULE_MAGIC_COOKIE 0x41503235UL /* "AP25" */
@@ -608,7 +609,7 @@
 #ifndef MODULE_MAGIC_NUMBER_MAJOR
 #define MODULE_MAGIC_NUMBER_MAJOR 20180903
 #endif
-#define MODULE_MAGIC_NUMBER_MINOR 1 /* 0...n */
+#define MODULE_MAGIC_NUMBER_MINOR 2 /* 0...n */
 
 /**
  * Determine if the server's current MODULE_MAGIC_NUMBER is at least a
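
For third-party modules, this minor bump is what a compile-time feature check
keys on; a sketch (maybe_recycle is a hypothetical helper, assuming the httpd
headers are on the include path) using ap_mmn.h's AP_MODULE_MAGIC_AT_LEAST():

#include "httpd.h"
#include "ap_mmn.h"
#include "util_filter.h"

/* Only call the new API when built against an httpd new enough to
 * provide it (MMN 20180903.2). */
static void maybe_recycle(conn_rec *c)
{
#if AP_MODULE_MAGIC_AT_LEAST(20180903, 2)
    ap_filter_recycle(c);
#else
    (void)c; /* older httpd recycles via the previous spare_flushes path */
#endif
}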


@@ -643,6 +643,15 @@ AP_DECLARE_NONSTD(int) ap_filter_output_pending(conn_rec *c);
  */
 AP_DECLARE_NONSTD(int) ap_filter_input_pending(conn_rec *c);
 
+/**
+ * Recycle removed request filters so that they can be reused for filters
+ * added later on the same connection. This should typically happen after
+ * each request is handled.
+ *
+ * @param c The connection.
+ */
+AP_DECLARE(void) ap_filter_recycle(conn_rec *c);
+
 /**
  * Flush function for apr_brigade_* calls. This calls ap_pass_brigade
  * to flush the brigade if the brigade buffer overflows.
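
Per the doc comment, the function is meant to run once no request filter can
still be executing; the commit wires it into ap_process_request_after_handler()
and ap_filter_output_pending() (see the hunks below). A hedged sketch of the
calling pattern (finish_request is hypothetical):

static void finish_request(request_rec *r)
{
    conn_rec *c = r->connection;

    /* ... pass the EOR bucket down the stack; request filters get
     * cleaned up into the connection's dead_filters ring ... */

    if (!c->aborted) {
        /* all filters have returned: dead filters may now be reused */
        ap_filter_recycle(c);
    }
}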


@@ -401,6 +401,10 @@ AP_DECLARE(void) ap_process_request_after_handler(request_rec *r)
     (void)ap_check_pipeline(c, bb, DEFAULT_LIMIT_BLANK_LINES);
     apr_brigade_cleanup(bb);
 
+    if (!c->aborted) {
+        ap_filter_recycle(c);
+    }
+
     if (c->cs) {
         if (c->aborted) {
             c->cs->state = CONN_STATE_LINGER;


@@ -2098,7 +2098,6 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_request_core_filter(ap_filter_t *f,
             APR_BRIGADE_CONCAT(tmp_bb, bb);
             ap_remove_output_filter(f);
             seen_eor = 1;
-            f->r = NULL;
         }
         else {
             /* if the core has set aside data, back off and try later */
@@ -2134,8 +2133,7 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_request_core_filter(ap_filter_t *f,
         status = ap_pass_brigade(f->next, tmp_bb);
         apr_brigade_cleanup(tmp_bb);
-        if (seen_eor || (status != APR_SUCCESS &&
-                         !APR_STATUS_IS_EOF(status))) {
+        if (status != APR_SUCCESS || seen_eor) {
             return status;
         }
     }
 


@@ -66,8 +66,7 @@ struct ap_filter_conn_ctx {
     struct ap_filter_spare_ring *spare_containers,
                                 *spare_brigades,
                                 *spare_filters,
-                                *spare_flushes;
-    int flushing;
+                                *dead_filters;
 };
 
 typedef struct filter_trie_node filter_trie_node;
@@ -356,19 +355,43 @@ static void put_spare(conn_rec *c, void *data,
     APR_RING_INSERT_TAIL(*ring, sdata, spare_data, link);
 }
 
-static apr_status_t filter_recycle(void *arg)
+static apr_status_t request_filter_cleanup(void *arg)
 {
     ap_filter_t *f = arg;
     conn_rec *c = f->c;
     struct ap_filter_conn_ctx *x = c->filter_conn_ctx;
 
-    memset(f, 0, sizeof(*f));
-    put_spare(c, f, x->flushing ? &x->spare_flushes
-                                : &x->spare_filters);
+    /* A request filter is cleaned up with an EOR bucket, so possibly
+     * while it is handling/passing the EOR, and we want each filter or
+     * ap_filter_output_pending() to be able to dereference f until they
+     * return. So request filters are recycled in dead_filters and will only
+     * be moved to spare_filters when ap_filter_recycle() is called explicitly.
+     * Still set f->r to NULL so any use-after-free crashes quite reliably.
+     */
+    f->r = NULL;
+    put_spare(c, f, &x->dead_filters);
 
     return APR_SUCCESS;
 }
 
+AP_DECLARE(void) ap_filter_recycle(conn_rec *c)
+{
+    struct ap_filter_conn_ctx *x = get_conn_ctx(c);
+
+    if (!x || !x->dead_filters) {
+        return;
+    }
+
+    make_spare_ring(&x->spare_filters, c->pool);
+    while (!APR_RING_EMPTY(x->dead_filters, spare_data, link)) {
+        struct spare_data *sdata = APR_RING_FIRST(x->dead_filters);
+
+        APR_RING_REMOVE(sdata, link);
+        memset(sdata->data, 0, sizeof(ap_filter_t));
+        APR_RING_INSERT_TAIL(x->spare_filters, sdata, spare_data, link);
+    }
+}
+
 static ap_filter_t *add_any_filter_handle(ap_filter_rec_t *frec, void *ctx,
                                           request_rec *r, conn_rec *c,
                                           ap_filter_t **r_filters,
@@ -413,7 +436,7 @@ static ap_filter_t *add_any_filter_handle(ap_filter_rec_t *frec, void *ctx,
     f->ctx = ctx;
     /* f->r must always be NULL for connection filters */
     if (r && frec->ftype < AP_FTYPE_CONNECTION) {
-        apr_pool_cleanup_register(r->pool, f, filter_recycle,
+        apr_pool_cleanup_register(r->pool, f, request_filter_cleanup,
                                   apr_pool_cleanup_null);
         f->r = r;
     }
@@ -1113,16 +1136,6 @@ AP_DECLARE_NONSTD(int) ap_filter_output_pending(conn_rec *c)
         return DECLINED;
     }
 
-    /* Flushing pending filters' brigades, so any f->r->pool may be cleaned up
-     * with its EOR (underneath this loop). Tell filter_recycle() we want f->r
-     * filters to be recycled in spare_flushes rather than spare_filters, such
-     * that they can *not* be reused during the flush(es), so we can safely
-     * access *f after it's been recycled (i.e. to check f->bb). After the
-     * flush(es), we can then merge spare_flushes into spare_filters to make
-     * them finally reusable.
-     */
-    x->flushing = 1;
-
     bb = ap_reuse_brigade_from_pool("ap_fop_bb", c->pool,
                                     c->bucket_alloc);
 
@@ -1160,13 +1173,10 @@ AP_DECLARE_NONSTD(int) ap_filter_output_pending(conn_rec *c)
         }
     }
 
-    /* No more flushing, make spare_flushes reusable before leaving. */
-    if (x->spare_flushes && !APR_RING_EMPTY(x->spare_flushes,
-                                            spare_data, link)) {
-        make_spare_ring(&x->spare_filters, c->pool);
-        APR_RING_CONCAT(x->spare_filters, x->spare_flushes, spare_data, link);
-    }
-    x->flushing = 0;
+    /* No more flushing, all filters have returned, we can recycle dead
+     * request filters before leaving (i.e. make them reusable, not leak).
+     */
+    ap_filter_recycle(c);
 
     return rc;
 }
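
The drain loop in ap_filter_recycle() above is plain apr_ring.h machinery; a
self-contained sketch of the same move-one-ring-into-another pattern (assuming
the APR headers are available; in httpd each moved element's ap_filter_t is
also memset() before it becomes reusable):

#include <stdio.h>
#include <apr_ring.h>

struct spare_data {
    APR_RING_ENTRY(spare_data) link;
    void *data;
};
APR_RING_HEAD(spare_ring, spare_data);

int main(void)
{
    struct spare_ring dead, spare;
    struct spare_data a, b;

    APR_RING_INIT(&dead, spare_data, link);
    APR_RING_INIT(&spare, spare_data, link);
    APR_RING_INSERT_TAIL(&dead, &a, spare_data, link);
    APR_RING_INSERT_TAIL(&dead, &b, spare_data, link);

    /* Move the dead ring into the spare ring one element at a time,
     * like the loop in ap_filter_recycle(). */
    while (!APR_RING_EMPTY(&dead, spare_data, link)) {
        struct spare_data *sdata = APR_RING_FIRST(&dead);

        APR_RING_REMOVE(sdata, link);
        APR_RING_INSERT_TAIL(&spare, sdata, spare_data, link);
    }

    printf("dead empty: %d\n", APR_RING_EMPTY(&dead, spare_data, link));
    return 0;
}

The element-wise loop, rather than a single APR_RING_CONCAT as the removed
spare_flushes code used, is what gives ap_filter_recycle() the chance to zero
each filter before it goes back on the spare ring.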