Make HTTP_IN filter send 100 continue in blocking mode only.
When mod_proxy_http prefetches input data it calls the HTTP_IN filter in
nonblocking mode, but since it does not want 100 continue to be sent in every
case (e.g. 100-continue forwarding), it hacks r->expecting_100 (save in
req->expecting_100, reset, eventually restore...) all over the place.

Let's avoid this by making the HTTP_IN filter send 100 continue only when
called in blocking mode (still only once), instead of the first time it is
called.

* modules/http/http_filters.c (struct http_filter_ctx):
  Add the seen_data bit and rename eos_sent to at_eos (HTTP_IN does not send
  any EOS).

* modules/http/http_filters.c (ap_http_filter):
  Move 100 continue handling out of the initialization/once block, and do it
  in blocking mode only. Track in ctx->seen_data whether some data have
  already been received, and if so don't send 100 continue, per RFC 7231
  section 5.1.1.

* modules/proxy/mod_proxy_http.c:
  Remove req->expecting_100 (and its dance with r->expecting_100) now that
  reading from the input filters does the right thing.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1883639 13f79535-47bb-0310-9956-ffa450edef68
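For illustration only, the following standalone sketch models the gating that the reworked HTTP_IN filter applies before emitting an interim 100 Continue (it is not httpd code; every name in it is made up for the example): the response is sent only on a blocking read, only when the request actually announces a body, at most once, and not once body data has already been seen, which RFC 7231 section 5.1.1 explicitly allows.

/* Standalone model of the new gating logic; names are illustrative,
 * not httpd's API. Compile with any C compiler and run. */
#include <stdio.h>

enum read_type { NONBLOCK_READ, BLOCK_READ };

struct body_ctx {
    int expecting_100;   /* client sent "Expect: 100-continue" */
    int has_body;        /* chunked, or Content-Length > 0 */
    int seen_data;       /* some body bytes already received */
    int at_eos;          /* body already fully consumed */
};

/* Returns 1 if an interim "100 Continue" should be written now. */
static int should_send_100(const struct body_ctx *ctx, enum read_type block)
{
    if (block != BLOCK_READ)            /* nonblocking prefetch: defer */
        return 0;
    if (!ctx->expecting_100 || !ctx->has_body || ctx->at_eos)
        return 0;
    if (ctx->seen_data)                 /* MAY omit per RFC 7231 5.1.1 */
        return 0;
    return 1;
}

int main(void)
{
    struct body_ctx ctx = { 1, 1, 0, 0 };
    printf("nonblocking prefetch: %d\n", should_send_100(&ctx, NONBLOCK_READ)); /* 0 */
    printf("first blocking read:  %d\n", should_send_100(&ctx, BLOCK_READ));    /* 1 */
    ctx.seen_data = 1;
    printf("after data arrived:   %d\n", should_send_100(&ctx, BLOCK_READ));    /* 0 */
    return 0;
}

This deferral is why mod_proxy_http no longer needs to touch r->expecting_100 around its nonblocking prefetch: the decision simply waits for the first blocking read.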
--- a/modules/http/http_filters.c
+++ b/modules/http/http_filters.c
@@ -79,7 +79,8 @@ typedef struct http_filter_ctx
         BODY_CHUNK_END_LF, /* got CR after data, expect LF */
         BODY_CHUNK_TRAILER /* trailers */
     } state;
-    unsigned int eos_sent :1;
+    unsigned int at_eos:1,
+                 seen_data:1;
 } http_ctx_t;
 
 /**
@@ -270,7 +271,7 @@ static apr_status_t read_chunked_trailers(http_ctx_t *ctx, ap_filter_t *f,
         r->status = saved_status;
         e = apr_bucket_eos_create(f->c->bucket_alloc);
         APR_BRIGADE_INSERT_TAIL(b, e);
-        ctx->eos_sent = 1;
+        ctx->at_eos = 1;
         rv = APR_SUCCESS;
     }
     else {
@@ -402,53 +403,52 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
          * proxied *response*, proxy responses MUST be exempt.
          */
         if (ctx->state == BODY_NONE && f->r->proxyreq != PROXYREQ_RESPONSE) {
-            e = apr_bucket_eos_create(f->c->bucket_alloc);
-            APR_BRIGADE_INSERT_TAIL(b, e);
-            ctx->eos_sent = 1;
-            return APR_SUCCESS;
+            ctx->at_eos = 1; /* send EOS below */
         }
+    }
 
+    {
         /* Since we're about to read data, send 100-Continue if needed.
-         * Only valid on chunked and C-L bodies where the C-L is > 0. */
-        if ((ctx->state == BODY_CHUNK
+         * Only valid on chunked and C-L bodies where the C-L is > 0.
+         *
+         * If the read is to be nonblocking though, the caller may not want to
+         * handle this just now (e.g. mod_proxy_http), and is prepared to read
+         * nothing if the client really waits for 100 continue, so we don't
+         * send it now and wait for later blocking read.
+         *
+         * In any case, even if r->expecting remains set at the end of the
+         * request handling, ap_set_keepalive() will finally do the right
+         * thing (i.e. "Connection: close" the connection).
+         */
+        if (block == APR_BLOCK_READ
+            && (ctx->state == BODY_CHUNK
                 || (ctx->state == BODY_LENGTH && ctx->remaining > 0))
             && f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1)
-            && !(f->r->eos_sent || f->r->bytes_sent)) {
+            && !(ctx->at_eos || f->r->eos_sent || f->r->bytes_sent)) {
             if (!ap_is_HTTP_SUCCESS(f->r->status)) {
                 ctx->state = BODY_NONE;
-                ctx->eos_sent = 1;
+                ctx->at_eos = 1; /* send EOS below */
             }
+            else if (!ctx->seen_data) {
+                int saved_status = f->r->status;
+                f->r->status = HTTP_CONTINUE;
+                ap_send_interim_response(f->r, 0);
+                AP_DEBUG_ASSERT(!f->r->expecting_100);
+                f->r->status = saved_status;
+            }
             else {
-                char *tmp;
-                int len;
-                apr_bucket_brigade *bb;
-
-                bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
-
-                /* if we send an interim response, we're no longer
-                 * in a state of expecting one.
+                /* https://tools.ietf.org/html/rfc7231#section-5.1.1
+                 * A server MAY omit sending a 100 (Continue) response if it
+                 * has already received some or all of the message body for
+                 * the corresponding request [...]
                  */
                 f->r->expecting_100 = 0;
-                tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL " ",
-                                  ap_get_status_line(HTTP_CONTINUE), CRLF CRLF, NULL);
-                len = strlen(tmp);
-                ap_xlate_proto_to_ascii(tmp, len);
-                e = apr_bucket_pool_create(tmp, len, f->r->pool,
-                                           f->c->bucket_alloc);
-                APR_BRIGADE_INSERT_HEAD(bb, e);
-                e = apr_bucket_flush_create(f->c->bucket_alloc);
-                APR_BRIGADE_INSERT_TAIL(bb, e);
-
-                rv = ap_pass_brigade(f->c->output_filters, bb);
-                if (rv != APR_SUCCESS) {
-                    return AP_FILTER_ERROR;
-                }
             }
         }
     }
 
     /* sanity check in case we're read twice */
-    if (ctx->eos_sent) {
+    if (ctx->at_eos) {
         e = apr_bucket_eos_create(f->c->bucket_alloc);
         APR_BRIGADE_INSERT_TAIL(b, e);
         return APR_SUCCESS;
@@ -492,8 +492,10 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
 
         if (!APR_BUCKET_IS_METADATA(e)) {
             rv = apr_bucket_read(e, &buffer, &len, APR_BLOCK_READ);
-
             if (rv == APR_SUCCESS) {
+                if (len > 0) {
+                    ctx->seen_data = 1;
+                }
                 rv = parse_chunk_size(ctx, buffer, len,
                         f->r->server->limit_req_fieldsize, strict);
             }
@@ -549,6 +551,9 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
 
         /* How many bytes did we just read? */
         apr_brigade_length(b, 0, &totalread);
+        if (totalread > 0) {
+            ctx->seen_data = 1;
+        }
 
         /* If this happens, we have a bucket of unknown length. Die because
          * it means our assumptions have changed. */
@@ -595,7 +600,7 @@ apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
             if (ctx->state == BODY_LENGTH && ctx->remaining == 0) {
                 e = apr_bucket_eos_create(f->c->bucket_alloc);
                 APR_BRIGADE_INSERT_TAIL(b, e);
-                ctx->eos_sent = 1;
+                ctx->at_eos = 1;
             }
 
             break;
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -259,7 +259,6 @@ typedef struct {
     apr_interval_time_t idle_timeout;
 
     unsigned int can_go_async :1,
-                 expecting_100 :1,
                  do_100_continue :1,
                  prefetch_nonblocking :1,
                  force10 :1;
@@ -531,21 +530,6 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled)
     apr_file_t *tmpfile = NULL;
     apr_off_t limit;
 
-    /* Send "100 Continue" now if the client expects one, before
-     * blocking on the body, otherwise we'd wait for each other.
-     */
-    if (req->expecting_100) {
-        int saved_status = r->status;
-
-        r->expecting_100 = 1;
-        r->status = HTTP_CONTINUE;
-        ap_send_interim_response(r, 0);
-        AP_DEBUG_ASSERT(!r->expecting_100);
-
-        r->status = saved_status;
-        req->expecting_100 = 0;
-    }
-
     body_brigade = apr_brigade_create(p, bucket_alloc);
     *bytes_spooled = 0;
 
@@ -719,7 +703,7 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
     apr_read_type_e block;
     int rv;
 
-    if (req->force10 && req->expecting_100) {
+    if (req->force10 && r->expecting_100) {
         return HTTP_EXPECTATION_FAILED;
     }
 
@@ -821,18 +805,6 @@ static int ap_proxy_http_prefetch(proxy_http_req_t *req,
         apr_brigade_length(temp_brigade, 1, &bytes);
         bytes_read += bytes;
 
-        /* From https://tools.ietf.org/html/rfc7231#section-5.1.1
-         * A server MAY omit sending a 100 (Continue) response if it has
-         * already received some or all of the message body for the
-         * corresponding request, or if [snip].
-         */
-        if (req->expecting_100 && bytes > 0) {
-            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(10206)
-                          "prefetching request body while 100-continue is"
-                          "expected, omit sending interim response");
-            req->expecting_100 = 0;
-        }
-
         /*
          * Save temp_brigade in input_brigade. (At least) in the SSL case
          * temp_brigade contains transient buckets whose data would get
@@ -1629,8 +1601,9 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
              *
              * So let's make it configurable.
              *
-             * We need to set "r->expecting_100 = 1" otherwise origin
-             * server behaviour will apply.
+             * We need to force "r->expecting_100 = 1" for RFC behaviour
+             * otherwise ap_send_interim_response() does nothing when
+             * the client did not ask for 100-continue.
              *
              * 101 Switching Protocol has its own configuration which
              * shouldn't be interfered by "proxy-interim-response".
@@ -1645,12 +1618,8 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
             if (!policy
                     || (!strcasecmp(policy, "RFC")
                         && (proxy_status != HTTP_CONTINUE
-                            || (req->expecting_100 = 1)))) {
+                            || (r->expecting_100 = 1)))) {
                 switch (proxy_status) {
-                case HTTP_CONTINUE:
-                    r->expecting_100 = req->expecting_100;
-                    req->expecting_100 = 0;
-                    break;
                 case HTTP_SWITCHING_PROTOCOLS:
                     AP_DEBUG_ASSERT(upgrade != NULL);
                     apr_table_setn(r->headers_out, "Connection", "Upgrade");
@@ -1723,12 +1692,10 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
              * here though, because error_override or a potential retry on
              * another backend could finally read that data and finalize
              * the request processing, making keep-alive possible. So what
-             * we do is restoring r->expecting_100 for ap_set_keepalive()
-             * to do the right thing according to the final response and
+             * we do is leaving r->expecting_100 alone, ap_set_keepalive()
+             * will do the right thing according to the final response and
              * any later update of r->expecting_100.
              */
-            r->expecting_100 = req->expecting_100;
-            req->expecting_100 = 0;
         }
 
         /* Once only! */
@@ -1740,7 +1707,7 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
 
     /* If we didn't send the full body yet, do it now */
     if (do_100_continue) {
-        req->expecting_100 = 0;
+        r->expecting_100 = 0;
         status = send_continue_body(req);
         if (status != OK) {
             return status;
@@ -2228,17 +2195,7 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
     /* Should we handle end-to-end or ping 100-continue? */
     if ((r->expecting_100 && (dconf->forward_100_continue || input_brigade))
         || PROXY_DO_100_CONTINUE(worker, r)) {
-        /* We need to reset r->expecting_100 or prefetching will cause
-         * ap_http_filter() to send "100 Continue" response by itself. So
-         * we'll use req->expecting_100 in mod_proxy_http to determine whether
-         * the client should be forwarded "100 continue", and r->expecting_100
-         * will be restored at the end of the function with the actual value of
-         * req->expecting_100 (i.e. cleared only if mod_proxy_http sent the
-         * "100 Continue" according to its policy).
-         */
         req->do_100_continue = 1;
-        req->expecting_100 = (r->expecting_100 != 0);
-        r->expecting_100 = 0;
     }
 
     /* Should we block while prefetching the body or try nonblocking and flush
@@ -2385,10 +2342,6 @@ cleanup:
         ap_proxy_release_connection(proxy_function, req->backend,
                                     r->server);
     }
-    if (req->expecting_100) {
-        /* Restore r->expecting_100 if we didn't touch it */
-        r->expecting_100 = req->expecting_100;
-    }
     return status;
 }
 