
mod_proxy_http: follow up to r1869216.

Let's call stream_reqbody() for all rb_methods, no RB_SPOOL_CL special case.

This both simplifies the code and keeps the EOS bucket in the input_brigade until it is
actually sent, which lets us detect whether the whole body has already been fetched
if/when proxy_http_handler() re-enters for different balancer members.


git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1869222 13f79535-47bb-0310-9956-ffa450edef68
Yann Ylavic
2019-10-31 16:08:33 +00:00
parent 7299090182
commit e2d7af8692
4 changed files with 398 additions and 134 deletions
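To illustrate the point made in the commit message, here is a minimal sketch (not part of this commit) of how a caller such as proxy_http_handler() could detect that the whole request body has already been fetched once the EOS bucket is retained in req->input_brigade. The helper name seen_whole_body() is hypothetical; only the APR brigade macros and the proxy_http_req_t fields visible in the diff below are assumed.

    /* Hypothetical helper, for illustration only: with the EOS bucket kept
     * in req->input_brigade until it is actually sent, a re-entry for another
     * balancer member can tell whether the client body was already read in full.
     */
    static int seen_whole_body(proxy_http_req_t *req)
    {
        apr_bucket_brigade *bb = req->input_brigade;

        /* The body has been fully fetched iff the brigade still ends with EOS. */
        return !APR_BRIGADE_EMPTY(bb)
               && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb));
    }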

View File

@@ -82,6 +82,30 @@ AP_DECLARE(void) ap_get_mime_headers(request_rec *r);
 AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r,
                                           apr_bucket_brigade *bb);
 
+/**
+ * @struct ap_mime_headers_ctx
+ * @brief  Context for ap_get_mime_headers_ex()
+ */
+typedef struct ap_mime_headers_ctx ap_mime_headers_ctx_t;
+struct ap_mime_headers_ctx {
+    int limit_req_fields;
+    int limit_req_fieldsize;
+    apr_table_t *headers;
+    apr_table_t *notes;
+    apr_bucket_brigade *bb;
+    unsigned int strict:1,
+                 compress:1;
+};
+
+/**
+ * Generic version of ap_get_mime_headers_core() that takes a filter as
+ * parameter and options regarding limits to apply.
+ * @param f   The filter to read from
+ * @param ctx The context/options (@see ap_mime_headers_ctx)
+ */
+AP_DECLARE(int) ap_get_mime_headers_ex(request_rec *r, ap_filter_t *f,
+                                       ap_mime_headers_ctx_t *ctx);
+
 /* Finish up stuff after a request */
 
 /**
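A hedged usage sketch of the new API, mirroring the callers added later in this commit rather than defining any additional interface: the context selects the destination table and the limits, fields left at zero fall back to defaults inside the implementation, and a NULL ctx.bb makes the function create and destroy its own scratch brigade. backend_conn stands for whichever conn_rec the headers are read from.

    apr_bucket_brigade *bb = apr_brigade_create(r->pool,
                                                backend_conn->bucket_alloc);
    ap_mime_headers_ctx_t ctx;
    int rc;

    memset(&ctx, 0, sizeof(ctx));
    ctx.bb = bb;                               /* scratch brigade for line reads */
    ctx.headers = r->headers_out;              /* parsed fields land here        */
    ctx.notes = NULL;                          /* no error-notes wanted          */
    ctx.limit_req_fieldsize = HUGE_STRING_LEN; /* per-field size limit           */
    ctx.strict = 1;                            /* RFC 7230 token checking        */

    rc = ap_get_mime_headers_ex(r, backend_conn->input_filters, &ctx);
    if (rc != OK) {
        /* rc is an HTTP status, e.g. HTTP_BAD_REQUEST; ctx.headers may be
         * only partially filled at this point. */
    }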

View File

@@ -303,16 +303,18 @@ static int stream_reqbody_read(proxy_http_req_t *req, apr_bucket_brigade *bb,
     return OK;
 }
 
-static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method)
+static int stream_reqbody(proxy_http_req_t *req)
 {
     request_rec *r = req->r;
     int seen_eos = 0, rv = OK;
     apr_size_t hdr_len;
     char chunk_hdr[20];  /* must be here due to transient bucket. */
+    conn_rec *origin = req->origin;
     proxy_conn_rec *p_conn = req->backend;
     apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
     apr_bucket_brigade *header_brigade = req->header_brigade;
     apr_bucket_brigade *input_brigade = req->input_brigade;
+    rb_methods rb_method = req->rb_method;
     apr_off_t bytes, bytes_streamed = 0;
     apr_bucket *e;
@@ -326,7 +328,7 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method)
         }
 
         if (!APR_BRIGADE_EMPTY(input_brigade)) {
-            /* If this brigade contains EOS, either stop or remove it. */
+            /* If this brigade contains EOS, remove it and be done. */
             if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
                 seen_eos = 1;
@@ -368,7 +370,8 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method)
                     APR_BRIGADE_INSERT_TAIL(input_brigade, e);
                 }
             }
-            else if (bytes_streamed > req->cl_val) {
+            else if (rb_method == RB_STREAM_CL
+                     && bytes_streamed > req->cl_val) {
                 /* C-L < bytes streamed?!?
                  * We will error out after the body is completely
                  * consumed, but we can't stream more bytes at the
@@ -400,7 +403,7 @@ static int stream_reqbody(proxy_http_req_t *req, rb_methods rb_method)
     APR_BRIGADE_PREPEND(input_brigade, header_brigade);
 
     /* Flush here on EOS because we won't stream_reqbody_read() again */
-    rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin,
+    rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin,
                                input_brigade, seen_eos);
     if (rv != OK) {
         return rv;
@@ -462,10 +465,6 @@ static int spool_reqbody_cl(proxy_http_req_t *req, apr_off_t *bytes_spooled)
         /* If this brigade contains EOS, either stop or remove it. */
         if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
             seen_eos = 1;
-
-            /* We can't pass this EOS to the output_filters. */
-            e = APR_BRIGADE_LAST(input_brigade);
-            apr_bucket_delete(e);
         }
 
         apr_brigade_length(input_brigade, 1, &bytes);
@@ -859,35 +858,21 @@ static int ap_proxy_http_request(proxy_http_req_t *req)
 {
     int rv;
     request_rec *r = req->r;
-    apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc;
-    apr_bucket_brigade *header_brigade = req->header_brigade;
-    apr_bucket_brigade *input_brigade = req->input_brigade;
 
     /* send the request header/body, if any. */
     switch (req->rb_method) {
+    case RB_SPOOL_CL:
     case RB_STREAM_CL:
     case RB_STREAM_CHUNKED:
         if (req->do_100_continue) {
-            rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend,
-                                       req->origin, header_brigade, 1);
+            rv = ap_proxy_pass_brigade(req->bucket_alloc, r, req->backend,
+                                       req->origin, req->header_brigade, 1);
         }
         else {
-            rv = stream_reqbody(req, req->rb_method);
+            rv = stream_reqbody(req);
         }
         break;
 
-    case RB_SPOOL_CL:
-        /* Prefetch has built the header and spooled the whole body;
-         * if we don't expect 100-continue we can flush both all at once,
-         * otherwise flush the header only.
-         */
-        if (!req->do_100_continue) {
-            APR_BRIGADE_CONCAT(header_brigade, input_brigade);
-        }
-        rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backend,
-                                   req->origin, header_brigade, 1);
-        break;
-
     default:
         /* shouldn't be possible */
         rv = HTTP_INTERNAL_SERVER_ERROR;
@@ -1590,15 +1575,10 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
             /* Send the request body (fully). */
             switch(req->rb_method) {
+            case RB_SPOOL_CL:
             case RB_STREAM_CL:
             case RB_STREAM_CHUNKED:
-                status = stream_reqbody(req, req->rb_method);
-                break;
-            case RB_SPOOL_CL:
-                /* Prefetch has spooled the whole body, flush it. */
-                status = ap_proxy_pass_brigade(req->bucket_alloc, r,
-                                               backend, origin,
-                                               req->input_brigade, 1);
+                status = stream_reqbody(req);
                 break;
             default:
                 /* Shouldn't happen */
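Why dropping the apr_bucket_delete() from spool_reqbody_cl() is safe, in a short sketch that paraphrases the logic already visible in stream_reqbody() above (this is not additional code from the commit): the spooled brigade now ends with EOS for every rb_method, and stream_reqbody() strips it and signals the final flush itself.

    /* Sketch of the final-pass handling now shared by all rb_methods: */
    if (!APR_BRIGADE_EMPTY(input_brigade)
        && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
        seen_eos = 1;
        /* The EOS metadata bucket itself is never sent to the backend... */
        apr_bucket_delete(APR_BRIGADE_LAST(input_brigade));
    }
    /* ...instead its presence drives the flush on the last brigade passed. */
    rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin,
                               input_brigade, seen_eos);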

View File

@@ -294,6 +294,223 @@ static int proxy_wstunnel_canon(request_rec *r, char *url)
     return OK;
 }
 
+static request_rec *make_resp(conn_rec *c, request_rec *r)
+{
+    apr_pool_t *pool;
+    request_rec *rp;
+
+    apr_pool_create(&pool, c->pool);
+
+    rp = apr_pcalloc(pool, sizeof(*r));
+    rp->pool = pool;
+    rp->status = HTTP_OK;
+
+    rp->headers_in = apr_table_make(pool, 50);
+    rp->trailers_in = apr_table_make(pool, 5);
+    rp->subprocess_env = apr_table_make(pool, 50);
+    rp->headers_out = apr_table_make(pool, 12);
+    rp->trailers_out = apr_table_make(pool, 5);
+    rp->err_headers_out = apr_table_make(pool, 5);
+    rp->notes = apr_table_make(pool, 5);
+
+    rp->server = r->server;
+    rp->log = r->log;
+    rp->proxyreq = r->proxyreq;
+    rp->request_time = r->request_time;
+    rp->connection = c;
+    rp->output_filters = c->output_filters;
+    rp->input_filters = c->input_filters;
+    rp->proto_output_filters = c->output_filters;
+    rp->proto_input_filters = c->input_filters;
+    rp->useragent_ip = c->client_ip;
+    rp->useragent_addr = c->client_addr;
+
+    rp->request_config = ap_create_request_config(pool);
+
+    return rp;
+}
+
+static int proxy_wstunnel_handle_http_response(request_rec *r,
+                                               proxy_conn_rec *backend,
+                                               proxy_server_conf *sconf,
+                                               apr_bucket_brigade *bb)
+{
+    conn_rec *origin = backend->connection;
+    proxy_worker *worker = backend->worker;
+    char fixed_buffer[HUGE_STRING_LEN];
+    char *buffer = fixed_buffer;
+    int size = HUGE_STRING_LEN;
+    ap_mime_headers_ctx_t ctx;
+    request_rec *resp;
+    apr_status_t rv;
+    apr_size_t len;
+    int rc;
+
+    /* Only use dynamically sized buffer if user specifies ResponseFieldSize */
+    if (worker->s->response_field_size_set) {
+        size = worker->s->response_field_size;
+        if (size > HUGE_STRING_LEN) {
+            buffer = apr_palloc(r->pool, size);
+        }
+    }
+
+    resp = make_resp(origin, r);
+
+    rv = ap_rgetline(&buffer, size, &len, resp, 0, bb);
+    apr_brigade_cleanup(bb);
+    if (rv != APR_SUCCESS || !apr_date_checkmask(buffer, "HTTP/#.# ### *")) {
+        return HTTP_BAD_GATEWAY;
+    }
+    r->status = atoi(&buffer[9]);
+    if (!ap_is_HTTP_VALID_RESPONSE(r->status)) {
+        return HTTP_BAD_GATEWAY;
+    }
+    r->status_line = apr_pstrdup(r->pool, &buffer[9]);
+
+    memset(&ctx, 0, sizeof(ctx));
+    ctx.bb = bb;
+    ctx.headers = r->headers_out;
+    ctx.limit_req_fieldsize = size;
+    rc = ap_get_mime_headers_ex(r, origin->input_filters, &ctx);
+    apr_brigade_cleanup(bb);
+    if (rc != OK) {
+        r->status = HTTP_OK;
+        r->status_line = NULL;
+        apr_table_clear(r->headers_out);
+        return rc;
+    }
+
+#if 0
+    if (r->status != HTTP_SWITCHING_PROTOCOLS) {
+        conn_rec *c = r->connection;
+        apr_read_type_e block = APR_NONBLOCK_READ;
+        apr_bucket_brigade *pass_bb = apr_brigade_create(r->pool,
+                                                         c->bucket_alloc);
+        int finish = 0;
+
+        r->sent_bodyct = 1;
+        do {
+            apr_bucket *e;
+            apr_off_t readbytes = 0;
+
+            rv = ap_get_brigade(origin->input_filters, bb,
+                                AP_MODE_READBYTES, block,
+                                sconf->io_buffer_size);
+
+            /* ap_get_brigade will return success with an empty brigade
+             * for a non-blocking read which would block: */
+            if (block == APR_NONBLOCK_READ
+                && (APR_STATUS_IS_EAGAIN(rv)
+                    || (rv == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)))) {
+                /* flush to the client and switch to blocking mode */
+                e = apr_bucket_flush_create(c->bucket_alloc);
+                APR_BRIGADE_INSERT_TAIL(bb, e);
+                if (ap_pass_brigade(r->output_filters, bb)
+                    || c->aborted) {
+                    finish = 1;
+                    rc = DONE;
+                }
+                apr_brigade_cleanup(bb);
+                block = APR_BLOCK_READ;
+                continue;
+            }
+            if (rv == APR_EOF) {
+                break;
+            }
+            if (rv != APR_SUCCESS) {
+                if (rv == APR_ENOSPC) {
+                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02475)
+                                  "Response chunk/line was too large to parse");
+                }
+                else if (rv == APR_ENOTIMPL) {
+                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02476)
+                                  "Response Transfer-Encoding was not recognised");
+                }
+                else {
+                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01110)
+                                  "Network error reading response");
+                }
+
+                /* In this case, we are in real trouble because
+                 * our backend bailed on us. Given we're half way
+                 * through a response, our only option is to
+                 * disconnect the client too.
+                 */
+                e = ap_bucket_error_create(HTTP_BAD_GATEWAY, NULL,
+                                           r->pool, c->bucket_alloc);
+                APR_BRIGADE_INSERT_TAIL(bb, e);
+                e = ap_bucket_eoc_create(c->bucket_alloc);
+                APR_BRIGADE_INSERT_TAIL(bb, e);
+                ap_pass_brigade(r->output_filters, bb);
+                apr_brigade_cleanup(bb);
+                rc = DONE;
+                break;
+            }
+
+            /* next time try a non-blocking read */
+            block = APR_NONBLOCK_READ;
+
+            if (!apr_is_empty_table(resp->trailers_in)) {
+                apr_table_do(add_trailers, r->trailers_out,
+                             resp->trailers_in, NULL);
+                apr_table_clear(resp->trailers_in);
+            }
+
+            apr_brigade_length(bb, 0, &readbytes);
+            backend->worker->s->read += readbytes;
+
+            /* sanity check */
+            if (APR_BRIGADE_EMPTY(bb)) {
+                break;
+            }
+
+            /* Switch the allocator lifetime of the buckets */
+            ap_proxy_buckets_lifetime_transform(r, bb, pass_bb);
+
+            /* found the last brigade? */
+            if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) {
+                /* the brigade may contain transient buckets that contain
+                 * data that lives only as long as the backend connection.
+                 * Force a setaside so these transient buckets become heap
+                 * buckets that live as long as the request.
+                 */
+                for (e = APR_BRIGADE_FIRST(pass_bb);
+                     e != APR_BRIGADE_SENTINEL(pass_bb);
+                     e = APR_BUCKET_NEXT(e)) {
+                    apr_bucket_setaside(e, r->pool);
+                }
+
+                /* finally it is safe to clean up the brigade from the
+                 * connection pool, as we have forced a setaside on all
+                 * buckets.
+                 */
+                apr_brigade_cleanup(bb);
+
+                finish = 1;
+            }
+
+            /* try send what we read */
+            if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS
+                || c->aborted) {
+                /* Ack! Phbtt! Die! User aborted! */
+                finish = 1;
+                rc = DONE;
+            }
+
+            /* make sure we always clean up after ourselves */
+            apr_brigade_cleanup(pass_bb);
+            apr_brigade_cleanup(bb);
+        } while (!finish);
+
+        return rc;
+    }
+#endif
+
+    return DECLINED;
+}
+
 /*
  * process the request and write the response.
  */
@@ -318,14 +535,13 @@ static int proxy_wstunnel_request(apr_pool_t *p, request_rec *r,
     apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
     apr_socket_t *client_socket = ap_get_conn_socket(c);
     ws_baton_t *baton = apr_pcalloc(r->pool, sizeof(ws_baton_t));
-    int status;
     proxyws_dir_conf *dconf = ap_get_module_config(r->per_dir_config, &proxy_wstunnel_module);
     const char *upgrade_method = *worker->s->upgrade ? worker->s->upgrade : "WebSocket";
+    int status;
 
-    header_brigade = apr_brigade_create(p, backconn->bucket_alloc);
-
     ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "sending request");
 
+    header_brigade = apr_brigade_create(p, backconn->bucket_alloc);
     rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, conn,
                                  worker, conf, uri, url, server_portstr,
                                  &old_cl_val, &old_te_val);
@@ -334,13 +550,19 @@ static int proxy_wstunnel_request(apr_pool_t *p, request_rec *r,
     }
 
     if (ap_cstr_casecmp(upgrade_method, "NONE") == 0) {
-        buf = apr_pstrdup(p, "Upgrade: WebSocket" CRLF "Connection: Upgrade" CRLF CRLF);
+        buf = apr_pstrdup(p, "Upgrade: WebSocket" CRLF
+                             "Connection: Upgrade" CRLF
+                             CRLF);
     } else if (ap_cstr_casecmp(upgrade_method, "ANY") == 0) {
         const char *upgrade;
         upgrade = apr_table_get(r->headers_in, "Upgrade");
-        buf = apr_pstrcat(p, "Upgrade: ", upgrade, CRLF "Connection: Upgrade" CRLF CRLF, NULL);
+        buf = apr_pstrcat(p, "Upgrade: ", upgrade, CRLF
+                             "Connection: Upgrade" CRLF
+                             CRLF, NULL);
     } else {
-        buf = apr_pstrcat(p, "Upgrade: ", upgrade_method, CRLF "Connection: Upgrade" CRLF CRLF, NULL);
+        buf = apr_pstrcat(p, "Upgrade: ", upgrade_method, CRLF
+                             "Connection: Upgrade" CRLF
+                             CRLF, NULL);
     }
     ap_xlate_proto_to_ascii(buf, strlen(buf));
     e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
@@ -350,7 +572,9 @@ static int proxy_wstunnel_request(apr_pool_t *p, request_rec *r,
                                      header_brigade, 1)) != OK)
         return rv;
 
-    apr_brigade_cleanup(header_brigade);
+    if ((rv = proxy_wstunnel_handle_http_response(r, conn, conf,
+                                                  header_brigade)) != DECLINED)
+        return rv;
 
     ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");
@@ -466,7 +690,7 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker,
     char *locurl = url;
     apr_uri_t *uri;
     int is_ssl = 0;
-    const char *upgrade_method = *worker->s->upgrade ? worker->s->upgrade : "WebSocket";
+    const char *upgrade_method;
 
     if (ap_cstr_casecmpn(url, "wss:", 4) == 0) {
         scheme = "WSS";
@@ -480,6 +704,7 @@ static int proxy_wstunnel_handler(request_rec *r, proxy_worker *worker,
         return DECLINED;
     }
 
+    upgrade_method = *worker->s->upgrade ? worker->s->upgrade : "WebSocket";
     if (ap_cstr_casecmp(upgrade_method, "NONE") != 0) {
         const char *upgrade;
         upgrade = apr_table_get(r->headers_in, "Upgrade");

View File

@@ -554,22 +554,7 @@ AP_DECLARE(apr_status_t) ap_rgetline(char **s, apr_size_t n,
                                      apr_size_t *read, request_rec *r,
                                      int flags, apr_bucket_brigade *bb)
 {
-    apr_status_t rv;
-
-    rv = ap_fgetline_core(s, n, read, r->proto_input_filters, flags,
-                          bb, r->pool);
-#if APR_CHARSET_EBCDIC
-    /* On EBCDIC boxes, each complete http protocol input line needs to be
-     * translated into the code page used by the compiler.  Since
-     * ap_fgetline_core uses recursion, we do the translation in a wrapper
-     * function to ensure that each input character gets translated only once.
-     */
-    if (*read) {
-        ap_xlate_proto_from_ascii(*s, *read);
-    }
-#endif
-
-    return rv;
+    return ap_fgetline(s, n, read, r->proto_input_filters, flags, bb, r->pool);
 }
 
 AP_DECLARE(int) ap_getline(char *s, int n, request_rec *r, int flags)
@@ -1004,23 +989,30 @@ rrl_failed:
     return 0;
 }
 
-static int table_do_fn_check_lengths(void *r_, const char *key,
+struct table_do_fn_check_lengths_baton {
+    request_rec *r;
+    ap_mime_headers_ctx_t *ctx;
+};
+
+static int table_do_fn_check_lengths(void *arg, const char *key,
                                      const char *value)
 {
-    request_rec *r = r_;
+    struct table_do_fn_check_lengths_baton *baton = arg;
 
-    if (value == NULL || r->server->limit_req_fieldsize >= strlen(value) )
+    if (value == NULL || baton->ctx->limit_req_fieldsize >= strlen(value))
         return 1;
 
-    r->status = HTTP_BAD_REQUEST;
-    apr_table_setn(r->notes, "error-notes",
-                   "Size of a request header field exceeds server limit.");
-    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00560) "Request "
-                  "header exceeds LimitRequestFieldSize after merging: %.*s",
+    if (baton->ctx->notes) {
+        apr_table_setn(baton->ctx->notes, "error-notes",
+                       "Size of a header field exceeds limit.");
+    }
+    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, baton->r, APLOGNO(00560)
+                  "Header exceeds size limit after merging: %.*s",
                   field_name_len(key), key);
     return 0;
 }
 
-AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb)
+AP_DECLARE(int) ap_get_mime_headers_ex(request_rec *r, ap_filter_t *f,
+                                       ap_mime_headers_ctx_t *ctx)
 {
     char *last_field = NULL;
     apr_size_t last_len = 0;
@@ -1030,44 +1022,53 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb
     apr_size_t len;
     int fields_read = 0;
     char *tmp_field;
-    core_server_config *conf = ap_get_core_module_config(r->server->module_config);
-    int strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
+    apr_bucket_brigade *bb = ctx->bb;
+    int rc = OK;
+
+    if (bb == NULL) {
+        bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+    }
+    if (ctx->headers == NULL) {
+        ctx->headers = apr_table_make(r->pool, 25);
+    }
+    if (ctx->limit_req_fieldsize <= 0) {
+        ctx->limit_req_fieldsize = HUGE_STRING_LEN;
+    }
 
     /*
      * Read header lines until we get the empty separator line, a read error,
-     * the connection closes (EOF), reach the server limit, or we timeout.
+     * the connection closes (EOF), reach the size limit(s), or we timeout.
      */
     while(1) {
         apr_status_t rv;
 
         field = NULL;
-        rv = ap_rgetline(&field, r->server->limit_req_fieldsize + 2,
-                         &len, r, strict ? AP_GETLINE_CRLF : 0, bb);
+        rv = ap_fgetline(&field, ctx->limit_req_fieldsize + 2, &len, f,
+                         ctx->strict ? AP_GETLINE_CRLF : 0, bb, r->pool);
 
         if (rv != APR_SUCCESS) {
-            if (APR_STATUS_IS_TIMEUP(rv)) {
-                r->status = HTTP_REQUEST_TIME_OUT;
-            }
-            else {
-                r->status = HTTP_BAD_REQUEST;
-            }
-
             /* ap_rgetline returns APR_ENOSPC if it fills up the buffer before
              * finding the end-of-line.  This is only going to happen if it
              * exceeds the configured limit for a field size.
              */
             if (rv == APR_ENOSPC) {
-                apr_table_setn(r->notes, "error-notes",
-                               "Size of a request header field "
-                               "exceeds server limit.");
+                if (ctx->notes) {
+                    apr_table_setn(ctx->notes, "error-notes",
+                                   "Size of a header field exceeds limit.");
+                }
                 ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00561)
-                              "Request header exceeds LimitRequestFieldSize%s"
-                              "%.*s",
+                              "Header exceeds size limit%s%.*s",
                               (field && *field) ? ": " : "",
                               (field) ? field_name_len(field) : 0,
                               (field) ? field : "");
             }
-            return;
+            if (APR_STATUS_IS_TIMEUP(rv)) {
+                rc = HTTP_REQUEST_TIME_OUT;
+            }
+            else {
+                rc = HTTP_BAD_REQUEST;
+            }
+            goto cleanup;
         }
 
         /* For all header values, and all obs-fold lines, the presence of
@@ -1087,18 +1088,18 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb
             apr_size_t fold_len;
 
             if (last_field == NULL) {
-                r->status = HTTP_BAD_REQUEST;
                 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03442)
                               "Line folding encountered before first"
                               " header line");
-                return;
+                rc = HTTP_BAD_REQUEST;
+                goto cleanup;
             }
 
             if (field[1] == '\0') {
-                r->status = HTTP_BAD_REQUEST;
                 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03443)
                               "Empty folded line encountered");
-                return;
+                rc = HTTP_BAD_REQUEST;
+                goto cleanup;
             }
 
             /* Leading whitespace on an obs-fold line can be
@@ -1115,19 +1116,19 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb
              */
             fold_len = last_len + len + 1; /* trailing null */
 
-            if (fold_len >= (apr_size_t)(r->server->limit_req_fieldsize)) {
-                r->status = HTTP_BAD_REQUEST;
+            if (fold_len >= (apr_size_t)ctx->limit_req_fieldsize) {
+                if (ctx->notes) {
                 /* report what we have accumulated so far before the
                  * overflow (last_field) as the field with the problem
                  */
-                apr_table_setn(r->notes, "error-notes",
-                               "Size of a request header field "
-                               "exceeds server limit.");
+                apr_table_setn(ctx->notes, "error-notes",
+                               "Size of a header field exceeds limit.");
+                }
                 ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00562)
-                              "Request header exceeds LimitRequestFieldSize "
-                              "after folding: %.*s",
+                              "Header exceeds size limit after folding: %.*s",
                               field_name_len(last_field), last_field);
-                return;
+                rc = HTTP_BAD_REQUEST;
+                goto cleanup;
             }
 
             if (fold_len > alloc_len) {
@@ -1157,46 +1158,47 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb
          * most recently read input line).
          */
 
-        if (r->server->limit_req_fields
-            && (++fields_read > r->server->limit_req_fields)) {
-            r->status = HTTP_BAD_REQUEST;
-            apr_table_setn(r->notes, "error-notes",
-                           "The number of request header fields "
-                           "exceeds this server's limit.");
+        if (ctx->limit_req_fields
+            && (++fields_read > ctx->limit_req_fields)) {
+            if (ctx->notes) {
+                apr_table_setn(ctx->notes, "error-notes",
+                               "The number of header fields "
+                               "exceeds the limit.");
+            }
             ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00563)
-                          "Number of request headers exceeds "
-                          "LimitRequestFields");
-            return;
+                          "Number of headers exceeds the limit");
+            rc = HTTP_BAD_REQUEST;
+            goto cleanup;
         }
 
-        if (!strict)
+        if (!ctx->strict)
         {
             /* Not Strict ('Unsafe' mode), using the legacy parser */
 
             if (!(value = strchr(last_field, ':'))) { /* Find ':' or    */
-                r->status = HTTP_BAD_REQUEST;    /* abort bad request */
                 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00564)
-                              "Request header field is missing ':' "
+                              "Header field is missing ':' "
                               "separator: %.*s", (int)LOG_NAME_MAX_LEN,
                               last_field);
-                return;
+                rc = HTTP_BAD_REQUEST;
+                goto cleanup;
             }
 
             if (value == last_field) {
-                r->status = HTTP_BAD_REQUEST;
                 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03453)
-                              "Request header field name was empty");
-                return;
+                              "Header field name was empty");
+                rc = HTTP_BAD_REQUEST;
+                goto cleanup;
             }
 
             *value++ = '\0'; /* NUL-terminate at colon */
 
             if (strpbrk(last_field, "\t\n\v\f\r ")) {
-                r->status = HTTP_BAD_REQUEST;
                 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03452)
-                              "Request header field name presented"
+                              "Header field name presented"
                               " invalid whitespace");
-                return;
+                rc = HTTP_BAD_REQUEST;
+                goto cleanup;
             }
 
             while (*value == ' ' || *value == '\t') {
@@ -1204,11 +1206,11 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb
             }
 
             if (strpbrk(value, "\n\v\f\r")) {
-                r->status = HTTP_BAD_REQUEST;
                 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03451)
-                              "Request header field value presented"
+                              "Header field value presented"
                               " bad whitespace");
-                return;
+                rc = HTTP_BAD_REQUEST;
+                goto cleanup;
             }
         }
         else /* Using strict RFC7230 parsing */
@@ -1216,11 +1218,11 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb
             /* Ensure valid token chars before ':' per RFC 7230 3.2.4 */
             value = (char *)ap_scan_http_token(last_field);
             if ((value == last_field) || *value != ':') {
-                r->status = HTTP_BAD_REQUEST;
                 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02426)
                               "Request header field name is malformed: "
                               "%.*s", (int)LOG_NAME_MAX_LEN, last_field);
-                return;
+                rc = HTTP_BAD_REQUEST;
+                goto cleanup;
             }
 
             *value++ = '\0'; /* NUL-terminate last_field name at ':' */
@@ -1238,15 +1240,15 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb
              * for specific header handler logic later in the cycle
              */
             if (*tmp_field != '\0') {
-                r->status = HTTP_BAD_REQUEST;
                 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02427)
                               "Request header value is malformed: "
                               "%.*s", (int)LOG_NAME_MAX_LEN, value);
-                return;
+                rc = HTTP_BAD_REQUEST;
+                goto cleanup;
             }
         }
 
-        apr_table_addn(r->headers_in, last_field, value);
+        apr_table_addn(ctx->headers, last_field, value);
 
         /* This last_field header is now stored in headers_in,
          * resume processing of the current input line.
@@ -1260,7 +1262,7 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb
         /* Keep track of this new header line so that we can extend it across
          * any obs-fold or parse it on the next loop iteration.  We referenced
-         * our previously allocated buffer in r->headers_in,
+         * our previously allocated buffer in ctx->headers,
          * so allocate a fresh buffer if required.
          */
         alloc_len = 0;
@@ -1271,18 +1273,51 @@ AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb
     /* Combine multiple message-header fields with the same
      * field-name, following RFC 2616, 4.2.
      */
-    apr_table_compress(r->headers_in, APR_OVERLAP_TABLES_MERGE);
+    if (ctx->compress) {
+        apr_table_compress(ctx->headers, APR_OVERLAP_TABLES_MERGE);
+    }
 
-    /* enforce LimitRequestFieldSize for merged headers */
-    apr_table_do(table_do_fn_check_lengths, r, r->headers_in, NULL);
+    /* Enforce limit for merged headers */
+    {
+        struct table_do_fn_check_lengths_baton baton = { r, ctx };
+        if (!apr_table_do(table_do_fn_check_lengths, &baton,
+                          ctx->headers, NULL)) {
+            rc = HTTP_BAD_REQUEST;
+            goto cleanup;
+        }
+    }
+
+cleanup:
+    if (bb != ctx->bb) {
+        apr_brigade_destroy(bb);
+    }
+    return rc;
+}
+
+AP_DECLARE(void) ap_get_mime_headers_core(request_rec *r, apr_bucket_brigade *bb)
+{
+    core_server_config *conf = ap_get_core_module_config(r->server->module_config);
+    ap_mime_headers_ctx_t ctx;
+    int status;
+
+    memset(&ctx, 0, sizeof(ctx));
+    ctx.strict = (conf->http_conformance != AP_HTTP_CONFORMANCE_UNSAFE);
+    ctx.limit_req_fields = r->server->limit_req_fields;
+    ctx.limit_req_fieldsize = r->server->limit_req_fieldsize;
+    ctx.headers = r->headers_in;
+    ctx.notes = r->notes;
+    ctx.compress = 1;
+    ctx.bb = bb;
+
+    status = ap_get_mime_headers_ex(r, r->proto_input_filters, &ctx);
+    if (status != OK) {
+        r->status = status;
+    }
 }
 
 AP_DECLARE(void) ap_get_mime_headers(request_rec *r)
 {
-    apr_bucket_brigade *tmp_bb;
-    tmp_bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
-    ap_get_mime_headers_core(r, tmp_bb);
-    apr_brigade_destroy(tmp_bb);
+    ap_get_mime_headers_core(r, NULL);
 }
 
 AP_DECLARE(request_rec *) ap_create_request(conn_rec *conn)
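For existing callers nothing changes observably: the rewritten ap_get_mime_headers_core() still reports parse failures through r->status, it just receives the status code back from ap_get_mime_headers_ex() instead of setting it at each failure site. A hedged caller-side sketch of that unchanged contract (a usage pattern, not code from this commit):

    ap_get_mime_headers(r);
    if (r->status != HTTP_OK) {
        /* e.g. HTTP_BAD_REQUEST or HTTP_REQUEST_TIME_OUT from the parser */
        return r->status;
    }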