
core: Limit to ten the number of tolerated empty lines between requests,

and consume them before the pipelining check to avoid possible response
delay when reading the next request without flushing.

Before this commit, the maximum number of tolerated empty lines was the same
as the configured LimitRequestFields (100 by default), which was far too many.
We now use a fixed/hard limit of 10 (DEFAULT_LIMIT_BLANK_LINES).

check_pipeline() is changed to check for and consume the trailing [CR]LFs (up
to the limit) so that they are not interpreted as pipelined requests; otherwise
we would block on the next read without flushing data, and hence possibly delay
pending response(s) until the next/real request comes in or the keepalive
timeout expires.
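
The consumption logic can be pictured in isolation on a plain byte buffer. The
sketch below is an illustration only (it is not the httpd code and skips the
filter/brigade machinery): it skips at most "limit" leading blank lines, where
a blank line is a bare LF or a CRLF pair, and reports how many bytes may be
discarded and whether any bytes remain after them.

#include <stdio.h>
#include <stddef.h>

/* Skip at most `limit` leading blank lines ([CR]LF) in buf.  Sets *skipped
 * to the number of bytes that may be discarded; returns 1 if bytes remain
 * after the skipped blank lines, 0 if the buffer held nothing else. */
static int skip_blank_lines(const char *buf, size_t len, int limit,
                            size_t *skipped)
{
    size_t i = 0;
    int blanks = 0;

    while (i < len && blanks < limit) {
        if (buf[i] == '\n') {
            i += 1;                                  /* bare LF */
        }
        else if (buf[i] == '\r' && i + 1 < len && buf[i + 1] == '\n') {
            i += 2;                                  /* CRLF pair */
        }
        else {
            break;                                   /* real data, or lone CR */
        }
        blanks++;
    }
    *skipped = i;
    return (i < len) ? 1 : 0;
}

int main(void)
{
    const char input[] = "\r\n\r\nGET / HTTP/1.1\r\n";
    size_t skipped;
    int more = skip_blank_lines(input, sizeof(input) - 1, 10, &skipped);

    printf("more=%d, skipped=%zu byte(s)\n", more, skipped);  /* more=1, skipped=4 */
    return 0;
}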
 
Finally, when the maximum number of empty lines is reached in
read_request_line(), or the request line does not contain at least a method and
a (valid) URI, we fail early and avoid failures that would otherwise surface
only in later processing.


git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1710095 13f79535-47bb-0310-9956-ffa450edef68
Yann Ylavic
2015-10-22 20:26:12 +00:00
parent 306542f55f
commit f9b56afc0e
4 changed files with 94 additions and 19 deletions

CHANGES

@@ -1,6 +1,10 @@
  -*- coding: utf-8 -*-
 Changes with Apache 2.5.0
 
+  *) core: Limit to ten the number of tolerated empty lines between requests,
+     and consume them before the pipelining check to avoid possible response
+     delay when reading the next request without flushing.  [Yann Ylavic]
+
   *) mod_session: Introduce SessionExpiryUpdateInterval which allows to
      configure the session/cookie expiry's update interval. PR 57300.
      [Paul Spangler <paul.spangler ni.com>]

include/httpd.h

@@ -201,6 +201,10 @@ extern "C" {
 #ifndef DEFAULT_LIMIT_REQUEST_FIELDS
 #define DEFAULT_LIMIT_REQUEST_FIELDS 100
 #endif
+/** default/hard limit on number of leading/trailing empty lines */
+#ifndef DEFAULT_LIMIT_BLANK_LINES
+#define DEFAULT_LIMIT_BLANK_LINES 10
+#endif
 
 /**
  * The default default character set name to add if AddDefaultCharset is
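
Note that the new macro is wrapped in #ifndef, so the hard limit can presumably
be overridden at build time (a define seen before httpd.h, or
-DDEFAULT_LIMIT_BLANK_LINES=<n> on the compiler command line); there is no
run-time directive for it. A minimal, self-contained sketch of the guard's
effect, with the header's guarded block inlined for illustration:

#include <stdio.h>

/* Pretend the build defined the override first (e.g. via CPPFLAGS). */
#define DEFAULT_LIMIT_BLANK_LINES 4

/* ... what the guarded block in include/httpd.h then does ... */
#ifndef DEFAULT_LIMIT_BLANK_LINES
#define DEFAULT_LIMIT_BLANK_LINES 10
#endif

int main(void)
{
    /* Prints 4: the #ifndef guard keeps the earlier definition. */
    printf("effective blank-line limit: %d\n", DEFAULT_LIMIT_BLANK_LINES);
    return 0;
}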

modules/http/http_request.c

@@ -230,23 +230,85 @@ AP_DECLARE(void) ap_die(int type, request_rec *r)
 static void check_pipeline(conn_rec *c, apr_bucket_brigade *bb)
 {
+    c->data_in_input_filters = 0;
     if (c->keepalive != AP_CONN_CLOSE && !c->aborted) {
         apr_status_t rv;
+        int num_blank_lines = DEFAULT_LIMIT_BLANK_LINES;
+        char buf[2], cr = 0;
+        apr_size_t len;
 
-        AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(bb));
+        do {
+            const apr_size_t n = (cr) ? 2 : 1;
 
-        rv = ap_get_brigade(c->input_filters, bb, AP_MODE_SPECULATIVE,
-                            APR_NONBLOCK_READ, 1);
-        if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb)) {
-            /*
-             * Error or empty brigade: There is no data present in the input
-             * filter
-             */
-            c->data_in_input_filters = 0;
-        }
-        else {
-            c->data_in_input_filters = 1;
-        }
+            apr_brigade_cleanup(bb);
+            rv = ap_get_brigade(c->input_filters, bb, AP_MODE_SPECULATIVE,
+                                APR_NONBLOCK_READ, n);
+            if (rv != APR_SUCCESS || APR_BRIGADE_EMPTY(bb)) {
+                /*
+                 * Error or empty brigade: There is no data present in the input
+                 * filter
+                 */
+                return;
+            }
+
+            /* Ignore trailing blank lines (which must not be interpreted as
+             * pipelined requests) up to the limit, otherwise we would block
+             * on the next read without flushing data, and hence possibly delay
+             * pending response(s) until the next/real request comes in or the
+             * keepalive timeout expires.
+             */
+            len = n;
+            rv = apr_brigade_flatten(bb, buf, &len);
+            if (rv != APR_SUCCESS || len != n) {
+                ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c,
+                              "check pipeline can't fetch CRLF "
+                              "(%" APR_SIZE_T_FMT "/%" APR_SIZE_T_FMT ")",
+                              len, n);
+                c->data_in_input_filters = 1;
+                return;
+            }
+
+            if (cr) {
+                AP_DEBUG_ASSERT(len == 2 && buf[0] == APR_ASCII_CR);
+                if (buf[1] != APR_ASCII_LF) {
+                    return;
+                }
+                num_blank_lines--;
+                cr = 0;
+            }
+            else {
+                if (buf[0] == APR_ASCII_CR) {
+                    cr = 1;
+                }
+                else if (buf[0] == APR_ASCII_LF) {
+                    num_blank_lines--;
+                }
+                else {
+                    c->data_in_input_filters = 1;
+                    return;
+                }
+            }
+
+            if (!cr) {
+                /* Consume this [CR]LF, then next */
+                apr_brigade_cleanup(bb);
+                rv = ap_get_brigade(c->input_filters, bb, AP_MODE_READBYTES,
+                                    APR_NONBLOCK_READ, n);
+                if (rv == APR_SUCCESS) {
+                    rv = apr_brigade_flatten(bb, buf, &len);
+                }
+                if (rv != APR_SUCCESS || len != n) {
+                    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c,
+                                  "check pipeline can't read CRLF "
+                                  "(%" APR_SIZE_T_FMT "/%" APR_SIZE_T_FMT ")",
+                                  len, n);
+                    c->data_in_input_filters = 1;
+                    return;
+                }
+            }
+        } while (num_blank_lines >= 0);
+
+        /* Don't recycle this (abused) connection */
+        c->keepalive = AP_CONN_CLOSE;
     }
 }
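
The two-step pattern above (a speculative non-blocking read to look at the
pending bytes, then a destructive read only once they are known to be a blank
line) is roughly the filter-level equivalent of peeking at a socket. The
following standalone sketch (illustration only, not the httpd/APR code; it
assumes a platform with MSG_DONTWAIT, e.g. Linux or the BSDs) shows the same
peek-then-consume idea with plain POSIX sockets:

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int sv[2];
    char buf[8];
    ssize_t n;

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
        perror("socketpair");
        return 1;
    }

    /* Pretend the peer sent two stray CRLFs after its last request. */
    (void)send(sv[0], "\r\n\r\n", 4, 0);

    /* "Speculative" look: the bytes are reported but stay queued. */
    n = recv(sv[1], buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT);
    printf("peek:    %zd byte(s)\n", n);      /* 4 */

    /* Only blank lines were pending, so consume them for real. */
    n = recv(sv[1], buf, sizeof(buf), MSG_DONTWAIT);
    printf("consume: %zd byte(s)\n", n);      /* 4 */

    /* Nothing is left; a further non-blocking peek fails with EAGAIN. */
    n = recv(sv[1], buf, sizeof(buf), MSG_PEEK | MSG_DONTWAIT);
    printf("again:   %zd\n", n);              /* -1 */

    close(sv[0]);
    close(sv[1]);
    return 0;
}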

server/protocol.c

@@ -567,16 +567,11 @@ static int read_request_line(request_rec *r, apr_bucket_brigade *bb)
     unsigned int major = 1, minor = 0;   /* Assume HTTP/1.0 if non-"HTTP" protocol */
     char http[5];
     apr_size_t len;
-    int num_blank_lines = 0;
-    int max_blank_lines = r->server->limit_req_fields;
+    int num_blank_lines = DEFAULT_LIMIT_BLANK_LINES;
     core_server_config *conf = ap_get_core_module_config(r->server->module_config);
     int strict = conf->http_conformance & AP_HTTP_CONFORMANCE_STRICT;
     int enforce_strict = !(conf->http_conformance & AP_HTTP_CONFORMANCE_LOGONLY);
 
-    if (max_blank_lines <= 0) {
-        max_blank_lines = DEFAULT_LIMIT_REQUEST_FIELDS;
-    }
-
     /* Read past empty lines until we get a real request line,
      * a read error, the connection closes (EOF), or we timeout.
      *
@@ -622,7 +617,7 @@ static int read_request_line(request_rec *r, apr_bucket_brigade *bb)
             r->protocol = apr_pstrdup(r->pool, "HTTP/1.0");
             return 0;
         }
-    } while ((len <= 0) && (++num_blank_lines < max_blank_lines));
+    } while ((len <= 0) && (--num_blank_lines >= 0));
 
     if (APLOGrtrace5(r)) {
         ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r,
@@ -636,6 +631,13 @@ static int read_request_line(request_rec *r, apr_bucket_brigade *bb)
     uri = ap_getword_white(r->pool, &ll);
 
+    if (!*r->method || !*uri) {
+        r->status = HTTP_BAD_REQUEST;
+        r->proto_num = HTTP_VERSION(1,0);
+        r->protocol = apr_pstrdup(r->pool, "HTTP/1.0");
+        return 0;
+    }
+
     /* Provide quick information about the request method as soon as known */
 
     r->method_number = ap_method_number_of(r->method);
@@ -644,6 +646,9 @@ static int read_request_line(request_rec *r, apr_bucket_brigade *bb)
     }
 
     ap_parse_uri(r, uri);
+    if (r->status != HTTP_OK) {
+        return 0;
+    }
 
     if (ll[0]) {
         r->assbackwards = 0;
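
A rough manual check for the early-reject path (assumptions: an httpd build
containing this change listening on 127.0.0.1:8080; the exact response can vary
with configuration, and this is a one-shot probe, not a real HTTP client). A
request line that carries a method but no URI is now expected to be answered
with 400 Bad Request right away instead of being parsed further:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    const char *bad = "GET\r\n\r\n";          /* method present, URI missing */
    char buf[1024];
    ssize_t n;
    struct sockaddr_in sa;
    int fd = socket(AF_INET, SOCK_STREAM, 0);

    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_port = htons(8080);                /* assumed test port */
    sa.sin_addr.s_addr = inet_addr("127.0.0.1");

    if (fd < 0 || connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
        perror("connect");
        return 1;
    }

    (void)write(fd, bad, strlen(bad));
    n = read(fd, buf, sizeof(buf) - 1);
    if (n > 0) {
        buf[n] = '\0';
        /* Print just the status line, e.g. "HTTP/1.1 400 Bad Request". */
        printf("%.*s\n", (int)strcspn(buf, "\r\n"), buf);
    }

    close(fd);
    return 0;
}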