mod_proxy_http: handle async tunneling of Upgrade(d) protocols.

When supported by the MPM (i.e. "event"), provide async callbacks and let
them be scheduled by ap_mpm_register_poll_callback_timeout(), while the
handler returns SUSPENDED.

The new ProxyAsyncDelay directive (if positive) enables async handling,
while ProxyAsyncIdleTimeout determines the timeout applied on both ends
while tunneling.

Github: closes #126



git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1879419 13f79535-47bb-0310-9956-ffa450edef68
Author: Yann Ylavic
Committed: 2020-07-02 00:14:26 +00:00
Commit: b5faaa48c3 (parent 29bcc0eaa3)

3 changed files with 219 additions and 53 deletions
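
For reference, a minimal configuration sketch using the two new directives (the location, backend URL and timeout values below are illustrative only, not part of this change). Going async also requires an MPM able to poll connections, i.e. event:

    <Location "/ws/">
        ProxyPass "http://127.0.0.1:8080/ws/"
        # Tunnel in the handler thread for up to 3 seconds, then hand the
        # connections over to the MPM and release the thread (SUSPENDED).
        ProxyAsyncDelay 3
        # Close both ends after 65 seconds without traffic while tunneling;
        # if unset, the worker/ProxyTimeout value applies.
        ProxyAsyncIdleTimeout 65
    </Location>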

modules/proxy/mod_proxy.c

@@ -1842,6 +1842,7 @@ static void *create_proxy_dir_config(apr_pool_t *p, char *dummy)
     new->add_forwarded_headers_set = 0;
     new->forward_100_continue = 1;
     new->forward_100_continue_set = 0;
+    new->async_delay = -1;

     return (void *) new;
 }
@@ -1889,17 +1890,30 @@ static void *merge_proxy_dir_config(apr_pool_t *p, void *basev, void *addv)
     new->error_override_set = add->error_override_set || base->error_override_set;
     new->alias = (add->alias_set == 0) ? base->alias : add->alias;
     new->alias_set = add->alias_set || base->alias_set;
     new->add_forwarded_headers =
         (add->add_forwarded_headers_set == 0) ? base->add_forwarded_headers
                                               : add->add_forwarded_headers;
     new->add_forwarded_headers_set = add->add_forwarded_headers_set
                                      || base->add_forwarded_headers_set;
     new->forward_100_continue =
         (add->forward_100_continue_set == 0) ? base->forward_100_continue
                                              : add->forward_100_continue;
     new->forward_100_continue_set = add->forward_100_continue_set
                                     || base->forward_100_continue_set;
+    new->async_delay =
+        (add->async_delay_set == 0) ? base->async_delay
+                                    : add->async_delay;
+    new->async_delay_set = add->async_delay_set
+                           || base->async_delay_set;
+    new->async_idle_timeout =
+        (add->async_idle_timeout_set == 0) ? base->async_idle_timeout
+                                           : add->async_idle_timeout;
+    new->async_idle_timeout_set = add->async_idle_timeout_set
+                                  || base->async_idle_timeout_set;

     return new;
 }
@@ -2479,6 +2493,33 @@ static const char *
     return NULL;
 }

+static const char *
+set_proxy_async_delay(cmd_parms *parms, void *dconf, const char *arg)
+{
+    proxy_dir_conf *conf = dconf;
+    if (strcmp(arg, "-1") == 0) {
+        conf->async_delay = -1;
+    }
+    else if (ap_timeout_parameter_parse(arg, &conf->async_delay, "s")
+             || conf->async_delay < 0) {
+        return "ProxyAsyncDelay has wrong format";
+    }
+    conf->async_delay_set = 1;
+    return NULL;
+}
+
+static const char *
+set_proxy_async_idle(cmd_parms *parms, void *dconf, const char *arg)
+{
+    proxy_dir_conf *conf = dconf;
+    if (ap_timeout_parameter_parse(arg, &conf->async_idle_timeout, "s")
+        || conf->async_idle_timeout < 0) {
+        return "ProxyAsyncIdleTimeout has wrong format";
+    }
+    conf->async_idle_timeout_set = 1;
+    return NULL;
+}
+
 static const char *
 set_recv_buffer_size(cmd_parms *parms, void *dummy, const char *arg)
 {
@@ -3068,6 +3109,10 @@ static const command_rec proxy_cmds[] =
     AP_INIT_FLAG("Proxy100Continue", forward_100_continue, NULL, RSRC_CONF|ACCESS_CONF,
                  "on if 100-Continue should be forwarded to the origin server, off if the "
                  "proxy should handle it by itself"),
+    AP_INIT_TAKE1("ProxyAsyncDelay", set_proxy_async_delay, NULL, RSRC_CONF|ACCESS_CONF,
+                  "Amount of time to poll before going asynchronous"),
+    AP_INIT_TAKE1("ProxyAsyncIdleTimeout", set_proxy_async_idle, NULL, RSRC_CONF|ACCESS_CONF,
+                  "Timeout for asynchronous inactivity, ProxyTimeout by default"),
     {NULL}
 };

modules/proxy/mod_proxy.h

@@ -247,6 +247,11 @@ typedef struct {
     unsigned int forward_100_continue_set:1;
     apr_array_header_t *error_override_codes;

+    apr_interval_time_t async_delay;
+    apr_interval_time_t async_idle_timeout;
+    unsigned int async_delay_set:1;
+    unsigned int async_idle_timeout_set:1;
 } proxy_dir_conf;

 /* if we interpolate env vars per-request, we'll need a per-request

modules/proxy/mod_proxy_http.c

@@ -18,19 +18,17 @@
#include "mod_proxy.h" #include "mod_proxy.h"
#include "ap_regex.h" #include "ap_regex.h"
#include "ap_mpm.h"
module AP_MODULE_DECLARE_DATA proxy_http_module; module AP_MODULE_DECLARE_DATA proxy_http_module;
static int (*ap_proxy_clear_connection_fn)(request_rec *r, apr_table_t *headers) = static int (*ap_proxy_clear_connection_fn)(request_rec *r, apr_table_t *headers) =
NULL; NULL;
static apr_status_t ap_proxy_http_cleanup(const char *scheme,
request_rec *r,
proxy_conn_rec *backend);
static apr_status_t ap_proxygetline(apr_bucket_brigade *bb, char *s, int n, static apr_status_t ap_proxygetline(apr_bucket_brigade *bb, char *s, int n,
request_rec *r, int flags, int *read); request_rec *r, int flags, int *read);
/* /*
* Canonicalise http-like URLs. * Canonicalise http-like URLs.
* scheme is the scheme for the URL * scheme is the scheme for the URL
@@ -219,6 +217,12 @@ static void add_cl(apr_pool_t *p,
 #define MAX_MEM_SPOOL 16384

+typedef enum {
+    PROXY_HTTP_REQ_HAVE_HEADER = 0,
+    PROXY_HTTP_TUNNELING
+} proxy_http_state;
+
 typedef enum {
     RB_INIT = 0,
     RB_STREAM_CL,
@@ -229,29 +233,129 @@ typedef enum {
 typedef struct {
     apr_pool_t *p;
     request_rec *r;
+    const char *proto;
     proxy_worker *worker;
+    proxy_dir_conf *dconf;
     proxy_server_conf *sconf;
     char server_portstr[32];
     proxy_conn_rec *backend;
     conn_rec *origin;
     apr_bucket_alloc_t *bucket_alloc;
     apr_bucket_brigade *header_brigade;
     apr_bucket_brigade *input_brigade;
     char *old_cl_val, *old_te_val;
     apr_off_t cl_val;
+    proxy_http_state state;
     rb_methods rb_method;
-    int force10;
     const char *upgrade;
+    proxy_tunnel_rec *tunnel;
+    apr_array_header_t *pfds;
+    apr_interval_time_t idle_timeout;

-    int expecting_100;
-    unsigned int do_100_continue:1,
-                 prefetch_nonblocking:1;
+    unsigned int can_go_async :1,
+                 expecting_100 :1,
+                 do_100_continue :1,
+                 prefetch_nonblocking :1,
+                 force10 :1;
 } proxy_http_req_t;

+static void proxy_http_async_finish(proxy_http_req_t *req)
+{
+    conn_rec *c = req->r->connection;
+
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, req->r,
+                  "proxy %s: finish async", req->proto);
+
+    proxy_run_detach_backend(req->r, req->backend);
+    ap_proxy_release_connection(req->proto, req->backend, req->r->server);
+
+    ap_finalize_request_protocol(req->r);
+    ap_process_request_after_handler(req->r);
+    /* don't touch req or req->r from here */
+
+    c->cs->state = CONN_STATE_LINGER;
+    ap_mpm_resume_suspended(c);
+}
+
+/* If neither socket becomes readable in the specified timeout,
+ * this callback will kill the request.
+ * We do not have to worry about having a cancel and a IO both queued.
+ */
+static void proxy_http_async_cancel_cb(void *baton)
+{
+    proxy_http_req_t *req = (proxy_http_req_t *)baton;
+
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, req->r,
+                  "proxy %s: cancel async", req->proto);
+
+    req->r->connection->keepalive = AP_CONN_CLOSE;
+    req->backend->close = 1;
+    proxy_http_async_finish(req);
+}
+
+/* Invoked by the event loop when data is ready on either end.
+ * We don't need the invoke_mtx, since we never put multiple callback events
+ * in the queue.
+ */
+static void proxy_http_async_cb(void *baton)
+{
+    proxy_http_req_t *req = (proxy_http_req_t *)baton;
+    int status;
+
+    if (req->pfds) {
+        apr_pool_clear(req->pfds->pool);
+    }
+
+    switch (req->state) {
+    case PROXY_HTTP_TUNNELING:
+        /* Pump both ends until they'd block and then start over again */
+        status = ap_proxy_tunnel_run(req->tunnel);
+
+        if (status == HTTP_GATEWAY_TIME_OUT) {
+            if (req->pfds) {
+                apr_pollfd_t *async_pfds = (void *)req->pfds->elts;
+                apr_pollfd_t *tunnel_pfds = (void *)req->tunnel->pfds->elts;
+                async_pfds[0].reqevents = tunnel_pfds[0].reqevents;
+                async_pfds[1].reqevents = tunnel_pfds[1].reqevents;
+            }
+            else {
+                req->pfds = apr_array_copy(req->p, req->tunnel->pfds);
+                apr_pool_create(&req->pfds->pool, req->p);
+            }
+            status = SUSPENDED;
+        }
+        break;
+
+    default:
+        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, req->r,
+                      "proxy %s: unexpected async state (%i)",
+                      req->proto, (int)req->state);
+        status = HTTP_INTERNAL_SERVER_ERROR;
+        break;
+    }
+
+    if (status == SUSPENDED) {
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, req->r,
+                      "proxy %s: suspended, going async",
+                      req->proto);
+
+        ap_mpm_register_poll_callback_timeout(req->pfds,
+                                              proxy_http_async_cb,
+                                              proxy_http_async_cancel_cb,
+                                              req, req->idle_timeout);
+    }
+    else if (status != OK) {
+        proxy_http_async_cancel_cb(req);
+    }
+    else {
+        proxy_http_async_finish(req);
+    }
+}
+
 /* Read what's in the client pipe. If nonblocking is set and read is EAGAIN,
  * pass a FLUSH bucket to the backend and read again in blocking mode.
  */
@@ -1200,13 +1304,11 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
     int i;
     const char *te = NULL;
     int original_status = r->status;
+    int proxy_status = OK;
     const char *original_status_line = r->status_line;
     const char *proxy_status_line = NULL;
     apr_interval_time_t old_timeout = 0;
-    proxy_dir_conf *dconf;
-    int proxy_status = OK;
-
-    dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+    proxy_dir_conf *dconf = req->dconf;

     bb = apr_brigade_create(p, c->bucket_alloc);
     pass_bb = apr_brigade_create(p, c->bucket_alloc);
@@ -1634,9 +1736,6 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
     if (proxy_status == HTTP_SWITCHING_PROTOCOLS) {
         apr_status_t rv;
-        proxy_tunnel_rec *tunnel;
-        apr_interval_time_t client_timeout = -1,
-                            backend_timeout = -1;

         /* If we didn't send the full body yet, do it now */
         if (do_100_continue) {
@@ -1650,41 +1749,35 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
         ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(10239)
                       "HTTP: tunneling protocol %s", upgrade);

-        rv = ap_proxy_tunnel_create(&tunnel, r, origin, "HTTP");
+        rv = ap_proxy_tunnel_create(&req->tunnel, r, origin, upgrade);
         if (rv != APR_SUCCESS) {
             ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(10240)
                           "can't create tunnel for %s", upgrade);
             return HTTP_INTERNAL_SERVER_ERROR;
         }

-        /* Set timeout to the lowest configured for client or backend */
-        apr_socket_timeout_get(backend->sock, &backend_timeout);
-        apr_socket_timeout_get(ap_get_conn_socket(c), &client_timeout);
-        if (backend_timeout >= 0 && backend_timeout < client_timeout) {
-            tunnel->timeout = backend_timeout;
-        }
-        else {
-            tunnel->timeout = client_timeout;
+        req->proto = upgrade;
+
+        if (req->can_go_async) {
+            /* Let the MPM schedule the work when idle */
+            req->state = PROXY_HTTP_TUNNELING;
+            req->tunnel->timeout = dconf->async_delay;
+            proxy_http_async_cb(req);
+            return SUSPENDED;
         }

-        /* Let proxy tunnel forward everything */
-        status = ap_proxy_tunnel_run(tunnel);
-        if (ap_is_HTTP_ERROR(status)) {
-            /* Tunnel always return HTTP_GATEWAY_TIME_OUT on timeout,
-             * but we can differentiate between client and backend here.
-             */
-            if (status == HTTP_GATEWAY_TIME_OUT
-                    && tunnel->timeout == client_timeout) {
-                status = HTTP_REQUEST_TIME_OUT;
-            }
-        }
-        else {
+        /* Let proxy tunnel forward everything within this thread */
+        req->tunnel->timeout = req->idle_timeout;
+        status = ap_proxy_tunnel_run(req->tunnel);
+        if (!ap_is_HTTP_ERROR(status)) {
             /* Update r->status for custom log */
             status = HTTP_SWITCHING_PROTOCOLS;
         }
         r->status = status;

         /* We are done with both connections */
+        r->connection->keepalive = AP_CONN_CLOSE;
+        backend->close = 1;

         return DONE;
     }
@@ -2000,14 +2093,6 @@ int ap_proxy_http_process_response(proxy_http_req_t *req)
     return OK;
 }

-static
-apr_status_t ap_proxy_http_cleanup(const char *scheme, request_rec *r,
-                                   proxy_conn_rec *backend)
-{
-    ap_proxy_release_connection(scheme, backend, r->server);
-    return OK;
-}
-
 /*
  * This handles http:// URLs, and other URLs using a remote proxy over http
  * If proxyhost is NULL, then contact the server directly, otherwise
@@ -2029,6 +2114,7 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
     proxy_http_req_t *req = NULL;
     proxy_conn_rec *backend = NULL;
     apr_bucket_brigade *input_brigade = NULL;
+    int mpm_can_poll = 0;
     int is_ssl = 0;
     conn_rec *c = r->connection;
     proxy_dir_conf *dconf;
@@ -2080,20 +2166,26 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
                                               worker, r->server)) != OK) {
         return status;
     }
     backend->is_ssl = is_ssl;

+    dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
+    ap_mpm_query(AP_MPMQ_CAN_POLL, &mpm_can_poll);
+
     req = apr_pcalloc(p, sizeof(*req));
     req->p = p;
     req->r = r;
     req->sconf = conf;
+    req->dconf = dconf;
     req->worker = worker;
     req->backend = backend;
+    req->proto = proxy_function;
     req->bucket_alloc = c->bucket_alloc;
+    req->can_go_async = (mpm_can_poll &&
+                         dconf->async_delay_set &&
+                         dconf->async_delay >= 0);
+    req->state = PROXY_HTTP_REQ_HAVE_HEADER;
     req->rb_method = RB_INIT;

-    dconf = ap_get_module_config(r->per_dir_config, &proxy_module);

     if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
         req->force10 = 1;
     }
@@ -2105,6 +2197,22 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
         }
     }

+    if (req->can_go_async || req->upgrade) {
+        /* If ProxyAsyncIdleTimeout is not set, use backend timeout */
+        if (req->can_go_async && dconf->async_idle_timeout_set) {
+            req->idle_timeout = dconf->async_idle_timeout;
+        }
+        else if (worker->s->timeout_set) {
+            req->idle_timeout = worker->s->timeout;
+        }
+        else if (conf->timeout_set) {
+            req->idle_timeout = conf->timeout;
+        }
+        else {
+            req->idle_timeout = r->server->timeout;
+        }
+    }
+
     /* We possibly reuse input data prefetched in previous call(s), e.g. for a
      * balancer fallback scenario, and in this case the 100 continue settings
      * should be consistent between balancer members. If not, we need to ignore
@@ -2128,15 +2236,19 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
          * req->expecting_100 (i.e. cleared only if mod_proxy_http sent the
          * "100 Continue" according to its policy).
          */
-        req->do_100_continue = req->prefetch_nonblocking = 1;
-        req->expecting_100 = r->expecting_100;
+        req->do_100_continue = 1;
+        req->expecting_100 = (r->expecting_100 != 0);
         r->expecting_100 = 0;
     }

     /* Should we block while prefetching the body or try nonblocking and flush
      * data to the backend ASAP?
      */
-    else if (input_brigade || apr_table_get(r->subprocess_env,
-                                            "proxy-prefetch-nonblocking")) {
+    if (input_brigade
+            || req->can_go_async
+            || req->do_100_continue
+            || apr_table_get(r->subprocess_env,
+                             "proxy-prefetch-nonblocking")) {
         req->prefetch_nonblocking = 1;
     }
@@ -2255,6 +2367,9 @@ static int proxy_http_handler(request_rec *r, proxy_worker *worker,
     /* Step Five: Receive the Response... Fall thru to cleanup */
     status = ap_proxy_http_process_response(req);
+    if (status == SUSPENDED) {
+        return SUSPENDED;
+    }

     if (req->backend) {
         proxy_run_detach_backend(r, req->backend);
     }
@@ -2267,7 +2382,8 @@ cleanup:
     if (req->backend) {
         if (status != OK)
             req->backend->close = 1;
-        ap_proxy_http_cleanup(proxy_function, r, req->backend);
+        ap_proxy_release_connection(proxy_function, req->backend,
+                                    r->server);
     }
     if (req->expecting_100) {
         /* Restore r->expecting_100 if we didn't touch it */