
mod_http2: slave connections are reused

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1735416 13f79535-47bb-0310-9956-ffa450edef68
Stefan Eissing
2016-03-17 12:55:11 +00:00
parent 09dd2d426d
commit 07d45e357e
14 changed files with 132 additions and 129 deletions

View File

@@ -1,6 +1,8 @@
-*- coding: utf-8 -*-
Changes with Apache 2.5.0
*) mod_http2: slave connections are now reused. [Stefan Eissing]
*) mod_proxy_http2: using HTTP/2 flow control for backend streams by
     observing data actually sent out on the frontend h2 connection.
[Stefan Eissing]
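
The reuse described by these entries rests on a small, fixed-capacity APR array used as a free list: finished slave connections are pushed onto h2_mplx's new spare_slaves array while there is room (and destroyed otherwise), and the next task pops one before creating a fresh connection. A minimal standalone sketch of that recycling idiom follows; thing, thing_get and thing_put are placeholder names, not mod_http2 code.

/* Sketch of the spare-array recycling idiom used by the patch below.
 * Placeholder type and helpers; build against APR, e.g.
 *   cc sketch.c $(pkg-config --cflags --libs apr-1) */
#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_tables.h>

typedef struct thing { int id; } thing;              /* stand-in for conn_rec */

static thing *thing_create(apr_pool_t *p, int id)
{
    thing *t = apr_pcalloc(p, sizeof(*t));           /* expensive setup elided */
    t->id = id;
    return t;
}

static void thing_destroy(thing *t)
{
    (void)t;                                         /* pool-allocated, nothing to free here */
}

/* Reuse a spare instance if one is parked, otherwise create a new one. */
static thing *thing_get(apr_array_header_t *spares, apr_pool_t *p, int id)
{
    thing **pt = (thing **)apr_array_pop(spares);
    return pt ? *pt : thing_create(p, id);
}

/* Park the instance for reuse while there is room, else destroy it. */
static void thing_put(apr_array_header_t *spares, thing *t)
{
    if (spares->nelts < spares->nalloc) {
        APR_ARRAY_PUSH(spares, thing*) = t;
    }
    else {
        thing_destroy(t);
    }
}

int main(void)
{
    apr_pool_t *pool;
    apr_array_header_t *spares;
    thing *a, *b;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    spares = apr_array_make(pool, 10, sizeof(thing*)); /* capacity 10, as in h2_mplx */

    a = thing_get(spares, pool, 1);   /* nothing spare yet: created */
    thing_put(spares, a);             /* parked for reuse */
    b = thing_get(spares, pool, 2);   /* pops the parked instance instead of creating */
    printf("reused: %s\n", (a == b) ? "yes" : "no");

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}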

View File

@@ -239,7 +239,6 @@ apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c)
return status;
}
conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent,
apr_allocator_t *allocator)
{
@@ -324,3 +323,9 @@ void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator)
}
apr_pool_destroy(slave->pool);
}
apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
{
return ap_run_pre_connection(slave, csd);
}

View File

@@ -70,4 +70,7 @@ conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent,
apr_allocator_t *allocator);
void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator);
apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
void h2_slave_run_connection(conn_rec *slave);
#endif /* defined(__mod_h2__h2_conn__) */

View File

@@ -218,6 +218,8 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
m->tx_handles_reserved = 0;
m->tx_chunk_size = 4;
m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams,
m->stream_max_mem);
h2_ngn_shed_set_ctx(m->ngn_shed , m);
@@ -278,8 +280,6 @@ static int io_out_consumed_signal(h2_mplx *m, h2_io *io)
static void io_destroy(h2_mplx *m, h2_io *io, int events)
{
-    apr_pool_t *pool;
/* cleanup any buffered input */
h2_io_in_shutdown(io);
if (events) {
@@ -299,23 +299,20 @@ static void io_destroy(h2_mplx *m, h2_io *io, int events)
}
if (io->task) {
-        if (m->spare_allocator) {
-            apr_allocator_destroy(m->spare_allocator);
-            m->spare_allocator = NULL;
-        }
-        h2_slave_destroy(io->task->c, &m->spare_allocator);
+        conn_rec *slave = io->task->c;
+        h2_task_destroy(io->task);
+        io->task = NULL;
+        if (m->spare_slaves->nelts < m->spare_slaves->nalloc) {
+            APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave;
+        }
+        else {
+            h2_slave_destroy(slave, NULL);
+        }
    }
-    pool = io->pool;
-    io->pool = NULL;
-    if (0 && pool) {
-        apr_pool_clear(pool);
-        if (m->spare_pool) {
-            apr_pool_destroy(m->spare_pool);
-        }
-        m->spare_pool = pool;
+    if (io->pool) {
+        apr_pool_destroy(io->pool);
+    }
check_tx_free(m);
@@ -1006,17 +1003,11 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
static h2_io *open_io(h2_mplx *m, int stream_id, const h2_request *request)
{
-    apr_pool_t *io_pool = m->spare_pool;
+    apr_pool_t *io_pool;
    h2_io *io;
-    if (!io_pool) {
-        apr_pool_create(&io_pool, m->pool);
-        apr_pool_tag(io_pool, "h2_io");
-    }
-    else {
-        m->spare_pool = NULL;
-    }
+    apr_pool_create(&io_pool, m->pool);
+    apr_pool_tag(io_pool, "h2_io");
io = h2_io_create(stream_id, io_pool, m->bucket_alloc, request);
h2_io_set_add(m->stream_ios, io);
@@ -1074,10 +1065,21 @@ static h2_task *pop_task(h2_mplx *m)
}
}
else if (io) {
-        conn_rec *slave = h2_slave_create(m->c, m->pool, m->spare_allocator);
-        m->spare_allocator = NULL;
+        conn_rec *slave, **pslave;
+        pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
+        if (pslave) {
+            slave = *pslave;
+        }
+        else {
+            slave = h2_slave_create(m->c, m->pool, NULL);
+            h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave));
+        }
io->task = task = h2_task_create(m->id, io->request, slave, m);
apr_table_setn(slave->notes, H2_TASK_ID_NOTE, task->id);
io->worker_started = 1;
io->started_at = apr_time_now();
if (sid > m->max_stream_started) {

View File

@@ -96,8 +96,7 @@ struct h2_mplx {
apr_size_t stream_max_mem;
apr_interval_time_t stream_timeout;
-    apr_pool_t *spare_pool;           /* spare pool, ready for next io */
-    apr_allocator_t *spare_allocator;
+    apr_array_header_t *spare_slaves; /* spare slave connections */
struct h2_workers *workers;
apr_size_t tx_handles_reserved;

View File

@@ -145,7 +145,7 @@ void h2_ngn_shed_abort(h2_ngn_shed *shed)
static void ngn_add_task(h2_req_engine *ngn, h2_task *task)
{
-    h2_ngn_entry *entry = apr_pcalloc(task->c->pool, sizeof(*entry));
+    h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry));
APR_RING_ELEM_INIT(entry, link);
entry->task = task;
H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry);
@@ -186,7 +186,7 @@ apr_status_t h2_ngn_shed_push_task(h2_ngn_shed *shed, const char *ngn_type,
/* no existing engine or being shut down, start a new one */
if (einit) {
apr_status_t status;
-        apr_pool_t *pool = task->c->pool;
+        apr_pool_t *pool = task->pool;
h2_req_engine *newngn;
newngn = apr_pcalloc(pool, sizeof(*ngn));

View File

@@ -89,8 +89,6 @@ static apr_status_t h2_filter_read_response(ap_filter_t* f,
/*******************************************************************************
* Register various hooks
*/
static const char *const mod_ssl[] = { "mod_ssl.c", NULL};
static int h2_task_pre_conn(conn_rec* c, void *arg);
static int h2_task_process_conn(conn_rec* c);
APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
@@ -98,12 +96,6 @@ APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
void h2_task_register_hooks(void)
{
/* This hook runs on new connections before mod_ssl has a say.
* Its purpose is to prevent mod_ssl from touching our pseudo-connections
* for streams.
*/
ap_hook_pre_connection(h2_task_pre_conn,
NULL, mod_ssl, APR_HOOK_FIRST);
/* When the connection processing actually starts, we might
* take over, if the connection is for a task.
*/
@@ -131,34 +123,14 @@ apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s)
return APR_SUCCESS;
}
static int h2_task_pre_conn(conn_rec* c, void *arg)
{
h2_ctx *ctx;
if (!c->master) {
return OK;
}
ctx = h2_ctx_get(c, 0);
(void)arg;
if (h2_ctx_is_task(ctx)) {
h2_task *task = h2_ctx_get_task(ctx);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
"h2_h2, pre_connection, found stream task");
/* Add our own, network level in- and output filters.
*/
ap_add_input_filter("H2_TO_H1", task, NULL, c);
ap_add_output_filter("H1_TO_H2", task, NULL, c);
}
return OK;
}
h2_task *h2_task_create(long session_id, const h2_request *req,
conn_rec *c, h2_mplx *mplx)
{
-    h2_task *task = apr_pcalloc(c->pool, sizeof(h2_task));
+    apr_pool_t *pool;
+    h2_task *task;
+    apr_pool_create(&pool, c->pool);
+    task = apr_pcalloc(pool, sizeof(h2_task));
if (task == NULL) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, c,
APLOGNO(02941) "h2_task(%ld-%d): create stream task",
@@ -167,20 +139,33 @@ h2_task *h2_task_create(long session_id, const h2_request *req,
return NULL;
}
-    task->id = apr_psprintf(c->pool, "%ld-%d", session_id, req->id);
+    task->id = apr_psprintf(pool, "%ld-%d", session_id, req->id);
task->stream_id = req->id;
task->c = c;
task->mplx = mplx;
task->pool = pool;
task->request = req;
task->input_eos = !req->body;
task->ser_headers = req->serialize;
task->blocking = 1;
h2_ctx_create_for(c, task);
/* Add our own, network level in- and output filters. */
ap_add_input_filter("H2_TO_H1", task, NULL, c);
ap_add_output_filter("H1_TO_H2", task, NULL, c);
return task;
}
void h2_task_destroy(h2_task *task)
{
    ap_remove_input_filter_byhandle(task->c->input_filters, "H2_TO_H1");
ap_remove_output_filter_byhandle(task->c->output_filters, "H1_TO_H2");
if (task->pool) {
apr_pool_destroy(task->pool);
}
}
void h2_task_set_io_blocking(h2_task *task, int blocking)
{
task->blocking = blocking;
@@ -197,7 +182,7 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_cond_t *cond)
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
"h2_task(%s): process connection", task->id);
-    ap_process_connection(task->c, ap_get_conn_socket(task->c));
+    ap_run_process_connection(task->c);
if (task->frozen) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,

View File

@@ -53,6 +53,7 @@ struct h2_task {
int stream_id;
conn_rec *c;
struct h2_mplx *mplx;
apr_pool_t *pool;
const struct h2_request *request;
unsigned int filters_set : 1;
@@ -74,6 +75,8 @@ struct h2_task {
h2_task *h2_task_create(long session_id, const struct h2_request *req,
conn_rec *c, struct h2_mplx *mplx);
void h2_task_destroy(h2_task *task);
apr_status_t h2_task_do(h2_task *task, struct apr_thread_cond_t *cond);
void h2_task_register_hooks(void);
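
The recurring change in h2_task.c and its helpers is that a task now owns a private subpool (task->pool), created from the slave connection's pool in h2_task_create and torn down in h2_task_destroy, so a reused slave connection does not accumulate per-request allocations. A minimal sketch of that create/destroy lifecycle, using the hypothetical names job, job_create and job_destroy and only standard APR pool calls:

/* Sketch of the per-task subpool lifecycle; "job" stands in for h2_task,
 * conn_pool for the (now reusable) slave connection's pool. */
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_strings.h>

typedef struct job {
    apr_pool_t *pool;        /* owned by the job, destroyed with it */
    const char *id;
} job;

static job *job_create(apr_pool_t *conn_pool, int stream_id)
{
    apr_pool_t *pool;
    job *j;

    apr_pool_create(&pool, conn_pool);               /* subpool of the connection pool */
    apr_pool_tag(pool, "job");
    j = apr_pcalloc(pool, sizeof(*j));
    j->pool = pool;
    j->id = apr_psprintf(pool, "job-%d", stream_id); /* allocate from the job pool, not conn_pool */
    return j;
}

static void job_destroy(job *j)
{
    /* frees the id and anything else allocated from j->pool,
     * without touching the long-lived connection pool */
    apr_pool_destroy(j->pool);
}

int main(void)
{
    apr_pool_t *conn_pool;
    int i;

    apr_initialize();
    apr_pool_create(&conn_pool, NULL);

    /* the "connection" can be reused many times without its pool
     * growing with per-job garbage */
    for (i = 0; i < 1000; i++) {
        job *j = job_create(conn_pool, i);
        job_destroy(j);
    }

    apr_pool_destroy(conn_pool);
    apr_terminate();
    return 0;
}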

View File

@@ -45,9 +45,8 @@ static int ser_header(void *ctx, const char *name, const char *value)
h2_task_input *h2_task_input_create(h2_task *task, conn_rec *c)
{
-    h2_task_input *input = apr_pcalloc(c->pool, sizeof(h2_task_input));
+    h2_task_input *input = apr_pcalloc(task->pool, sizeof(h2_task_input));
    if (input) {
-        input->c = c;
        input->task = task;
input->bb = NULL;
input->block = APR_BLOCK_READ;
@@ -56,7 +55,7 @@ h2_task_input *h2_task_input_create(h2_task *task, conn_rec *c)
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_task_input(%s): serialize request %s %s",
task->id, task->request->method, task->request->path);
-            input->bb = apr_brigade_create(c->pool, c->bucket_alloc);
+            input->bb = apr_brigade_create(task->pool, c->bucket_alloc);
apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n",
task->request->method, task->request->path);
apr_table_do(ser_header, input, task->request->headers, NULL);
@@ -66,7 +65,7 @@ h2_task_input *h2_task_input_create(h2_task *task, conn_rec *c)
}
}
else if (!input->task->input_eos) {
-        input->bb = apr_brigade_create(c->pool, c->bucket_alloc);
+        input->bb = apr_brigade_create(task->pool, c->bucket_alloc);
}
else {
/* We do not serialize and have eos already, no need to

View File

@@ -26,7 +26,6 @@ struct h2_task;
typedef struct h2_task_input h2_task_input;
struct h2_task_input {
-    conn_rec *c;
struct h2_task *task;
apr_bucket_brigade *bb;
apr_read_type_e block;

View File

@@ -36,12 +36,10 @@
h2_task_output *h2_task_output_create(h2_task *task, conn_rec *c)
{
-    h2_task_output *output = apr_pcalloc(c->pool, sizeof(h2_task_output));
+    h2_task_output *output = apr_pcalloc(task->pool, sizeof(h2_task_output));
    if (output) {
-        output->c = c;
        output->task = task;
-        output->state = H2_TASK_OUT_INIT;
-        output->from_h1 = h2_from_h1_create(task->stream_id, c->pool);
+        output->from_h1 = h2_from_h1_create(task->stream_id, task->pool);
}
return output;
}
@@ -54,7 +52,7 @@ static apr_table_t *get_trailers(h2_task_output *output)
output->trailers_passed = 1;
if (h2_task_logio_add_bytes_out) {
/* counter trailers as if we'd do a HTTP/1.1 serialization */
-            h2_task_logio_add_bytes_out(output->c,
+            h2_task_logio_add_bytes_out(output->task->c,
h2_util_table_bytes(response->trailers, 3)+1);
}
return response->trailers;
@@ -72,14 +70,14 @@ static apr_status_t open_response(h2_task_output *output, ap_filter_t *f,
if (f) {
/* This happens currently when ap_die(status, r) is invoked
* by a read request filter. */
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->c, APLOGNO(03204)
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->task->c, APLOGNO(03204)
"h2_task_output(%s): write without response by %s "
"for %s %s %s",
output->task->id, caller,
output->task->request->method,
output->task->request->authority,
output->task->request->path);
-        output->c->aborted = 1;
+        output->task->c->aborted = 1;
}
if (output->task->io) {
apr_thread_cond_broadcast(output->task->io);
@@ -90,10 +88,10 @@ static apr_status_t open_response(h2_task_output *output, ap_filter_t *f,
if (h2_task_logio_add_bytes_out) {
/* count headers as if we'd do a HTTP/1.1 serialization */
output->written = h2_util_table_bytes(response->headers, 3)+1;
-        h2_task_logio_add_bytes_out(output->c, output->written);
+        h2_task_logio_add_bytes_out(output->task->c, output->written);
    }
    get_trailers(output);
-    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->c, APLOGNO(03348)
+    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->task->c, APLOGNO(03348)
"h2_task(%s): open response to %s %s %s",
output->task->id, output->task->request->method,
output->task->request->authority,
@@ -109,7 +107,7 @@ static apr_status_t write_brigade_raw(h2_task_output *output,
apr_status_t status;
apr_brigade_length(bb, 0, &written);
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->c,
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c,
"h2_task(%s): write response body (%ld bytes)",
output->task->id, (long)written);
@@ -125,7 +123,7 @@ static apr_status_t write_brigade_raw(h2_task_output *output,
if (status == APR_SUCCESS) {
output->written += written;
if (h2_task_logio_add_bytes_out) {
-            h2_task_logio_add_bytes_out(output->c, written);
+            h2_task_logio_add_bytes_out(output->task->c, written);
}
}
return status;
@@ -141,20 +139,20 @@ apr_status_t h2_task_output_write(h2_task_output *output,
apr_status_t status = APR_SUCCESS;
if (APR_BRIGADE_EMPTY(bb)) {
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c,
"h2_task(%s): empty write", output->task->id);
return APR_SUCCESS;
}
if (output->task->frozen) {
-        h2_util_bb_log(output->c, output->task->stream_id, APLOG_TRACE2,
+        h2_util_bb_log(output->task->c, output->task->stream_id, APLOG_TRACE2,
"frozen task output write, ignored", bb);
return APR_SUCCESS;
}
-    if (output->state == H2_TASK_OUT_INIT) {
+    if (!output->response_open) {
        status = open_response(output, f, bb, "write");
-        output->state = H2_TASK_OUT_STARTED;
+        output->response_open = 1;
}
/* Attempt to write saved brigade first */
@@ -171,13 +169,13 @@ apr_status_t h2_task_output_write(h2_task_output *output,
/* If the passed brigade is not empty, save it before return */
if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, output->c,
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, output->task->c,
"h2_task(%s): could not write all, saving brigade",
output->task->id);
if (!output->bb) {
-        output->bb = apr_brigade_create(output->c->pool, output->c->bucket_alloc);
+        output->bb = apr_brigade_create(output->task->pool, output->task->c->bucket_alloc);
}
-    return ap_save_brigade(f, &output->bb, &bb, output->c->pool);
+    return ap_save_brigade(f, &output->bb, &bb, output->task->pool);
}
return status;

View File

@@ -26,20 +26,13 @@ struct h2_mplx;
struct h2_task;
struct h2_from_h1;
-typedef enum {
-    H2_TASK_OUT_INIT,
-    H2_TASK_OUT_STARTED,
-    H2_TASK_OUT_DONE,
-} h2_task_out_state_t;
typedef struct h2_task_output h2_task_output;
struct h2_task_output {
-    conn_rec *c;
    struct h2_task *task;
-    h2_task_out_state_t state;
    struct h2_from_h1 *from_h1;
+    unsigned int response_open : 1;
unsigned int trailers_passed : 1;
apr_off_t written;

View File

@@ -494,9 +494,6 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
if (APR_BUCKET_IS_EOS(b)) {
APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc));
}
else if (APR_BUCKET_IS_FLUSH(b)) {
APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc));
}
else {
/* ignore */
}

View File

@@ -51,6 +51,7 @@ static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn);
typedef struct h2_proxy_ctx {
conn_rec *owner;
apr_pool_t *pool;
request_rec *rbase;
server_rec *server;
const char *proxy_func;
@@ -218,12 +219,28 @@ static apr_status_t proxy_engine_init(h2_req_engine *engine,
h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config,
&proxy_http2_module);
if (ctx) {
conn_rec *c = ctx->owner;
h2_proxy_ctx *nctx;
/* we need another lifetime for this. If we do not host
* an engine, the context lives in r->pool. Since we expect
         * to serve more than r, we need to live longer */
nctx = apr_pcalloc(pool, sizeof(*nctx));
if (nctx == NULL) {
return APR_ENOMEM;
}
memcpy(nctx, ctx, sizeof(*nctx));
ctx = nctx;
ctx->pool = pool;
ctx->engine = engine;
ctx->engine_id = id;
ctx->engine_type = type;
ctx->engine_pool = pool;
ctx->req_buffer_size = req_buffer_size;
ctx->capacity = 100;
ap_set_module_config(c->conn_config, &proxy_http2_module, ctx);
*pconsumed = out_consumed;
*pctx = ctx;
return APR_SUCCESS;
@@ -381,14 +398,14 @@ static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
return status;
}
-static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx)
+static h2_proxy_ctx *push_request_somewhere(h2_proxy_ctx *ctx)
{
conn_rec *c = ctx->owner;
const char *engine_type, *hostname;
hostname = (ctx->p_conn->ssl_hostname?
ctx->p_conn->ssl_hostname : ctx->p_conn->hostname);
-    engine_type = apr_psprintf(c->pool, "proxy_http2 %s%s", hostname,
+    engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname,
ctx->server_portstr);
if (c->master && req_engine_push && ctx->next && is_h2 && is_h2(c)) {
@@ -397,22 +414,25 @@ static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx)
* uses the same backend. We may be called to create an engine
* ourself. */
if (req_engine_push(engine_type, ctx->next, proxy_engine_init)
-        == APR_SUCCESS && ctx->engine == NULL) {
-            /* Another engine instance has taken over processing of this
-             * request. */
-            ctx->r_status = SUSPENDED;
-            ctx->next = NULL;
-            return APR_SUCCESS;
+        == APR_SUCCESS) {
+            /* to renew the lifetime, we might have set a new ctx */
+            ctx = ap_get_module_config(c->conn_config, &proxy_http2_module);
+            if (ctx->engine == NULL) {
+                /* Another engine instance has taken over processing of this
+                 * request. */
+                ctx->r_status = SUSPENDED;
+                ctx->next = NULL;
+                return ctx;
+            }
}
}
if (!ctx->engine) {
/* No engine was available or has been initialized, handle this
* request just by ourself. */
-        ctx->engine_id = apr_psprintf(c->pool, "eng-proxy-%ld", c->id);
+        ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id);
ctx->engine_type = engine_type;
-        ctx->engine_pool = c->pool;
+        ctx->engine_pool = ctx->pool;
ctx->req_buffer_size = (32*1024);
ctx->standalone = 1;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
@@ -423,7 +443,7 @@ static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx)
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"H2: hosting engine %s", ctx->engine_id);
}
-    return APR_SUCCESS;
+    return ctx;
}
static int proxy_http2_handler(request_rec *r,
@@ -438,11 +458,8 @@ static int proxy_http2_handler(request_rec *r,
apr_size_t slen;
int is_ssl = 0;
apr_status_t status;
-    conn_rec *c = r->connection;
-    server_rec *s = r->server;
-    apr_pool_t *p = c->pool;
-    apr_uri_t *uri = apr_palloc(p, sizeof(*uri));
    h2_proxy_ctx *ctx;
+    apr_uri_t uri;
int reconnected = 0;
/* find the scheme */
@@ -468,11 +485,11 @@ static int proxy_http2_handler(request_rec *r,
default:
return DECLINED;
}
-    ctx = apr_pcalloc(p, sizeof(*ctx));
-    ctx->owner = c;
+    ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+    ctx->owner = r->connection;
+    ctx->pool = r->pool;
    ctx->rbase = r;
-    ctx->server = s;
+    ctx->server = r->server;
ctx->proxy_func = proxy_func;
ctx->is_ssl = is_ssl;
ctx->worker = worker;
@@ -481,7 +498,7 @@ static int proxy_http2_handler(request_rec *r,
ctx->r_status = HTTP_SERVICE_UNAVAILABLE;
ctx->next = r;
r = NULL;
-    ap_set_module_config(c->conn_config, &proxy_http2_module, ctx);
+    ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
/* scheme says, this is for us. */
apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url);
@@ -493,7 +510,7 @@ run_connect:
* be one still open from another request, or it might fail if the
* worker is stopped or in error. */
if ((status = ap_proxy_acquire_connection(ctx->proxy_func, &ctx->p_conn,
-                                              ctx->worker, s)) != OK) {
+                                              ctx->worker, ctx->server)) != OK) {
goto cleanup;
}
@@ -507,8 +524,8 @@ run_connect:
/* Step One: Determine the URL to connect to (might be a proxy),
* initialize the backend accordingly and determine the server
* port string we can expect in responses. */
-    if ((status = ap_proxy_determine_connection(p, ctx->rbase, conf, worker,
-                                                ctx->p_conn, uri, &locurl,
+    if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker,
+                                                ctx->p_conn, &uri, &locurl,
proxyname, proxyport,
ctx->server_portstr,
sizeof(ctx->server_portstr))) != OK) {
@@ -518,7 +535,7 @@ run_connect:
/* If we are not already hosting an engine, try to push the request
* to an already existing engine or host a new engine here. */
if (!ctx->engine) {
-        push_request_somewhere(ctx);
+        ctx = push_request_somewhere(ctx);
if (ctx->r_status == SUSPENDED) {
/* request was pushed to another engine */
goto cleanup;
@@ -601,7 +618,8 @@ cleanup:
ctx->p_conn = NULL;
}
-    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, "leaving handler");
+    ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
+    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, "leaving handler");
return ctx->r_status;
}
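
The proxy_engine_init hunk above copies the request-scoped h2_proxy_ctx into the engine's pool so it can outlive the request that created it. A stripped-down sketch of that promote-to-a-longer-lived-pool move; proxy_ctx and ctx_promote are illustrative names, and as with any shallow memcpy, pointer members keep pointing at whatever they pointed at before.

/* Sketch of promoting a context from a short-lived pool to a longer-lived one.
 * Field and function names are illustrative, not the module's API. */
#include <string.h>
#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>

typedef struct proxy_ctx {
    apr_pool_t *pool;        /* pool the ctx currently lives in */
    int capacity;
} proxy_ctx;

/* Shallow-copy the ctx into the longer-lived pool, then point it there. */
static proxy_ctx *ctx_promote(proxy_ctx *ctx, apr_pool_t *longer)
{
    proxy_ctx *nctx = apr_pcalloc(longer, sizeof(*nctx));
    memcpy(nctx, ctx, sizeof(*nctx));    /* keep the accumulated state */
    nctx->pool = longer;                 /* but live in the longer pool now */
    return nctx;
}

int main(void)
{
    apr_pool_t *conn_pool, *req_pool;
    proxy_ctx *ctx, *promoted;

    apr_initialize();
    apr_pool_create(&conn_pool, NULL);
    apr_pool_create(&req_pool, conn_pool);

    ctx = apr_pcalloc(req_pool, sizeof(*ctx));       /* request-scoped, like the handler's ctx */
    ctx->pool = req_pool;
    ctx->capacity = 100;

    promoted = ctx_promote(ctx, conn_pool);
    apr_pool_destroy(req_pool);                      /* the request goes away ... */
    printf("capacity after promotion: %d\n", promoted->capacity); /* ... the copy survives */

    apr_pool_destroy(conn_pool);
    apr_terminate();
    return 0;
}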