Sync with v2.0.10 from github:

 * Extensive testing in production done by Alessandro Bianchi (@alexskynet)
   on the v2.0.x versions for stability. Many thanks!
 * Refactored stream response handling to reflect the different phases
   (response/data/trailers) more clearly and to help resolve CPU busy loops.
 * Added more negative tests for handling of errored responses to cover
   edge cases.
 * mod_http2: fixed handling of responses where neither an EOS nor an ERROR
   was received as a cause to reset the stream.
 * mod_proxy_http2: generate error buckets for faulty response bodies, to
   signal failure to the front connection when response headers have already
   been sent.

v2.0.9
--------------------------------------------------------------------------------
 * Fixed a bug where errors during response body handling did not lead to a
   proper RST_STREAM. Instead, processing went into an infinite loop. Extended
   the test cases to catch this condition.

v2.0.8
--------------------------------------------------------------------------------
 * Delaying input setup of a stream until just before processing starts. This
   allows any EOS indicator arriving from the client before that point to take
   effect. Without knowing that a stream has no input, internal processing has
   to simulate chunked encoding. This is not wrong, but somewhat more
   expensive, and mod_security has been reported to be allergic to seeing
   'chunked' on some requests.
   See <https://bz.apache.org/bugzilla/show_bug.cgi?id=66282>.
 * mod_proxy_http2: fixed #235 by no longer forwarding the 'Host:' header when
   the request ':authority' is known. Improved the test case that did not
   catch that the previous 'fix' was incorrect.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1904522 13f79535-47bb-0310-9956-ffa450edef68
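The Host:/:authority handling referenced in the #235 fix follows RFC 9113, section 8.3.1: when the ':authority' pseudo-header is present it takes precedence, and a conflicting 'Host:' header should not be forwarded. The sketch below is only a minimal, self-contained illustration of that precedence rule; it is not the module's code, and the struct and function names (toy_request, resolve_host) are made up for this example.

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified request representation for illustration only;
 * mod_http2 itself keeps request headers in an apr_table_t. */
struct toy_request {
    const char *authority;   /* value of the :authority pseudo-header, or NULL */
    char host[256];          /* value of the Host: header, empty string if absent */
};

/* Apply the precedence described above: a present :authority wins and
 * replaces any differing Host:; without :authority, a Host: header must
 * already be there. Returns 0 on success, -1 if neither is available. */
static int resolve_host(struct toy_request *req)
{
    if (req->authority && *req->authority) {
        if (*req->host && strcmp(req->host, req->authority) != 0) {
            fprintf(stderr, "replacing 'Host: %s' with ':authority: %s'\n",
                    req->host, req->authority);
        }
        snprintf(req->host, sizeof(req->host), "%s", req->authority);
        return 0;
    }
    return *req->host ? 0 : -1;
}

int main(void)
{
    struct toy_request req = { "example.org:443", "other.example" };
    if (resolve_host(&req) == 0) {
        printf("effective Host: %s\n", req.host);  /* prints example.org:443 */
    }
    return 0;
}
```

In the patch below the same rule appears twice: mod_proxy_http2 drops 'Host:' before forwarding when ':authority' is set, and the new assign_headers() helper overwrites 'Host:' with ':authority' when building the internal request_rec.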
@@ -156,7 +156,6 @@ struct h2_request {
     apr_table_t *headers;
 
     apr_time_t request_time;
-    unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
     apr_off_t raw_bytes;      /* RAW network bytes that generated this request - if known. */
     int http_status;          /* Store a possible HTTP status code that gets
                                * defined before creating the dummy HTTP/1.1
@@ -529,7 +529,10 @@ apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from,
     space_left = calc_space_left(beam);
     while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) {
         rv = append_bucket(beam, sender_bb, block, &space_left, pwritten);
-        if (!beam->aborted && APR_EAGAIN == rv) {
+        if (beam->aborted) {
+            goto cleanup;
+        }
+        else if (APR_EAGAIN == rv) {
             /* bucket was not added, as beam buffer has no space left.
              * Trigger event callbacks, so receiver can know there is something
              * to receive before we do a conditional wait. */
@@ -548,6 +551,7 @@ apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from,
         }
     }
 
+cleanup:
     if (beam->send_cb && !buffer_is_empty(beam)) {
         beam->send_cb(beam->send_ctx, beam);
     }
@@ -464,13 +464,18 @@ static int c2_post_read_request(request_rec *r)
 {
     h2_conn_ctx_t *conn_ctx;
     conn_rec *c2 = r->connection;
+    apr_time_t timeout;
 
     if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) {
         return DECLINED;
     }
     /* Now that the request_rec is fully initialized, set relevant params */
     conn_ctx->server = r->server;
-    h2_conn_ctx_set_timeout(conn_ctx, r->server->timeout);
+    timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT);
+    if (timeout <= 0) {
+        timeout = r->server->timeout;
+    }
+    h2_conn_ctx_set_timeout(conn_ctx, timeout);
     /* We only handle this one request on the connection and tell everyone
      * that there is no need to keep it "clean" if something fails. Also,
      * this prevents mod_reqtimeout from doing funny business with monitoring
@@ -651,8 +656,10 @@ static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
     const h2_request *req = conn_ctx->request;
     conn_state_t *cs = c->cs;
     request_rec *r;
+    const char *tenc;
+    apr_time_t timeout;
 
-    r = h2_create_request_rec(conn_ctx->request, c);
+    r = h2_create_request_rec(conn_ctx->request, c, conn_ctx->beam_in == NULL);
     if (!r) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                       "h2_c2(%s-%d): create request_rec failed, r=NULL",
@@ -666,13 +673,18 @@ static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
         goto cleanup;
     }
 
+    tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+    conn_ctx->input_chunked = tenc && ap_is_chunked(r->pool, tenc);
 
     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                   "h2_c2(%s-%d): created request_rec for %s",
                   conn_ctx->id, conn_ctx->stream_id, r->the_request);
     conn_ctx->server = r->server;
-    /* the request_rec->server carries the timeout value that applies */
-    h2_conn_ctx_set_timeout(conn_ctx, r->server->timeout);
+    timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT);
+    if (timeout <= 0) {
+        timeout = r->server->timeout;
+    }
+    h2_conn_ctx_set_timeout(conn_ctx, timeout);
 
     if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
@@ -635,7 +635,7 @@ apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb)
              */
             int result = ap_map_http_request_error(conn_ctx->last_err,
                                                    HTTP_INTERNAL_SERVER_ERROR);
-            request_rec *r = h2_create_request_rec(conn_ctx->request, f->c);
+            request_rec *r = h2_create_request_rec(conn_ctx->request, f->c, 1);
             ap_die((result >= 400)? result : HTTP_INTERNAL_SERVER_ERROR, r);
             b = ap_bucket_eor_create(f->c->bucket_alloc, r);
             APR_BRIGADE_INSERT_TAIL(bb, b);
@@ -918,7 +918,7 @@ apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
                   "readbytes=%ld, exp=%d",
                   conn_ctx->id, conn_ctx->stream_id, mode, block,
                   (long)readbytes, r->expecting_100);
-    if (!conn_ctx->request->chunked) {
+    if (!conn_ctx->input_chunked) {
         status = ap_get_brigade(f->next, bb, mode, block, readbytes);
         /* pipe data through, just take care of trailers */
         for (b = APR_BRIGADE_FIRST(bb);
@@ -574,6 +574,9 @@ static const char *h2_conf_set_max_worker_idle_limit(cmd_parms *cmd,
     if (rv != APR_SUCCESS) {
         return "Invalid idle limit value";
     }
+    if (timeout <= 0) {
+        timeout = DEF_VAL;
+    }
     CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_WORKER_IDLE_LIMIT, timeout);
     return NULL;
 }
@@ -53,6 +53,7 @@ struct h2_conn_ctx_t {
     const struct h2_request *request; /* c2: the request to process */
     struct h2_bucket_beam *beam_out;  /* c2: data out, created from req_pool */
     struct h2_bucket_beam *beam_in;   /* c2: data in or NULL, borrowed from request stream */
+    unsigned int input_chunked;       /* c2: if input needs HTTP/1.1 chunking applied */
 
     apr_file_t *pipe_in[2];           /* c2: input produced notification pipe */
     apr_pollfd_t pfd;                 /* c1: poll socket input, c2: NUL */
@@ -665,6 +665,10 @@ static apr_status_t c1_process_stream(h2_mplx *m,
                       H2_STRM_MSG(stream, "process, ready already"));
         }
         else {
+            /* last chance to set anything up before stream is processed
+             * by worker threads. */
+            rv = h2_stream_prepare_processing(stream);
+            if (APR_SUCCESS != rv) goto cleanup;
             h2_iq_add(m->q, stream->id, cmp, session);
             ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
                           H2_STRM_MSG(stream, "process, added to q"));
@@ -791,23 +795,21 @@ static apr_status_t c2_setup_io(h2_mplx *m, conn_rec *c2, h2_stream *stream, h2_
         h2_beam_on_was_empty(conn_ctx->beam_out, c2_beam_output_write_notify, c2);
     }
 
-    conn_ctx->beam_in = stream->input;
-    h2_beam_on_send(stream->input, c2_beam_input_write_notify, c2);
-    h2_beam_on_received(stream->input, c2_beam_input_read_notify, c2);
-    h2_beam_on_consumed(stream->input, c1_input_consumed, stream);
-
+    memset(&conn_ctx->pipe_in, 0, sizeof(conn_ctx->pipe_in));
+    if (stream->input) {
+        conn_ctx->beam_in = stream->input;
+        h2_beam_on_send(stream->input, c2_beam_input_write_notify, c2);
+        h2_beam_on_received(stream->input, c2_beam_input_read_notify, c2);
+        h2_beam_on_consumed(stream->input, c1_input_consumed, stream);
 #if H2_USE_PIPES
-    if (!conn_ctx->pipe_in[H2_PIPE_OUT]) {
         action = "create input write pipe";
         rv = apr_file_pipe_create_pools(&conn_ctx->pipe_in[H2_PIPE_OUT],
                                         &conn_ctx->pipe_in[H2_PIPE_IN],
                                         APR_READ_BLOCK,
                                         c2->pool, c2->pool);
         if (APR_SUCCESS != rv) goto cleanup;
-    }
-#else
-    memset(&conn_ctx->pipe_in, 0, sizeof(conn_ctx->pipe_in));
 #endif
+    }
 
 cleanup:
     stream->output = (APR_SUCCESS == rv)? conn_ctx->beam_out : NULL;
@@ -169,7 +169,7 @@ void h2_mplx_c1_process(h2_mplx *m,
 apr_status_t h2_mplx_c1_reprioritize(h2_mplx *m, h2_stream_pri_cmp_fn *cmp,
                                      struct h2_session *session);
 
-typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream);
+typedef void stream_ev_callback(void *ctx, struct h2_stream *stream);
 
 /**
  * Poll the primary connection for input and the active streams for output.
@@ -20,6 +20,7 @@
 
 #include <mpm_common.h>
 #include <httpd.h>
+#include <http_protocol.h>
 #include <mod_proxy.h>
 
 #include "mod_http2.h"
@@ -854,6 +855,11 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
                       "authority=%s from uri.hostname=%s and uri.port=%d",
                       authority, puri.hostname, puri.port);
     }
+    /* See #235, we use only :authority when available and remove Host:
+     * since differing values are not acceptable, see RFC 9113 ch. 8.3.1 */
+    if (authority && strlen(authority)) {
+        apr_table_unset(r->headers_in, "Host");
+    }
 
     /* we need this for mapping relative uris in headers ("Link") back
      * to local uris */
@@ -861,7 +867,6 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
     stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority);
     path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
 
-
     h2_proxy_req_make(stream->req, stream->pool, r->method, scheme,
                       authority, path, r->headers_in);
 
@@ -890,7 +895,6 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
                          r->server->server_hostname);
         }
     }
-    apr_table_unset(r->headers_in, "Host");
 
     /* Tuck away all already existing cookies */
     stream->saves = apr_table_make(r->pool, 2);
@@ -1350,31 +1354,40 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id,
                            const char *msg)
 {
     h2_proxy_stream *stream;
+    apr_bucket *b;
 
     stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
     if (stream) {
+        /* if the stream's connection is aborted, do not send anything
+         * more on it. */
+        apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL;
         int touched = (stream->data_sent ||
                        stream_id <= session->last_stream_id);
-        apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL;
+        if (!session->c->aborted) {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
                       "h2_proxy_sesssion(%s): stream(%d) closed "
                       "(touched=%d, error=%d)",
                       session->id, stream_id, touched, stream->error_code);
 
         if (status != APR_SUCCESS) {
-            stream->r->status = 500;
-        }
-        else if (!stream->data_received) {
-            apr_bucket *b;
-            /* if the response had no body, this is the time to flush
-             * an empty brigade which will also write the response headers */
-            h2_proxy_stream_end_headers_out(stream);
-            stream->data_received = 1;
-            b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+            b = ap_bucket_error_create(HTTP_SERVICE_UNAVAILABLE, NULL, stream->r->pool,
+                                       stream->r->connection->bucket_alloc);
             APR_BRIGADE_INSERT_TAIL(stream->output, b);
             b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
             APR_BRIGADE_INSERT_TAIL(stream->output, b);
             ap_pass_brigade(stream->r->output_filters, stream->output);
+        }
+        else if (!stream->data_received) {
+            /* if the response had no body, this is the time to flush
+             * an empty brigade which will also write the response headers */
+            h2_proxy_stream_end_headers_out(stream);
+            stream->data_received = 1;
+            b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
+            APR_BRIGADE_INSERT_TAIL(stream->output, b);
+            b = apr_bucket_eos_create(stream->r->connection->bucket_alloc);
+            APR_BRIGADE_INSERT_TAIL(stream->output, b);
+            ap_pass_brigade(stream->r->output_filters, stream->output);
+        }
         }
 
         stream->state = H2_STREAM_ST_CLOSED;
@@ -351,7 +351,7 @@ static int add_push(link_ctx *ctx)
             req = h2_request_create(0, ctx->pool, method, ctx->req->scheme,
                                     ctx->req->authority, path, headers);
             /* atm, we do not push on pushes */
-            h2_request_end_headers(req, ctx->pool, 1, 0);
+            h2_request_end_headers(req, ctx->pool, 0);
             push->req = req;
             if (has_param(ctx, "critical")) {
                 h2_priority *prio = apr_pcalloc(ctx->pool, sizeof(*prio));
@@ -185,14 +185,13 @@ apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
     return status;
 }
 
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes)
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+                                    size_t raw_bytes)
 {
-    const char *s;
-
-    /* rfc7540, ch. 8.1.2.3:
-     * - if we have :authority, it overrides any Host header
-     * - :authority MUST be omitted when converting h1->h2, so we
-     *   might get a stream without, but then Host needs to be there */
+    /* rfc7540, ch. 8.1.2.3: without :authority, Host: must be there */
+    if (req->authority && !strlen(req->authority)) {
+        req->authority = NULL;
+    }
     if (!req->authority) {
         const char *host = apr_table_get(req->headers, "Host");
         if (!host) {
@@ -203,40 +202,6 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos,
     else {
         apr_table_setn(req->headers, "Host", req->authority);
     }
-
-#if AP_HAS_RESPONSE_BUCKETS
-    if (eos) {
-        s = apr_table_get(req->headers, "Content-Length");
-        if (!s && apr_table_get(req->headers, "Content-Type")) {
-            /* If we have a content-type, but already seen eos, no more
-             * data will come. Signal a zero content length explicitly.
-             */
-            apr_table_setn(req->headers, "Content-Length", "0");
-        }
-    }
-#else /* AP_HAS_RESPONSE_BUCKETS */
-    s = apr_table_get(req->headers, "Content-Length");
-    if (!s) {
-        /* HTTP/2 does not need a Content-Length for framing, but our
-         * internal request processing is used to HTTP/1.1, so we
-         * need to either add a Content-Length or a Transfer-Encoding
-         * if any content can be expected. */
-        if (!eos) {
-            /* We have not seen a content-length and have no eos,
-             * simulate a chunked encoding for our HTTP/1.1 infrastructure,
-             * in case we have "H2SerializeHeaders on" here
-             */
-            req->chunked = 1;
-            apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
-        }
-        else if (apr_table_get(req->headers, "Content-Type")) {
-            /* If we have a content-type, but already seen eos, no more
-             * data will come. Signal a zero content length explicitly.
-             */
-            apr_table_setn(req->headers, "Content-Length", "0");
-        }
-    }
-#endif /* else AP_HAS_RESPONSE_BUCKETS */
     req->raw_bytes += raw_bytes;
 
     return APR_SUCCESS;
@@ -333,7 +298,59 @@ apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r)
 }
 #endif
 
-request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
+static void assign_headers(request_rec *r, const h2_request *req,
+                           int no_body)
+{
+    const char *cl;
+
+    r->headers_in = apr_table_copy(r->pool, req->headers);
+    if (req->authority) {
+        /* for internal handling, we have to simulate that :authority
+         * came in as Host:, RFC 9113 ch. says that mismatches between
+         * :authority and Host: SHOULD be rejected as malformed. However,
+         * we are more lenient and just replace any Host: if we have
+         * an :authority.
+         */
+        const char *orig_host = apr_table_get(req->headers, "Host");
+        if (orig_host && strcmp(req->authority, orig_host)) {
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO()
+                          "overwriting 'Host: %s' with :authority: %s'",
+                          orig_host, req->authority);
+            apr_table_setn(r->subprocess_env, "H2_ORIGINAL_HOST", orig_host);
+        }
+        apr_table_setn(r->headers_in, "Host", req->authority);
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+                      "set 'Host: %s' from :authority", req->authority);
+    }
+
+    cl = apr_table_get(req->headers, "Content-Length");
+    if (no_body) {
+        if (!cl && apr_table_get(req->headers, "Content-Type")) {
+            /* If we have a content-type, but already seen eos, no more
+             * data will come. Signal a zero content length explicitly.
+             */
+            apr_table_setn(req->headers, "Content-Length", "0");
+        }
+    }
+#if !AP_HAS_RESPONSE_BUCKETS
+    else if (!cl) {
+        /* there may be a body and we have internal HTTP/1.1 processing.
+         * If the Content-Length is unspecified, we MUST simulate
+         * chunked Transfer-Encoding.
+         *
+         * HTTP/2 does not need a Content-Length for framing. Ideally
+         * all clients set the EOS flag on the header frame if they
+         * do not intent to send a body. However, forwarding proxies
+         * might just no know at the time and send an empty DATA
+         * frame with EOS much later.
+         */
+        apr_table_mergen(r->headers_in, "Transfer-Encoding", "chunked");
+    }
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+}
+
+request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c,
+                                   int no_body)
 {
     int access_status = HTTP_OK;
 
@@ -344,6 +361,7 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
 #endif
 
 #if AP_MODULE_MAGIC_AT_LEAST(20120211, 107)
+    assign_headers(r, req, no_body);
     ap_run_pre_read_request(r, c);
 
     /* Time to populate r with the data we have. */
@@ -371,8 +389,6 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
         r->the_request = apr_psprintf(r->pool, "%s / HTTP/2.0", req->method);
     }
 
-    r->headers_in = apr_table_copy(r->pool, req->headers);
-
     /* Start with r->hostname = NULL, ap_check_request_header() will get it
      * form Host: header, otherwise we get complains about port numbers.
      */
@@ -397,7 +413,7 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
 {
     const char *s;
 
-    r->headers_in = apr_table_clone(r->pool, req->headers);
+    assign_headers(r, req, no_body);
     ap_run_pre_read_request(r, c);
 
     /* Time to populate r with the data we have. */
@@ -35,7 +35,8 @@ apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool,
                                     const char *name, size_t nlen,
                                     const char *value, size_t vlen);
 
-apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, size_t raw_bytes);
+apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool,
+                                    size_t raw_bytes);
 
 h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
 
@@ -45,9 +46,11 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
  *
  * @param req the h2 request to process
  * @param conn the connection to process the request on
+ * @param no_body != 0 iff the request is known to have no body
  * @return the request_rec representing the request
  */
-request_rec *h2_create_request_rec(const h2_request *req, conn_rec *conn);
+request_rec *h2_create_request_rec(const h2_request *req, conn_rec *conn,
+                                   int no_body);
 
 #if AP_HAS_RESPONSE_BUCKETS
 apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r);
|
@@ -89,7 +89,7 @@ void h2_session_event(h2_session *session, h2_session_event_t ev,
|
|||||||
|
|
||||||
static int rst_unprocessed_stream(h2_stream *stream, void *ctx)
|
static int rst_unprocessed_stream(h2_stream *stream, void *ctx)
|
||||||
{
|
{
|
||||||
int unprocessed = (!h2_stream_was_closed(stream)
|
int unprocessed = (!h2_stream_is_at_or_past(stream, H2_SS_CLOSED)
|
||||||
&& (H2_STREAM_CLIENT_INITIATED(stream->id)?
|
&& (H2_STREAM_CLIENT_INITIATED(stream->id)?
|
||||||
(!stream->session->local.accepting
|
(!stream->session->local.accepting
|
||||||
&& stream->id > stream->session->local.accepted_max)
|
&& stream->id > stream->session->local.accepted_max)
|
||||||
@@ -1298,58 +1298,37 @@ cleanup:
 /**
  * A streams input state has changed.
  */
-static apr_status_t on_stream_input(void *ctx, h2_stream *stream)
+static void on_stream_input(void *ctx, h2_stream *stream)
 {
     h2_session *session = ctx;
-    apr_status_t rv = APR_EAGAIN;
 
     ap_assert(stream);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
                   H2_STRM_MSG(stream, "on_input change"));
+    update_child_status(session, SERVER_BUSY_READ, "read", stream);
     if (stream->id == 0) {
         /* input on primary connection available? read */
-        rv = h2_c1_read(session);
+        h2_c1_read(session);
     }
     else {
-        ap_assert(stream->input);
-        if (stream->state == H2_SS_CLOSED_L
-            && !h2_mplx_c1_stream_is_running(session->mplx, stream)) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
-                          H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
-            nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
-                                      stream->id, NGHTTP2_NO_ERROR);
-            update_child_status(session, SERVER_BUSY_WRITE, "reset", stream);
-            goto cleanup;
-        }
-        update_child_status(session, SERVER_BUSY_READ, "read", stream);
-        h2_beam_report_consumption(stream->input);
-        if (stream->state == H2_SS_CLOSED_R) {
-            /* TODO: remove this stream from input polling */
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
-                          H2_STRM_MSG(stream, "should not longer be input polled"));
-        }
+        h2_stream_on_input_change(stream);
     }
-
-cleanup:
-    return rv;
 }
 
 /**
  * A streams output state has changed.
  */
-static apr_status_t on_stream_output(void *ctx, h2_stream *stream)
+static void on_stream_output(void *ctx, h2_stream *stream)
 {
     h2_session *session = ctx;
 
     ap_assert(stream);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
                   H2_STRM_MSG(stream, "on_output change"));
-    if (stream->id == 0) {
-        /* we dont poll output of stream 0, this should not be called */
-        return APR_SUCCESS;
+    if (stream->id != 0) {
+        update_child_status(session, SERVER_BUSY_WRITE, "write", stream);
+        h2_stream_on_output_change(stream);
     }
-    update_child_status(session, SERVER_BUSY_WRITE, "write", stream);
-    return h2_stream_read_output(stream);
 }
 
 
@@ -1878,6 +1857,9 @@ apr_status_t h2_session_process(h2_session *session, int async)
             apr_time_t timeout = (session->open_streams == 0)?
                 session->s->keep_alive_timeout :
                 session->s->timeout;
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+                          H2_SSSN_MSG(session, "polling timeout=%d"),
+                          (int)apr_time_sec(timeout));
             status = h2_mplx_c1_poll(session->mplx, timeout,
                                      on_stream_input,
                                      on_stream_output, session);
@@ -1922,10 +1904,16 @@ apr_status_t h2_session_process(h2_session *session, int async)
             if (session->open_streams == 0) {
                 h2_session_dispatch_event(session, H2_SESSION_EV_NO_MORE_STREAMS,
                                           0, "streams really done");
+                if (session->state != H2_SESSION_ST_WAIT) {
+                    break;
+                }
             }
             /* No IO happening and input is exhausted. Make sure we have
              * flushed any possibly pending output and then wait with
              * the c1 connection timeout for sth to happen in our c1/c2 sockets/pipes */
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
+                          H2_SSSN_MSG(session, "polling timeout=%d, open_streams=%d"),
+                          (int)apr_time_sec(session->s->timeout), session->open_streams);
             status = h2_mplx_c1_poll(session->mplx, session->s->timeout,
                                      on_stream_input, on_stream_output, session);
             if (APR_STATUS_IS_TIMEUP(status)) {
@@ -196,33 +196,45 @@ static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag)
     }
 }
 
-apr_status_t h2_stream_setup_input(h2_stream *stream)
+static void stream_setup_input(h2_stream *stream)
 {
-    /* already done? */
-    if (stream->input != NULL) goto cleanup;
-
+    if (stream->input != NULL) return;
+    ap_assert(!stream->input_closed);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
                   H2_STRM_MSG(stream, "setup input beam"));
     h2_beam_create(&stream->input, stream->session->c1,
                    stream->pool, stream->id,
                    "input", 0, stream->session->s->timeout);
-cleanup:
+}
+
+apr_status_t h2_stream_prepare_processing(h2_stream *stream)
+{
+    /* Right before processing starts, last chance to decide if
+     * there is need to an input beam. */
+    if (!stream->input_closed) {
+        stream_setup_input(stream);
+    }
     return APR_SUCCESS;
 }
 
+static int input_buffer_is_empty(h2_stream *stream)
+{
+    return !stream->in_buffer || APR_BRIGADE_EMPTY(stream->in_buffer);
+}
+
 static apr_status_t input_flush(h2_stream *stream)
 {
     apr_status_t status = APR_SUCCESS;
     apr_off_t written;
 
-    if (!stream->in_buffer) goto cleanup;
+    if (input_buffer_is_empty(stream)) goto cleanup;
 
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
                   H2_STRM_MSG(stream, "flush input"));
     status = h2_beam_send(stream->input, stream->session->c1,
                           stream->in_buffer, APR_BLOCK_READ, &written);
     stream->in_last_write = apr_time_now();
-    if (APR_SUCCESS != status && stream->state == H2_SS_CLOSED_L) {
+    if (APR_SUCCESS != status && h2_stream_is_at(stream, H2_SS_CLOSED_L)) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c1,
                       H2_STRM_MSG(stream, "send input error"));
         h2_stream_dispatch(stream, H2_SEV_IN_ERROR);
@@ -234,6 +246,7 @@ cleanup:
 static void input_append_bucket(h2_stream *stream, apr_bucket *b)
 {
     if (!stream->in_buffer) {
+        stream_setup_input(stream);
         stream->in_buffer = apr_brigade_create(
             stream->pool, stream->session->c1->bucket_alloc);
     }
@@ -243,6 +256,7 @@ static void input_append_bucket(h2_stream *stream, apr_bucket *b)
 static void input_append_data(h2_stream *stream, const char *data, apr_size_t len)
 {
     if (!stream->in_buffer) {
+        stream_setup_input(stream);
         stream->in_buffer = apr_brigade_create(
             stream->pool, stream->session->c1->bucket_alloc);
     }
@@ -278,12 +292,14 @@ static apr_status_t close_input(h2_stream *stream)
     }
 
     stream->input_closed = 1;
-    b = apr_bucket_eos_create(c->bucket_alloc);
-    input_append_bucket(stream, b);
-    input_flush(stream);
-    h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
-                  H2_STRM_MSG(stream, "input flush + EOS"));
+    if (stream->input) {
+        b = apr_bucket_eos_create(c->bucket_alloc);
+        input_append_bucket(stream, b);
+        input_flush(stream);
+        h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+                      H2_STRM_MSG(stream, "input flush + EOS"));
+    }
 
 cleanup:
     return rv;
@@ -467,7 +483,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_
 
     case NGHTTP2_HEADERS:
         eos = (flags & NGHTTP2_FLAG_END_STREAM);
-        if (stream->state == H2_SS_OPEN) {
+        if (h2_stream_is_at_or_past(stream, H2_SS_OPEN)) {
             /* trailer HEADER */
             if (!eos) {
                 h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
@@ -546,7 +562,6 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
                                       stream->session->ngh2, stream->id);
     }
 #endif
-    h2_stream_setup_input(stream);
     ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
                   H2_STRM_LOG(APLOGNO(03082), stream, "created"));
     on_state_enter(stream);
@@ -776,8 +791,10 @@ apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
     int is_http_or_https;
     h2_request *req = stream->rtmp;
 
-    status = h2_request_end_headers(req, stream->pool, eos, raw_bytes);
-    if (APR_SUCCESS != status || req->http_status != H2_HTTP_STATUS_UNSET) goto cleanup;
+    status = h2_request_end_headers(req, stream->pool, raw_bytes);
+    if (APR_SUCCESS != status || req->http_status != H2_HTTP_STATUS_UNSET) {
+        goto cleanup;
+    }
 
     /* keep on returning APR_SUCCESS for error responses, so that we
      * send it and do not RST the stream.
@@ -903,6 +920,23 @@ static apr_bucket *get_first_response_bucket(apr_bucket_brigade *bb)
     return NULL;
 }
 
+static void stream_do_error_bucket(h2_stream *stream, apr_bucket *b)
+{
+    int err = ((ap_bucket_error *)(b->data))->status;
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
+                  H2_STRM_MSG(stream, "error bucket received, err=%d"), err);
+    if (err >= 500) {
+        err = NGHTTP2_INTERNAL_ERROR;
+    }
+    else if (err >= 400) {
+        err = NGHTTP2_STREAM_CLOSED;
+    }
+    else {
+        err = NGHTTP2_PROTOCOL_ERROR;
+    }
+    h2_stream_rst(stream, err);
+}
+
 static apr_status_t buffer_output_receive(h2_stream *stream)
 {
     apr_status_t rv = APR_EAGAIN;
@@ -913,6 +947,10 @@ static apr_status_t buffer_output_receive(h2_stream *stream)
     if (!stream->output) {
         goto cleanup;
     }
+    if (stream->rst_error) {
+        rv = APR_ECONNRESET;
+        goto cleanup;
+    }
 
     if (!stream->out_buffer) {
         stream->out_buffer = apr_brigade_create(stream->pool, c1->bucket_alloc);
@@ -951,7 +989,6 @@ static apr_status_t buffer_output_receive(h2_stream *stream)
                 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
                               H2_STRM_MSG(stream, "out_buffer, receive unsuccessful"));
             }
-            goto cleanup;
         }
     }
 
@@ -968,6 +1005,10 @@ static apr_status_t buffer_output_receive(h2_stream *stream)
             else if (APR_BUCKET_IS_EOS(b)) {
                 stream->output_eos = 1;
             }
+            else if (AP_BUCKET_IS_ERROR(b)) {
+                stream_do_error_bucket(stream, b);
+                break;
+            }
         }
         else if (b->length == 0) { /* zero length data */
             APR_BUCKET_REMOVE(b);
@@ -1008,215 +1049,66 @@ apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
     return rv;
 }
 
-static apr_status_t buffer_output_process_headers(h2_stream *stream)
+static apr_status_t stream_do_trailers(h2_stream *stream)
 {
     conn_rec *c1 = stream->session->c1;
-    apr_status_t rv = APR_EAGAIN;
-    int ngrv = 0, is_empty;
+    int ngrv;
     h2_ngheader *nh = NULL;
     apr_bucket *b, *e;
 #if AP_HAS_RESPONSE_BUCKETS
-    ap_bucket_response *resp = NULL;
     ap_bucket_headers *headers = NULL;
 #else
-    h2_headers *headers = NULL, *resp = NULL;
+    h2_headers *headers = NULL;
 #endif
+    apr_status_t rv;
 
-    if (!stream->out_buffer) goto cleanup;
+    ap_assert(stream->response);
+    ap_assert(stream->out_buffer);
 
     b = APR_BRIGADE_FIRST(stream->out_buffer);
     while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
         e = APR_BUCKET_NEXT(b);
         if (APR_BUCKET_IS_METADATA(b)) {
 #if AP_HAS_RESPONSE_BUCKETS
-            if (AP_BUCKET_IS_RESPONSE(b)) {
-                resp = b->data;
-                APR_BUCKET_REMOVE(b);
-                apr_bucket_destroy(b);
-                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
-                              H2_STRM_MSG(stream, "process response %d"),
-                              resp->status);
-                b = e;
-                break;
-            }
-            else if (AP_BUCKET_IS_HEADERS(b)) {
+            if (AP_BUCKET_IS_HEADERS(b)) {
                 headers = b->data;
-                APR_BUCKET_REMOVE(b);
-                apr_bucket_destroy(b);
-                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
-                              H2_STRM_MSG(stream, "process headers"));
-                b = e;
-                break;
-            }
 #else /* AP_HAS_RESPONSE_BUCKETS */
             if (H2_BUCKET_IS_HEADERS(b)) {
                 headers = h2_bucket_headers_get(b);
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
                 APR_BUCKET_REMOVE(b);
                 apr_bucket_destroy(b);
                 ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
-                              H2_STRM_MSG(stream, "process headers, response %d"),
-                              headers->status);
-                if (!stream->response) {
-                    resp = headers;
-                    headers = NULL;
-                }
-                b = e;
+                              H2_STRM_MSG(stream, "process trailers"));
                 break;
             }
-#endif /* else AP_HAS_RESPONSE_BUCKETS */
+            else if (APR_BUCKET_IS_EOS(b)) {
+                break;
+            }
         }
         else {
-            if (!stream->response) {
-                /* data buckets before response headers, an error */
-                rv = APR_EINVAL;
-            }
-            /* data bucket, need to send those before processing
-             * any subsequent headers (trailers) */
-            goto cleanup;
+            break;
         }
         b = e;
     }
 
-    if (resp) {
-        nghttp2_data_provider provider, *pprovider = NULL;
-
-        if (resp->status < 100) {
-            h2_stream_rst(stream, resp->status);
-            goto cleanup;
-        }
-
-        if (resp->status == HTTP_FORBIDDEN && resp->notes) {
-            const char *cause = apr_table_get(resp->notes, "ssl-renegotiate-forbidden");
-            if (cause) {
-                /* This request triggered a TLS renegotiation that is not allowed
-                 * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
-                 */
-                ap_log_cerror(APLOG_MARK, APLOG_DEBUG, resp->status, c1,
-                              H2_STRM_LOG(APLOGNO(03061), stream,
-                              "renegotiate forbidden, cause: %s"), cause);
-                h2_stream_rst(stream, H2_ERR_HTTP_1_1_REQUIRED);
-                goto cleanup;
-            }
-        }
-
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
-                      H2_STRM_LOG(APLOGNO(03073), stream,
-                      "submit response %d"), resp->status);
-
-        /* If this stream is not a pushed one itself,
-         * and HTTP/2 server push is enabled here,
-         * and the response HTTP status is not sth >= 400,
-         * and the remote side has pushing enabled,
-         * -> find and perform any pushes on this stream
-         *    *before* we submit the stream response itself.
-         *    This helps clients avoid opening new streams on Link
-         *    resp that get pushed right afterwards.
-         *
-         * *) the response code is relevant, as we do not want to
-         *    make pushes on 401 or 403 codes and friends.
-         *    And if we see a 304, we do not push either
-         *    as the client, having this resource in its cache, might
-         *    also have the pushed ones as well.
-         */
-        if (!stream->initiated_on
-            && !stream->response
-            && stream->request && stream->request->method
-            && !strcmp("GET", stream->request->method)
-            && (resp->status < 400)
-            && (resp->status != 304)
-            && h2_session_push_enabled(stream->session)) {
-            /* PUSH is possible and enabled on server, unless the request
-             * denies it, submit resources to push */
-            const char *s = apr_table_get(resp->notes, H2_PUSH_MODE_NOTE);
-            if (!s || strcmp(s, "0")) {
-                h2_stream_submit_pushes(stream, resp);
-            }
-        }
-
-        if (!stream->pref_priority) {
-            stream->pref_priority = h2_stream_get_priority(stream, resp);
-        }
-        h2_session_set_prio(stream->session, stream, stream->pref_priority);
-
-        if (resp->status == 103
-            && !h2_config_sgeti(stream->session->s, H2_CONF_EARLY_HINTS)) {
-            /* suppress sending this to the client, it might have triggered
-             * pushes and served its purpose nevertheless */
-            rv = APR_SUCCESS;
-            goto cleanup;
-        }
-        if (resp->status >= 200) {
-            stream->response = resp;
-        }
-
-        /* Do we know if this stream has no response body? */
-        is_empty = 0;
-        while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
-            if (APR_BUCKET_IS_METADATA(b)) {
-#if AP_HAS_RESPONSE_BUCKETS
-                if (AP_BUCKET_IS_HEADERS(b)) {
-                    break;
-                }
-#else
-                if (H2_BUCKET_IS_HEADERS(b)) {
-                    break;
-                }
-#endif
-                else if (APR_BUCKET_IS_EOS(b)) {
-                    is_empty = 1;
-                    break;
-                }
-            }
-            else { /* data, not empty */
-                break;
-            }
-            b = APR_BUCKET_NEXT(b);
-        }
-
-        if (!is_empty) {
-            memset(&provider, 0, sizeof(provider));
-            provider.source.fd = stream->id;
-            provider.read_callback = stream_data_cb;
-            pprovider = &provider;
-        }
-
-        rv = h2_res_create_ngheader(&nh, stream->pool, resp);
-        if (APR_SUCCESS != rv) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
-                          H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
-            h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
-            goto cleanup;
-        }
-        ngrv = nghttp2_submit_response(stream->session->ngh2, stream->id,
-                                       nh->nv, nh->nvlen, pprovider);
-        if (stream->initiated_on) {
-            ++stream->session->pushes_submitted;
-        }
-        else {
-            ++stream->session->responses_submitted;
-        }
+    if (!headers) {
+        rv = APR_EAGAIN;
+        goto cleanup;
     }
-    else if (headers) {
-        if (!stream->response) {
-            h2_stream_rst(stream, HTTP_INTERNAL_SERVER_ERROR);
-            goto cleanup;
-        }
-        rv = h2_res_create_ngtrailer(&nh, stream->pool, headers);
-        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
-                      H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
-                      (int)nh->nvlen);
-        if (APR_SUCCESS != rv) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
-                          H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
-            h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
-            goto cleanup;
-        }
-
-        ngrv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, nh->nv, nh->nvlen);
-        stream->sent_trailers = 1;
-    }
 
-cleanup:
+    rv = h2_res_create_ngtrailer(&nh, stream->pool, headers);
+    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+                  H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
+                  (int)nh->nvlen);
+    if (APR_SUCCESS != rv) {
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+                      H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
+        h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+        goto cleanup;
+    }
+
+    ngrv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, nh->nv, nh->nvlen);
     if (nghttp2_is_fatal(ngrv)) {
         rv = APR_EGENERAL;
         h2_session_dispatch_event(stream->session,
@@ -1225,6 +1117,9 @@ cleanup:
                                   APLOGNO(02940) "submit_response: %s",
                                   nghttp2_strerror(rv));
     }
+    stream->sent_trailers = 1;
+
+cleanup:
     return rv;
 }
 
@@ -1290,12 +1185,26 @@ int h2_stream_is_ready(h2_stream *stream)
     return 0;
 }
 
-int h2_stream_was_closed(const h2_stream *stream)
+int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state)
 {
-    switch (stream->state) {
+    return stream->state == state;
+}
+
+int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state)
+{
+    switch (state) {
+        case H2_SS_IDLE:
+            return 1; /* by definition */
+        case H2_SS_RSVD_R: /*fall through*/
+        case H2_SS_RSVD_L: /*fall through*/
+        case H2_SS_OPEN:
+            return stream->state == state || stream->state >= H2_SS_OPEN;
+        case H2_SS_CLOSED_R: /*fall through*/
+        case H2_SS_CLOSED_L: /*fall through*/
         case H2_SS_CLOSED:
+            return stream->state == state || stream->state >= H2_SS_CLOSED;
         case H2_SS_CLEANUP:
-            return 1;
+            return stream->state == state;
         default:
             return 0;
     }
@@ -1415,35 +1324,25 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
     apr_status_t rv;
     h2_stream *stream;
 
-    /* nghttp2 wants to send more DATA for the stream. We need
-     * to find out how much of the requested length we can send without
-     * blocking.
-     * Indicate EOS when we encounter it or DEFERRED if the stream
-     * should be suspended. Beware of trailers.
-     */
+    /* nghttp2 wants to send more DATA for the stream.
+     * we should have submitted the final response at this time
+     * after receiving output via stream_do_responses() */
    ap_assert(session);
    (void)ng2s;
    (void)buf;
    (void)source;
    stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
-    if (!stream || !stream->output) {
+    if (!stream) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c1,
                      APLOGNO(02937)
                      H2_SSSN_STRM_MSG(session, stream_id, "data_cb, stream not found"));
        return NGHTTP2_ERR_CALLBACK_FAILURE;
    }
-    if (!stream->response) {
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
-                      APLOGNO(10299)
-                      H2_SSSN_STRM_MSG(session, stream_id, "data_cb, no response seen yet"));
+    if (!stream->output || !stream->response || !stream->out_buffer) {
        return NGHTTP2_ERR_DEFERRED;
    }
    if (stream->rst_error) {
-        return NGHTTP2_ERR_CALLBACK_FAILURE;
-    }
-    if (!stream->out_buffer) {
-        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
-                      H2_SSSN_STRM_MSG(session, stream_id, "suspending"));
        return NGHTTP2_ERR_DEFERRED;
    }
+
    if (h2_c1_io_needs_flush(&session->io)) {
@@ -1463,68 +1362,76 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* How much data do we have in our buffers that we can write? */
|
/* How much data do we have in our buffers that we can write?
|
||||||
check_and_receive:
|
* if not enough, receive more. */
|
||||||
buf_len = output_data_buffered(stream, &eos, &header_blocked);
|
buf_len = output_data_buffered(stream, &eos, &header_blocked);
|
||||||
while (buf_len < (apr_off_t)length && !eos && !header_blocked) {
|
if (buf_len < (apr_off_t)length && !eos
|
||||||
|
&& !header_blocked && !stream->rst_error) {
|
||||||
/* read more? */
|
/* read more? */
|
||||||
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
|
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
|
||||||
H2_SSSN_STRM_MSG(session, stream_id,
|
H2_SSSN_STRM_MSG(session, stream_id,
|
||||||
"need more (read len=%ld, %ld in buffer)"),
|
"need more (read len=%ld, %ld in buffer)"),
|
||||||
(long)length, (long)buf_len);
|
(long)length, (long)buf_len);
|
||||||
rv = buffer_output_receive(stream);
|
rv = buffer_output_receive(stream);
|
||||||
if (APR_EOF == rv) {
|
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
|
||||||
|
H2_SSSN_STRM_MSG(session, stream_id,
|
||||||
|
"buffer_output_received"));
|
||||||
|
if (APR_STATUS_IS_EAGAIN(rv)) {
|
||||||
|
/* currently, no more is available */
|
||||||
|
}
|
||||||
|
else if (APR_SUCCESS == rv) {
|
||||||
|
/* got some, re-assess */
|
||||||
|
buf_len = output_data_buffered(stream, &eos, &header_blocked);
|
||||||
|
}
|
||||||
|
else if (APR_EOF == rv) {
|
||||||
|
if (!stream->output_eos) {
|
||||||
|
/* Seeing APR_EOF without an EOS bucket received before indicates
|
||||||
|
* that stream output is incomplete. Commonly, we expect to see
|
||||||
|
* an ERROR bucket to have been generated. But faulty handlers
|
||||||
|
* may not have generated one.
|
||||||
|
* We need to RST the stream bc otherwise the client thinks
|
||||||
|
* it is all fine. */
|
||||||
|
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
|
||||||
|
H2_SSSN_STRM_MSG(session, stream_id, "rst stream"));
|
||||||
|
h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
|
||||||
|
return NGHTTP2_ERR_CALLBACK_FAILURE;
|
||||||
|
}
|
||||||
|
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
|
||||||
|
H2_SSSN_STRM_MSG(session, stream_id,
|
||||||
|
"eof on receive (read len=%ld, %ld in buffer)"),
|
||||||
|
(long)length, (long)buf_len);
|
||||||
eos = 1;
|
eos = 1;
|
||||||
rv = APR_SUCCESS;
|
rv = APR_SUCCESS;
|
||||||
}
|
}
|
||||||
|
else {
|
||||||
if (APR_SUCCESS == rv) {
|
|
||||||
/* re-assess */
|
|
||||||
buf_len = output_data_buffered(stream, &eos, &header_blocked);
|
|
||||||
}
|
|
||||||
else if (APR_STATUS_IS_EAGAIN(rv)) {
|
|
||||||
/* currently, no more is available */
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
else if (APR_SUCCESS != rv) {
|
|
||||||
ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
|
ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
|
||||||
H2_STRM_LOG(APLOGNO(02938), stream, "data_cb, reading data"));
|
H2_STRM_LOG(APLOGNO(02938), stream, "data_cb, reading data"));
|
||||||
return NGHTTP2_ERR_CALLBACK_FAILURE;
|
return NGHTTP2_ERR_CALLBACK_FAILURE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (buf_len == 0 && header_blocked) {
|
if (stream->rst_error) {
|
||||||
/* we are blocked from having data to send by a HEADER bucket sitting
|
return NGHTTP2_ERR_DEFERRED;
|
||||||
* at buffer start. Send it and check again what DATA we can send. */
|
|
||||||
rv = buffer_output_process_headers(stream);
|
|
||||||
if (APR_SUCCESS == rv) {
|
|
||||||
goto check_and_receive;
|
|
||||||
}
|
|
||||||
else if (APR_STATUS_IS_EAGAIN(rv)) {
|
|
||||||
/* unable to send the HEADER at this time. */
|
|
||||||
eos = 0;
|
|
||||||
goto cleanup;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
|
|
||||||
H2_STRM_LOG(APLOGNO(10300), stream,
|
|
||||||
"data_cb, error processing headers"));
|
|
||||||
return NGHTTP2_ERR_CALLBACK_FAILURE;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (buf_len > (apr_off_t)length) {
|
if (buf_len == 0 && header_blocked) {
|
||||||
|
rv = stream_do_trailers(stream);
|
||||||
|
if (APR_SUCCESS != rv && !APR_STATUS_IS_EAGAIN(rv)) {
|
||||||
|
ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
|
||||||
|
H2_STRM_LOG(APLOGNO(10300), stream,
|
||||||
|
"data_cb, error processing trailers"));
|
||||||
|
return NGHTTP2_ERR_CALLBACK_FAILURE;
|
||||||
|
}
|
||||||
|
length = 0;
|
||||||
|
eos = 0;
|
||||||
|
}
|
||||||
|
else if (buf_len > (apr_off_t)length) {
|
||||||
eos = 0; /* Any EOS we have in the buffer does not apply yet */
|
eos = 0; /* Any EOS we have in the buffer does not apply yet */
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
length = (size_t)buf_len;
|
length = (size_t)buf_len;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (stream->sent_trailers) {
|
|
||||||
/* We already sent trailers and will/can not send more DATA. */
|
|
||||||
eos = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (length) {
|
if (length) {
|
||||||
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
|
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
|
||||||
H2_STRM_MSG(stream, "data_cb, sending len=%ld, eos=%d"),
|
H2_STRM_MSG(stream, "data_cb, sending len=%ld, eos=%d"),
|
||||||
@@ -1538,14 +1445,208 @@ check_and_receive:
|
|||||||
return NGHTTP2_ERR_DEFERRED;
|
return NGHTTP2_ERR_DEFERRED;
|
||||||
}
|
}
|
||||||
|
|
||||||
cleanup:
|
|
||||||
if (eos) {
|
if (eos) {
|
||||||
*data_flags |= NGHTTP2_DATA_FLAG_EOF;
|
*data_flags |= NGHTTP2_DATA_FLAG_EOF;
|
||||||
}
|
}
|
||||||
return length;
|
return length;
|
||||||
}
|
}
|
||||||
|
|
||||||
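Note: stream_data_cb() is the nghttp2 data-source read callback for a stream; it reports how many buffered bytes can go out now, defers when nothing is available, and sets the EOF flag at the end. A compressed model of that decision flow (not part of the patch; helper names such as buffered_output()/send_trailers() are hypothetical stand-ins):

    DEFERRED = "deferred"  # stands in for NGHTTP2_ERR_DEFERRED

    def data_cb(stream, length):
        """Mirrors the order of checks above; returns (nbytes, eos) or DEFERRED."""
        if stream.rst_error:
            return DEFERRED
        buffered, eos, header_blocked = stream.buffered_output(length)
        if buffered == 0 and header_blocked:
            stream.send_trailers()          # a trailer HEADER bucket blocks further DATA
            buffered, eos = 0, False
        elif buffered > length:
            buffered, eos = length, False   # the buffered EOS lies beyond what we send now
        if buffered == 0 and not eos:
            return DEFERRED                 # nothing to send, wait for more c2 output
        return buffered, eos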
-apr_status_t h2_stream_read_output(h2_stream *stream)
+static apr_status_t stream_do_response(h2_stream *stream)
+{
+    conn_rec *c1 = stream->session->c1;
+    apr_status_t rv = APR_EAGAIN;
+    int ngrv, is_empty = 0;
+    h2_ngheader *nh = NULL;
+    apr_bucket *b, *e;
+#if AP_HAS_RESPONSE_BUCKETS
+    ap_bucket_response *resp = NULL;
+#else
+    h2_headers *resp = NULL;
+#endif
+    nghttp2_data_provider provider, *pprovider = NULL;
+
+    ap_assert(!stream->response);
+    ap_assert(stream->out_buffer);
+
+    b = APR_BRIGADE_FIRST(stream->out_buffer);
+    while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+        e = APR_BUCKET_NEXT(b);
+        if (APR_BUCKET_IS_METADATA(b)) {
+#if AP_HAS_RESPONSE_BUCKETS
+            if (AP_BUCKET_IS_RESPONSE(b)) {
+                resp = b->data;
+#else /* AP_HAS_RESPONSE_BUCKETS */
+            if (H2_BUCKET_IS_HEADERS(b)) {
+                resp = h2_bucket_headers_get(b);
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+                APR_BUCKET_REMOVE(b);
+                apr_bucket_destroy(b);
+                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+                              H2_STRM_MSG(stream, "process response %d"),
+                              resp->status);
+                is_empty = (e != APR_BRIGADE_SENTINEL(stream->out_buffer)
+                            && APR_BUCKET_IS_EOS(e));
+                break;
+            }
+            else if (APR_BUCKET_IS_EOS(b)) {
+                h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+                rv = APR_EINVAL;
+                goto cleanup;
+            }
+            else if (AP_BUCKET_IS_ERROR(b)) {
+                stream_do_error_bucket(stream, b);
+                rv = APR_EINVAL;
+                goto cleanup;
+            }
+        }
+        else {
+            /* data buckets before response headers, an error */
+            h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+            rv = APR_EINVAL;
+            goto cleanup;
+        }
+        b = e;
+    }
+
+    if (!resp) {
+        rv = APR_EAGAIN;
+        goto cleanup;
+    }
+
+    if (resp->status < 100) {
+        h2_stream_rst(stream, resp->status);
+        goto cleanup;
+    }
+
+    if (resp->status == HTTP_FORBIDDEN && resp->notes) {
+        const char *cause = apr_table_get(resp->notes, "ssl-renegotiate-forbidden");
+        if (cause) {
+            /* This request triggered a TLS renegotiation that is not allowed
+             * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+             */
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, resp->status, c1,
+                          H2_STRM_LOG(APLOGNO(03061), stream,
+                          "renegotiate forbidden, cause: %s"), cause);
+            h2_stream_rst(stream, H2_ERR_HTTP_1_1_REQUIRED);
+            goto cleanup;
+        }
+    }
+
+    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
+                  H2_STRM_LOG(APLOGNO(03073), stream,
+                  "submit response %d"), resp->status);
+
+    /* If this stream is not a pushed one itself,
+     * and HTTP/2 server push is enabled here,
+     * and the response HTTP status is not sth >= 400,
+     * and the remote side has pushing enabled,
+     * -> find and perform any pushes on this stream
+     *    *before* we submit the stream response itself.
+     * This helps clients avoid opening new streams on Link
+     * resp that get pushed right afterwards.
+     *
+     * *) the response code is relevant, as we do not want to
+     *    make pushes on 401 or 403 codes and friends.
+     *    And if we see a 304, we do not push either
+     *    as the client, having this resource in its cache, might
+     *    also have the pushed ones as well.
+     */
+    if (!stream->initiated_on
+        && !stream->response
+        && stream->request && stream->request->method
+        && !strcmp("GET", stream->request->method)
+        && (resp->status < 400)
+        && (resp->status != 304)
+        && h2_session_push_enabled(stream->session)) {
+        /* PUSH is possible and enabled on server, unless the request
+         * denies it, submit resources to push */
+        const char *s = apr_table_get(resp->notes, H2_PUSH_MODE_NOTE);
+        if (!s || strcmp(s, "0")) {
+            h2_stream_submit_pushes(stream, resp);
+        }
+    }
+
+    if (!stream->pref_priority) {
+        stream->pref_priority = h2_stream_get_priority(stream, resp);
+    }
+    h2_session_set_prio(stream->session, stream, stream->pref_priority);
+
+    if (resp->status == 103
+        && !h2_config_sgeti(stream->session->s, H2_CONF_EARLY_HINTS)) {
+        /* suppress sending this to the client, it might have triggered
+         * pushes and served its purpose nevertheless */
+        rv = APR_SUCCESS;
+        goto cleanup;
+    }
+    if (resp->status >= 200) {
+        stream->response = resp;
+    }
+
+    if (!is_empty) {
+        memset(&provider, 0, sizeof(provider));
+        provider.source.fd = stream->id;
+        provider.read_callback = stream_data_cb;
+        pprovider = &provider;
+    }
+
+    rv = h2_res_create_ngheader(&nh, stream->pool, resp);
+    if (APR_SUCCESS != rv) {
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+                      H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
+        h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+        goto cleanup;
+    }
+
+    ngrv = nghttp2_submit_response(stream->session->ngh2, stream->id,
+                                   nh->nv, nh->nvlen, pprovider);
+    if (nghttp2_is_fatal(ngrv)) {
+        rv = APR_EGENERAL;
+        h2_session_dispatch_event(stream->session,
+                                  H2_SESSION_EV_PROTO_ERROR, ngrv, nghttp2_strerror(rv));
+        ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
+                      APLOGNO(02940) "submit_response: %s",
+                      nghttp2_strerror(rv));
+        goto cleanup;
+    }
+
+    if (stream->initiated_on) {
+        ++stream->session->pushes_submitted;
+    }
+    else {
+        ++stream->session->responses_submitted;
+    }
+
+cleanup:
+    return rv;
+}
+
+static void stream_do_responses(h2_stream *stream)
+{
+    h2_session *session = stream->session;
+    conn_rec *c1 = session->c1;
+    apr_status_t rv;
+
+    ap_assert(!stream->response);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+                  H2_STRM_MSG(stream, "do_response"));
+    rv = buffer_output_receive(stream);
+    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+                  H2_SSSN_STRM_MSG(session, stream->id,
+                  "buffer_output_received2"));
+    if (APR_SUCCESS != rv && APR_EAGAIN != rv) {
+        h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+    }
+    else {
+        /* process all headers sitting at the buffer head. */
+        do {
+            rv = stream_do_response(stream);
+        } while (APR_SUCCESS == rv
+                 && !stream->rst_error
+                 && !stream->response);
+    }
+}
+
+void h2_stream_on_output_change(h2_stream *stream)
 {
     conn_rec *c1 = stream->session->c1;
     apr_status_t rv = APR_EAGAIN;
@@ -1556,47 +1657,56 @@ apr_status_t h2_stream_read_output(h2_stream *stream)
         /* c2 has not assigned the output beam to the stream (yet). */
         ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c1,
                       H2_STRM_MSG(stream, "read_output, no output beam registered"));
-        rv = APR_EAGAIN;
-        goto cleanup;
     }
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
-                  H2_STRM_MSG(stream, "read_output"));
-
-    if (h2_stream_was_closed(stream)) {
+    else if (h2_stream_is_at_or_past(stream, H2_SS_CLOSED)) {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
                       H2_STRM_LOG(APLOGNO(10301), stream, "already closed"));
-        rv = APR_EOF;
-        goto cleanup;
     }
-    else if (stream->state == H2_SS_CLOSED_L) {
+    else if (h2_stream_is_at(stream, H2_SS_CLOSED_L)) {
         /* We have delivered a response to a stream that was not closed
         * by the client. This could be a POST with body that we negate
         * and we need to RST_STREAM to end if. */
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
                       H2_STRM_LOG(APLOGNO(10313), stream, "remote close missing"));
-        nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
-                                  stream->id, NGHTTP2_NO_ERROR);
-        rv = APR_EOF;
-        goto cleanup;
+        h2_stream_rst(stream, H2_ERR_NO_ERROR);
     }
-
-    rv = buffer_output_receive(stream);
-    if (APR_SUCCESS != rv && APR_EAGAIN != rv) goto cleanup;
-
-    /* process all headers sitting at the buffer head. */
-    while (1) {
-        rv = buffer_output_process_headers(stream);
-        if (APR_EAGAIN == rv) {
-            rv = APR_SUCCESS;
-            break;
+    else {
+        /* stream is not closed, a change in output happened. There are
+         * two modes of operation here:
+         * 1) the final response has been submitted. nghttp2 is invoking
+         *    stream_data_cb() to progress the stream. This handles DATA,
+         *    trailers, EOS and ERRORs.
+         *    When stream_data_cb() runs out of things to send, it returns
+         *    NGHTTP2_ERR_DEFERRED and nghttp2 *suspends* further processing
+         *    until we tell it to resume.
+         * 2) We have not seen the *final* response yet. The stream can not
+         *    send any response DATA. The nghttp2 stream_data_cb() is not
+         *    invoked. We need to receive output, expecting not DATA but
+         *    RESPONSEs (intermediate may arrive) and submit those. On
+         *    the final response, nghttp2 will start calling stream_data_cb().
+         */
+        if (stream->response) {
+            nghttp2_session_resume_data(stream->session->ngh2, stream->id);
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+                          H2_STRM_MSG(stream, "resumed"));
+        }
+        else {
+            stream_do_responses(stream);
+            if (!stream->rst_error) {
+                nghttp2_session_resume_data(stream->session->ngh2, stream->id);
+            }
         }
-        if (APR_SUCCESS != rv) goto cleanup;
     }
-    nghttp2_session_resume_data(stream->session->ngh2, stream->id);
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
-                  H2_STRM_MSG(stream, "resumed"));
-
-cleanup:
-    return rv;
 }
+
+void h2_stream_on_input_change(h2_stream *stream)
+{
+    ap_assert(stream->input);
+    h2_beam_report_consumption(stream->input);
+    if (h2_stream_is_at(stream, H2_SS_CLOSED_L)
+        && !h2_mplx_c1_stream_is_running(stream->session->mplx, stream)) {
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c1,
+                      H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
+        h2_stream_rst(stream, H2_ERR_NO_ERROR);
+    }
+}
@@ -138,9 +138,9 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool,
 void h2_stream_destroy(h2_stream *stream);
 
 /**
- * Setup the input for the stream.
+ * Perform any late initialization before stream starts processing.
  */
-apr_status_t h2_stream_setup_input(h2_stream *stream);
+apr_status_t h2_stream_prepare_processing(h2_stream *stream);
 
 /*
  * Set a new monitor for this stream, replacing any existing one. Can
@@ -155,6 +155,22 @@ void h2_stream_set_monitor(h2_stream *stream, h2_stream_monitor *monitor);
  */
 void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev);
 
+/**
+ * Determine if stream is at given state.
+ * @param stream the stream to check
+ * @param state the state to look for
+ * @return != 0 iff stream is at given state.
+ */
+int h2_stream_is_at(const h2_stream *stream, h2_stream_state_t state);
+
+/**
+ * Determine if stream has reached given state or is past this state.
+ * @param stream the stream to check
+ * @param state the state to look for
+ * @return != 0 iff stream is at or past given state.
+ */
+int h2_stream_is_at_or_past(const h2_stream *stream, h2_stream_state_t state);
+
 /**
  * Cleanup references into request processing.
  *
@@ -228,20 +244,16 @@ apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
 void h2_stream_rst(h2_stream *stream, int error_code);
 
 /**
- * Determine if stream was closed already. This is true for
- * states H2_SS_CLOSED, H2_SS_CLEANUP. But not true
- * for H2_SS_CLOSED_L and H2_SS_CLOSED_R.
- *
- * @param stream the stream to check on
- * @return != 0 iff stream has been closed
- */
-int h2_stream_was_closed(const h2_stream *stream);
-
-/**
- * Inspect the c2 output for response(s) and data.
+ * Stream input signals change. Take necessary actions.
  * @param stream the stream to read output for
  */
-apr_status_t h2_stream_read_output(h2_stream *stream);
+void h2_stream_on_input_change(h2_stream *stream);
+
+/**
+ * Stream output signals change. Take necessary actions.
+ * @param stream the stream to read output for
+ */
+void h2_stream_on_output_change(h2_stream *stream);
 
 /**
  * Read a maximum number of bytes into the bucket brigade.
@@ -27,7 +27,7 @@
  * @macro
  * Version number of the http2 module as c string
  */
-#define MOD_HTTP2_VERSION "2.0.8-dev"
+#define MOD_HTTP2_VERSION "2.0.10"
 
 /**
  * @macro
@@ -35,7 +35,7 @@
  * release. This is a 24 bit number with 8 bits for major number, 8 bits
  * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
  */
-#define MOD_HTTP2_VERSION_NUM 0x020008
+#define MOD_HTTP2_VERSION_NUM 0x02000a
 
 
 #endif /* mod_h2_h2_version_h */
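Note: a quick check of the 24-bit version encoding described above (sketch, not part of the patch):

    # 8 bits each for major, minor and patch: 1.2.3 -> 0x010203
    def mod_http2_version_num(major: int, minor: int, patch: int) -> int:
        return (major << 16) | (minor << 8) | patch

    assert mod_http2_version_num(2, 0, 10) == 0x02000a   # matches MOD_HTTP2_VERSION_NUM for 2.0.10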
@@ -432,6 +432,7 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pchild,
 
     ap_assert(s);
     ap_assert(pchild);
+    ap_assert(idle_limit > 0);
 
     /* let's have our own pool that will be parent to all h2_worker
      * instances we create. This happens in various threads, but always
@@ -458,7 +459,7 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pchild,
     workers->pool = pool;
     workers->min_active = min_active;
     workers->max_slots = max_slots;
-    workers->idle_limit = (idle_limit > 0)? idle_limit : apr_time_from_sec(10);
+    workers->idle_limit = idle_limit;
     workers->dynamic = (workers->min_active < workers->max_slots);
 
     ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
@@ -335,7 +335,6 @@ static int h2_h2_fixups(request_rec *r)
     if (r->connection->master) {
         h2_conn_ctx_t *ctx = h2_conn_ctx_get(r->connection);
         unsigned int i;
-        apr_interval_time_t stream_timeout;
 
         for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
             h2_var_def *vdef = &H2_VARS[i];
@@ -345,10 +344,6 @@ static int h2_h2_fixups(request_rec *r)
                                       r, ctx));
             }
         }
-        stream_timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT);
-        if (stream_timeout > 0) {
-            h2_conn_ctx_set_timeout(ctx, stream_timeout);
-        }
     }
     return DECLINED;
 }
@@ -249,7 +249,7 @@ static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
     ctx->r_done = 0;
     add_request(ctx->session, ctx->r);
 
-    while (!ctx->master->aborted && !ctx->r_done) {
+    while (!ctx->owner->aborted && !ctx->r_done) {
 
         status = h2_proxy_session_process(ctx->session);
         if (status != APR_SUCCESS) {
@@ -267,16 +267,13 @@ static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
     }
 
 out:
-    if (ctx->master->aborted) {
+    if (ctx->owner->aborted) {
         /* master connection gone */
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
                       APLOGNO(03374) "eng(%s): master connection gone", ctx->id);
         /* cancel all ongoing requests */
         h2_proxy_session_cancel_all(ctx->session);
         h2_proxy_session_process(ctx->session);
-        if (!ctx->master->aborted) {
-            status = ctx->r_status = APR_SUCCESS;
-        }
     }
 
     ctx->session->user_data = NULL;
@@ -348,7 +345,7 @@ static int proxy_http2_handler(request_rec *r,
                   "H2: serving URL %s", url);
 
 run_connect:
-    if (ctx->master->aborted) goto cleanup;
+    if (ctx->owner->aborted) goto cleanup;
 
     /* Get a proxy_conn_rec from the worker, might be a new one, might
      * be one still open from another request, or it might fail if the
@@ -400,10 +397,10 @@ run_connect:
                        "proxy-request-alpn-protos", "h2");
     }
 
-    if (ctx->master->aborted) goto cleanup;
+    if (ctx->owner->aborted) goto cleanup;
     status = ctx_run(ctx);
 
-    if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->master->aborted) {
+    if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->owner->aborted) {
         /* Not successfully processed, but may retry, tear down old conn and start over */
         if (ctx->p_conn) {
             ctx->p_conn->close = 1;
@@ -95,6 +95,9 @@ class H2TestEnv(HttpdTestEnv):
            'AH02429', # invalid chars in response header names, see test_h2_200
            'AH02430', # invalid chars in response header values, see test_h2_200
            'AH10373', # SSL errors on uncompleted handshakes, see test_h2_105
+           'AH01247', # mod_cgid sometimes freaks out on load tests
+           'AH01110', # error by proxy reading response
+           'AH10400', # warning that 'enablereuse' has not effect in certain configs test_h2_600
        ])
        self.httpd_error_log.add_ignored_patterns([
            re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'),
@@ -126,6 +129,9 @@ class H2Conf(HttpdConf):
                "<Location \"/h2test/delay\">",
                " SetHandler h2test-delay",
                "</Location>",
+               "<Location \"/h2test/error\">",
+               " SetHandler h2test-error",
+               "</Location>",
            ]
        }))

test/modules/http2/htdocs/cgi/alive.json (new file, 4 lines)
@@ -0,0 +1,4 @@
+{
+  "host" : "cgi",
+  "alive" : true
+}
@@ -6,12 +6,15 @@ print("Content-Type: application/json")
 print()
 print("{")
 print(" \"https\" : \"%s\"," % (os.getenv('HTTPS', '')))
-print(" \"x_host\" : \"%s\"," % (os.getenv('X_HOST', '')))
-print(" \"host\" : \"%s\"," % (os.getenv('SERVER_NAME', '')))
+print(" \"host\" : \"%s\"," % (os.getenv('X_HOST', '') \
+    if 'X_HOST' in os.environ else os.getenv('SERVER_NAME', '')))
+print(" \"server\" : \"%s\"," % (os.getenv('SERVER_NAME', '')))
+print(" \"h2_original_host\" : \"%s\"," % (os.getenv('H2_ORIGINAL_HOST', '')))
 print(" \"port\" : \"%s\"," % (os.getenv('SERVER_PORT', '')))
 print(" \"protocol\" : \"%s\"," % (os.getenv('SERVER_PROTOCOL', '')))
 print(" \"ssl_protocol\" : \"%s\"," % (os.getenv('SSL_PROTOCOL', '')))
 print(" \"h2\" : \"%s\"," % (os.getenv('HTTP2', '')))
-print(" \"h2push\" : \"%s\"" % (os.getenv('H2PUSH', '')))
+print(" \"h2push\" : \"%s\"," % (os.getenv('H2PUSH', '')))
+print(" \"h2_stream_id\" : \"%s\"" % (os.getenv('H2_STREAM_ID', '')))
 print("}")
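Note: with this change the hello.py CGI reports the effective Host plus the new "h2_original_host" and "h2_stream_id" fields. A rough sketch of what a test can expect back, written in the style of the suite (illustrative only, not part of the patch):

    expected_keys = {"https", "host", "server", "h2_original_host", "port",
                     "protocol", "ssl_protocol", "h2", "h2push", "h2_stream_id"}
    r = env.curl_get(env.mkurl("https", "cgi", "/hello.py"), 5)
    assert expected_keys.issubset(r.json.keys())
    assert r.json["h2_stream_id"] == "1"   # first request on a fresh connection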
@@ -280,7 +280,7 @@ static int h2test_delay_handler(request_rec *r)
 
 cleanup:
     ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
-                  "delay_handler: request cleanup, r->status=%d, aborted=%d",
+                  "delay_handler: request cleanup, r->status=%d, aborte=%d",
                   r->status, c->aborted);
     if (rv == APR_SUCCESS
         || r->status != HTTP_OK
@@ -297,7 +297,6 @@ static int h2test_trailer_handler(request_rec *r)
     apr_bucket *b;
     apr_status_t rv;
     char buffer[8192];
-    int i, chunks = 3;
     long l;
     int body_len = 0;
 
@@ -345,7 +344,7 @@ static int h2test_trailer_handler(request_rec *r)
 
 cleanup:
     ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
-                  "trailer_handler: request cleanup, r->status=%d, aborted=%d",
+                  "trailer_handler: request cleanup, r->status=%d, aborte=%d",
                   r->status, c->aborted);
     if (rv == APR_SUCCESS
         || r->status != HTTP_OK
@@ -355,6 +354,154 @@ cleanup:
     return AP_FILTER_ERROR;
 }
 
+static int status_from_str(const char *s, apr_status_t *pstatus)
+{
+    if (!strcmp("timeout", s)) {
+        *pstatus = APR_TIMEUP;
+        return 1;
+    }
+    else if (!strcmp("reset", s)) {
+        *pstatus = APR_ECONNRESET;
+        return 1;
+    }
+    return 0;
+}
+
+static int h2test_error_handler(request_rec *r)
+{
+    conn_rec *c = r->connection;
+    apr_bucket_brigade *bb;
+    apr_bucket *b;
+    apr_status_t rv;
+    char buffer[8192];
+    int i, chunks = 3, error_bucket = 1;
+    long l;
+    apr_time_t delay = 0, body_delay = 0;
+    apr_array_header_t *args = NULL;
+    int http_status = 200;
+    apr_status_t error = APR_SUCCESS, body_error = APR_SUCCESS;
+
+    if (strcmp(r->handler, "h2test-error")) {
+        return DECLINED;
+    }
+    if (r->method_number != M_GET && r->method_number != M_POST) {
+        return DECLINED;
+    }
+
+    if (r->args) {
+        args = apr_cstr_split(r->args, "&", 1, r->pool);
+        for (i = 0; i < args->nelts; ++i) {
+            char *s, *val, *arg = APR_ARRAY_IDX(args, i, char*);
+            s = strchr(arg, '=');
+            if (s) {
+                *s = '\0';
+                val = s + 1;
+                if (!strcmp("status", arg)) {
+                    http_status = (int)apr_atoi64(val);
+                    if (val > 0) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("error", arg)) {
+                    if (status_from_str(val, &error)) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("error_bucket", arg)) {
+                    error_bucket = (int)apr_atoi64(val);
+                    if (val >= 0) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("body_error", arg)) {
+                    if (status_from_str(val, &body_error)) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("delay", arg)) {
+                    rv = duration_parse(&delay, r->args, "s");
+                    if (APR_SUCCESS == rv) {
+                        continue;
+                    }
+                }
+                else if (!strcmp("body_delay", arg)) {
+                    rv = duration_parse(&body_delay, r->args, "s");
+                    if (APR_SUCCESS == rv) {
+                        continue;
+                    }
+                }
+            }
+            ap_die(HTTP_BAD_REQUEST, r);
+            return OK;
+        }
+    }
+
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "error_handler: processing request, %s",
+                  r->args? r->args : "(no args)");
+    r->status = http_status;
+    r->clength = -1;
+    r->chunked = 1;
+    apr_table_unset(r->headers_out, "Content-Length");
+    /* Discourage content-encodings */
+    apr_table_unset(r->headers_out, "Content-Encoding");
+    apr_table_setn(r->subprocess_env, "no-brotli", "1");
+    apr_table_setn(r->subprocess_env, "no-gzip", "1");
+
+    ap_set_content_type(r, "application/octet-stream");
+    bb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+    if (delay) {
+        apr_sleep(delay);
+    }
+    if (error != APR_SUCCESS) {
+        return ap_map_http_request_error(error, HTTP_BAD_REQUEST);
+    }
+    /* flush response */
+    b = apr_bucket_flush_create(c->bucket_alloc);
+    APR_BRIGADE_INSERT_TAIL(bb, b);
+    rv = ap_pass_brigade(r->output_filters, bb);
+    if (APR_SUCCESS != rv) goto cleanup;
+
+    memset(buffer, 'X', sizeof(buffer));
+    l = sizeof(buffer);
+    for (i = 0; i < chunks; ++i) {
+        if (body_delay) {
+            apr_sleep(body_delay);
+        }
+        rv = apr_brigade_write(bb, NULL, NULL, buffer, l);
+        if (APR_SUCCESS != rv) goto cleanup;
+        rv = ap_pass_brigade(r->output_filters, bb);
+        if (APR_SUCCESS != rv) goto cleanup;
+        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+                      "error_handler: passed %ld bytes as response body", l);
+        if (body_error != APR_SUCCESS) {
+            rv = body_error;
+            goto cleanup;
+        }
+    }
+    /* we are done */
+    b = apr_bucket_eos_create(c->bucket_alloc);
+    APR_BRIGADE_INSERT_TAIL(bb, b);
+    rv = ap_pass_brigade(r->output_filters, bb);
+    apr_brigade_cleanup(bb);
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "error_handler: response passed");
+
+cleanup:
+    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
+                  "error_handler: request cleanup, r->status=%d, aborted=%d",
+                  r->status, c->aborted);
+    if (rv == APR_SUCCESS) {
+        return OK;
+    }
+    if (error_bucket) {
+        http_status = ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+        b = ap_bucket_error_create(http_status, NULL, r->pool, c->bucket_alloc);
+        APR_BRIGADE_INSERT_TAIL(bb, b);
+        ap_pass_brigade(r->output_filters, bb);
+    }
+    return AP_FILTER_ERROR;
+}
+
 /* Install this module into the apache2 infrastructure.
  */
 static void h2test_hooks(apr_pool_t *pool)
@@ -375,5 +522,6 @@ static void h2test_hooks(apr_pool_t *pool)
     ap_hook_handler(h2test_echo_handler, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_handler(h2test_delay_handler, NULL, NULL, APR_HOOK_MIDDLE);
     ap_hook_handler(h2test_trailer_handler, NULL, NULL, APR_HOOK_MIDDLE);
+    ap_hook_handler(h2test_error_handler, NULL, NULL, APR_HOOK_MIDDLE);
 }
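Note: the new h2test-error handler is driven entirely through query parameters (status, error, body_error, error_bucket, delay, body_delay; error values "timeout" and "reset"). A sketch of how it can be exercised from the test suite, following the env fixtures used by the tests below (illustrative only):

    def probe_error_handler(env):
        # clean 500 response, no injected fault
        r = env.curl_get(env.mkurl("https", "cgi", "/h2test/error?status=500"))
        assert r.response['status'] == 500
        # fail before any body is written: surfaces to the client as a 408
        r = env.curl_get(env.mkurl("https", "cgi", "/h2test/error?error=timeout"))
        assert r.response['status'] == 408
        # fail while streaming the body: the stream is reset, so curl reports an error
        r = env.curl_get(env.mkurl("https", "cgi", "/h2test/error?body_error=reset"))
        assert r.exit_code != 0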
@@ -237,3 +237,31 @@ content-type: text/html
         assert r.response['status'] == 200
         assert 'date' in r.response['header']
         assert 'server' in r.response['header']
+
+    # lets do some error tests
+    def test_h2_003_70(self, env):
+        url = env.mkurl("https", "cgi", "/h2test/error?status=500")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 500
+        url = env.mkurl("https", "cgi", "/h2test/error?error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 408
+
+    # produce an error during response body
+    def test_h2_003_71(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        url = env.mkurl("https", "cgi", "/h2test/error?body_error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, f"{r}"
+        url = env.mkurl("https", "cgi", "/h2test/error?body_error=reset")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, f"{r}"
+
+    # produce an error, fail to generate an error bucket
+    def test_h2_003_72(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        url = env.mkurl("https", "cgi", "/h2test/error?body_error=timeout&error_bucket=0")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, f"{r}"
@@ -146,4 +146,4 @@ class TestTimeout:
                 break
             piper.close()
             assert piper.response
-            assert piper.response['status'] == 408
+            assert piper.response['status'] == 408, f"{piper.response}"
@@ -86,7 +86,7 @@ class TestTrailers:
         url = env.mkurl("https", "cgi", "/h2test/trailer?0")
         r = env.nghttp().get(url)
         assert r.response["status"] == 200
-        assert len(r.response["body"]) == 0
+        assert len(r.response["body"]) == 0, f'{r.response["body"]}'
         assert 'trailer' in r.response
         assert 'trailer-content-length' in r.response['trailer']
         assert r.response['trailer']['trailer-content-length'] == '0'

test/modules/http2/test_203_rfc9113.py (new file, 42 lines)
@@ -0,0 +1,42 @@
+import pytest
+
+from pyhttpd.env import HttpdTestEnv
+from .env import H2Conf
+
+
+class TestRfc9113:
+
+    @pytest.fixture(autouse=True, scope='class')
+    def _class_scope(self, env):
+        H2Conf(env).add_vhost_test1().install()
+        assert env.apache_restart() == 0
+
+    # by default, we ignore leading/trailing ws
+    # tests with leading ws are not present as curl seems to silently eat those
+    def test_h2_203_01_ws_ignore(self, env):
+        url = env.mkurl("https", "test1", "/")
+        r = env.curl_get(url, options=['-H', 'trailing-space: must not '])
+        assert r.exit_code == 0, f'curl output: {r.stderr}'
+        assert r.response["status"] == 200, f'curl output: {r.stdout}'
+        r = env.curl_get(url, options=['-H', 'trailing-space: must not\t'])
+        assert r.exit_code == 0, f'curl output: {r.stderr}'
+        assert r.response["status"] == 200, f'curl output: {r.stdout}'
+
+    # When enabled, leading/trailing make the stream RST
+    # tests with leading ws are not present as curl seems to silently eat those
+    def test_h2_203_02_ws_reject(self, env):
+        if not env.h2load_is_at_least('1.50.0'):
+            pytest.skip(f'need nghttp2 >= 1.50.0')
+        conf = H2Conf(env)
+        conf.add([
+            "H2HeaderStrictness rfc9113"
+        ])
+        conf.add_vhost_test1()
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "test1", "/")
+        r = env.curl_get(url, options=['-H', 'trailing-space: must not '])
+        assert r.exit_code != 0, f'curl output: {r.stderr}'
+        r = env.curl_get(url, options=['-H', 'trailing-space: must not\t'])
+        assert r.exit_code != 0, f'curl output: {r.stderr}'
@@ -26,7 +26,7 @@ class TestEarlyHints:
         assert env.apache_restart() == 0
 
     # H2EarlyHints enabled in general, check that it works for H2PushResource
-    def test_h2_401_31(self, env):
+    def test_h2_401_31(self, env, repeat):
         url = env.mkurl("https", "hints", "/006-hints.html")
         r = env.nghttp().get(url)
         assert r.response["status"] == 200
@@ -38,7 +38,7 @@ class TestEarlyHints:
         assert early["header"]["link"]
 
     # H2EarlyHints enabled in general, but does not trigger on added response headers
-    def test_h2_401_32(self, env):
+    def test_h2_401_32(self, env, repeat):
         url = env.mkurl("https", "hints", "/006-nohints.html")
         r = env.nghttp().get(url)
         assert r.response["status"] == 200
@@ -126,3 +126,28 @@ class TestProxy:
     def test_h2_500_24(self, env):
         for i in range(100):
             self.nghttp_upload_stat(env, "data-1k", ["--no-content-length"])
+
+    # lets do some error tests
+    def test_h2_500_30(self, env):
+        url = env.mkurl("https", "cgi", "/proxy/h2test/error?status=500")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 500
+        url = env.mkurl("https", "cgi", "/proxy/h2test/error?error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 408
+
+    # produce an error during response body
+    def test_h2_500_31(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        url = env.mkurl("https", "cgi", "/proxy/h2test/error?body_error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, r
+
+    # produce an error, fail to generate an error bucket
+    def test_h2_500_32(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        url = env.mkurl("https", "cgi", "/proxy/h2test/error?body_error=timeout&error_bucket=0")
+        r = env.curl_get(url)
+        assert r.exit_code != 0, r
@@ -23,7 +23,7 @@ class TestH2Proxy:
         assert r.response["json"]["ssl_protocol"] != ""
         assert r.response["json"]["h2"] == "on"
         assert r.response["json"]["h2push"] == "off"
-        assert r.response["json"]["x_host"] == f"cgi.{env.http_tld}:{env.https_port}"
+        assert r.response["json"]["host"] == f"cgi.{env.http_tld}:{env.https_port}"
 
     def test_h2_600_02(self, env):
         conf = H2Conf(env, extras={
@@ -42,7 +42,8 @@ class TestH2Proxy:
         assert r.response["json"]["protocol"] == "HTTP/2.0"
         assert r.response["json"]["https"] == ""
         # the proxied backend sees Host header as passed on front
-        assert r.response["json"]["x_host"] == f"cgi.{env.http_tld}:{env.https_port}"
+        assert r.response["json"]["host"] == f"cgi.{env.http_tld}:{env.https_port}"
+        assert r.response["json"]["h2_original_host"] == ""
 
     def test_h2_600_03(self, env):
         conf = H2Conf(env, extras={
@@ -61,4 +62,116 @@ class TestH2Proxy:
         assert r.response["json"]["protocol"] == "HTTP/2.0"
         assert r.response["json"]["https"] == ""
         # the proxied backend sees Host as using in connecting to it
-        assert r.response["json"]["x_host"] == f"127.0.0.1:{env.http_port}"
+        assert r.response["json"]["host"] == f"127.0.0.1:{env.http_port}"
+        assert r.response["json"]["h2_original_host"] == ""
+
+    # check that connection reuse actually happens as configured
+    @pytest.mark.parametrize("enable_reuse", [ "on", "off" ])
+    def test_h2_600_04(self, env, enable_reuse):
+        conf = H2Conf(env, extras={
+            f'cgi.{env.http_tld}': [
+                f"ProxyPassMatch ^/h2proxy/([0-9]+)/(.*)$ "
+                f"   h2c://127.0.0.1:$1/$2 enablereuse={enable_reuse} keepalive=on",
+            ]
+        })
+        conf.add_vhost_cgi()
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", f"/h2proxy/{env.http_port}/hello.py")
+        r = env.curl_get(url, 5)
+        assert r.response["status"] == 200
+        assert r.json["h2_stream_id"] == "1"
+        # httpd 2.5.0 disables reuse, not matter the config
+        if enable_reuse == "on" and not env.httpd_is_at_least("2.5.0"):
+            # reuse is not guarantueed for each request, but we expect some
+            # to do it and run on a h2 stream id > 1
+            reused = False
+            for _ in range(10):
+                r = env.curl_get(url, 5)
+                assert r.response["status"] == 200
+                if int(r.json["h2_stream_id"]) > 1:
+                    reused = True
+                    break
+            assert reused
+        else:
+            r = env.curl_get(url, 5)
+            assert r.response["status"] == 200
+            assert r.json["h2_stream_id"] == "1"
+
+    # do some flexible setup from #235 to proper connection selection
+    @pytest.mark.parametrize("enable_reuse", [ "on", "off" ])
+    def test_h2_600_05(self, env, enable_reuse):
+        conf = H2Conf(env, extras={
+            f'cgi.{env.http_tld}': [
+                f"ProxyPassMatch ^/h2proxy/([0-9]+)/(.*)$ "
+                f"   h2c://127.0.0.1:$1/$2 enablereuse={enable_reuse} keepalive=on",
+            ]
+        })
+        conf.add_vhost_cgi()
+        conf.add([
+            f'Listen {env.http_port2}',
+            'UseCanonicalName On',
+            'UseCanonicalPhysicalPort On'
+        ])
+        conf.start_vhost(domains=[f'cgi.{env.http_tld}'],
+                         port=5004, doc_root="htdocs/cgi")
+        conf.add("AddHandler cgi-script .py")
+        conf.end_vhost()
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", f"/h2proxy/{env.http_port}/hello.py")
+        r = env.curl_get(url, 5)
+        assert r.response["status"] == 200
+        assert int(r.json["port"]) == env.http_port
+        # going to another backend port must create a new connection and
+        # we should see stream id one again
+        url = env.mkurl("https", "cgi", f"/h2proxy/{env.http_port2}/hello.py")
+        r = env.curl_get(url, 5)
+        assert r.response["status"] == 200
+        exp_port = env.http_port if enable_reuse == "on" \
+                                    and not env.httpd_is_at_least("2.5.0")\
+            else env.http_port2
+        assert int(r.json["port"]) == exp_port
+
+    # lets do some error tests
+    def test_h2_600_30(self, env):
+        conf = H2Conf(env)
+        conf.add_vhost_cgi(h2proxy_self=True)
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?status=500")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 500
+        url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?error=timeout")
+        r = env.curl_get(url)
+        assert r.exit_code == 0, r
+        assert r.response['status'] == 408
+
+    # produce an error during response body
+    def test_h2_600_31(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        conf = H2Conf(env)
+        conf.add_vhost_cgi(h2proxy_self=True)
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?body_error=timeout")
+        r = env.curl_get(url)
+        # depending on when the error is detect in proxying, if may RST the
+        # stream (exit_code != 0) or give a 503 response.
+        if r.exit_code == 0:
+            assert r.response['status'] == 503
+
+    # produce an error, fail to generate an error bucket
+    def test_h2_600_32(self, env, repeat):
+        pytest.skip("needs fix in core protocol handling")
+        conf = H2Conf(env)
+        conf.add_vhost_cgi(h2proxy_self=True)
+        conf.install()
+        assert env.apache_restart() == 0
+        url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?body_error=timeout&error_bucket=0")
+        r = env.curl_get(url)
+        # depending on when the error is detect in proxying, if may RST the
+        # stream (exit_code != 0) or give a 503 response.
+        if r.exit_code == 0:
+            assert r.response['status'] == 503
@@ -157,8 +157,6 @@ class HttpdConf(object):
         self.start_vhost(domains=[domain, f"cgi-alias.{self.env.http_tld}"],
                          port=self.env.https_port, doc_root="htdocs/cgi")
         self.add_proxies("cgi", proxy_self=proxy_self, h2proxy_self=h2proxy_self)
-        if domain in self._extras:
-            self.add(self._extras[domain])
         self.end_vhost()
         self.start_vhost(domains=[domain, f"cgi-alias.{self.env.http_tld}"],
                          port=self.env.http_port, doc_root="htdocs/cgi")
@@ -25,6 +25,7 @@ gen_dir = @abs_srcdir@/../gen
 http_port = 5002
 https_port = 5001
 proxy_port = 5003
+http_port2 = 5004
 http_tld = tests.httpd.apache.org
 test_dir = @abs_srcdir@
 test_src_dir = @abs_srcdir@
@@ -244,6 +244,7 @@ class HttpdTestEnv:
         self._h2load = 'h2load'
 
         self._http_port = int(self.config.get('test', 'http_port'))
+        self._http_port2 = int(self.config.get('test', 'http_port2'))
         self._https_port = int(self.config.get('test', 'https_port'))
         self._proxy_port = int(self.config.get('test', 'proxy_port'))
         self._http_tld = self.config.get('test', 'http_tld')
@@ -345,6 +346,10 @@ class HttpdTestEnv:
     def http_port(self) -> int:
         return self._http_port
 
+    @property
+    def http_port2(self) -> int:
+        return self._http_port2
+
     @property
     def https_port(self) -> int:
         return self._https_port
@@ -122,6 +122,7 @@ class Nghttp:
                     response["previous"] = prev
                 response[hkey] = s["header"]
                 s["header"] = {}
+                body = ''
                 continue
 
             m = re.match(r'(.*)\[.*] recv DATA frame <length=(\d+), .*stream_id=(\d+)>', l)
@@ -9,21 +9,21 @@ class ExecResult:
                  stdout: bytes, stderr: bytes = None, duration: timedelta = None):
         self._args = args
         self._exit_code = exit_code
-        self._raw = stdout if stdout else b''
-        self._stdout = stdout.decode() if stdout is not None else ""
-        self._stderr = stderr.decode() if stderr is not None else ""
+        self._stdout = stdout if stdout is not None else b''
+        self._stderr = stderr if stderr is not None else b''
         self._duration = duration if duration is not None else timedelta()
         self._response = None
         self._results = {}
         self._assets = []
         # noinspection PyBroadException
         try:
-            self._json_out = json.loads(self._stdout)
+            out = self._stdout.decode()
+            self._json_out = json.loads(out)
         except:
             self._json_out = None
 
     def __repr__(self):
-        return f"ExecResult[code={self.exit_code}, args={self._args}, stdout={self.stdout}, stderr={self.stderr}]"
+        return f"ExecResult[code={self.exit_code}, args={self._args}, stdout={self._stdout}, stderr={self._stderr}]"
 
     @property
     def exit_code(self) -> int:
@@ -35,11 +35,11 @@ class ExecResult:
 
     @property
     def outraw(self) -> bytes:
-        return self._raw
+        return self._stdout
 
     @property
     def stdout(self) -> str:
-        return self._stdout
+        return self._stdout.decode()
 
     @property
     def json(self) -> Optional[Dict]:
@@ -48,7 +48,7 @@ class ExecResult:
 
     @property
     def stderr(self) -> str:
-        return self._stderr
+        return self._stderr.decode()
 
     @property
     def duration(self) -> timedelta:
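Note: after this change ExecResult keeps stdout/stderr as raw bytes and decodes only on access, so binary tool output no longer breaks construction. A small usage sketch (illustrative; the constructor keywords assume the signature shown above):

    result = ExecResult(args=['true'], exit_code=0, stdout=b'{"alive": true}', stderr=b'')
    assert isinstance(result.outraw, bytes)        # raw bytes are preserved
    assert result.stdout == '{"alive": true}'      # decoding happens only when asked for text
    assert result.json == {"alive": True}          # JSON is still parsed from the decoded output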