From b93c96a6ba89a360710a81974a2a597f80fbd82c Mon Sep 17 00:00:00 2001 From: Stefan Eissing Date: Tue, 5 Jan 2016 13:25:48 +0000 Subject: [PATCH] dynamic allocation of transfer file handles used to pass buckets to master connection git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1723069 13f79535-47bb-0310-9956-ffa450edef68 --- modules/http2/h2_config.c | 29 +---------------------- modules/http2/h2_conn.c | 28 +++++++++++++++++++---- modules/http2/h2_io.c | 10 ++++---- modules/http2/h2_io.h | 2 +- modules/http2/h2_mplx.c | 47 ++++++++++++++++++++++++++++++-------- modules/http2/h2_mplx.h | 3 ++- modules/http2/h2_private.h | 3 +++ modules/http2/h2_session.c | 2 -- modules/http2/h2_stream.c | 2 +- modules/http2/h2_util.c | 14 ++++++------ modules/http2/h2_util.h | 2 +- modules/http2/h2_version.h | 4 ++-- modules/http2/h2_workers.c | 43 +++++++++++++++++++++++++++++++++- modules/http2/h2_workers.h | 41 +++++++++++++++++++++++++++++---- 14 files changed, 162 insertions(+), 68 deletions(-) diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c index 1458f5a475..ecfe949dc9 100644 --- a/modules/http2/h2_config.c +++ b/modules/http2/h2_config.c @@ -64,31 +64,9 @@ static h2_config defconf = { 0, /* stream timeout */ }; -static int files_per_session; - void h2_config_init(apr_pool_t *pool) { - /* Determine a good default for this platform and mpm? - * TODO: not sure how APR wants to hand out this piece of - * information. - */ - int max_files = 256; - int conn_threads = 1; - int tx_files = max_files / 4; - (void)pool; - ap_mpm_query(AP_MPMQ_MAX_THREADS, &conn_threads); - switch (h2_conn_mpm_type()) { - case H2_MPM_PREFORK: - case H2_MPM_WORKER: - case H2_MPM_EVENT: - /* allow that many transfer open files per mplx */ - files_per_session = (tx_files / conn_threads); - break; - default: - /* don't know anything about it, stay safe */ - break; - } } static void *h2_config_create(apr_pool_t *pool, @@ -178,7 +156,6 @@ int h2_config_geti(const h2_config *conf, h2_config_var_t var) apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) { - int n; switch(var) { case H2_CONF_MAX_STREAMS: return H2_CONFIG_GET(conf, &defconf, h2_max_streams); @@ -203,11 +180,7 @@ apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) case H2_CONF_DIRECT: return H2_CONFIG_GET(conf, &defconf, h2_direct); case H2_CONF_SESSION_FILES: - n = H2_CONFIG_GET(conf, &defconf, session_extra_files); - if (n < 0) { - n = files_per_session; - } - return n; + return H2_CONFIG_GET(conf, &defconf, session_extra_files); case H2_CONF_TLS_WARMUP_SIZE: return H2_CONFIG_GET(conf, &defconf, tls_warmup_size); case H2_CONF_TLS_COOLDOWN_SECS: diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c index d0b5ada215..0f99076a1c 100644 --- a/modules/http2/h2_conn.c +++ b/modules/http2/h2_conn.c @@ -78,7 +78,7 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) { const h2_config *config = h2_config_sget(s); apr_status_t status = APR_SUCCESS; - int minw, maxw; + int minw, maxw, max_tx_handles, n; int max_threads_per_child = 0; int idle_secs = 0; @@ -105,11 +105,29 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) maxw = minw; } - ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, - "h2_workers: min=%d max=%d, mthrpchild=%d", - minw, maxw, max_threads_per_child); + /* How many file handles is it safe to use for transfer + * to the master connection to be streamed out? + * Is there a portable APR rlimit on NOFILES? Have not + * found it. 
And if, how many of those would we set aside? + * This leads all into a process wide handle allocation strategy + * which ultimately would limit the number of accepted connections + * with the assumption of implicitly reserving n handles for every + * connection and requiring modules with excessive needs to allocate + * from a central pool. + */ + n = h2_config_geti(config, H2_CONF_SESSION_FILES); + if (n < 0) { + max_tx_handles = 256; + } + else { + max_tx_handles = maxw * n; + } + + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, + "h2_workers: min=%d max=%d, mthrpchild=%d, tx_files=%d", + minw, maxw, max_threads_per_child, max_tx_handles); + workers = h2_workers_create(s, pool, minw, maxw, max_tx_handles); - workers = h2_workers_create(s, pool, minw, maxw); idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS); h2_workers_set_max_idle_secs(workers, idle_secs); diff --git a/modules/http2/h2_io.c b/modules/http2/h2_io.c index 5a2ad8f8b6..092a37c391 100644 --- a/modules/http2/h2_io.c +++ b/modules/http2/h2_io.c @@ -355,7 +355,7 @@ static void process_trailers(h2_io *io, apr_table_t *trailers) apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, apr_size_t maxlen, apr_table_t *trailers, - int *pfile_handles_allowed) + apr_size_t *pfile_buckets_allowed) { apr_status_t status; int start_allowed; @@ -397,12 +397,12 @@ apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, * many open files already buffered. Otherwise we will run out of * file handles. */ - start_allowed = *pfile_handles_allowed; - status = h2_util_move(io->bbout, bb, maxlen, pfile_handles_allowed, + start_allowed = *pfile_buckets_allowed; + status = h2_util_move(io->bbout, bb, maxlen, pfile_buckets_allowed, "h2_io_out_write"); /* track # file buckets moved into our pool */ - if (start_allowed != *pfile_handles_allowed) { - io->files_handles_owned += (start_allowed - *pfile_handles_allowed); + if (start_allowed != *pfile_buckets_allowed) { + io->files_handles_owned += (start_allowed - *pfile_buckets_allowed); } return status; } diff --git a/modules/http2/h2_io.h b/modules/http2/h2_io.h index 647d30431e..fc09cef666 100644 --- a/modules/http2/h2_io.h +++ b/modules/http2/h2_io.h @@ -158,7 +158,7 @@ apr_status_t h2_io_out_read_to(h2_io *io, apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, apr_size_t maxlen, apr_table_t *trailers, - int *pfile_buckets_allowed); + apr_size_t *pfile_buckets_allowed); /** * Closes the input. 
After existing data has been read, APR_EOF will diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c index 53da5c5633..28c1d10159 100644 --- a/modules/http2/h2_mplx.c +++ b/modules/http2/h2_mplx.c @@ -72,6 +72,28 @@ static int is_aborted(h2_mplx *m, apr_status_t *pstatus) static void have_out_data_for(h2_mplx *m, int stream_id); +static void check_tx_reservation(h2_mplx *m) +{ + if (m->tx_handles_reserved == 0) { + m->tx_handles_reserved += h2_workers_tx_reserve(m->workers, + H2MIN(m->tx_chunk_size, h2_io_set_size(m->stream_ios))); + } +} + +static void check_tx_free(h2_mplx *m) +{ + if (m->tx_handles_reserved > m->tx_chunk_size) { + apr_size_t count = m->tx_handles_reserved - m->tx_chunk_size; + m->tx_handles_reserved = m->tx_chunk_size; + h2_workers_tx_free(m->workers, count); + } + else if (m->tx_handles_reserved + && (!m->stream_ios || h2_io_set_is_empty(m->stream_ios))) { + h2_workers_tx_free(m->workers, m->tx_handles_reserved); + m->tx_handles_reserved = 0; + } +} + static void h2_mplx_destroy(h2_mplx *m) { AP_DEBUG_ASSERT(m); @@ -88,6 +110,8 @@ static void h2_mplx_destroy(h2_mplx *m) m->stream_ios = NULL; } + check_tx_free(m); + if (m->pool) { apr_pool_destroy(m->pool); } @@ -142,7 +166,9 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); m->workers = workers; - m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES); + m->tx_handles_reserved = 0; + m->tx_chunk_size = 4; + m->stream_timeout_secs = h2_config_geti(conf, H2_CONF_STREAM_TIMEOUT_SECS); } return m; @@ -164,11 +190,6 @@ static void workers_register(h2_mplx *m) h2_workers_register(m->workers, m); } -static void workers_unregister(h2_mplx *m) -{ - h2_workers_unregister(m->workers, m); -} - static int io_process_events(h2_mplx *m, h2_io *io) { if (io->input_consumed && m->input_consumed) { @@ -195,7 +216,8 @@ static void io_destroy(h2_mplx *m, h2_io *io, int events) /* The pool is cleared/destroyed which also closes all * allocated file handles. Give this count back to our * file handle pool. */ - m->file_handles_allowed += io->files_handles_owned; + m->tx_handles_reserved += io->files_handles_owned; + h2_io_set_remove(m->stream_ios, io); h2_io_set_remove(m->ready_ios, io); h2_io_destroy(io); @@ -207,6 +229,8 @@ static void io_destroy(h2_mplx *m, h2_io *io, int events) } m->spare_pool = pool; } + + check_tx_free(m); } static int io_stream_done(h2_mplx *m, h2_io *io, int rst_error) @@ -235,7 +259,7 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) { apr_status_t status; - workers_unregister(m); + h2_workers_unregister(m->workers, m); status = apr_thread_mutex_lock(m->lock); if (APR_SUCCESS == status) { int i, wait_secs = 5; @@ -613,7 +637,7 @@ static apr_status_t out_write(h2_mplx *m, h2_io *io, && !is_aborted(m, &status)) { status = h2_io_out_write(io, bb, m->stream_max_mem, trailers, - &m->file_handles_allowed); + &m->tx_handles_reserved); /* Wait for data to drain until there is room again or * stream timeout expires */ h2_io_signal_init(io, H2_IO_WRITE, m->stream_timeout_secs, iowait); @@ -654,6 +678,11 @@ static apr_status_t out_open(h2_mplx *m, int stream_id, h2_response *response, h2_io_set_response(io, response); h2_io_set_add(m->ready_ios, io); + if (response && response->http_status < 300) { + /* we might see some file buckets in the output, see + * if we have enough handles reserved. 
*/ + check_tx_reservation(m); + } if (bb) { status = out_write(m, io, f, bb, response->trailers, iowait); } diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h index 419f3f0ef9..970b7265c3 100644 --- a/modules/http2/h2_mplx.h +++ b/modules/http2/h2_mplx.h @@ -80,7 +80,8 @@ struct h2_mplx { apr_pool_t *spare_pool; /* spare pool, ready for next io */ struct h2_workers *workers; - int file_handles_allowed; + apr_size_t tx_handles_reserved; + apr_size_t tx_chunk_size; h2_mplx_consumed_cb *input_consumed; void *input_consumed_ctx; diff --git a/modules/http2/h2_private.h b/modules/http2/h2_private.h index 0ffaf50dc8..0ad02d3b71 100644 --- a/modules/http2/h2_private.h +++ b/modules/http2/h2_private.h @@ -35,4 +35,7 @@ APLOG_USE_MODULE(http2); #define H2_ALEN(a) (sizeof(a)/sizeof((a)[0])) +#define H2MAX(x,y) ((x) > (y) ? (x) : (y)) +#define H2MIN(x,y) ((x) < (y) ? (x) : (y)) + #endif diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c index 1dea34b934..d12b110792 100644 --- a/modules/http2/h2_session.c +++ b/modules/http2/h2_session.c @@ -44,8 +44,6 @@ #include "h2_version.h" #include "h2_workers.h" -#define H2MAX(x,y) ((x) > (y) ? (x) : (y)) -#define H2MIN(x,y) ((x) < (y) ? (x) : (y)) static int frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen); diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c index 95c5b4a157..1777c990f4 100644 --- a/modules/http2/h2_stream.c +++ b/modules/http2/h2_stream.c @@ -219,7 +219,7 @@ apr_status_t h2_stream_set_response(h2_stream *stream, h2_response *response, stream->response = response; if (bb && !APR_BRIGADE_EMPTY(bb)) { - int move_all = INT_MAX; + apr_size_t move_all = INT_MAX; /* we can move file handles from h2_mplx into this h2_stream as many * as we want, since the lifetimes are the same and we are not freeing * the ones in h2_mplx->io before this stream is done. */ diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c index 1ab71fb3c7..68d12324ac 100644 --- a/modules/http2/h2_util.c +++ b/modules/http2/h2_util.c @@ -211,7 +211,7 @@ static const int FILE_MOVE = 1; static apr_status_t last_not_included(apr_bucket_brigade *bb, apr_off_t maxlen, int same_alloc, - int *pfile_buckets_allowed, + apr_size_t *pfile_buckets_allowed, apr_bucket **pend) { apr_bucket *b; @@ -269,7 +269,7 @@ static apr_status_t last_not_included(apr_bucket_brigade *bb, #define LOG_LEVEL APLOG_INFO apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, - apr_off_t maxlen, int *pfile_handles_allowed, + apr_off_t maxlen, apr_size_t *pfile_buckets_allowed, const char *msg) { apr_status_t status = APR_SUCCESS; @@ -281,14 +281,14 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, || to->p == from->p); if (!FILE_MOVE) { - pfile_handles_allowed = NULL; + pfile_buckets_allowed = NULL; } if (!APR_BRIGADE_EMPTY(from)) { apr_bucket *b, *end; status = last_not_included(from, maxlen, same_alloc, - pfile_handles_allowed, &end); + pfile_buckets_allowed, &end); if (status != APR_SUCCESS) { return status; } @@ -332,8 +332,8 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, /* ignore */ } } - else if (pfile_handles_allowed - && *pfile_handles_allowed > 0 + else if (pfile_buckets_allowed + && *pfile_buckets_allowed > 0 && APR_BUCKET_IS_FILE(b)) { /* We do not want to read files when passing buckets, if * we can avoid it. 
However, what we've come up so far @@ -362,7 +362,7 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, } apr_brigade_insert_file(to, fd, b->start, b->length, to->p); - --(*pfile_handles_allowed); + --(*pfile_buckets_allowed); } else { const char *data; diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h index 0a3790d030..10ad7d6b20 100644 --- a/modules/http2/h2_util.h +++ b/modules/http2/h2_util.h @@ -97,7 +97,7 @@ h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p, * @param msg message for use in logging */ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, - apr_off_t maxlen, int *pfile_buckets_allowed, + apr_off_t maxlen, apr_size_t *pfile_buckets_allowed, const char *msg); /** diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h index ebdb24430c..829cada1d9 100644 --- a/modules/http2/h2_version.h +++ b/modules/http2/h2_version.h @@ -26,7 +26,7 @@ * @macro * Version number of the http2 module as c string */ -#define MOD_HTTP2_VERSION "1.0.17" +#define MOD_HTTP2_VERSION "1.1.0-DEV" /** * @macro @@ -34,7 +34,7 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. */ -#define MOD_HTTP2_VERSION_NUM 0x010011 +#define MOD_HTTP2_VERSION_NUM 0x010100 #endif /* mod_h2_h2_version_h */ diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c index 7542daa699..f0020c81eb 100644 --- a/modules/http2/h2_workers.c +++ b/modules/http2/h2_workers.c @@ -30,6 +30,7 @@ #include "h2_worker.h" #include "h2_workers.h" + static int in_list(h2_workers *workers, h2_mplx *m) { h2_mplx *e; @@ -222,7 +223,8 @@ static apr_status_t h2_workers_start(h2_workers *workers) } h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, - int min_size, int max_size) + int min_size, int max_size, + apr_size_t max_tx_handles) { apr_status_t status; h2_workers *workers; @@ -245,6 +247,9 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, workers->max_size = max_size; apr_atomic_set32(&workers->max_idle_secs, 10); + workers->max_tx_handles = max_tx_handles; + workers->spare_tx_handles = workers->max_tx_handles; + apr_threadattr_create(&workers->thread_attr, workers->pool); if (ap_thread_stacksize != 0) { apr_threadattr_stacksize_set(workers->thread_attr, @@ -265,6 +270,12 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, status = apr_thread_cond_create(&workers->mplx_added, workers->pool); } + if (status == APR_SUCCESS) { + status = apr_thread_mutex_create(&workers->tx_lock, + APR_THREAD_MUTEX_DEFAULT, + workers->pool); + } + if (status == APR_SUCCESS) { status = h2_workers_start(workers); } @@ -363,3 +374,33 @@ void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs) } apr_atomic_set32(&workers->max_idle_secs, idle_secs); } + +apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count) +{ + apr_status_t status = apr_thread_mutex_lock(workers->tx_lock); + if (status == APR_SUCCESS) { + count = H2MIN(workers->spare_tx_handles, count); + workers->spare_tx_handles -= count; + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s, + "h2_workers: reserved %d tx handles, %d/%d left", + (int)count, (int)workers->spare_tx_handles, + (int)workers->max_tx_handles); + apr_thread_mutex_unlock(workers->tx_lock); + return count; + } + return 0; +} + +void h2_workers_tx_free(h2_workers *workers, apr_size_t count) +{ + apr_status_t status = apr_thread_mutex_lock(workers->tx_lock); + if 
(status == APR_SUCCESS) { + workers->spare_tx_handles += count; + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s, + "h2_workers: freed %d tx handles, %d/%d left", + (int)count, (int)workers->spare_tx_handles, + (int)workers->max_tx_handles); + apr_thread_mutex_unlock(workers->tx_lock); + } +} + diff --git a/modules/http2/h2_workers.h b/modules/http2/h2_workers.h index 16ec4443b7..7ec3881310 100644 --- a/modules/http2/h2_workers.h +++ b/modules/http2/h2_workers.h @@ -39,6 +39,9 @@ struct h2_workers { int min_size; int max_size; + apr_size_t max_tx_handles; + apr_size_t spare_tx_handles; + unsigned int aborted : 1; apr_threadattr_t *thread_attr; @@ -53,6 +56,8 @@ struct h2_workers { struct apr_thread_mutex_t *lock; struct apr_thread_cond_t *mplx_added; + + struct apr_thread_mutex_t *tx_lock; }; @@ -60,7 +65,8 @@ struct h2_workers { * threads. */ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool, - int min_size, int max_size); + int min_size, int max_size, + apr_size_t max_tx_handles); /* Destroy the worker pool and all its threads. */ @@ -71,14 +77,12 @@ void h2_workers_destroy(h2_workers *workers); * out of tasks, it will be automatically be unregistered. Should * new tasks arrive, it needs to be registered again. */ -apr_status_t h2_workers_register(h2_workers *workers, - struct h2_mplx *m); +apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m); /** * Remove a h2_mplx from the worker registry. */ -apr_status_t h2_workers_unregister(h2_workers *workers, - struct h2_mplx *m); +apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m); /** * Set the amount of seconds a h2_worker should wait for new tasks @@ -87,4 +91,31 @@ apr_status_t h2_workers_unregister(h2_workers *workers, */ void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs); +/** + * Reservation of file handles available for transfer between workers + * and master connections. + * + * When handling output from request processing, file handles are often + * encountered when static files are served. The most efficient way is then + * to forward the handle itself to the master connection where it can be + * read or sendfile'd to the client. But file handles are a scarce resource, + * so there needs to be a limit on how many handles are transferred this way. + * + * h2_workers keeps track of the number of reserved handles and observes a + * configurable maximum value. + * + * @param workers the workers instance + * @param count how many handles the caller wishes to reserve + * @return the number of reserved handles, may be 0. + */ +apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count); + +/** + * Return a number of reserved file handles back to the pool. The number + * overall may not exceed the numbers reserved. + * @param workers the workers instance + * @param count how many handles are returned to the pool + */ +void h2_workers_tx_free(h2_workers *workers, apr_size_t count); + #endif /* defined(__mod_h2__h2_workers__) */
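
Taken together, the new API surface works like this: h2_conn_child_init() sizes a process-wide budget of transfer handles (maxw * H2SessionExtraFiles, or 256 when the directive is unset) and hands it to h2_workers_create(); each h2_mplx then borrows from that budget in tx_chunk_size increments via h2_workers_tx_reserve() and gives handles back through h2_workers_tx_free() once the stream pools owning the file buckets are destroyed. The following is an illustrative, stand-alone sketch of that lifecycle only, not part of the patch; the demo_* name, the worker counts and the per-worker file figure are invented for the example.

/* Illustrative sketch of the tx-handle budget introduced above.
 * Not part of the patch; demo values are made up. */
#include <httpd.h>
#include <apr_pools.h>

#include "h2_workers.h"

static void demo_tx_budget(server_rec *s, apr_pool_t *pool)
{
    int minw = 4, maxw = 16;        /* worker thread bounds for this child */
    int files_per_worker = 2;       /* e.g. taken from H2SessionExtraFiles */
    apr_size_t budget, reserved;
    h2_workers *workers;

    /* process-wide budget of file handles available for transfer */
    budget = (apr_size_t)maxw * files_per_worker;

    /* one worker pool per child process, now carrying the tx budget */
    workers = h2_workers_create(s, pool, minw, maxw, budget);

    /* an h2_mplx-like caller reserves a small chunk before it starts
     * passing file buckets through to the master connection; the return
     * value may be smaller than requested, or 0 when the budget is
     * exhausted, in which case file buckets are read into memory instead */
    reserved = h2_workers_tx_reserve(workers, 4);

    /* ... move up to `reserved` file buckets without copying them ... */

    /* handles go back to the shared pool once the stream pools that own
     * the file buckets have been destroyed */
    if (reserved) {
        h2_workers_tx_free(workers, reserved);
    }

    h2_workers_destroy(workers);
}

In the module itself this reserve/free pairing is driven by the new check_tx_reservation()/check_tx_free() helpers in h2_mplx.c, so individual streams never touch the workers' tx_lock directly.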