
Revert r1035504, this was the wrong way to do it.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1035576 13f79535-47bb-0310-9956-ffa450edef68
Graham Leggett, 2010-11-16 10:27:26 +00:00
parent 38437740bb
commit 807be6db70
4 changed files with 86 additions and 18 deletions

include/ap_mmn.h

@@ -284,13 +284,12 @@
* 20101106.1 (2.3.9-dev) Add ap_pool_cleanup_set_null() generic cleanup
* 20101106.2 (2.3.9-dev) Add suexec_disabled_reason field to ap_unixd_config
* 20101113.0 (2.3.9-dev) Add source address to mod_proxy.h
- * 20101116.0 (2.3.9-dev) Remove ap_proxy_buckets_lifetime_transform()
*/
#define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */
#ifndef MODULE_MAGIC_NUMBER_MAJOR
-#define MODULE_MAGIC_NUMBER_MAJOR 20101116
+#define MODULE_MAGIC_NUMBER_MAJOR 20101113
#endif
#define MODULE_MAGIC_NUMBER_MINOR 0 /* 0...n */
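The two #define lines above show the major MMN dropping back from 20101116 to 20101113, so the interface change advertised by the removed 20101116.0 entry is no longer promised to third-party modules. As a small illustration (not part of this commit) of how external code typically consumes this number, using the AP_MODULE_MAGIC_AT_LEAST macro that ap_mmn.h already provides; the PROXY_HAS_20101113_API name is purely illustrative:

    #include "ap_mmn.h"

    /* Compile-time guard: true when the httpd headers we build against
     * carry a major MMN of 20101113 or newer (the value restored here).
     */
    #if AP_MODULE_MAGIC_AT_LEAST(20101113, 0)
    #define PROXY_HAS_20101113_API 1
    #else
    #define PROXY_HAS_20101113_API 0
    #endif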

modules/proxy/mod_proxy.h

@@ -778,6 +778,28 @@ PROXY_DECLARE(int) ap_proxy_connection_create(const char *proxy_function,
PROXY_DECLARE(void) ap_proxy_backend_broke(request_rec *r,
apr_bucket_brigade *brigade);
+/**
+ * Transform buckets from one bucket allocator to another one by creating a
+ * transient bucket for each data bucket and letting it use the data read from
+ * the old bucket. Metabuckets are transformed by just recreating them.
+ * Attention: Currently only the following bucket types are handled:
+ *
+ * All data buckets
+ * FLUSH
+ * EOS
+ *
+ * If another bucket type is found, its type is logged as a debug message
+ * and APR_EGENERAL is returned.
+ * @param r current request record of client request. Only used for logging
+ *          purposes
+ * @param from the brigade that contains the buckets to transform
+ * @param to the brigade that will receive the transformed buckets
+ * @return APR_SUCCESS if all buckets could be transformed, APR_EGENERAL
+ *         otherwise
+ */
+PROXY_DECLARE(apr_status_t)
+ap_proxy_buckets_lifetime_transform(request_rec *r, apr_bucket_brigade *from,
+                                    apr_bucket_brigade *to);
/**
* Return a hash based on the passed string
* @param str string to produce hash from
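The ap_proxy_buckets_lifetime_transform() declaration restored above is what mod_proxy_http.c (next file) calls for every chunk of the response. A minimal sketch of that calling pattern, assuming the usual proxy headers; the helper name forward_one_chunk is illustrative and not part of httpd:

    #include "httpd.h"
    #include "util_filter.h"
    #include "mod_proxy.h"

    /* Illustrative helper: re-create the backend buckets against the client
     * connection's allocator, push them down the client's output filters,
     * then reset both brigades.  The transient buckets in pass_bb borrow
     * data still owned by bb, so bb is only cleaned up after the pass.
     */
    static apr_status_t forward_one_chunk(request_rec *r,
                                          apr_bucket_brigade *bb,
                                          apr_bucket_brigade *pass_bb)
    {
        apr_status_t rv;

        rv = ap_proxy_buckets_lifetime_transform(r, bb, pass_bb);
        if (rv != APR_SUCCESS) {
            return rv;                 /* an unhandled bucket type was seen */
        }

        rv = ap_pass_brigade(r->output_filters, pass_bb);

        apr_brigade_cleanup(bb);
        apr_brigade_cleanup(pass_bb);
        return rv;
    }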

modules/proxy/mod_proxy_http.c

@@ -1394,6 +1394,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
request_rec *rp;
apr_bucket *e;
apr_bucket_brigade *bb, *tmp_bb;
+apr_bucket_brigade *pass_bb;
int len, backasswards;
int interim_response = 0; /* non-zero whilst interim 1xx responses
* are being read. */
@@ -1421,6 +1422,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
&& !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0")));
bb = apr_brigade_create(p, c->bucket_alloc);
+pass_bb = apr_brigade_create(p, c->bucket_alloc);
/* Setup for 100-Continue timeout if appropriate */
if (do_100_continue) {
@@ -1898,23 +1900,16 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
break;
}
+/* Switch the allocator lifetime of the buckets */
+ap_proxy_buckets_lifetime_transform(r, bb, pass_bb);
+apr_brigade_cleanup(bb);
/* found the last brigade? */
-if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) {
/* signal that we must leave */
finish = TRUE;
-/* the brigade may contain transient buckets that contain
- * data that lives only as long as the backend connection.
- * Force a setaside so these transient buckets become heap
- * buckets that live as long as the request.
- */
-for (e = APR_BRIGADE_FIRST(bb); e
-     != APR_BRIGADE_SENTINEL(bb); e
-     = APR_BUCKET_NEXT(e)) {
-    apr_bucket_setaside(e, r->pool);
-}
/* make sure we release the backend connection as soon
* as we know we are done, so that the backend isn't
* left waiting for a slow client to eventually
@@ -1926,7 +1921,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
}
/* try send what we read */
-if (ap_pass_brigade(r->output_filters, bb) != APR_SUCCESS
+if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS
|| c->aborted) {
/* Ack! Phbtt! Die! User aborted! */
backend->close = 1; /* this causes socket close below */
@@ -1934,7 +1929,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
}
/* make sure we always clean up after ourselves */
-apr_brigade_cleanup(bb);
+apr_brigade_cleanup(pass_bb);
} while (!finish);
}
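The hunk above swaps two strategies for keeping response data valid once the backend connection goes away: the reverted r1035504 code set every bucket aside into r->pool, while the restored code re-creates the buckets on the client side in pass_bb and sends that brigade instead. For comparison, a condensed sketch of the setaside variant being removed (the helper name is illustrative):

    /* Illustrative only: the approach this revert removes.  Setting a
     * transient bucket aside into a pool copies its data there, so the
     * data outlives the backend connection that produced it.
     */
    static void setaside_into_pool(apr_bucket_brigade *bb, apr_pool_t *pool)
    {
        apr_bucket *e;

        for (e = APR_BRIGADE_FIRST(bb);
             e != APR_BRIGADE_SENTINEL(bb);
             e = APR_BUCKET_NEXT(e)) {
            apr_bucket_setaside(e, pool);
        }
    }

The commit message only says this was "the wrong way to do it"; the proxy_util.c hunk below suggests why: r1035504 had also made the reusable backend connection share the client connection's bucket allocator (the c->bucket_alloc line removed below), tying a pooled object to an allocator that vanishes with the client connection, and the revert restores a dedicated allocator built from conn->scpool.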

modules/proxy/proxy_util.c

@@ -2643,17 +2643,19 @@ PROXY_DECLARE(int) ap_proxy_connection_create(const char *proxy_function,
apr_sockaddr_t *backend_addr = conn->addr;
int rc;
apr_interval_time_t current_timeout;
+apr_bucket_alloc_t *bucket_alloc;
if (conn->connection) {
return OK;
}
+bucket_alloc = apr_bucket_alloc_create(conn->scpool);
/*
* The socket is now open, create a new backend server connection
*/
conn->connection = ap_run_create_connection(conn->scpool, s, conn->sock,
0, NULL,
-c->bucket_alloc);
+bucket_alloc);
if (!conn->connection) {
/*
@@ -2740,6 +2742,56 @@ PROXY_DECLARE(void) ap_proxy_backend_broke(request_rec *r,
APR_BRIGADE_INSERT_TAIL(brigade, e);
}
+/*
+ * Transform buckets from one bucket allocator to another one by creating a
+ * transient bucket for each data bucket and letting it use the data read from
+ * the old bucket. Metabuckets are transformed by just recreating them.
+ * Attention: Currently only the following bucket types are handled:
+ *
+ * All data buckets
+ * FLUSH
+ * EOS
+ *
+ * If another bucket type is found, its type is logged as a debug message
+ * and APR_EGENERAL is returned.
+ */
+PROXY_DECLARE(apr_status_t)
+ap_proxy_buckets_lifetime_transform(request_rec *r, apr_bucket_brigade *from,
+                                    apr_bucket_brigade *to)
+{
+    apr_bucket *e;
+    apr_bucket *new;
+    const char *data;
+    apr_size_t bytes;
+    apr_status_t rv = APR_SUCCESS;
+
+    apr_brigade_cleanup(to);
+    for (e = APR_BRIGADE_FIRST(from);
+         e != APR_BRIGADE_SENTINEL(from);
+         e = APR_BUCKET_NEXT(e)) {
+        if (!APR_BUCKET_IS_METADATA(e)) {
+            apr_bucket_read(e, &data, &bytes, APR_BLOCK_READ);
+            new = apr_bucket_transient_create(data, bytes, r->connection->bucket_alloc);
+            APR_BRIGADE_INSERT_TAIL(to, new);
+        }
+        else if (APR_BUCKET_IS_FLUSH(e)) {
+            new = apr_bucket_flush_create(r->connection->bucket_alloc);
+            APR_BRIGADE_INSERT_TAIL(to, new);
+        }
+        else if (APR_BUCKET_IS_EOS(e)) {
+            new = apr_bucket_eos_create(r->connection->bucket_alloc);
+            APR_BRIGADE_INSERT_TAIL(to, new);
+        }
+        else {
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
+                          "proxy: Unhandled bucket type of type %s in"
+                          " ap_proxy_buckets_lifetime_transform", e->type->name);
+            rv = APR_EGENERAL;
+        }
+    }
+    return rv;
+}
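Since the function above depends only on APR's bucket API, its behaviour is easy to reproduce outside httpd. A self-contained sketch (plain APR/APR-util, no httpd code; error checking omitted) that moves buckets between two allocators in the same way:

    #include <stdio.h>
    #include <string.h>
    #include <apr_general.h>
    #include <apr_buckets.h>

    int main(void)
    {
        apr_pool_t *pool;
        apr_bucket_alloc_t *from_alloc, *to_alloc;
        apr_bucket_brigade *from, *to;
        apr_bucket *e;

        apr_initialize();
        apr_pool_create(&pool, NULL);

        from_alloc = apr_bucket_alloc_create(pool);  /* stands in for the backend side */
        to_alloc = apr_bucket_alloc_create(pool);    /* stands in for the client side  */
        from = apr_brigade_create(pool, from_alloc);
        to = apr_brigade_create(pool, to_alloc);

        /* one data bucket plus EOS on the source allocator */
        APR_BRIGADE_INSERT_TAIL(from,
            apr_bucket_transient_create("hello", strlen("hello"), from_alloc));
        APR_BRIGADE_INSERT_TAIL(from, apr_bucket_eos_create(from_alloc));

        /* same shape as ap_proxy_buckets_lifetime_transform() above */
        for (e = APR_BRIGADE_FIRST(from);
             e != APR_BRIGADE_SENTINEL(from);
             e = APR_BUCKET_NEXT(e)) {
            if (!APR_BUCKET_IS_METADATA(e)) {
                const char *data;
                apr_size_t bytes;
                apr_bucket_read(e, &data, &bytes, APR_BLOCK_READ);
                APR_BRIGADE_INSERT_TAIL(to,
                    apr_bucket_transient_create(data, bytes, to_alloc));
            }
            else if (APR_BUCKET_IS_FLUSH(e)) {
                APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to_alloc));
            }
            else if (APR_BUCKET_IS_EOS(e)) {
                APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to_alloc));
            }
        }

        printf("destination brigade %s with EOS\n",
               APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(to)) ? "ends" : "does not end");

        apr_brigade_destroy(to);
        apr_brigade_destroy(from);
        apr_terminate();
        return 0;
    }

Note that the destination buckets only reference the source data; in httpd the source brigade therefore has to stay intact until the transformed brigade has been passed on.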
/*
* Provide a string hashing function for the proxy.
* We offer 2 methods: one is the APR model but we