* server/core.c (default_handler): Use apr_brigade_insert_file() to
  append the file to the brigade.

* server/protocol.c (ap_send_fd), modules/proxy/mod_proxy_http.c
  (spool_reqbody_cl), modules/cache/mod_mem_cache.c (recall_body),
  modules/cache/mod_disk_cache.c (recall_body),
  modules/mappers/mod_negotiation.c (handle_map_file),
  modules/generators/mod_asis.c (asis_handler),
  modules/dav/fs/repos.c [DEBUG_GET_HANDLER] (dav_fs_deliver),
  modules/arch/win32/mod_isapi.c (ServerSupportFunction): Likewise.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@414238 13f79535-47bb-0310-9956-ffa450edef68
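Every hunk below makes the same substitution, so a short, self-contained sketch of the two patterns may help when reading them. The helper names and parameters here are illustrative only and are not part of the commit; the APR calls are the ones the diff itself uses.

#include "apr_buckets.h"
#include "apr_file_io.h"

/* Illustrative only: the pattern each hunk removes.  The caller builds a
 * file bucket by hand and appends it; on platforms where apr_off_t is wider
 * than apr_size_t it also has to split the file into AP_MAX_SENDFILE-sized
 * buckets first (the loop deleted in several hunks below). */
static void insert_file_old_way(apr_bucket_brigade *bb, apr_file_t *fd,
                                apr_off_t offset, apr_size_t len,
                                apr_pool_t *pool)
{
    apr_bucket *b = apr_bucket_file_create(fd, offset, len, pool,
                                           bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);
}

/* The replacement: apr_brigade_insert_file() takes the length as an
 * apr_off_t, creates the file bucket (splitting it into several buckets
 * when the length does not fit in an apr_size_t) and appends it to the
 * brigade, so the per-caller splitting loop goes away. */
static void insert_file_new_way(apr_bucket_brigade *bb, apr_file_t *fd,
                                apr_off_t offset, apr_off_t len,
                                apr_pool_t *pool)
{
    apr_brigade_insert_file(bb, fd, offset, len, pool);
}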
modules/arch/win32/mod_isapi.c

@@ -1050,28 +1050,7 @@ int APR_THREAD_FUNC ServerSupportFunction(isapi_cid *cid,
         }

         sent += (apr_uint32_t)fsize;
-#if APR_HAS_LARGE_FILES
-        if (r->finfo.size > AP_MAX_SENDFILE) {
-            /* APR_HAS_LARGE_FILES issue; must split into mutiple buckets,
-             * no greater than MAX(apr_size_t), and more granular than that
-             * in case the brigade code/filters attempt to read it directly.
-             */
-            b = apr_bucket_file_create(fd, tf->Offset, AP_MAX_SENDFILE,
-                                       r->pool, c->bucket_alloc);
-            while (fsize > AP_MAX_SENDFILE) {
-                apr_bucket *bc;
-                apr_bucket_copy(b, &bc);
-                APR_BRIGADE_INSERT_TAIL(bb, bc);
-                b->start += AP_MAX_SENDFILE;
-                fsize -= AP_MAX_SENDFILE;
-            }
-            b->length = (apr_size_t)fsize; /* Resize just the last bucket */
-        }
-        else
-#endif
-        b = apr_bucket_file_create(fd, tf->Offset, (apr_size_t)fsize,
-                                   r->pool, c->bucket_alloc);
-        APR_BRIGADE_INSERT_TAIL(bb, b);
+        apr_brigade_insert_file(bb, fd, tf->Offset, fsize, r->pool);

         if (tf->pTail && tf->TailLength) {
             sent += tf->TailLength;
modules/cache/mod_disk_cache.c

@@ -776,9 +776,8 @@ static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_bri
     apr_bucket *e;
     disk_cache_object_t *dobj = (disk_cache_object_t*) h->cache_obj->vobj;

-    e = apr_bucket_file_create(dobj->fd, 0, (apr_size_t) dobj->file_size, p,
-                               bb->bucket_alloc);
-    APR_BRIGADE_INSERT_HEAD(bb, e);
+    apr_brigade_insert_file(bb, dobj->fd, 0, dobj->file_size, p);
+
     e = apr_bucket_eos_create(bb->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, e);

modules/cache/mod_file_cache.c

@@ -297,9 +297,8 @@ static int sendfile_handler(request_rec *r, a_file *file)
     apr_bucket *b;
     apr_bucket_brigade *bb = apr_brigade_create(r->pool, c->bucket_alloc);

-    b = apr_bucket_file_create(file->file, 0, (apr_size_t)file->finfo.size,
-                               r->pool, c->bucket_alloc);
-    APR_BRIGADE_INSERT_TAIL(bb, b);
+    apr_brigade_insert_file(bb, file->file, 0, file->finfo.size, r->pool);
+
     b = apr_bucket_eos_create(c->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, b);

modules/cache/mod_mem_cache.c

@@ -652,13 +652,14 @@ static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_bri
             /* CACHE_TYPE_FILE */
             apr_file_t *file;
             apr_os_file_put(&file, &mobj->fd, mobj->flags, p);
-            b = apr_bucket_file_create(file, 0, mobj->m_len, p, bb->bucket_alloc);
+
+            apr_brigade_insert_file(bb, file, 0, mobj->m_len, p);
         }
         else {
             /* CACHE_TYPE_HEAP */
             b = apr_bucket_immortal_create(mobj->m, mobj->m_len, bb->bucket_alloc);
         }
         APR_BRIGADE_INSERT_TAIL(bb, b);
     }
     b = apr_bucket_eos_create(bb->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, b);
modules/dav/fs/repos.c

@@ -980,11 +980,7 @@ static dav_error * dav_fs_deliver(const dav_resource *resource,

     bb = apr_brigade_create(pool, output->c->bucket_alloc);

-    /* ### this does not handle large files. but this is test code anyway */
-    bkt = apr_bucket_file_create(fd, 0,
-                                 (apr_size_t)resource->info->finfo.size,
-                                 pool, output->c->bucket_alloc);
-    APR_BRIGADE_INSERT_TAIL(bb, bkt);
+    apr_brigade_insert_file(bb, fd, 0, resource->info->finfo.size, pool);

     bkt = apr_bucket_eos_create(output->c->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, bkt);
modules/generators/mod_asis.c

@@ -90,28 +90,8 @@ static int asis_handler(request_rec *r)
     }

     bb = apr_brigade_create(r->pool, c->bucket_alloc);
-#if APR_HAS_LARGE_FILES
-    if (r->finfo.size - pos > AP_MAX_SENDFILE) {
-        /* APR_HAS_LARGE_FILES issue; must split into mutiple buckets,
-         * no greater than MAX(apr_size_t), and more granular than that
-         * in case the brigade code/filters attempt to read it directly.
-         */
-        apr_off_t fsize = r->finfo.size - pos;
-        b = apr_bucket_file_create(f, pos, AP_MAX_SENDFILE,
-                                   r->pool, c->bucket_alloc);
-        while (fsize > AP_MAX_SENDFILE) {
-            APR_BRIGADE_INSERT_TAIL(bb, b);
-            apr_bucket_copy(b, &b);
-            b->start += AP_MAX_SENDFILE;
-            fsize -= AP_MAX_SENDFILE;
-        }
-        b->length = (apr_size_t)fsize; /* Resize just the last bucket */
-    }
-    else
-#endif
-    b = apr_bucket_file_create(f, pos, (apr_size_t) (r->finfo.size - pos),
-                               r->pool, c->bucket_alloc);
-    APR_BRIGADE_INSERT_TAIL(bb, b);
+    apr_brigade_insert_file(bb, f, pos, r->finfo.size - pos, r->pool);
+
     b = apr_bucket_eos_create(c->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, b);
     rv = ap_pass_brigade(r->output_filters, bb);
modules/mappers/mod_negotiation.c

@@ -3043,10 +3043,9 @@ static int handle_map_file(request_rec *r)
         return res;
     }
     bb = apr_brigade_create(r->pool, c->bucket_alloc);
-    e = apr_bucket_file_create(map, best->body,
-                               (apr_size_t)best->bytes, r->pool,
-                               c->bucket_alloc);
-    APR_BRIGADE_INSERT_TAIL(bb, e);
+
+    apr_brigade_insert_file(bb, map, best->body, best->bytes, r->pool);
+
     e = apr_bucket_eos_create(c->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, e);

modules/proxy/mod_proxy_http.c

@@ -542,28 +542,7 @@ static apr_status_t spool_reqbody_cl(apr_pool_t *p,
     terminate_headers(bucket_alloc, header_brigade);
     APR_BRIGADE_CONCAT(header_brigade, body_brigade);
     if (tmpfile) {
-        /* For platforms where the size of the file may be larger than
-         * that which can be stored in a single bucket (where the
-         * length field is an apr_size_t), split it into several
-         * buckets: */
-        if (sizeof(apr_off_t) > sizeof(apr_size_t)
-            && fsize > AP_MAX_SENDFILE) {
-            e = apr_bucket_file_create(tmpfile, 0, AP_MAX_SENDFILE, p,
-                                       bucket_alloc);
-            while (fsize > AP_MAX_SENDFILE) {
-                apr_bucket *ce;
-                apr_bucket_copy(e, &ce);
-                APR_BRIGADE_INSERT_TAIL(header_brigade, ce);
-                e->start += AP_MAX_SENDFILE;
-                fsize -= AP_MAX_SENDFILE;
-            }
-            e->length = (apr_size_t)fsize; /* Resize just the last bucket */
-        }
-        else {
-            e = apr_bucket_file_create(tmpfile, 0, (apr_size_t)fsize, p,
-                                       bucket_alloc);
-        }
-        APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+        apr_brigade_insert_file(header_brigade, tmpfile, 0, fsize, p);
     }
     /* This is all a single brigade, pass with flush flagged */
     status = pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1);
server/core.c

@@ -3581,35 +3581,13 @@ static int default_handler(request_rec *r)
                            ap_md5digest(r->pool, fd));
         }

-        /* For platforms where the size of the file may be larger than
-         * that which can be stored in a single bucket (where the
-         * length field is an apr_size_t), split it into several
-         * buckets: */
-        if (sizeof(apr_off_t) > sizeof(apr_size_t)
-            && r->finfo.size > AP_MAX_SENDFILE) {
-            apr_off_t fsize = r->finfo.size;
-            e = apr_bucket_file_create(fd, 0, AP_MAX_SENDFILE, r->pool,
-                                       c->bucket_alloc);
-            while (fsize > AP_MAX_SENDFILE) {
-                apr_bucket *ce;
-                apr_bucket_copy(e, &ce);
-                APR_BRIGADE_INSERT_TAIL(bb, ce);
-                e->start += AP_MAX_SENDFILE;
-                fsize -= AP_MAX_SENDFILE;
-            }
-            e->length = (apr_size_t)fsize; /* Resize just the last bucket */
-        }
-        else {
-            e = apr_bucket_file_create(fd, 0, (apr_size_t)r->finfo.size,
-                                       r->pool, c->bucket_alloc);
-        }
+        e = apr_brigade_insert_file(bb, fd, 0, r->finfo.size, r->pool);

 #if APR_HAS_MMAP
         if (d->enable_mmap == ENABLE_MMAP_OFF) {
             (void)apr_bucket_file_enable_mmap(e, 0);
         }
 #endif
-        APR_BRIGADE_INSERT_TAIL(bb, e);
     }

     e = apr_bucket_eos_create(c->bucket_alloc);
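One detail worth noting in the server/core.c hunk above: apr_brigade_insert_file() returns the file bucket it inserted (the last one, if the file had to be split), which is why default_handler keeps the assignment to e and can still disable mmap on that bucket. A minimal, illustrative sketch of the same call sequence follows; the "disable_mmap" flag stands in for the module's d->enable_mmap == ENABLE_MMAP_OFF check and is not part of the commit.

#include "apr_buckets.h"

/* Illustrative sketch of the default_handler pattern above. */
static void insert_file_no_mmap(apr_bucket_brigade *bb, apr_file_t *fd,
                                apr_off_t length, int disable_mmap,
                                apr_pool_t *pool)
{
    apr_bucket *e = apr_brigade_insert_file(bb, fd, 0, length, pool);

#if APR_HAS_MMAP
    if (disable_mmap) {
        /* Keep the file bucket on the read/sendfile path instead of mmap. */
        (void)apr_bucket_file_enable_mmap(e, 0);
    }
#endif
}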
server/protocol.c

@@ -1346,12 +1346,11 @@ AP_DECLARE(apr_status_t) ap_send_fd(apr_file_t *fd, request_rec *r,
 {
     conn_rec *c = r->connection;
     apr_bucket_brigade *bb = NULL;
-    apr_bucket *b;
     apr_status_t rv;

     bb = apr_brigade_create(r->pool, c->bucket_alloc);
-    b = apr_bucket_file_create(fd, offset, len, r->pool, c->bucket_alloc);
-    APR_BRIGADE_INSERT_TAIL(bb, b);

+    apr_brigade_insert_file(bb, fd, 0, len, r->pool);
+
     rv = ap_pass_brigade(r->output_filters, bb);
     if (rv != APR_SUCCESS) {