Mirror of https://github.com/apache/httpd.git (synced 2025-08-08 15:02:10 +03:00)

Make proxy modules compile if APR_HAS_THREADS is not defined.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1852442 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Stefan Sperling
2019-01-29 12:28:35 +00:00
parent 38c9a8d845
commit 3ee1b624b8
4 changed files with 66 additions and 3 deletions

View File (modules/proxy/mod_proxy.h — inferred from the struct proxy_worker / proxy_balancer definitions below)

@@ -472,7 +472,9 @@ struct proxy_worker {
proxy_conn_pool *cp; /* Connection pool to use */ proxy_conn_pool *cp; /* Connection pool to use */
proxy_worker_shared *s; /* Shared data */ proxy_worker_shared *s; /* Shared data */
proxy_balancer *balancer; /* which balancer am I in? */ proxy_balancer *balancer; /* which balancer am I in? */
#if APR_HAS_THREADS
apr_thread_mutex_t *tmutex; /* Thread lock for updating address cache */ apr_thread_mutex_t *tmutex; /* Thread lock for updating address cache */
#endif
void *context; /* general purpose storage */ void *context; /* general purpose storage */
ap_conf_vector_t *section_config; /* <Proxy>-section wherein defined */ ap_conf_vector_t *section_config; /* <Proxy>-section wherein defined */
}; };
@@ -528,8 +530,10 @@ struct proxy_balancer {
proxy_hashes hash; proxy_hashes hash;
apr_time_t wupdated; /* timestamp of last change to workers list */ apr_time_t wupdated; /* timestamp of last change to workers list */
proxy_balancer_method *lbmethod; proxy_balancer_method *lbmethod;
#if APR_HAS_THREADS
apr_global_mutex_t *gmutex; /* global lock for updating list of workers */ apr_global_mutex_t *gmutex; /* global lock for updating list of workers */
apr_thread_mutex_t *tmutex; /* Thread lock for updating shm */ apr_thread_mutex_t *tmutex; /* Thread lock for updating shm */
#endif
proxy_server_conf *sconf; proxy_server_conf *sconf;
void *context; /* general purpose storage */ void *context; /* general purpose storage */
proxy_balancer_shared *s; /* Shared data */ proxy_balancer_shared *s; /* Shared data */

View File (modules/proxy/mod_proxy_balancer.c — inferred from find_best_worker / proxy_balancer_pre_request / balancer_handler below)

@@ -346,23 +346,27 @@ static proxy_worker *find_best_worker(proxy_balancer *balancer,
proxy_worker *candidate = NULL; proxy_worker *candidate = NULL;
apr_status_t rv; apr_status_t rv;
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) { if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01163)
"%s: Lock failed for find_best_worker()", "%s: Lock failed for find_best_worker()",
balancer->s->name); balancer->s->name);
return NULL; return NULL;
} }
#endif
candidate = (*balancer->lbmethod->finder)(balancer, r); candidate = (*balancer->lbmethod->finder)(balancer, r);
if (candidate) if (candidate)
candidate->s->elected++; candidate->s->elected++;
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) { if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01164)
"%s: Unlock failed for find_best_worker()", "%s: Unlock failed for find_best_worker()",
balancer->s->name); balancer->s->name);
} }
#endif
if (candidate == NULL) { if (candidate == NULL) {
/* All the workers are in error state or disabled. /* All the workers are in error state or disabled.
@@ -492,11 +496,13 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
/* Step 2: Lock the LoadBalancer /* Step 2: Lock the LoadBalancer
* XXX: perhaps we need the process lock here * XXX: perhaps we need the process lock here
*/ */
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) { if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01166)
"%s: Lock failed for pre_request", (*balancer)->s->name); "%s: Lock failed for pre_request", (*balancer)->s->name);
return DECLINED; return DECLINED;
} }
#endif
/* Step 3: force recovery */ /* Step 3: force recovery */
force_recovery(*balancer, r->server); force_recovery(*balancer, r->server);
@@ -557,20 +563,24 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167) ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01167)
"%s: All workers are in error state for route (%s)", "%s: All workers are in error state for route (%s)",
(*balancer)->s->name, route); (*balancer)->s->name, route);
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) { if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01168)
"%s: Unlock failed for pre_request", "%s: Unlock failed for pre_request",
(*balancer)->s->name); (*balancer)->s->name);
} }
#endif
return HTTP_SERVICE_UNAVAILABLE; return HTTP_SERVICE_UNAVAILABLE;
} }
} }
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) { if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01169)
"%s: Unlock failed for pre_request", "%s: Unlock failed for pre_request",
(*balancer)->s->name); (*balancer)->s->name);
} }
#endif
if (!*worker) { if (!*worker) {
runtime = find_best_worker(*balancer, r); runtime = find_best_worker(*balancer, r);
if (!runtime) { if (!runtime) {
@@ -644,12 +654,14 @@ static int proxy_balancer_post_request(proxy_worker *worker,
apr_status_t rv; apr_status_t rv;
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) { if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01173)
"%s: Lock failed for post_request", "%s: Lock failed for post_request",
balancer->s->name); balancer->s->name);
return HTTP_INTERNAL_SERVER_ERROR; return HTTP_INTERNAL_SERVER_ERROR;
} }
#endif
if (!apr_is_empty_array(balancer->errstatuses) if (!apr_is_empty_array(balancer->errstatuses)
&& !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) { && !(worker->s->status & PROXY_WORKER_IGNORE_ERRORS)) {
@@ -681,11 +693,12 @@ static int proxy_balancer_post_request(proxy_worker *worker,
worker->s->error_time = apr_time_now(); worker->s->error_time = apr_time_now();
} }
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) { if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01175)
"%s: Unlock failed for post_request", balancer->s->name); "%s: Unlock failed for post_request", balancer->s->name);
} }
#endif
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176) ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01176)
"proxy_balancer_post_request for (%s)", balancer->s->name); "proxy_balancer_post_request for (%s)", balancer->s->name);
@@ -715,19 +728,23 @@ static void recalc_factors(proxy_balancer *balancer)
static apr_status_t lock_remove(void *data) static apr_status_t lock_remove(void *data)
{ {
#if APR_HAS_THREADS
int i; int i;
#endif
proxy_balancer *balancer; proxy_balancer *balancer;
server_rec *s = data; server_rec *s = data;
void *sconf = s->module_config; void *sconf = s->module_config;
proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module); proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
balancer = (proxy_balancer *)conf->balancers->elts; balancer = (proxy_balancer *)conf->balancers->elts;
#if APR_HAS_THREADS
for (i = 0; i < conf->balancers->nelts; i++, balancer++) { for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
if (balancer->gmutex) { if (balancer->gmutex) {
apr_global_mutex_destroy(balancer->gmutex); apr_global_mutex_destroy(balancer->gmutex);
balancer->gmutex = NULL; balancer->gmutex = NULL;
} }
} }
#endif
return(0); return(0);
} }
@@ -943,7 +960,7 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */ PROXY_STRNCPY(balancer->s->sname, sname); /* We know this will succeed */
balancer->max_workers = balancer->workers->nelts + balancer->growth; balancer->max_workers = balancer->workers->nelts + balancer->growth;
#if APR_HAS_THREADS
/* Create global mutex */ /* Create global mutex */
rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type, rv = ap_global_mutex_create(&(balancer->gmutex), NULL, balancer_mutex_type,
balancer->s->sname, s, pconf, 0); balancer->s->sname, s, pconf, 0);
@@ -953,7 +970,7 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
balancer->s->sname); balancer->s->sname);
return HTTP_INTERNAL_SERVER_ERROR; return HTTP_INTERNAL_SERVER_ERROR;
} }
#endif
apr_pool_cleanup_register(pconf, (void *)s, lock_remove, apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
apr_pool_cleanup_null); apr_pool_cleanup_null);
@@ -1133,17 +1150,21 @@ static int balancer_handler(request_rec *r)
balancer = (proxy_balancer *)conf->balancers->elts; balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++, balancer++) { for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) { if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01189)
"%s: Lock failed for balancer_handler", "%s: Lock failed for balancer_handler",
balancer->s->name); balancer->s->name);
} }
#endif
ap_proxy_sync_balancer(balancer, r->server, conf); ap_proxy_sync_balancer(balancer, r->server, conf);
#if APR_HAS_THREADS
if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) { if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01190)
"%s: Unlock failed for balancer_handler", "%s: Unlock failed for balancer_handler",
balancer->s->name); balancer->s->name);
} }
#endif
} }
if (r->args && (r->method_number == M_GET)) { if (r->args && (r->method_number == M_GET)) {
@@ -1356,11 +1377,13 @@ static int balancer_handler(request_rec *r)
proxy_worker *nworker; proxy_worker *nworker;
nworker = ap_proxy_get_worker(r->pool, bsel, conf, val); nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
if (!nworker && storage->num_free_slots(bsel->wslot)) { if (!nworker && storage->num_free_slots(bsel->wslot)) {
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) { if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
"%s: Lock failed for adding worker", "%s: Lock failed for adding worker",
bsel->s->name); bsel->s->name);
} }
#endif
ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0); ret = ap_proxy_define_worker(conf->pool, &nworker, bsel, conf, val, 0);
if (!ret) { if (!ret) {
unsigned int index; unsigned int index;
@@ -1369,41 +1392,49 @@ static int balancer_handler(request_rec *r)
if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) { if ((rv = storage->grab(bsel->wslot, &index)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195) ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01195)
"worker slotmem_grab failed"); "worker slotmem_grab failed");
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01196)
"%s: Unlock failed for adding worker", "%s: Unlock failed for adding worker",
bsel->s->name); bsel->s->name);
} }
#endif
return HTTP_BAD_REQUEST; return HTTP_BAD_REQUEST;
} }
if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) { if ((rv = storage->dptr(bsel->wslot, index, (void *)&shm)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197) ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01197)
"worker slotmem_dptr failed"); "worker slotmem_dptr failed");
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01198)
"%s: Unlock failed for adding worker", "%s: Unlock failed for adding worker",
bsel->s->name); bsel->s->name);
} }
#endif
return HTTP_BAD_REQUEST; return HTTP_BAD_REQUEST;
} }
if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) { if ((rv = ap_proxy_share_worker(nworker, shm, index)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199) ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01199)
"Cannot share worker"); "Cannot share worker");
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01200)
"%s: Unlock failed for adding worker", "%s: Unlock failed for adding worker",
bsel->s->name); bsel->s->name);
} }
#endif
return HTTP_BAD_REQUEST; return HTTP_BAD_REQUEST;
} }
if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) { if ((rv = ap_proxy_initialize_worker(nworker, r->server, conf->pool)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201) ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r, APLOGNO(01201)
"Cannot init worker"); "Cannot init worker");
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01202)
"%s: Unlock failed for adding worker", "%s: Unlock failed for adding worker",
bsel->s->name); bsel->s->name);
} }
#endif
return HTTP_BAD_REQUEST; return HTTP_BAD_REQUEST;
} }
/* sync all timestamps */ /* sync all timestamps */
@@ -1414,14 +1445,18 @@ static int balancer_handler(request_rec *r)
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207) ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207)
"%s: failed to add worker %s", "%s: failed to add worker %s",
bsel->s->name, val); bsel->s->name, val);
#if APR_HAS_THREADS
PROXY_GLOBAL_UNLOCK(bsel); PROXY_GLOBAL_UNLOCK(bsel);
#endif
return HTTP_BAD_REQUEST; return HTTP_BAD_REQUEST;
} }
#if APR_HAS_THREADS
if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) { if ((rv = PROXY_GLOBAL_UNLOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203) ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01203)
"%s: Unlock failed for adding worker", "%s: Unlock failed for adding worker",
bsel->s->name); bsel->s->name);
} }
#endif
} else { } else {
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207) ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01207)
"%s: failed to add worker %s", "%s: failed to add worker %s",

View File (modules/proxy/mod_proxy_ftp.c — inferred from proxy_ftp_handler below)

@@ -979,7 +979,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
apr_status_t rv; apr_status_t rv;
conn_rec *origin, *data = NULL; conn_rec *origin, *data = NULL;
apr_status_t err = APR_SUCCESS; apr_status_t err = APR_SUCCESS;
#if APR_HAS_THREADS
apr_status_t uerr = APR_SUCCESS; apr_status_t uerr = APR_SUCCESS;
#endif
apr_bucket_brigade *bb; apr_bucket_brigade *bb;
char *buf, *connectname; char *buf, *connectname;
apr_port_t connectport; apr_port_t connectport;
@@ -1119,10 +1121,12 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
if (worker->s->is_address_reusable) { if (worker->s->is_address_reusable) {
if (!worker->cp->addr) { if (!worker->cp->addr) {
#if APR_HAS_THREADS
if ((err = PROXY_THREAD_LOCK(worker->balancer)) != APR_SUCCESS) { if ((err = PROXY_THREAD_LOCK(worker->balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(01037) "lock"); ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(01037) "lock");
return HTTP_INTERNAL_SERVER_ERROR; return HTTP_INTERNAL_SERVER_ERROR;
} }
#endif
} }
connect_addr = worker->cp->addr; connect_addr = worker->cp->addr;
address_pool = worker->cp->pool; address_pool = worker->cp->pool;
@@ -1138,9 +1142,11 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
address_pool); address_pool);
if (worker->s->is_address_reusable && !worker->cp->addr) { if (worker->s->is_address_reusable && !worker->cp->addr) {
worker->cp->addr = connect_addr; worker->cp->addr = connect_addr;
#if APR_HAS_THREADS
if ((uerr = PROXY_THREAD_UNLOCK(worker->balancer)) != APR_SUCCESS) { if ((uerr = PROXY_THREAD_UNLOCK(worker->balancer)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(01038) "unlock"); ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(01038) "unlock");
} }
#endif
} }
/* /*
* get all the possible IP addresses for the destname and loop through * get all the possible IP addresses for the destname and loop through

View File (modules/proxy/proxy_util.c — inferred from ap_proxy_define_balancer / ap_proxy_initialize_worker below)

@@ -1167,8 +1167,10 @@ PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p,
lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0"); lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0");
(*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *)); (*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
#if APR_HAS_THREADS
(*balancer)->gmutex = NULL; (*balancer)->gmutex = NULL;
(*balancer)->tmutex = NULL; (*balancer)->tmutex = NULL;
#endif
(*balancer)->lbmethod = lbmethod; (*balancer)->lbmethod = lbmethod;
if (do_malloc) if (do_malloc)
@@ -1257,7 +1259,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_balancer(proxy_balancer *balancer,
PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balancer, server_rec *s, apr_pool_t *p) PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balancer, server_rec *s, apr_pool_t *p)
{ {
#if APR_HAS_THREADS
apr_status_t rv = APR_SUCCESS; apr_status_t rv = APR_SUCCESS;
#endif
ap_slotmem_provider_t *storage = balancer->storage; ap_slotmem_provider_t *storage = balancer->storage;
apr_size_t size; apr_size_t size;
unsigned int num; unsigned int num;
@@ -1271,6 +1275,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
* for each balancer we need to init the global * for each balancer we need to init the global
* mutex and then attach to the shared worker shm * mutex and then attach to the shared worker shm
*/ */
#if APR_HAS_THREADS
if (!balancer->gmutex) { if (!balancer->gmutex) {
ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(00919) ap_log_error(APLOG_MARK, APLOG_CRIT, 0, s, APLOGNO(00919)
"no mutex %s", balancer->s->name); "no mutex %s", balancer->s->name);
@@ -1287,6 +1292,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
balancer->s->name); balancer->s->name);
return rv; return rv;
} }
#endif
/* now attach */ /* now attach */
storage->attach(&(balancer->wslot), balancer->s->sname, &size, &num, p); storage->attach(&(balancer->wslot), balancer->s->sname, &size, &num, p);
@@ -1297,6 +1303,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
if (balancer->lbmethod && balancer->lbmethod->reset) if (balancer->lbmethod && balancer->lbmethod->reset)
balancer->lbmethod->reset(balancer, s); balancer->lbmethod->reset(balancer, s);
#if APR_HAS_THREADS
if (balancer->tmutex == NULL) { if (balancer->tmutex == NULL) {
rv = apr_thread_mutex_create(&(balancer->tmutex), APR_THREAD_MUTEX_DEFAULT, p); rv = apr_thread_mutex_create(&(balancer->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) { if (rv != APR_SUCCESS) {
@@ -1305,6 +1312,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
return rv; return rv;
} }
} }
#endif
return APR_SUCCESS; return APR_SUCCESS;
} }
@@ -2035,6 +2043,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
ap_proxy_worker_name(p, worker)); ap_proxy_worker_name(p, worker));
apr_global_mutex_lock(proxy_mutex); apr_global_mutex_lock(proxy_mutex);
/* Now init local worker data */ /* Now init local worker data */
#if APR_HAS_THREADS
if (worker->tmutex == NULL) { if (worker->tmutex == NULL) {
rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p); rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) { if (rv != APR_SUCCESS) {
@@ -2044,6 +2053,7 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
return rv; return rv;
} }
} }
#endif
if (worker->cp == NULL) if (worker->cp == NULL)
init_conn_pool(p, worker); init_conn_pool(p, worker);
if (worker->cp == NULL) { if (worker->cp == NULL) {
@@ -2401,7 +2411,9 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
{ {
int server_port; int server_port;
apr_status_t err = APR_SUCCESS; apr_status_t err = APR_SUCCESS;
#if APR_HAS_THREADS
apr_status_t uerr = APR_SUCCESS; apr_status_t uerr = APR_SUCCESS;
#endif
const char *uds_path; const char *uds_path;
/* /*
@@ -2535,10 +2547,12 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
* we can reuse the address. * we can reuse the address.
*/ */
if (!worker->cp->addr) { if (!worker->cp->addr) {
#if APR_HAS_THREADS
if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) { if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock"); ap_log_rerror(APLOG_MARK, APLOG_ERR, err, r, APLOGNO(00945) "lock");
return HTTP_INTERNAL_SERVER_ERROR; return HTTP_INTERNAL_SERVER_ERROR;
} }
#endif
/* /*
* Worker can have the single constant backend address. * Worker can have the single constant backend address.
@@ -2551,9 +2565,11 @@ ap_proxy_determine_connection(apr_pool_t *p, request_rec *r,
conn->port, 0, conn->port, 0,
worker->cp->pool); worker->cp->pool);
conn->addr = worker->cp->addr; conn->addr = worker->cp->addr;
#if APR_HAS_THREADS
if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) { if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock"); ap_log_rerror(APLOG_MARK, APLOG_ERR, uerr, r, APLOGNO(00946) "unlock");
} }
#endif
} }
else { else {
conn->addr = worker->cp->addr; conn->addr = worker->cp->addr;
@@ -3483,7 +3499,9 @@ PROXY_DECLARE(apr_status_t) ap_proxy_sync_balancer(proxy_balancer *b, server_rec
(*runtime)->cp = NULL; (*runtime)->cp = NULL;
(*runtime)->balancer = b; (*runtime)->balancer = b;
(*runtime)->s = shm; (*runtime)->s = shm;
#if APR_HAS_THREADS
(*runtime)->tmutex = NULL; (*runtime)->tmutex = NULL;
#endif
rv = ap_proxy_initialize_worker(*runtime, s, conf->pool); rv = ap_proxy_initialize_worker(*runtime, s, conf->pool);
if (rv != APR_SUCCESS) { if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker"); ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker");