--- a/docs/log-message-tags/next-number
+++ a/docs/log-message-tags/next-number
@@ -1,1 +1,1 @@
-10033
+10034
--- a/docs/manual/howto/reverse_proxy.xml
+++ a/docs/manual/howto/reverse_proxy.xml
@@ -147,7 +147,7 @@
Hint
- BalancerMembers are also sometimes referred to as workers.
+ BalancerMembers are also sometimes referred to as workers.
@@ -182,20 +182,41 @@
Failover
- You can also fine-tune various failover scenarios, detailing which
- workers and even which balancers should accessed in such cases. For
- example, the below setup implements 2 failover cases: In the first,
- http://hstandby.example.com:8080
is only sent traffic
- if all other workers in the myset balancer are not available.
- If that worker itself is not available, only then will the
- http://bkup1.example.com:8080
and http://bkup2.example.com:8080
- workers be brought into rotation:
+ You can also fine-tune various failover scenarios, detailing which workers
+ and even which balancers should be accessed in such cases. For example, the
+ below setup implements three failover cases:
+
+
+ -
+
http://spare1.example.com:8080
and
+ http://spare2.example.com:8080
are only sent traffic if one
+ or both of http://www2.example.com:8080
or
+ http://www3.example.com:8080
is unavailable. (One spare
+ will be used to replace one unusable member of the same balancer set.)
+
+ -
+
http://hstandby.example.com:8080
is only sent traffic if
+ all other workers in balancer set 0
are not available.
+
+ -
+ If all load balancer set
0
workers, spares, and the standby
+ are unavailable, only then will the
+ http://bkup1.example.com:8080
and
+ http://bkup2.example.com:8080
workers from balancer set
+ 1
be brought into rotation.
+
+
+
+ Thus, it is possible to have one or more hot spares and hot standbys for
+ each load balancer set.
<Proxy balancer://myset>
BalancerMember http://www2.example.com:8080
BalancerMember http://www3.example.com:8080 loadfactor=3 timeout=1
+ BalancerMember http://spare1.example.com:8080 status=+R
+ BalancerMember http://spare2.example.com:8080 status=+R
BalancerMember http://hstandby.example.com:8080 status=+H
BalancerMember http://bkup1.example.com:8080 lbset=1
BalancerMember http://bkup2.example.com:8080 lbset=1
@@ -207,11 +228,11 @@
- The magic of this failover setup is setting http://hstandby.example.com:8080
- with the +H
status flag, which puts it in hot standby mode,
- and making the 2 bkup#
servers part of the #1 load balancer set (the
- default set is 0); for failover, hot standbys (if they exist) are used 1st, when all regular
- workers are unavailable; load balancer sets with lowest number are always tried first.
+ For failover, hot spares are used as replacements for failed or unusable
+ workers in the same load balancer set. Hot standbys are used if all
+ workers and spares in the load balancer set are unavailable. Load balancer
+ sets (with their respective hot spares and standbys) are always tried in
+ order from lowest to highest.
@@ -221,14 +242,14 @@
One of the most unique and useful features of Apache httpd's reverse proxy is
- the embedded balancer-manager application. Similar to
- mod_status, balancer-manager displays
- the current working configuration and status of the enabled
- balancers and workers currently in use. However, not only does it
- display these parameters, it also allows for dynamic, runtime, on-the-fly
- reconfiguration of almost all of them, including adding new BalancerMembers
- (workers) to an existing balancer. To enable these capability, the following
- needs to be added to your configuration:
+ the embedded balancer-manager application. Similar to
+ mod_status, balancer-manager displays
+ the current working configuration and status of the enabled
+ balancers and workers currently in use. However, not only does it
+ display these parameters, it also allows for dynamic, runtime, on-the-fly
+ reconfiguration of almost all of them, including adding new BalancerMembers
+ (workers) to an existing balancer. To enable this capability, the following
+ needs to be added to your configuration:
@@ -293,16 +314,20 @@
is displayed and can be set/reset. The meanings of these statuses are as follows:
- Flag | String | Description |
- | Ok | Worker is available |
- | Init | Worker has been initialized |
+ Flag | String | Description |
+ | Ok | Worker is available |
+ | Init | Worker has been initialized |
D | Dis | Worker is disabled and will not accept any requests; will be
automatically retried. |
S | Stop | Worker is administratively stopped; will not accept requests
and will not be automatically retried |
I | Ign | Worker is in ignore-errors mode and will always be considered available. |
+ R | Spar | Worker is a hot spare. For each worker in a given lbset that is unusable
+ (disabled, stopped, in error, etc.), a usable hot spare with the same lbset will be used in
+ its place. Hot spares can help ensure that a specific number of workers are always available
+ for use by a balancer. |
H | Stby | Worker is in hot-standby mode and will only be used if no other
- viable workers are available. |
+ viable workers or spares are available in the balancer set. |
E | Err | Worker is in an error state, usually due to failing pre-request check;
requests will not be proxied to this worker, but it will be retried depending on
the retry setting of the worker. |
--- a/docs/manual/mod/mod_proxy.xml
+++ a/docs/manual/mod/mod_proxy.xml
@@ -306,10 +306,10 @@
DNS resolution for origin domains
DNS resolution happens when the socket to
the origin domain is created for the first time.
- When connection pooling is used, each backend domain is resolved
- only once per child process, and reused for all further connections
- until the child is recycled. This information should to be considered
- while planning DNS maintenance tasks involving backend domains.
+ When connection pooling is used, each backend domain is resolved
+ only once per child process, and reused for all further connections
+ until the child is recycled. This information should be considered
+ while planning DNS maintenance tasks involving backend domains.
Please also check ProxyPass
parameters for more details about connection reuse.
@@ -398,7 +398,7 @@
</Location>
- In 2.4.26 and later, the "no-proxy" environment variable can be set to disable
+
In 2.4.26 and later, the "no-proxy" environment variable can be set to disable
mod_proxy processing the current request.
This variable should be set with SetEnvIf, as SetEnv
@@ -983,7 +983,7 @@
general ProxyPass directives. In 2.4.26 and later, the "no-proxy"
environment variable is an alternative to exclusions, and is the only
way to configure an exclusion of a ProxyPass
- directive in Location context.
+ directive in Location context.
This variable should be set with SetEnvIf, as SetEnv
is not evaluated early enough.
@@ -1180,8 +1180,12 @@
S | Worker is administratively stopped; will not accept requests
and will not be automatically retried |
I | Worker is in ignore-errors mode and will always be considered available. |
+ R | Worker is a hot spare. For each worker in a given lbset that is unusable
+ (disabled, stopped, in error, etc.), a usable hot spare with the same lbset will be used in
+ its place. Hot spares can help ensure that a specific number of workers are always available
+ for use by a balancer. |
H | Worker is in hot-standby mode and will only be used if no other
- viable workers are available. |
+ viable workers or spares are available in the balancer set. |
E | Worker is in an error state. |
N | Worker is in drain mode and will only accept existing sticky sessions
destined for itself and ignore all other requests. |
@@ -1335,8 +1339,24 @@
</Proxy>
+ Configuring hot spares can help ensure that a certain number of
+ workers are always available for use per load balancer set:
+
+ProxyPass "/" "balancer://sparecluster/"
+<Proxy balancer://sparecluster>
+ BalancerMember ajp://1.2.3.4:8009
+ BalancerMember ajp://1.2.3.5:8009
+ # The servers below are hot spares. For each server above that is unusable
+ # (disabled, stopped, unreachable, in error state, etc.), one of these spares
+ # will be used in its place. Two servers will always be available for a request
+ # unless one or more of the spares is also unusable.
+ BalancerMember ajp://1.2.3.6:8009 status=+R
+ BalancerMember ajp://1.2.3.7:8009 status=+R
+</Proxy>
+
+
Setting up a hot-standby that will only be used if no other
- members are available:
+ members (or spares) are available in the load balancer set:
ProxyPass "/" "balancer://hotcluster/"
<Proxy balancer://hotcluster>
@@ -1393,7 +1413,7 @@
ProxyPassMatch
Maps remote servers into the local server URL-space using regular expressions
ProxyPassMatch [regex] !|url [key=value
- [key=value ...]]
+ [key=value ...]]
server configvirtual host
directory
--- a/modules/proxy/balancers/mod_lbmethod_bybusyness.c
+++ a/modules/proxy/balancers/mod_lbmethod_bybusyness.c
@@ -26,17 +26,14 @@
proxy_worker *worker, server_rec *s) = NULL;
static proxy_worker *find_best_bybusyness(proxy_balancer *balancer,
- request_rec *r)
+ request_rec *r)
{
int i;
+ int total_factor = 0;
+ apr_array_header_t *workers;
proxy_worker **worker;
proxy_worker *mycandidate = NULL;
- int cur_lbset = 0;
- int max_lbset = 0;
- int checking_standby;
- int checked_standby;
-
- int total_factor = 0;
+ apr_pool_t *wpool;
if (!ap_proxy_retry_worker_fn) {
ap_proxy_retry_worker_fn =
@@ -51,67 +48,34 @@
"proxy: Entering bybusyness for BALANCER (%s)",
balancer->s->name);
- /* First try to see if we have available candidate */
- do {
-
- checking_standby = checked_standby = 0;
- while (!mycandidate && !checked_standby) {
-
- worker = (proxy_worker **)balancer->workers->elts;
- for (i = 0; i < balancer->workers->nelts; i++, worker++) {
- if (!checking_standby) { /* first time through */
- if ((*worker)->s->lbset > max_lbset)
- max_lbset = (*worker)->s->lbset;
- }
- if (
- ((*worker)->s->lbset != cur_lbset) ||
- (checking_standby ? !PROXY_WORKER_IS_STANDBY(*worker) : PROXY_WORKER_IS_STANDBY(*worker)) ||
- (PROXY_WORKER_IS_DRAINING(*worker))
- ) {
- continue;
- }
+ apr_pool_create(&wpool, r->pool);
- /* If the worker is in error state run
- * retry on that worker. It will be marked as
- * operational if the retry timeout is elapsed.
- * The worker might still be unusable, but we try
- * anyway.
- */
- if (!PROXY_WORKER_IS_USABLE(*worker)) {
- ap_proxy_retry_worker_fn("BALANCER", *worker, r->server);
- }
-
- /* Take into calculation only the workers that are
- * not in error state or not disabled.
- */
- if (PROXY_WORKER_IS_USABLE(*worker)) {
+ workers = ap_proxy_balancer_usable_workers(balancer, r, wpool);
- (*worker)->s->lbstatus += (*worker)->s->lbfactor;
- total_factor += (*worker)->s->lbfactor;
-
- if (!mycandidate
- || (*worker)->s->busy < mycandidate->s->busy
- || ((*worker)->s->busy == mycandidate->s->busy && (*worker)->s->lbstatus > mycandidate->s->lbstatus))
- mycandidate = *worker;
+ for (i = 0; i < workers->nelts; i++) {
+ worker = &APR_ARRAY_IDX(workers, i, proxy_worker *);
+ (*worker)->s->lbstatus += (*worker)->s->lbfactor;
+ total_factor += (*worker)->s->lbfactor;
- }
-
- }
+ if (
+ !mycandidate
+ || ((*worker)->s->busy < mycandidate->s->busy)
+ || (
+ ((*worker)->s->busy == mycandidate->s->busy)
+ && ((*worker)->s->lbstatus > mycandidate->s->lbstatus)
+ )
+ ) {
+ mycandidate = *worker;
+ }
+ }
- checked_standby = checking_standby++;
-
- }
-
- cur_lbset++;
-
- } while (cur_lbset <= max_lbset && !mycandidate);
+ apr_pool_destroy(wpool);
if (mycandidate) {
mycandidate->s->lbstatus -= total_factor;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01212)
"proxy: bybusyness selected worker \"%s\" : busy %" APR_SIZE_T_FMT " : lbstatus %d",
mycandidate->s->name, mycandidate->s->busy, mycandidate->s->lbstatus);
-
}
return mycandidate;
--- a/modules/proxy/balancers/mod_lbmethod_byrequests.c
+++ a/modules/proxy/balancers/mod_lbmethod_byrequests.c
@@ -72,16 +72,14 @@
*/
static proxy_worker *find_best_byrequests(proxy_balancer *balancer,
- request_rec *r)
+ request_rec *r)
{
int i;
int total_factor = 0;
+ apr_array_header_t *workers;
proxy_worker **worker;
proxy_worker *mycandidate = NULL;
- int cur_lbset = 0;
- int max_lbset = 0;
- int checking_standby;
- int checked_standby;
+ apr_pool_t *wpool;
if (!ap_proxy_retry_worker_fn) {
ap_proxy_retry_worker_fn =
@@ -96,53 +94,30 @@
"proxy: Entering byrequests for BALANCER (%s)",
balancer->s->name);
- /* First try to see if we have available candidate */
- do {
- checking_standby = checked_standby = 0;
- while (!mycandidate && !checked_standby) {
- worker = (proxy_worker **)balancer->workers->elts;
- for (i = 0; i < balancer->workers->nelts; i++, worker++) {
- if (!checking_standby) { /* first time through */
- if ((*worker)->s->lbset > max_lbset)
- max_lbset = (*worker)->s->lbset;
- }
- if (
- ((*worker)->s->lbset != cur_lbset) ||
- (checking_standby ? !PROXY_WORKER_IS_STANDBY(*worker) : PROXY_WORKER_IS_STANDBY(*worker)) ||
- (PROXY_WORKER_IS_DRAINING(*worker))
- ) {
- continue;
- }
+ apr_pool_create(&wpool, r->pool);
- /* If the worker is in error state run
- * retry on that worker. It will be marked as
- * operational if the retry timeout is elapsed.
- * The worker might still be unusable, but we try
- * anyway.
- */
- if (!PROXY_WORKER_IS_USABLE(*worker))
- ap_proxy_retry_worker_fn("BALANCER", *worker, r->server);
- /* Take into calculation only the workers that are
- * not in error state or not disabled.
- */
- if (PROXY_WORKER_IS_USABLE(*worker)) {
- (*worker)->s->lbstatus += (*worker)->s->lbfactor;
- total_factor += (*worker)->s->lbfactor;
- if (!mycandidate || (*worker)->s->lbstatus > mycandidate->s->lbstatus)
- mycandidate = *worker;
- }
- }
- checked_standby = checking_standby++;
+ workers = ap_proxy_balancer_usable_workers(balancer, r, wpool);
+
+ for (i = 0; i < workers->nelts; i++) {
+ worker = &APR_ARRAY_IDX(workers, i, proxy_worker *);
+ (*worker)->s->lbstatus += (*worker)->s->lbfactor;
+ total_factor += (*worker)->s->lbfactor;
+
+ if (
+ !mycandidate
+ || ((*worker)->s->lbstatus > mycandidate->s->lbstatus)
+ ) {
+ mycandidate = *worker;
}
- cur_lbset++;
- } while (cur_lbset <= max_lbset && !mycandidate);
+ }
+
+ apr_pool_destroy(wpool);
if (mycandidate) {
mycandidate->s->lbstatus -= total_factor;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01208)
"proxy: byrequests selected worker \"%s\" : busy %" APR_SIZE_T_FMT " : lbstatus %d",
mycandidate->s->name, mycandidate->s->busy, mycandidate->s->lbstatus);
-
}
return mycandidate;
--- a/modules/proxy/balancers/mod_lbmethod_bytraffic.c
+++ a/modules/proxy/balancers/mod_lbmethod_bytraffic.c
@@ -48,12 +48,10 @@
int i;
apr_off_t mytraffic = 0;
apr_off_t curmin = 0;
+ apr_array_header_t *workers;
proxy_worker **worker;
proxy_worker *mycandidate = NULL;
- int cur_lbset = 0;
- int max_lbset = 0;
- int checking_standby;
- int checked_standby;
+ apr_pool_t *wpool;
if (!ap_proxy_retry_worker_fn) {
ap_proxy_retry_worker_fn =
@@ -68,48 +66,22 @@
"proxy: Entering bytraffic for BALANCER (%s)",
balancer->s->name);
- /* First try to see if we have available candidate */
- do {
- checking_standby = checked_standby = 0;
- while (!mycandidate && !checked_standby) {
- worker = (proxy_worker **)balancer->workers->elts;
- for (i = 0; i < balancer->workers->nelts; i++, worker++) {
- if (!checking_standby) { /* first time through */
- if ((*worker)->s->lbset > max_lbset)
- max_lbset = (*worker)->s->lbset;
- }
- if (
- ((*worker)->s->lbset != cur_lbset) ||
- (checking_standby ? !PROXY_WORKER_IS_STANDBY(*worker) : PROXY_WORKER_IS_STANDBY(*worker)) ||
- (PROXY_WORKER_IS_DRAINING(*worker))
- ) {
- continue;
- }
+ apr_pool_create(&wpool, r->pool);
- /* If the worker is in error state run
- * retry on that worker. It will be marked as
- * operational if the retry timeout is elapsed.
- * The worker might still be unusable, but we try
- * anyway.
- */
- if (!PROXY_WORKER_IS_USABLE(*worker))
- ap_proxy_retry_worker_fn("BALANCER", *worker, r->server);
- /* Take into calculation only the workers that are
- * not in error state or not disabled.
- */
- if (PROXY_WORKER_IS_USABLE(*worker)) {
- mytraffic = ((*worker)->s->transferred/(*worker)->s->lbfactor) +
- ((*worker)->s->read/(*worker)->s->lbfactor);
- if (!mycandidate || mytraffic < curmin) {
- mycandidate = *worker;
- curmin = mytraffic;
- }
- }
- }
- checked_standby = checking_standby++;
+ workers = ap_proxy_balancer_usable_workers(balancer, r, wpool);
+
+ for (i = 0; i < workers->nelts; i++) {
+ worker = &APR_ARRAY_IDX(workers, i, proxy_worker *);
+ mytraffic = (*worker)->s->transferred/(*worker)->s->lbfactor
+ + (*worker)->s->read/(*worker)->s->lbfactor;
+
+ if (!mycandidate || (mytraffic < curmin)) {
+ mycandidate = *worker;
+ curmin = mytraffic;
}
- cur_lbset++;
- } while (cur_lbset <= max_lbset && !mycandidate);
+ }
+
+ apr_pool_destroy(wpool);
if (mycandidate) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01210)
--- a/modules/proxy/mod_proxy.c
+++ a/modules/proxy/mod_proxy.c
@@ -68,6 +68,7 @@
{PROXY_WORKER_STOPPED, PROXY_WORKER_STOPPED_FLAG, "Stop "},
{PROXY_WORKER_IN_ERROR, PROXY_WORKER_IN_ERROR_FLAG, "Err "},
{PROXY_WORKER_HOT_STANDBY, PROXY_WORKER_HOT_STANDBY_FLAG, "Stby "},
+ {PROXY_WORKER_HOT_SPARE, PROXY_WORKER_HOT_SPARE_FLAG, "Spar "},
{PROXY_WORKER_FREE, PROXY_WORKER_FREE_FLAG, "Free "},
{PROXY_WORKER_HC_FAIL, PROXY_WORKER_HC_FAIL_FLAG, "HcFl "},
{0x0, '\0', NULL}
@@ -365,7 +366,7 @@
if (strlen(val) != 1) {
if (!strcasecmp(val, "off"))
balancer->s->sticky_separator = 0;
- else
+ else
return "stickysessionsep must be a single character or Off";
}
else
@@ -787,8 +788,8 @@
|| !r->uri || r->uri[0] != '/') {
return DECLINED;
}
-
- if (apr_table_get(r->subprocess_env, "no-proxy")) {
+
+ if (apr_table_get(r->subprocess_env, "no-proxy")) {
return DECLINED;
}
@@ -1292,7 +1293,7 @@
if (DECLINED == access_status) {
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01144)
- "No protocol handler was valid for the URL %s "
+ "No protocol handler was valid for the URL %s "
"(scheme '%s'). "
"If you are using a DSO version of mod_proxy, make sure "
"the proxy submodules are included in the configuration "
@@ -3114,4 +3115,3 @@
APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(proxy, PROXY, int, detach_backend,
(request_rec *r, proxy_conn_rec *backend),
(r, backend), OK, DECLINED)
-
--- a/modules/proxy/mod_proxy.h
+++ a/modules/proxy/mod_proxy.h
@@ -306,6 +306,7 @@
#define PROXY_WORKER_HOT_STANDBY 0x0100
#define PROXY_WORKER_FREE 0x0200
#define PROXY_WORKER_HC_FAIL 0x0400
+#define PROXY_WORKER_HOT_SPARE 0x0800
/* worker status flags */
#define PROXY_WORKER_INITIALIZED_FLAG 'O'
@@ -319,6 +320,7 @@
#define PROXY_WORKER_HOT_STANDBY_FLAG 'H'
#define PROXY_WORKER_FREE_FLAG 'F'
#define PROXY_WORKER_HC_FAIL_FLAG 'C'
+#define PROXY_WORKER_HOT_SPARE_FLAG 'R'
#define PROXY_WORKER_NOT_USABLE_BITMAP ( PROXY_WORKER_IN_SHUTDOWN | \
PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR | \
@@ -329,6 +331,8 @@
#define PROXY_WORKER_IS_STANDBY(f) ( (f)->s->status & PROXY_WORKER_HOT_STANDBY )
+#define PROXY_WORKER_IS_SPARE(f) ( (f)->s->status & PROXY_WORKER_HOT_SPARE )
+
#define PROXY_WORKER_IS_USABLE(f) ( ( !( (f)->s->status & PROXY_WORKER_NOT_USABLE_BITMAP) ) && \
PROXY_WORKER_IS_INITIALIZED(f) )
@@ -503,7 +507,7 @@
char sticky_separator; /* separator for sessionid/route */
unsigned int forcerecovery_set:1;
unsigned int scolonsep_set:1;
- unsigned int sticky_force_set:1;
+ unsigned int sticky_force_set:1;
unsigned int nonce_set:1;
unsigned int sticky_separator_set:1;
} proxy_balancer_shared;
@@ -665,7 +669,7 @@
* @param addr resolved address of hostname, or NULL if not known
* @return OK on success, or else an errro
*/
-PROXY_DECLARE(int) ap_proxy_checkproxyblock(request_rec *r, proxy_server_conf *conf,
+PROXY_DECLARE(int) ap_proxy_checkproxyblock(request_rec *r, proxy_server_conf *conf,
const char *hostname, apr_sockaddr_t *addr);
@@ -857,6 +861,17 @@
apr_pool_t *p);
/**
+ * Retrieve a list of usable workers for the balancer
+ * @param balancer balancer for which usable workers should be retrieved
+ * @param r current request record
+ * @param p memory pool used for array
+ * @return APR array of usable workers
+ */
+PROXY_DECLARE(apr_array_header_t *) ap_proxy_balancer_usable_workers(proxy_balancer *balancer,
+ request_rec *r,
+ apr_pool_t *p);
+
+/**
* Find the shm of the worker as needed
* @param storage slotmem provider
* @param slot slotmem instance
--- a/modules/proxy/mod_proxy_balancer.c
+++ a/modules/proxy/mod_proxy_balancer.c
@@ -312,7 +312,7 @@
/*
* If we found a value for stickysession, find the first '.' (or whatever
* sticky_separator is set to) within. Everything after '.' (if present)
- * is our route.
+ * is our route.
*/
if ((*route) && (balancer->s->sticky_separator != 0) && ((*route = strchr(*route, balancer->s->sticky_separator)) != NULL ))
(*route)++;
@@ -458,7 +458,7 @@
static apr_status_t decrement_busy_count(void *worker_)
{
proxy_worker *worker = worker_;
-
+
if (worker->s->busy) {
worker->s->busy--;
}
@@ -1127,6 +1127,9 @@
if ((val = apr_table_get(params, "w_status_H"))) {
ap_proxy_set_wstatus(PROXY_WORKER_HOT_STANDBY_FLAG, atoi(val), wsel);
}
+ if ((val = apr_table_get(params, "w_status_R"))) {
+ ap_proxy_set_wstatus(PROXY_WORKER_HOT_SPARE_FLAG, atoi(val), wsel);
+ }
if ((val = apr_table_get(params, "w_status_S"))) {
ap_proxy_set_wstatus(PROXY_WORKER_STOPPED_FLAG, atoi(val), wsel);
}
@@ -1651,7 +1654,8 @@
"Ignore Errors | "
"Draining Mode | "
"Disabled | "
- "Hot Standby | ", r);
+ "Hot Standby | "
+ "Hot Spare | ", r);
if (hc_show_exprs_f) {
ap_rputs("HC Fail | ", r);
}
@@ -1660,6 +1664,7 @@
create_radio("w_status_N", (PROXY_WORKER_IS(wsel, PROXY_WORKER_DRAIN)), r);
create_radio("w_status_D", (PROXY_WORKER_IS(wsel, PROXY_WORKER_DISABLED)), r);
create_radio("w_status_H", (PROXY_WORKER_IS(wsel, PROXY_WORKER_HOT_STANDBY)), r);
+ create_radio("w_status_R", (PROXY_WORKER_IS(wsel, PROXY_WORKER_HOT_SPARE)), r);
if (hc_show_exprs_f) {
create_radio("w_status_C", (PROXY_WORKER_IS(wsel, PROXY_WORKER_HC_FAIL)), r);
}
--- a/modules/proxy/proxy_util.c
+++ a/modules/proxy/proxy_util.c
@@ -40,6 +40,9 @@
#include "apr_support.h" /* for apr_wait_for_io_or_timeout() */
#endif
+static int (*ap_proxy_retry_worker_fn)(const char *proxy_function,
+ proxy_worker *worker, server_rec *s) = NULL;
+
APLOG_USE_MODULE(proxy);
/*
@@ -1291,6 +1294,103 @@
return APR_SUCCESS;
}
+PROXY_DECLARE(apr_array_header_t *) ap_proxy_balancer_usable_workers(proxy_balancer *balancer,
+ request_rec *r,
+ apr_pool_t *p)
+{
+ int i = 0;
+ int cur_lbset = 0;
+ int max_lbset = 0;
+ int unusable_workers = 0;
+ apr_array_header_t *workers;
+ apr_array_header_t *spares;
+ apr_array_header_t *standbys;
+ proxy_worker **worker;
+
+ if (!ap_proxy_retry_worker_fn) {
+ ap_proxy_retry_worker_fn =
+ APR_RETRIEVE_OPTIONAL_FN(ap_proxy_retry_worker);
+ if (!ap_proxy_retry_worker_fn) {
+ /* can only happen if mod_proxy isn't loaded */
+ return NULL;
+ }
+ }
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(10033)
+ "proxy: Entering %s for BALANCER (%s)",
+ balancer->lbmethod->name, balancer->s->name);
+
+ workers = apr_array_make(p, balancer->workers->nelts, sizeof(proxy_worker*));
+ spares = apr_array_make(p, 1, sizeof(proxy_worker*));
+ standbys = apr_array_make(p, 1, sizeof(proxy_worker*));
+
+ /* Process lbsets in order, only replacing unusable workers in a given lbset
+ * with available spares from the same lbset. Hot standbys will be used as a
+ * last resort when all other workers and spares are unavailable.
+ */
+ for (cur_lbset = 0; (workers->nelts == 0) && (cur_lbset <= max_lbset); cur_lbset++) {
+ unusable_workers = 0;
+ apr_array_clear(spares);
+ apr_array_clear(standbys);
+
+ for (i = 0; i < balancer->workers->nelts; i++) {
+ worker = &APR_ARRAY_IDX(balancer->workers, i, proxy_worker *);
+
+ if ((*worker)->s->lbset > max_lbset) {
+ max_lbset = (*worker)->s->lbset;
+ }
+
+ if (
+ ((*worker)->s->lbset != cur_lbset)
+ || PROXY_WORKER_IS_DRAINING(*worker)
+ ) {
+ continue;
+ }
+
+ /* If the worker is in error state run retry on that worker. It will
+ * be marked as operational if the retry timeout is elapsed. The
+ * worker might still be unusable, but we try anyway.
+ */
+ if (!PROXY_WORKER_IS_USABLE(*worker)) {
+ ap_proxy_retry_worker_fn("BALANCER", *worker, r->server);
+ }
+
+ if (PROXY_WORKER_IS_SPARE(*worker)) {
+ if (PROXY_WORKER_IS_USABLE(*worker)) {
+ APR_ARRAY_PUSH(spares, proxy_worker *) = *worker;
+ }
+ }
+ else if (PROXY_WORKER_IS_STANDBY(*worker)) {
+ if (PROXY_WORKER_IS_USABLE(*worker)) {
+ APR_ARRAY_PUSH(standbys, proxy_worker *) = *worker;
+ }
+ }
+ else if (PROXY_WORKER_IS_USABLE(*worker)) {
+ APR_ARRAY_PUSH(workers, proxy_worker *) = *worker;
+ }
+ else {
+ unusable_workers++;
+ }
+ }
+
+ /* Replace any unusable workers with any available spares. */
+ if (spares->nelts > 0) {
+ if (spares->nelts > unusable_workers) {
+ spares->nelts = unusable_workers;
+ }
+
+ apr_array_cat(workers, spares);
+ }
+
+ /* If no workers are available, use the standbys. */
+ if ((workers->nelts == 0) && (standbys->nelts > 0)) {
+ apr_array_cat(workers, standbys);
+ }
+ }
+
+ return workers;
+}
+
/*
* CONNECTION related...
*/
@@ -1976,9 +2076,9 @@
/*
* In the case of the reverse proxy, we need to see if we
* were passed a UDS url (eg: from mod_proxy) and adjust uds_path
- * as required.
+ * as required.
*/
-static void fix_uds_filename(request_rec *r, char **url)
+static void fix_uds_filename(request_rec *r, char **url)
{
char *ptr, *ptr2;
if (!r || !r->filename) return;
@@ -2289,9 +2389,9 @@
* Figure out if our passed in proxy_conn_rec has a usable
* address cached.
*
- * TODO: Handle this much better...
+ * TODO: Handle this much better...
*
- * XXX: If generic workers are ever address-reusable, we need
+ * XXX: If generic workers are ever address-reusable, we need
* to check host and port on the conn and be careful about
* spilling the cached addr from the worker.
*/
@@ -2432,7 +2532,7 @@
}
/* check if ProxyBlock directive on this host */
- if (OK != ap_proxy_checkproxyblock(r, conf, uri->hostname,
+ if (OK != ap_proxy_checkproxyblock(r, conf, uri->hostname,
proxyname ? NULL : conn->addr)) {
return ap_proxyerror(r, HTTP_FORBIDDEN,
"Connect to remote machine blocked");