ASF Bugzilla – Attachment 31616 Details for
Bug 55897
[PATCH] patch with SO_REUSEPORT support
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
patch with SO_REUSEPORT support
httpd_trunk_so_reuseport.patch (text/plain), 51.22 KB, created by
Yingqi.Lu
on 2014-05-13 19:50:21 UTC
(
hide
)
Description:
patch with SO_REUSEPORT support
Filename:
MIME Type:
Creator:
Yingqi.Lu
Created:
2014-05-13 19:50:21 UTC
Size:
51.22 KB
patch
obsolete
>Only in /home/httpd-trunk.new: 1 >diff -ru /home/httpd-trunk/include/ap_listen.h /home/httpd-trunk.new/include/ap_listen.h >--- /home/httpd-trunk/include/ap_listen.h 2014-04-11 15:17:07.766999560 -0400 >+++ /home/httpd-trunk.new/include/ap_listen.h 2014-05-07 17:19:02.167975169 -0400 >@@ -78,6 +78,14 @@ > */ > AP_DECLARE_DATA extern ap_listen_rec *ap_listeners; > >+AP_DECLARE_DATA extern ap_listen_rec **mpm_listen; >+ >+AP_DECLARE_DATA extern int enable_default_listener; >+ >+AP_DECLARE_DATA extern int num_buckets; >+ >+AP_DECLARE_DATA extern int have_so_reuseport; >+ > /** > * Setup all of the defaults for the listener list > */ >@@ -91,6 +99,14 @@ > */ > AP_DECLARE(int) ap_setup_listeners(server_rec *s); > >+/**This function duplicates ap_listeners. >+ * @param s The global server_rec >+ * @param p The config pool >+ * @param num_buckets The total number of listener buckets. >+**/ >+AP_DECLARE(apr_status_t) ap_duplicate_listeners(server_rec *s, apr_pool_t *p, int num_buckets); >+ >+ > /** > * Loop through the global ap_listen_rec list and close each of the sockets. > */ >diff -ru /home/httpd-trunk/include/mpm_common.h /home/httpd-trunk.new/include/mpm_common.h >--- /home/httpd-trunk/include/mpm_common.h 2014-04-11 15:17:07.754999558 -0400 >+++ /home/httpd-trunk.new/include/mpm_common.h 2014-05-07 17:19:02.167975169 -0400 >@@ -267,16 +267,18 @@ > * Write data to the pipe-of-death, signalling that one child process > * should die. > * @param pod the pipe-of-death to write to. >+ * @param my_bucket the bucket that holds the dying child process. > */ >-AP_DECLARE(apr_status_t) ap_mpm_pod_signal(ap_pod_t *pod); >+AP_DECLARE(apr_status_t) ap_mpm_pod_signal(ap_pod_t *pod, int child_bucket); > > /** > * Write data to the pipe-of-death, signalling that all child process > * should die. > * @param pod The pipe-of-death to write to. > * @param num The number of child processes to kill >+ * @param my_bucket the bucket that holds the dying child process. 
> */ >-AP_DECLARE(void) ap_mpm_pod_killpg(ap_pod_t *pod, int num); >+AP_DECLARE(void) ap_mpm_pod_killpg(ap_pod_t *pod, int num, int child_bucket); > > #define AP_MPM_PODX_RESTART_CHAR '$' > #define AP_MPM_PODX_GRACEFUL_CHAR '!' >diff -ru /home/httpd-trunk/server/listen.c /home/httpd-trunk.new/server/listen.c >--- /home/httpd-trunk/server/listen.c 2014-04-11 15:17:18.297999551 -0400 >+++ /home/httpd-trunk.new/server/listen.c 2014-05-07 17:19:02.168975169 -0400 >@@ -38,6 +38,11 @@ > > AP_DECLARE_DATA ap_listen_rec *ap_listeners = NULL; > >+AP_DECLARE_DATA ap_listen_rec **mpm_listen = NULL; >+AP_DECLARE_DATA int enable_default_listener = 1; >+AP_DECLARE_DATA int num_buckets = 1; >+AP_DECLARE_DATA int have_so_reuseport = 1; >+ > static ap_listen_rec *old_listeners; > static int ap_listenbacklog; > static int send_buffer_size; >@@ -124,6 +129,24 @@ > ap_sock_disable_nagle(s); > #endif > >+#ifndef SO_REUSEPORT >+#define SO_REUSEPORT 15 >+#endif >+ int thesock; >+ apr_os_sock_get(&thesock, s); >+ if (setsockopt(thesock, SOL_SOCKET, SO_REUSEPORT, (void *)&one, sizeof(int)) < 0) { >+ if (errno == ENOPROTOOPT) { >+ have_so_reuseport = 0; >+ } /* Check if SO_REUSEPORT is supported by the running Linux Kernel.*/ >+ else { >+ ap_log_perror(APLOG_MARK, APLOG_CRIT, stat, p, APLOGNO() >+ "make_sock: for address %pI, apr_socket_opt_set: (SO_REUSEPORT)", >+ server->bind_addr); >+ apr_socket_close(s); >+ return errno; >+ } >+ } >+ > if (do_bind_listen) { > #if APR_HAVE_IPV6 > if (server->bind_addr->family == APR_INET6) { >@@ -179,7 +202,7 @@ > #endif > > server->sd = s; >- server->active = 1; >+ server->active = enable_default_listener; > > server->accept_func = NULL; > >@@ -575,7 +598,7 @@ > } > } > #endif >- if (make_sock(pool, lr, 1) == APR_SUCCESS) { >+ if (make_sock(pool, lr, enable_default_listener) == APR_SUCCESS) { > ++num_open; > } > else { >@@ -727,13 +750,73 @@ > return num_listeners; > } > >+AP_DECLARE(apr_status_t) ap_duplicate_listeners(server_rec *s, apr_pool_t *p, >+ 
int num_buckets) { >+ int i; >+ apr_status_t stat; >+ int use_nonblock = 0; >+ ap_listen_rec *lr; >+ >+ mpm_listen = apr_palloc(p, sizeof(ap_listen_rec*) * num_buckets); >+ for (i = 0; i < num_buckets; i++) { >+ lr = ap_listeners; >+ ap_listen_rec *last = NULL; >+ while (lr) { >+ ap_listen_rec *duplr; >+ char *hostname; >+ apr_port_t port; >+ apr_sockaddr_t *sa; >+ duplr = apr_palloc(p, sizeof(ap_listen_rec)); >+ duplr->slave = NULL; >+ duplr->protocol = apr_pstrdup(p, lr->protocol); >+ hostname = apr_pstrdup(p, lr->bind_addr->hostname); >+ port = lr->bind_addr->port; >+ apr_sockaddr_info_get(&sa, hostname, APR_UNSPEC, port, 0, p); >+ duplr->bind_addr = sa; >+ duplr->next = NULL; >+ apr_socket_t *temps = duplr->sd; >+ if ((stat = apr_socket_create(&duplr->sd, duplr->bind_addr->family, >+ SOCK_STREAM, 0, p)) != APR_SUCCESS) { >+ ap_log_perror(APLOG_MARK, APLOG_CRIT, 0, p, APLOGNO() >+ "ap_duplicate_socket: for address %pI, " >+ "cannot duplicate a new socket!", >+ duplr->bind_addr); >+ return stat; >+ } >+ make_sock(p, duplr, 1); >+#if AP_NONBLOCK_WHEN_MULTI_LISTEN >+ use_nonblock = (ap_listeners && ap_listeners->next); >+ if ((stat = apr_socket_opt_set(duplr->sd, APR_SO_NONBLOCK, use_nonblock)) >+ != APR_SUCCESS) { >+ ap_log_perror(APLOG_MARK, APLOG_CRIT, stat, p, APLOGNO() >+ "unable to control socket non-blocking status"); >+ return stat; >+ } >+#endif >+ ap_apply_accept_filter(p, duplr, s); >+ >+ if (last == NULL) { >+ mpm_listen[i] = last = duplr; >+ } >+ else { >+ last->next = duplr; >+ last = duplr; >+ } >+ lr = lr->next; >+ } >+ } >+ return APR_SUCCESS; >+} >+ > AP_DECLARE_NONSTD(void) ap_close_listeners(void) > { > ap_listen_rec *lr; >- >- for (lr = ap_listeners; lr; lr = lr->next) { >- apr_socket_close(lr->sd); >- lr->active = 0; >+ int i; >+ for (i = 0; i < num_buckets; i++) { >+ for (lr = mpm_listen[i]; lr; lr = lr->next) { >+ apr_socket_close(lr->sd); >+ lr->active = 0; >+ } > } > } > >diff -ru /home/httpd-trunk/server/mpm/event/event.c 
/home/httpd-trunk.new/server/mpm/event/event.c >--- /home/httpd-trunk/server/mpm/event/event.c 2014-04-11 15:17:18.051999552 -0400 >+++ /home/httpd-trunk.new/server/mpm/event/event.c 2014-05-08 15:05:09.263981252 -0400 >@@ -59,6 +59,8 @@ > #include "apr_want.h" > #include "apr_version.h" > >+#include <stdlib.h> >+ > #if APR_HAVE_UNISTD_H > #include <unistd.h> > #endif >@@ -348,7 +350,7 @@ > * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by > * without the need to spawn. > */ >- int idle_spawn_rate; >+ int *idle_spawn_rate; > #ifndef MAX_SPAWN_RATE > #define MAX_SPAWN_RATE (32) > #endif >@@ -358,7 +360,10 @@ > > #define ID_FROM_CHILD_THREAD(c, t) ((c * thread_limit) + t) > >-static ap_pod_t *pod; >+static ap_pod_t **pod; >+static ap_pod_t *child_pod; >+ap_listen_rec *child_listen; >+int *bucket; /* bucket array for the httpd child processes */ > > /* The event MPM respects a couple of runtime flags that can aid > * in debugging. Setting the -DNO_DETACH flag will prevent the root process >@@ -1260,7 +1265,7 @@ > TO_QUEUE_INIT(short_linger_q); > > listener_pollfd = apr_palloc(p, sizeof(apr_pollfd_t) * num_listensocks); >- for (lr = ap_listeners; lr != NULL; lr = lr->next, i++) { >+ for (lr = child_listen; lr != NULL; lr = lr->next, i++) { > apr_pollfd_t *pfd; > AP_DEBUG_ASSERT(i < num_listensocks); > pfd = &listener_pollfd[i]; >@@ -2345,6 +2350,8 @@ > thread_starter *ts; > apr_threadattr_t *thread_attr; > apr_thread_t *start_thread_id; >+ int i; >+ ap_listen_rec *lr; > > mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this > * child initializes >@@ -2353,6 +2360,19 @@ > ap_fatal_signal_child_setup(ap_server_conf); > apr_pool_create(&pchild, pconf); > >+ /* close unused listeners and pods */ >+ for (i = 0; i < num_buckets; i++) { >+ if (i != bucket[child_num_arg]) { >+ lr = mpm_listen[i]; >+ while(lr) { >+ apr_socket_close(lr->sd); >+ lr = lr->next; >+ } >+ mpm_listen[i]->active = 0; >+ ap_mpm_podx_close(pod[i]); >+ } >+ } >+ > 
/*stuff to do before we switch id's, so we have permissions. */ > ap_reopen_scoreboard(pchild, NULL, 0); > >@@ -2462,7 +2482,7 @@ > apr_signal(SIGTERM, dummy_signal_handler); > /* Watch for any messages from the parent over the POD */ > while (1) { >- rv = ap_mpm_podx_check(pod); >+ rv = ap_mpm_podx_check(child_pod); > if (rv == AP_MPM_PODX_NORESTART) { > /* see if termination was triggered while we slept */ > switch (terminate_mode) { >@@ -2515,6 +2535,9 @@ > /* NOTREACHED */ > } > >+ child_listen = mpm_listen[bucket[slot]]; >+ child_pod = pod[bucket[slot]]; >+ > if ((pid = fork()) == -1) { > ap_log_error(APLOG_MARK, APLOG_ERR, errno, s, APLOGNO(00481) > "fork: Unable to fork new process"); >@@ -2575,6 +2598,7 @@ > if (ap_scoreboard_image->parent[i].pid != 0) { > continue; > } >+ bucket[i] = i % num_buckets; > if (make_child(ap_server_conf, i) < 0) { > break; > } >@@ -2582,7 +2606,7 @@ > } > } > >-static void perform_idle_server_maintenance(void) >+static void perform_idle_server_maintenance(int child_bucket) > { > int i, j; > int idle_thread_count; >@@ -2590,7 +2614,9 @@ > process_score *ps; > int free_length; > int totally_free_length = 0; >- int free_slots[MAX_SPAWN_RATE]; >+ int child_max_spawn_rate; >+ child_max_spawn_rate = MAX_SPAWN_RATE/num_buckets; >+ int free_slots[child_max_spawn_rate]; > int last_non_dead; > int total_non_dead; > int active_thread_count = 0; >@@ -2612,7 +2638,7 @@ > int child_threads_active = 0; > > if (i >= retained->max_daemons_limit >- && totally_free_length == retained->idle_spawn_rate) >+ && totally_free_length == retained->idle_spawn_rate[child_bucket]) > /* short cut if all active processes have been examined and > * enough empty scoreboard slots have been found > */ >@@ -2639,7 +2665,8 @@ > if (ps->pid != 0) { /* XXX just set all_dead_threads in outer > for loop if no pid? 
not much else matters */ > if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting >- && ps->generation == retained->my_generation) >+ && ps->generation == retained->my_generation && >+ bucket[i] == child_bucket) > { > ++idle_thread_count; > } >@@ -2650,8 +2677,8 @@ > } > active_thread_count += child_threads_active; > if (any_dead_threads >- && totally_free_length < retained->idle_spawn_rate >- && free_length < MAX_SPAWN_RATE >+ && totally_free_length < retained->idle_spawn_rate[child_bucket] >+ && free_length < MAX_SPAWN_RATE/num_buckets > && (!ps->pid /* no process in the slot */ > || ps->quiescing)) { /* or at least one is going away */ > if (all_dead_threads) { >@@ -2707,12 +2734,12 @@ > > retained->max_daemons_limit = last_non_dead + 1; > >- if (idle_thread_count > max_spare_threads) { >+ if (idle_thread_count > max_spare_threads/num_buckets) { > /* Kill off one child */ >- ap_mpm_podx_signal(pod, AP_MPM_PODX_GRACEFUL); >- retained->idle_spawn_rate = 1; >+ ap_mpm_podx_signal(pod[child_bucket], AP_MPM_PODX_GRACEFUL); >+ retained->idle_spawn_rate[child_bucket] = 1; > } >- else if (idle_thread_count < min_spare_threads) { >+ else if (idle_thread_count < min_spare_threads/num_buckets) { > /* terminate the free list */ > if (free_length == 0) { /* scoreboard is full, can't fork */ > >@@ -2730,13 +2757,13 @@ > ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00485) > "scoreboard is full, not at MaxRequestWorkers"); > } >- retained->idle_spawn_rate = 1; >+ retained->idle_spawn_rate[child_bucket] = 1; > } > else { >- if (free_length > retained->idle_spawn_rate) { >- free_length = retained->idle_spawn_rate; >+ if (free_length > retained->idle_spawn_rate[child_bucket]) { >+ free_length = retained->idle_spawn_rate[child_bucket]; > } >- if (retained->idle_spawn_rate >= 8) { >+ if (retained->idle_spawn_rate[child_bucket] >= 8) { > ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00486) > "server seems busy, (you may need " > "to 
increase StartServers, ThreadsPerChild " >@@ -2746,6 +2773,7 @@ > idle_thread_count, total_non_dead); > } > for (i = 0; i < free_length; ++i) { >+ bucket[free_slots[i]] = child_bucket; > make_child(ap_server_conf, free_slots[i]); > } > /* the next time around we want to spawn twice as many if this >@@ -2754,13 +2782,13 @@ > if (retained->hold_off_on_exponential_spawning) { > --retained->hold_off_on_exponential_spawning; > } >- else if (retained->idle_spawn_rate < MAX_SPAWN_RATE) { >- retained->idle_spawn_rate *= 2; >+ else if (retained->idle_spawn_rate[child_bucket] < MAX_SPAWN_RATE/num_buckets) { >+ retained->idle_spawn_rate[child_bucket] *= 2; > } > } > } > else { >- retained->idle_spawn_rate = 1; >+ retained->idle_spawn_rate[child_bucket] = 1; > } > } > >@@ -2817,7 +2845,7 @@ > ap_scoreboard_image->parent[child_slot].quiescing = 0; > if (processed_status == APEXIT_CHILDSICK) { > /* resource shortage, minimize the fork rate */ >- retained->idle_spawn_rate = 1; >+ retained->idle_spawn_rate[bucket[child_slot]] = 1; > } > else if (remaining_children_to_start > && child_slot < ap_daemons_limit) { >@@ -2835,7 +2863,9 @@ > if (processed_status == APEXIT_CHILDSICK > && old_gen == retained->my_generation) { > /* resource shortage, minimize the fork rate */ >- retained->idle_spawn_rate = 1; >+ for (i = 0; i < num_buckets; i++) { >+ retained->idle_spawn_rate[i] = 1; >+ } > } > #if APR_HAS_OTHER_CHILD > } >@@ -2873,8 +2903,10 @@ > */ > continue; > } >- >- perform_idle_server_maintenance(); >+ >+ for (i = 0; i < num_buckets; i++) { >+ perform_idle_server_maintenance(i); >+ } > } > } > >@@ -2882,6 +2914,9 @@ > { > int remaining_children_to_start; > >+ int i; >+ ap_listen_rec *lr; >+ > ap_log_pid(pconf, ap_pid_fname); > > if (!retained->is_graceful) { >@@ -2895,11 +2930,13 @@ > ap_scoreboard_image->global->running_generation = retained->my_generation; > } > >+ bucket = apr_palloc(_pconf, sizeof(int) * ap_daemons_limit); >+ > restart_pending = shutdown_pending = 0; > 
set_signals(); > /* Don't thrash... */ >- if (max_spare_threads < min_spare_threads + threads_per_child) >- max_spare_threads = min_spare_threads + threads_per_child; >+ if (max_spare_threads < min_spare_threads + threads_per_child * num_buckets) >+ max_spare_threads = min_spare_threads + threads_per_child * num_buckets; > > /* If we're doing a graceful_restart then we're going to see a lot > * of children exiting immediately when we get into the main loop >@@ -2940,7 +2977,9 @@ > /* Time to shut down: > * Kill child processes, tell them to call child_exit, etc... > */ >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_RESTART); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_RESTART); >+ } > ap_reclaim_child_processes(1, /* Start with SIGTERM */ > event_note_child_killed); > >@@ -2961,7 +3000,9 @@ > > /* Close our listeners, and then ask our children to do same */ > ap_close_listeners(); >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_GRACEFUL); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_GRACEFUL); >+ } > ap_relieve_child_processes(event_note_child_killed); > > if (!child_fatal) { >@@ -3001,7 +3042,9 @@ > * way, try and make sure that all of our processes are > * really dead. > */ >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_RESTART); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_RESTART); >+ } > ap_reclaim_child_processes(1, event_note_child_killed); > > return DONE; >@@ -3027,8 +3070,9 @@ > AP_SIG_GRACEFUL_STRING > " received. Doing graceful restart"); > /* wake up the children...time to die. But we'll have more soon */ >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_GRACEFUL); >- >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_GRACEFUL); >+ } > > /* This is mostly for debugging... 
so that we know what is still > * gracefully dealing with existing request. >@@ -3040,7 +3084,9 @@ > * and a SIGHUP, we may as well use the same signal, because some user > * pthreads are stealing signals from us left and right. > */ >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_RESTART); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_RESTART); >+ } > > ap_reclaim_child_processes(1, /* Start with SIGTERM */ > event_note_child_killed); >@@ -3060,6 +3106,8 @@ > int startup = 0; > int level_flags = 0; > apr_status_t rv; >+ int i; >+ int num_of_cores = 0; > > pconf = p; > >@@ -3069,6 +3117,8 @@ > level_flags |= APLOG_STARTUP; > } > >+ enable_default_listener = 0; >+ > if ((num_listensocks = ap_setup_listeners(ap_server_conf)) < 1) { > ap_log_error(APLOG_MARK, APLOG_ALERT | level_flags, 0, > (startup ? NULL : s), >@@ -3076,12 +3126,32 @@ > return DONE; > } > >+ enable_default_listener = 1; >+ if (have_so_reuseport) { >+ num_of_cores = sysconf(_SC_NPROCESSORS_ONLN); >+ if (num_of_cores > 8) { >+ num_buckets = sysconf(_SC_NPROCESSORS_ONLN)/8; >+ } >+ else { >+ num_buckets = 1; >+ } >+ } >+ else { >+ num_buckets = 1; >+ } >+ >+ ap_duplicate_listeners(ap_server_conf, pconf, num_buckets); >+ >+ pod = apr_palloc(pconf, sizeof(ap_pod_t *) * num_buckets); >+ > if (!one_process) { >- if ((rv = ap_mpm_podx_open(pconf, &pod))) { >- ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv, >- (startup ? NULL : s), >- "could not open pipe-of-death"); >- return DONE; >+ for (i = 0; i < num_buckets; i++) { >+ if ((rv = ap_mpm_podx_open(pconf, &pod[i]))) { >+ ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv, >+ (startup ? 
NULL : s), >+ "could not open pipe-of-death"); >+ return DONE; >+ } > } > } > /* for skiplist */ >@@ -3095,6 +3165,7 @@ > int no_detach, debug, foreground; > apr_status_t rv; > const char *userdata_key = "mpm_event_module"; >+ int i; > > mpm_state = AP_MPMQ_STARTING; > >@@ -3115,7 +3186,6 @@ > if (!retained) { > retained = ap_retained_data_create(userdata_key, sizeof(*retained)); > retained->max_daemons_limit = -1; >- retained->idle_spawn_rate = 1; > } > ++retained->module_loads; > if (retained->module_loads == 2) { >@@ -3129,6 +3199,10 @@ > "atomics not working as expected - add32 of negative number"); > return HTTP_INTERNAL_SERVER_ERROR; > } >+ retained->idle_spawn_rate = apr_palloc(pconf, sizeof(int) * num_buckets); >+ for (i = 0; i< num_buckets; i++) { >+ retained->idle_spawn_rate[i] = 1; >+ } > rv = apr_pollset_create(&event_pollset, 1, plog, > APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY); > if (rv != APR_SUCCESS) { >diff -ru /home/httpd-trunk/server/mpm/prefork/prefork.c /home/httpd-trunk.new/server/mpm/prefork/prefork.c >--- /home/httpd-trunk/server/mpm/prefork/prefork.c 2014-04-11 15:17:17.923999555 -0400 >+++ /home/httpd-trunk.new/server/mpm/prefork/prefork.c 2014-05-07 17:19:02.169975169 -0400 >@@ -48,6 +48,8 @@ > #include "ap_mmn.h" > #include "apr_poll.h" > >+#include <stdlib.h> >+ > #ifdef HAVE_TIME_H > #include <time.h> > #endif >@@ -86,14 +88,19 @@ > > /* config globals */ > >-static apr_proc_mutex_t *accept_mutex; >+static apr_proc_mutex_t **accept_mutex; > static int ap_daemons_to_start=0; > static int ap_daemons_min_free=0; > static int ap_daemons_max_free=0; > static int ap_daemons_limit=0; /* MaxRequestWorkers */ > static int server_limit = 0; > static int mpm_state = AP_MPMQ_STARTING; >-static ap_pod_t *pod; >+static ap_pod_t **pod; >+static ap_pod_t *child_pod; >+static apr_proc_mutex_t *child_mutex; >+ap_listen_rec *child_listen; >+int *bucket; /* bucket array for the httpd child processes */ >+ > > /* data retained by prefork across 
load/unload of the module > * allocated on first call to pre-config hook; located on >@@ -222,14 +229,14 @@ > prefork_note_child_killed(/* slot */ 0, 0, 0); > } > >- ap_mpm_pod_close(pod); >+ ap_mpm_pod_close(child_pod); > chdir_for_gprof(); > exit(code); > } > > static void accept_mutex_on(void) > { >- apr_status_t rv = apr_proc_mutex_lock(accept_mutex); >+ apr_status_t rv = apr_proc_mutex_lock(child_mutex); > if (rv != APR_SUCCESS) { > const char *msg = "couldn't grab the accept mutex"; > >@@ -247,7 +254,7 @@ > > static void accept_mutex_off(void) > { >- apr_status_t rv = apr_proc_mutex_unlock(accept_mutex); >+ apr_status_t rv = apr_proc_mutex_unlock(child_mutex); > if (rv != APR_SUCCESS) { > const char *msg = "couldn't release the accept mutex"; > >@@ -272,7 +279,7 @@ > * when it's safe in the single Listen case. > */ > #ifdef SINGLE_LISTEN_UNSERIALIZED_ACCEPT >-#define SAFE_ACCEPT(stmt) do {if (ap_listeners->next) {stmt;}} while(0) >+#define SAFE_ACCEPT(stmt) do {if (child_listen->next) {stmt;}} while(0) > #else > #define SAFE_ACCEPT(stmt) do {stmt;} while(0) > #endif >@@ -521,10 +528,23 @@ > apr_pool_create(&ptrans, pchild); > apr_pool_tag(ptrans, "transaction"); > >+/* close unused listeners and pods */ >+ for (i = 0; i < num_buckets; i++) { >+ if (i != bucket[my_child_num]) { >+ lr = mpm_listen[i]; >+ while(lr) { >+ apr_socket_close(lr->sd); >+ lr = lr->next; >+ } >+ mpm_listen[i]->active = 0; >+ ap_mpm_pod_close(pod[i]); >+ } >+ } >+ > /* needs to be done before we switch UIDs so we have permissions */ > ap_reopen_scoreboard(pchild, NULL, 0); >- lockfile = apr_proc_mutex_lockfile(accept_mutex); >- status = apr_proc_mutex_child_init(&accept_mutex, >+ lockfile = apr_proc_mutex_lockfile(child_mutex); >+ status = apr_proc_mutex_child_init(&child_mutex, > lockfile, > pchild); > if (status != APR_SUCCESS) { >@@ -532,7 +552,7 @@ > "Couldn't initialize cross-process lock in child " > "(%s) (%s)", > lockfile ? 
lockfile : "none", >- apr_proc_mutex_name(accept_mutex)); >+ apr_proc_mutex_name(child_mutex)); > clean_child_exit(APEXIT_CHILDFATAL); > } > >@@ -554,7 +574,7 @@ > clean_child_exit(APEXIT_CHILDSICK); /* assume temporary resource issue */ > } > >- for (lr = ap_listeners, i = num_listensocks; i--; lr = lr->next) { >+ for (lr = child_listen, i = num_listensocks; i--; lr = lr->next) { > apr_pollfd_t pfd = { 0 }; > > pfd.desc_type = APR_POLL_SOCKET; >@@ -612,7 +632,7 @@ > > if (num_listensocks == 1) { > /* There is only one listener record, so refer to that one. */ >- lr = ap_listeners; >+ lr = child_listen; > } > else { > /* multiple listening sockets - need to poll */ >@@ -710,7 +730,7 @@ > * while we were processing the connection or we are the lucky > * idle server process that gets to die. > */ >- if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */ >+ if (ap_mpm_pod_check(child_pod) == APR_SUCCESS) { /* selected as idle? */ > die_now = 1; > } > else if (retained->my_generation != >@@ -750,6 +770,9 @@ > (void) ap_update_child_status_from_indexes(slot, 0, SERVER_STARTING, > (request_rec *) NULL); > >+ child_listen = mpm_listen[bucket[slot]]; >+ child_mutex = accept_mutex[bucket[slot]]; >+ child_pod = pod[bucket[slot]]; > > #ifdef _OSD_POSIX > /* BS2000 requires a "special" version of fork() before a setuid() call */ >@@ -815,6 +838,7 @@ > if (ap_scoreboard_image->servers[i][0].status != SERVER_DEAD) { > continue; > } >+ bucket[i] = i % num_buckets; > if (make_child(ap_server_conf, i) < 0) { > break; > } >@@ -822,6 +846,8 @@ > } > } > >+static int bucket_make_child_record = -1; >+static int bucket_kill_child_record = -1; > static void perform_idle_server_maintenance(apr_pool_t *p) > { > int i; >@@ -874,7 +900,8 @@ > * shut down gracefully, in case it happened to pick up a request > * while we were counting > */ >- ap_mpm_pod_signal(pod); >+ bucket_kill_child_record = (bucket_kill_child_record + 1) % num_buckets; >+ 
ap_mpm_pod_signal(pod[bucket_kill_child_record], bucket_kill_child_record); > retained->idle_spawn_rate = 1; > } > else if (idle_count < ap_daemons_min_free) { >@@ -899,6 +926,7 @@ > idle_count, total_non_dead); > } > for (i = 0; i < free_length; ++i) { >+ bucket[free_slots[i]]= (++bucket_make_child_record) % num_buckets; > make_child(ap_server_conf, free_slots[i]); > } > /* the next time around we want to spawn twice as many if this >@@ -926,15 +954,24 @@ > int index; > int remaining_children_to_start; > apr_status_t rv; >+ int i; >+ ap_listen_rec *lr; > > ap_log_pid(pconf, ap_pid_fname); > >- /* Initialize cross-process accept lock */ >- rv = ap_proc_mutex_create(&accept_mutex, NULL, AP_ACCEPT_MUTEX_TYPE, NULL, >- s, _pconf, 0); >- if (rv != APR_SUCCESS) { >- mpm_state = AP_MPMQ_STOPPING; >- return DONE; >+ bucket = apr_palloc(_pconf, sizeof(int) * ap_daemons_limit); >+ /* Initialize cross-process accept lock for each bucket*/ >+ accept_mutex = apr_palloc(_pconf, sizeof(apr_proc_mutex_t *) * num_buckets); >+ for (i = 0; i < num_buckets; i++) { >+ rv = ap_proc_mutex_create(&accept_mutex[i], NULL, AP_ACCEPT_MUTEX_TYPE, NULL, >+ s, _pconf, 0); >+ if (rv != APR_SUCCESS) { >+ mpm_state = AP_MPMQ_STOPPING; >+ return DONE; >+ } >+ } >+ for (lr = ap_listeners; lr; lr = lr->next) { >+ apr_socket_close(lr->sd); > } > > if (!retained->is_graceful) { >@@ -953,12 +990,13 @@ > > if (one_process) { > AP_MONCONTROL(1); >+ bucket[0] = 0; > make_child(ap_server_conf, 0); > /* NOTREACHED */ > } > else { >- if (ap_daemons_max_free < ap_daemons_min_free + 1) /* Don't thrash... */ >- ap_daemons_max_free = ap_daemons_min_free + 1; >+ if (ap_daemons_max_free < ap_daemons_min_free + num_buckets) /* Don't thrash... 
*/ >+ ap_daemons_max_free = ap_daemons_min_free + num_buckets; > > /* If we're doing a graceful_restart then we're going to see a lot > * of children exiting immediately when we get into the main loop >@@ -991,7 +1029,7 @@ > ap_log_command_line(plog, s); > ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00165) > "Accept mutex: %s (default: %s)", >- apr_proc_mutex_name(accept_mutex), >+ apr_proc_mutex_name(accept_mutex[0]), > apr_proc_mutex_defname()); > > mpm_state = AP_MPMQ_RUNNING; >@@ -1122,7 +1160,9 @@ > ap_close_listeners(); > > /* kill off the idle ones */ >- ap_mpm_pod_killpg(pod, retained->max_daemons_limit); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_pod_killpg(pod[i], i, retained->max_daemons_limit); >+ } > > /* Send SIGUSR1 to the active children */ > active_children = 0; >@@ -1196,7 +1236,9 @@ > "Graceful restart requested, doing restart"); > > /* kill off the idle ones */ >- ap_mpm_pod_killpg(pod, retained->max_daemons_limit); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_pod_killpg(pod[i], i, retained->max_daemons_limit); >+ } > > /* This is mostly for debugging... so that we know what is still > * gracefully dealing with existing request. This will break >@@ -1239,6 +1281,8 @@ > int startup = 0; > int level_flags = 0; > apr_status_t rv; >+ int i; >+ int num_of_cores = 0; > > pconf = p; > >@@ -1248,6 +1292,7 @@ > level_flags |= APLOG_STARTUP; > } > >+ enable_default_listener = 0; > if ((num_listensocks = ap_setup_listeners(ap_server_conf)) < 1) { > ap_log_error(APLOG_MARK, APLOG_ALERT | level_flags, 0, > (startup ? NULL : s), >@@ -1255,12 +1300,32 @@ > return DONE; > } > >- if ((rv = ap_mpm_pod_open(pconf, &pod))) { >- ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv, >- (startup ? 
NULL : s), >- "could not open pipe-of-death"); >- return DONE; >+ enable_default_listener = 1; >+ if (have_so_reuseport) { >+ num_of_cores = sysconf(_SC_NPROCESSORS_ONLN); >+ if (num_of_cores > 8) { >+ num_buckets = sysconf(_SC_NPROCESSORS_ONLN)/8; >+ } >+ else { >+ num_buckets = 1; >+ } > } >+ else { >+ num_buckets = 1; >+ } >+ >+ ap_duplicate_listeners(ap_server_conf, pconf, num_buckets); >+ >+ pod = apr_palloc(pconf, sizeof(ap_pod_t *) * num_buckets); >+ for (i = 0; i < num_buckets; i++) { >+ if ((rv = ap_mpm_pod_open(pconf, &pod[i]))) { >+ ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv, >+ (startup ? NULL : s), >+ "could not open pipe-of-death"); >+ return DONE; >+ } >+ } >+ > return OK; > } > >diff -ru /home/httpd-trunk/server/mpm/worker/worker.c /home/httpd-trunk.new/server/mpm/worker/worker.c >--- /home/httpd-trunk/server/mpm/worker/worker.c 2014-04-11 15:17:17.973999550 -0400 >+++ /home/httpd-trunk.new/server/mpm/worker/worker.c 2014-05-08 15:04:35.103981718 -0400 >@@ -30,6 +30,9 @@ > #include "apr_thread_mutex.h" > #include "apr_proc_mutex.h" > #include "apr_poll.h" >+ >+#include <stdlib.h> >+ > #define APR_WANT_STRFUNC > #include "apr_want.h" > >@@ -159,7 +162,7 @@ > * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by > * without the need to spawn. > */ >- int idle_spawn_rate; >+ int *idle_spawn_rate; > #ifndef MAX_SPAWN_RATE > #define MAX_SPAWN_RATE (32) > #endif >@@ -188,7 +191,8 @@ > > #define ID_FROM_CHILD_THREAD(c, t) ((c * thread_limit) + t) > >-static ap_pod_t *pod; >+static ap_pod_t **pod; >+static ap_pod_t *child_pod; > > /* The worker MPM respects a couple of runtime flags that can aid > * in debugging. 
Setting the -DNO_DETACH flag will prevent the root process >@@ -218,10 +222,13 @@ > static apr_os_thread_t *listener_os_thread; > > /* Locks for accept serialization */ >-static apr_proc_mutex_t *accept_mutex; >+static apr_proc_mutex_t **accept_mutex; >+static apr_proc_mutex_t *child_mutex; >+ap_listen_rec *child_listen; >+int *bucket; /* bucket array for the httpd child processes */ > > #ifdef SINGLE_LISTEN_UNSERIALIZED_ACCEPT >-#define SAFE_ACCEPT(stmt) (ap_listeners->next ? (stmt) : APR_SUCCESS) >+#define SAFE_ACCEPT(stmt) (child_listen->next ? (stmt) : APR_SUCCESS) > #else > #define SAFE_ACCEPT(stmt) (stmt) > #endif >@@ -701,7 +708,7 @@ > clean_child_exit(APEXIT_CHILDSICK); > } > >- for (lr = ap_listeners; lr != NULL; lr = lr->next) { >+ for (lr = child_listen; lr != NULL; lr = lr->next) { > apr_pollfd_t pfd = { 0 }; > > pfd.desc_type = APR_POLL_SOCKET; >@@ -758,7 +765,7 @@ > /* We've already decremented the idle worker count inside > * ap_queue_info_wait_for_idler. */ > >- if ((rv = SAFE_ACCEPT(apr_proc_mutex_lock(accept_mutex))) >+ if ((rv = SAFE_ACCEPT(apr_proc_mutex_lock(child_mutex))) > != APR_SUCCESS) { > > if (!listener_may_exit) { >@@ -767,9 +774,9 @@ > break; /* skip the lock release */ > } > >- if (!ap_listeners->next) { >+ if (!child_listen->next) { > /* Only one listener, so skip the poll */ >- lr = ap_listeners; >+ lr = child_listen; > } > else { > while (!listener_may_exit) { >@@ -839,7 +846,7 @@ > resource_shortage = 1; > signal_threads(ST_GRACEFUL); > } >- if ((rv = SAFE_ACCEPT(apr_proc_mutex_unlock(accept_mutex))) >+ if ((rv = SAFE_ACCEPT(apr_proc_mutex_unlock(child_mutex))) > != APR_SUCCESS) { > > if (listener_may_exit) { >@@ -863,7 +870,7 @@ > } > } > else { >- if ((rv = SAFE_ACCEPT(apr_proc_mutex_unlock(accept_mutex))) >+ if ((rv = SAFE_ACCEPT(apr_proc_mutex_unlock(child_mutex))) > != APR_SUCCESS) { > int level = APLOG_EMERG; > >@@ -1217,6 +1224,8 @@ > thread_starter *ts; > apr_threadattr_t *thread_attr; > apr_thread_t *start_thread_id; >+ 
int i; >+ ap_listen_rec *lr; > > mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this > * child initializes >@@ -1225,11 +1234,24 @@ > ap_fatal_signal_child_setup(ap_server_conf); > apr_pool_create(&pchild, pconf); > >+ /* close unused listeners and pods */ >+ for (i = 0; i < num_buckets; i++) { >+ if (i != bucket[child_num_arg]) { >+ lr = mpm_listen[i]; >+ while(lr) { >+ apr_socket_close(lr->sd); >+ lr = lr->next; >+ } >+ mpm_listen[i]->active = 0; >+ ap_mpm_podx_close(pod[i]); >+ } >+ } >+ > /*stuff to do before we switch id's, so we have permissions.*/ > ap_reopen_scoreboard(pchild, NULL, 0); > >- rv = SAFE_ACCEPT(apr_proc_mutex_child_init(&accept_mutex, >- apr_proc_mutex_lockfile(accept_mutex), >+ rv = SAFE_ACCEPT(apr_proc_mutex_child_init(&child_mutex, >+ apr_proc_mutex_lockfile(child_mutex), > pchild)); > if (rv != APR_SUCCESS) { > ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(00280) >@@ -1338,7 +1360,7 @@ > apr_signal(SIGTERM, dummy_signal_handler); > /* Watch for any messages from the parent over the POD */ > while (1) { >- rv = ap_mpm_podx_check(pod); >+ rv = ap_mpm_podx_check(child_pod); > if (rv == AP_MPM_PODX_NORESTART) { > /* see if termination was triggered while we slept */ > switch(terminate_mode) { >@@ -1391,6 +1413,10 @@ > /* NOTREACHED */ > } > >+ child_listen = mpm_listen[bucket[slot]]; >+ child_mutex = accept_mutex[bucket[slot]]; >+ child_pod = pod[bucket[slot]]; >+ > if ((pid = fork()) == -1) { > ap_log_error(APLOG_MARK, APLOG_ERR, errno, s, APLOGNO(00283) > "fork: Unable to fork new process"); >@@ -1449,6 +1475,7 @@ > if (ap_scoreboard_image->parent[i].pid != 0) { > continue; > } >+ bucket[i] = i % num_buckets; > if (make_child(ap_server_conf, i) < 0) { > break; > } >@@ -1456,7 +1483,7 @@ > } > } > >-static void perform_idle_server_maintenance(void) >+static void perform_idle_server_maintenance(int child_bucket) > { > int i, j; > int idle_thread_count; >@@ -1464,7 +1491,9 @@ > process_score *ps; > int 
free_length; > int totally_free_length = 0; >- int free_slots[MAX_SPAWN_RATE]; >+ int child_max_spawn_rate; >+ child_max_spawn_rate = MAX_SPAWN_RATE/num_buckets; >+ int free_slots[child_max_spawn_rate]; > int last_non_dead; > int total_non_dead; > int active_thread_count = 0; >@@ -1485,7 +1514,7 @@ > int all_dead_threads = 1; > int child_threads_active = 0; > >- if (i >= retained->max_daemons_limit && totally_free_length == retained->idle_spawn_rate) >+ if (i >= retained->max_daemons_limit && totally_free_length == retained->idle_spawn_rate[child_bucket]) > /* short cut if all active processes have been examined and > * enough empty scoreboard slots have been found > */ >@@ -1513,7 +1542,8 @@ > loop if no pid? not much else matters */ > if (status <= SERVER_READY && > !ps->quiescing && >- ps->generation == retained->my_generation) { >+ ps->generation == retained->my_generation && >+ bucket[i] == child_bucket) { > ++idle_thread_count; > } > if (status >= SERVER_READY && status < SERVER_GRACEFUL) { >@@ -1522,8 +1552,8 @@ > } > } > active_thread_count += child_threads_active; >- if (any_dead_threads && totally_free_length < retained->idle_spawn_rate >- && free_length < MAX_SPAWN_RATE >+ if (any_dead_threads && totally_free_length < retained->idle_spawn_rate[child_bucket] >+ && free_length < MAX_SPAWN_RATE/num_buckets > && (!ps->pid /* no process in the slot */ > || ps->quiescing)) { /* or at least one is going away */ > if (all_dead_threads) { >@@ -1579,12 +1609,12 @@ > > retained->max_daemons_limit = last_non_dead + 1; > >- if (idle_thread_count > max_spare_threads) { >+ if (idle_thread_count > max_spare_threads/num_buckets) { > /* Kill off one child */ >- ap_mpm_podx_signal(pod, AP_MPM_PODX_GRACEFUL); >- retained->idle_spawn_rate = 1; >+ ap_mpm_podx_signal(pod[child_bucket], AP_MPM_PODX_GRACEFUL); >+ retained->idle_spawn_rate[child_bucket] = 1; > } >- else if (idle_thread_count < min_spare_threads) { >+ else if (idle_thread_count < min_spare_threads/num_buckets) { > 
/* terminate the free list */ > if (free_length == 0) { /* scoreboard is full, can't fork */ > >@@ -1615,13 +1645,13 @@ > ap_server_conf, APLOGNO(00288) > "scoreboard is full, not at MaxRequestWorkers"); > } >- retained->idle_spawn_rate = 1; >+ retained->idle_spawn_rate[child_bucket] = 1; > } > else { >- if (free_length > retained->idle_spawn_rate) { >- free_length = retained->idle_spawn_rate; >+ if (free_length > retained->idle_spawn_rate[child_bucket]) { >+ free_length = retained->idle_spawn_rate[child_bucket]; > } >- if (retained->idle_spawn_rate >= 8) { >+ if (retained->idle_spawn_rate[child_bucket] >= 8) { > ap_log_error(APLOG_MARK, APLOG_INFO, 0, > ap_server_conf, APLOGNO(00289) > "server seems busy, (you may need " >@@ -1632,6 +1662,7 @@ > idle_thread_count, total_non_dead); > } > for (i = 0; i < free_length; ++i) { >+ bucket[free_slots[i]] = child_bucket; > make_child(ap_server_conf, free_slots[i]); > } > /* the next time around we want to spawn twice as many if this >@@ -1640,13 +1671,13 @@ > if (retained->hold_off_on_exponential_spawning) { > --retained->hold_off_on_exponential_spawning; > } >- else if (retained->idle_spawn_rate < MAX_SPAWN_RATE) { >- retained->idle_spawn_rate *= 2; >+ else if (retained->idle_spawn_rate[child_bucket] < MAX_SPAWN_RATE/num_buckets) { >+ retained->idle_spawn_rate[child_bucket] *= 2; > } > } > } > else { >- retained->idle_spawn_rate = 1; >+ retained->idle_spawn_rate[child_bucket] = 1; > } > } > >@@ -1702,7 +1733,7 @@ > ap_scoreboard_image->parent[child_slot].quiescing = 0; > if (processed_status == APEXIT_CHILDSICK) { > /* resource shortage, minimize the fork rate */ >- retained->idle_spawn_rate = 1; >+ retained->idle_spawn_rate[bucket[child_slot]] = 1; > } > else if (remaining_children_to_start > && child_slot < ap_daemons_limit) { >@@ -1719,7 +1750,9 @@ > if (processed_status == APEXIT_CHILDSICK > && old_gen == retained->my_generation) { > /* resource shortage, minimize the fork rate */ >- retained->idle_spawn_rate = 1; >+ 
for (i = 0; i < num_buckets; i++) { >+ retained->idle_spawn_rate[i] = 1; >+ } > } > #if APR_HAS_OTHER_CHILD > } >@@ -1758,7 +1791,9 @@ > continue; > } > >- perform_idle_server_maintenance(); >+ for (i = 0; i < num_buckets; i++) { >+ perform_idle_server_maintenance(i); >+ } > } > } > >@@ -1766,16 +1801,25 @@ > { > int remaining_children_to_start; > apr_status_t rv; >+ int i; >+ ap_listen_rec *lr; > > ap_log_pid(pconf, ap_pid_fname); > >+ bucket = apr_palloc(_pconf, sizeof(int) * ap_daemons_limit); > /* Initialize cross-process accept lock */ >- rv = ap_proc_mutex_create(&accept_mutex, NULL, AP_ACCEPT_MUTEX_TYPE, NULL, >- s, _pconf, 0); >- if (rv != APR_SUCCESS) { >- mpm_state = AP_MPMQ_STOPPING; >- return DONE; >+ accept_mutex = apr_palloc(_pconf, sizeof(apr_proc_mutex_t *) * num_buckets); >+ for (i = 0; i < num_buckets; i++) { >+ rv = ap_proc_mutex_create(&accept_mutex[i], NULL, AP_ACCEPT_MUTEX_TYPE, NULL, >+ s, _pconf, 0); >+ if (rv != APR_SUCCESS) { >+ mpm_state = AP_MPMQ_STOPPING; >+ return DONE; >+ } > } >+ for (lr = ap_listeners; lr; lr = lr->next) { >+ apr_socket_close(lr->sd); >+ } > > if (!retained->is_graceful) { > if (ap_run_pre_mpm(s->process->pool, SB_SHARED) != OK) { >@@ -1791,8 +1835,8 @@ > restart_pending = shutdown_pending = 0; > set_signals(); > /* Don't thrash... 
*/ >- if (max_spare_threads < min_spare_threads + threads_per_child) >- max_spare_threads = min_spare_threads + threads_per_child; >+ if (max_spare_threads < min_spare_threads + threads_per_child * num_buckets) >+ max_spare_threads = min_spare_threads + threads_per_child * num_buckets; > > /* If we're doing a graceful_restart then we're going to see a lot > * of children exiting immediately when we get into the main loop >@@ -1825,7 +1869,7 @@ > ap_log_command_line(plog, s); > ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00294) > "Accept mutex: %s (default: %s)", >- apr_proc_mutex_name(accept_mutex), >+ apr_proc_mutex_name(accept_mutex[0]), > apr_proc_mutex_defname()); > mpm_state = AP_MPMQ_RUNNING; > >@@ -1836,7 +1880,9 @@ > /* Time to shut down: > * Kill child processes, tell them to call child_exit, etc... > */ >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_RESTART); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_RESTART); >+ } > ap_reclaim_child_processes(1, /* Start with SIGTERM */ > worker_note_child_killed); > >@@ -1857,7 +1903,9 @@ > > /* Close our listeners, and then ask our children to do same */ > ap_close_listeners(); >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_GRACEFUL); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_GRACEFUL); >+ } > ap_relieve_child_processes(worker_note_child_killed); > > if (!child_fatal) { >@@ -1897,7 +1945,9 @@ > * way, try and make sure that all of our processes are > * really dead. > */ >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_RESTART); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_RESTART); >+ } > ap_reclaim_child_processes(1, worker_note_child_killed); > > return DONE; >@@ -1922,8 +1972,9 @@ > ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00297) > AP_SIG_GRACEFUL_STRING " received. 
Doing graceful restart"); > /* wake up the children...time to die. But we'll have more soon */ >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_GRACEFUL); >- >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_GRACEFUL); >+ } > > /* This is mostly for debugging... so that we know what is still > * gracefully dealing with existing request. >@@ -1935,7 +1986,9 @@ > * and a SIGHUP, we may as well use the same signal, because some user > * pthreads are stealing signals from us left and right. > */ >- ap_mpm_podx_killpg(pod, ap_daemons_limit, AP_MPM_PODX_RESTART); >+ for (i = 0; i < num_buckets; i++) { >+ ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_RESTART); >+ } > > ap_reclaim_child_processes(1, /* Start with SIGTERM */ > worker_note_child_killed); >@@ -1954,6 +2007,8 @@ > int startup = 0; > int level_flags = 0; > apr_status_t rv; >+ int i; >+ int num_of_cores = 0; > > pconf = p; > >@@ -1963,19 +2018,38 @@ > level_flags |= APLOG_STARTUP; > } > >+ enable_default_listener = 0; > if ((num_listensocks = ap_setup_listeners(ap_server_conf)) < 1) { > ap_log_error(APLOG_MARK, APLOG_ALERT | level_flags, 0, > (startup ? NULL : s), > "no listening sockets available, shutting down"); > return DONE; > } >+ enable_default_listener = 1; >+ if (have_so_reuseport) { >+ num_of_cores = sysconf(_SC_NPROCESSORS_ONLN); >+ if (num_of_cores > 8) { >+ num_buckets = sysconf(_SC_NPROCESSORS_ONLN)/8; >+ } >+ else { >+ num_buckets = 1; >+ } >+ } >+ else { >+ num_buckets = 1; >+ } > >+ ap_duplicate_listeners(ap_server_conf, pconf, num_buckets); >+ >+ pod = apr_palloc(pconf, sizeof(ap_pod_t *) * num_buckets); > if (!one_process) { >- if ((rv = ap_mpm_podx_open(pconf, &pod))) { >- ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv, >- (startup ? 
NULL : s), >- "could not open pipe-of-death"); >- return DONE; >+ for (i = 0; i < num_buckets; i++) { >+ if ((rv = ap_mpm_podx_open(pconf, &pod[i]))) { >+ ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv, >+ (startup ? NULL : s), >+ "could not open pipe-of-death"); >+ return DONE; >+ } > } > } > return OK; >@@ -1987,6 +2061,7 @@ > int no_detach, debug, foreground; > apr_status_t rv; > const char *userdata_key = "mpm_worker_module"; >+ int i; > > mpm_state = AP_MPMQ_STARTING; > >@@ -2009,7 +2084,6 @@ > if (!retained) { > retained = ap_retained_data_create(userdata_key, sizeof(*retained)); > retained->max_daemons_limit = -1; >- retained->idle_spawn_rate = 1; > } > ++retained->module_loads; > if (retained->module_loads == 2) { >@@ -2023,6 +2097,10 @@ > "apr_proc_detach failed"); > return HTTP_INTERNAL_SERVER_ERROR; > } >+ retained->idle_spawn_rate = apr_palloc(pconf, sizeof(int) * num_buckets); >+ for (i = 0; i< num_buckets; i++) { >+ retained->idle_spawn_rate[i] = 1; >+ } > } > } > >diff -ru /home/httpd-trunk/server/mpm_unix.c /home/httpd-trunk.new/server/mpm_unix.c >--- /home/httpd-trunk/server/mpm_unix.c 2014-04-11 15:17:18.306999551 -0400 >+++ /home/httpd-trunk.new/server/mpm_unix.c 2014-05-07 17:19:02.170975169 -0400 >@@ -607,7 +607,7 @@ > * permits the MPM to skip the poll when there is only one listening > * socket, because it provides a alternate way to unblock an accept() > * when the pod is used. */ >-static apr_status_t dummy_connection(ap_pod_t *pod) >+static apr_status_t dummy_connection(ap_pod_t *pod, int child_bucket) > { > const char *data; > apr_status_t rv; >@@ -626,12 +626,12 @@ > * plain-HTTP, not SSL; using an SSL port would either be > * expensive to do correctly (performing a complete SSL handshake) > * or cause log spam by doing incorrectly (simply sending EOF). 
*/ >- lp = ap_listeners; >+ lp = mpm_listen[child_bucket]; > while (lp && lp->protocol && strcasecmp(lp->protocol, "http") != 0) { > lp = lp->next; > } > if (!lp) { >- lp = ap_listeners; >+ lp = mpm_listen[child_bucket]; > } > > rv = apr_socket_create(&sock, lp->bind_addr->family, SOCK_STREAM, 0, p); >@@ -712,7 +712,7 @@ > return rv; > } > >-AP_DECLARE(apr_status_t) ap_mpm_pod_signal(ap_pod_t *pod) >+AP_DECLARE(apr_status_t) ap_mpm_pod_signal(ap_pod_t *pod, int child_bucket) > { > apr_status_t rv; > >@@ -721,10 +721,10 @@ > return rv; > } > >- return dummy_connection(pod); >+ return dummy_connection(pod, child_bucket); > } > >-void ap_mpm_pod_killpg(ap_pod_t *pod, int num) >+void ap_mpm_pod_killpg(ap_pod_t *pod, int num, int child_bucket) > { > int i; > apr_status_t rv = APR_SUCCESS; >@@ -748,7 +748,7 @@ > ap_scoreboard_image->servers[i][0].pid == 0) { > continue; > } >- rv = dummy_connection(pod); >+ rv = dummy_connection(pod, child_bucket); > } > } >
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 55897
:
31124
|
31125
|
31126
|
31127
|
31171
|
31253
|
31397
|
31616
|
31632
|
31681
|
32079
|
32081
|
32082