Bug 25137 - Backport of atomics from 2.1 to fdqueue.c
Summary: Backport of atomics from 2.1 to fdqueue.c
Status: RESOLVED WONTFIX
Alias: None
Product: Apache httpd-2
Classification: Unclassified
Component: mpm_worker
Version: 2.0.48
Hardware: Other
OS: other
Importance: P3 enhancement
Target Milestone: ---
Assignee: Apache HTTPD Bugs Mailing List
URL:
Keywords: PatchAvailable
Depends on:
Blocks:
 
Reported: 2003-12-02 13:37 UTC by M. Brian Akins
Modified: 2009-05-25 15:15 UTC
Description M. Brian Akins 2003-12-02 13:37:44 UTC
--- /home/bakins/src/httpd-2.0.48/server/mpm/worker/fdqueue.c   2003-11-06 08:16:03.000000000 -0500
+++ fdqueue.c   2003-09-28 23:58:41.000000000 -0400
@@ -57,26 +57,40 @@
  */
  
 #include "fdqueue.h"
+#include "apr_atomic.h"
+
+typedef struct recycled_pool {
+    apr_pool_t *pool;
+    struct recycled_pool *next;
+} recycled_pool;
  
 struct fd_queue_info_t {
-    int idlers;
+    apr_uint32_t idlers;
     apr_thread_mutex_t *idlers_mutex;
     apr_thread_cond_t *wait_for_idler;
     int terminated;
     int max_idlers;
-    apr_pool_t        **recycled_pools;
-    int num_recycled;
+    recycled_pool  *recycled_pools;
 };
  
 static apr_status_t queue_info_cleanup(void *data_)
 {
     fd_queue_info_t *qi = data_;
-    int i;
     apr_thread_cond_destroy(qi->wait_for_idler);
     apr_thread_mutex_destroy(qi->idlers_mutex);
-    for (i = 0; i < qi->num_recycled; i++) {
-        apr_pool_destroy(qi->recycled_pools[i]);
+
+    /* Clean up any pools in the recycled list */
+    for (;;) {
+        struct recycled_pool *first_pool = qi->recycled_pools;
+        if (first_pool == NULL) {
+            break;
+        }
+        if (apr_atomic_casptr((volatile void**)&(qi->recycled_pools), first_pool->next,
+                              first_pool) == first_pool) {
+            apr_pool_destroy(first_pool->pool);
+        }
     }
+
     return APR_SUCCESS;
 }
  
@@ -98,9 +112,7 @@
     if (rv != APR_SUCCESS) {
         return rv;
     }
-    qi->recycled_pools = (apr_pool_t **)apr_palloc(pool, max_idlers *
-                                                   sizeof(apr_pool_t *));
-    qi->num_recycled = 0;
+    qi->recycled_pools = NULL;
     qi->max_idlers = max_idlers;
     apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
                               apr_pool_cleanup_null);
@@ -114,24 +126,53 @@
                                     apr_pool_t *pool_to_recycle)
 {
     apr_status_t rv;
-    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
-    if (rv != APR_SUCCESS) {
-        return rv;
-    }
-    AP_DEBUG_ASSERT(queue_info->idlers >= 0);
-    AP_DEBUG_ASSERT(queue_info->num_recycled < queue_info->max_idlers);
+    int prev_idlers;
+
+    /* If we have been given a pool to recycle, atomically link
+     * it into the queue_info's list of recycled pools
+     */
     if (pool_to_recycle) {
-        queue_info->recycled_pools[queue_info->num_recycled++] =
-            pool_to_recycle;
+        struct recycled_pool *new_recycle;
+        new_recycle = (struct recycled_pool *)apr_palloc(pool_to_recycle,
+                                                         sizeof(*new_recycle));
+        new_recycle->pool = pool_to_recycle;
+        for (;;) {
+            new_recycle->next = queue_info->recycled_pools;
+            if (apr_atomic_casptr((volatile void**)&(queue_info->recycled_pools),
+                                  new_recycle, new_recycle->next) ==
+                new_recycle->next) {
+                break;
+            }
+        }
     }
-    if (queue_info->idlers++ == 0) {
-        /* Only signal if we had no idlers before. */
-        apr_thread_cond_signal(queue_info->wait_for_idler);
+
+    /* Atomically increment the count of idle workers */
+    for (;;) {
+        prev_idlers = queue_info->idlers;
+        if (apr_atomic_cas32(&(queue_info->idlers), prev_idlers + 1,
+                             prev_idlers) == prev_idlers) {
+            break;
+        }
     }
-    rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
-    if (rv != APR_SUCCESS) {
-        return rv;
+
+    /* If this thread just made the idle worker count nonzero,
+     * wake up the listener. */
+    if (prev_idlers == 0) {
+        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
+        if (rv != APR_SUCCESS) {
+            return rv;
+        }
+        rv = apr_thread_cond_signal(queue_info->wait_for_idler);
+        if (rv != APR_SUCCESS) {
+            apr_thread_mutex_unlock(queue_info->idlers_mutex);
+            return rv;
+        }
+        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
+        if (rv != APR_SUCCESS) {
+            return rv;
+        }
     }
+
     return APR_SUCCESS;
 }
  
@@ -139,34 +180,66 @@
                                           apr_pool_t **recycled_pool)
 {
     apr_status_t rv;
+
     *recycled_pool = NULL;
-    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
-    if (rv != APR_SUCCESS) {
-        return rv;
-    }
-    AP_DEBUG_ASSERT(queue_info->idlers >= 0);
-    while ((queue_info->idlers == 0) && (!queue_info->terminated)) {
-        rv = apr_thread_cond_wait(queue_info->wait_for_idler,
-                                  queue_info->idlers_mutex);
+
+    /* Block if the count of idle workers is zero */
+    if (queue_info->idlers == 0) {
+        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
         if (rv != APR_SUCCESS) {
-            apr_status_t rv2;
-            rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
-            if (rv2 != APR_SUCCESS) {
-                return rv2;
+            return rv;
+        }
+        /* Re-check the idle worker count to guard against a
+         * race condition.  Now that we're in the mutex-protected
+         * region, one of two things may have happened:
+         *   - If the idle worker count is still zero, the
+         *     workers are all still busy, so it's safe to
+         *     block on a condition variable.
+         *   - If the idle worker count is nonzero, then a
+         *     worker has become idle since the first check
+         *     of queue_info->idlers above.  It's possible
+         *     that the worker has also signaled the condition
+         *     variable--and if so, the listener missed it
+         *     because it wasn't yet blocked on the condition
+         *     variable.  But if the idle worker count is
+         *     now nonzero, it's safe for this function to
+         *     return immediately.
+         */
+        if (queue_info->idlers == 0) {
+            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
+                                  queue_info->idlers_mutex);
+            if (rv != APR_SUCCESS) {
+                apr_status_t rv2;
+                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
+                if (rv2 != APR_SUCCESS) {
+                    return rv2;
+                }
+                return rv;
             }
+        }
+        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
+        if (rv != APR_SUCCESS) {
             return rv;
         }
     }
-    queue_info->idlers--; /* Oh, and idler? Let's take 'em! */
-    if (queue_info->num_recycled) {
-        *recycled_pool =
-            queue_info->recycled_pools[--queue_info->num_recycled];
-    }
-    rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
-    if (rv != APR_SUCCESS) {
-        return rv;
+
+    /* Atomically decrement the idle worker count */
+    apr_atomic_dec32(&(queue_info->idlers));
+
+    /* Atomically pop a pool from the recycled list */
+    for (;;) {
+        struct recycled_pool *first_pool = queue_info->recycled_pools;
+        if (first_pool == NULL) {
+            break;
+        }
+        if (apr_atomic_casptr((volatile void**)&(queue_info->recycled_pools), first_pool->next,
+                              first_pool) == first_pool) {
+            *recycled_pool = first_pool->pool;
+            break;
+        }
     }
-    else if (queue_info->terminated) {
+
+    if (queue_info->terminated) {
         return APR_EOF;
     }
     else {
@@ -183,11 +256,7 @@
     }
     queue_info->terminated = 1;
     apr_thread_cond_broadcast(queue_info->wait_for_idler);
-    rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
-    if (rv != APR_SUCCESS) {
-        return rv;
-    }
-    return APR_SUCCESS;
+    return apr_thread_mutex_unlock(queue_info->idlers_mutex);
 }
  
 /**
@@ -334,10 +403,7 @@
         return rv;
     }
     apr_thread_cond_broadcast(queue->not_empty);
-    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
-        return rv;
-    }
-    return APR_SUCCESS;
+    return apr_thread_mutex_unlock(queue->one_big_mutex);
 }
  
 apr_status_t ap_queue_term(fd_queue_t *queue)
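
For reference, here is a minimal standalone sketch of the compare-and-swap
loop patterns the patch relies on: lock-free push/pop of the recycled-pool
list, and incrementing the idler count while learning its previous value.
This is an illustration only (push_pool, pop_pool, and inc_idlers are
hypothetical names, not httpd symbols) and it assumes an APR that provides
apr_atomic_casptr and apr_atomic_cas32.

/* Sketch only: compile and link against APR, e.g. gcc sketch.c -lapr-1
 * with the APR include path set appropriately. */
#include <stdio.h>
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_atomic.h"

typedef struct node {
    apr_pool_t  *pool;
    struct node *next;
} node;

static node *stack_head = NULL;
static volatile apr_uint32_t idlers = 0;

/* Push: link the node in front of the current head, then CAS the head
 * pointer; if another thread changed the head first, reload and retry. */
static void push_pool(apr_pool_t *p)
{
    node *n = apr_palloc(p, sizeof(*n));
    n->pool = p;
    for (;;) {
        n->next = stack_head;
        if (apr_atomic_casptr((volatile void **)&stack_head, n, n->next)
            == n->next) {
            break;  /* the head was still n->next, so n is now installed */
        }
    }
}

/* Pop: CAS the head over to head->next; NULL means the stack is empty. */
static apr_pool_t *pop_pool(void)
{
    for (;;) {
        node *first = stack_head;
        if (first == NULL) {
            return NULL;
        }
        if (apr_atomic_casptr((volatile void **)&stack_head,
                              first->next, first) == first) {
            return first->pool;
        }
    }
}

/* Increment a counter and report its previous value, as the patch does
 * for the idle-worker count: the listener is signaled only on the
 * 0 -> 1 transition. */
static apr_uint32_t inc_idlers(void)
{
    for (;;) {
        apr_uint32_t prev = idlers;
        if (apr_atomic_cas32(&idlers, prev + 1, prev) == prev) {
            return prev;
        }
    }
}

int main(void)
{
    apr_pool_t *p;
    apr_initialize();
    apr_pool_create(&p, NULL);

    push_pool(p);
    printf("popped pool back: %s\n", pop_pool() == p ? "yes" : "no");
    printf("previous idler count: %u\n", inc_idlers());

    apr_terminate();
    return 0;
}

The point of both CAS loops is that compare-and-swap returns the old value,
so a thread can tell whether its update won the race or must retry. The
idler count uses a CAS loop rather than a plain atomic increment,
presumably because the caller needs the previous count to decide whether
to signal the condition variable.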
Comment 1 Nick Kew 2009-05-25 15:13:55 UTC
I think this ancient request is superseded by 2.2.