Bug 57399: server/mpm/event/event.c (-140 / +283 lines)
@@ Lines 177-182 @@ static int dying = 0;
 static int workers_may_exit = 0;
 static int start_thread_may_exit = 0;
 static int listener_may_exit = 0;
+static int listener_is_wakeable = 0;        /* Pollset supports APR_POLLSET_WAKEABLE */
 static int num_listensocks = 0;
 static apr_int32_t conns_this_child;        /* MaxConnectionsPerChild, only access
                                                in listener thread */
@@ Lines 199-204 @@ module AP_MODULE_DECLARE_DATA mpm_event_module;
 struct event_srv_cfg_s;
 typedef struct event_srv_cfg_s event_srv_cfg;
 
+static apr_pollfd_t *listener_pollfd;
+
+/*
+ * The pollset for sockets that are in any of the timeout queues. Currently
+ * we use the timeout_mutex to make sure that connections are added/removed
+ * atomically to/from both event_pollset and a timeout queue. Otherwise
+ * some confusion can happen under high load if timeout queues and pollset
+ * get out of sync.
+ * XXX: It should be possible to make the lock unnecessary in many or even all
+ * XXX: cases.
+ */
+static apr_pollset_t *event_pollset;
+
 struct event_conn_state_t {
     /** APR_RING of expiration timeouts */
     APR_RING_ENTRY(event_conn_state_t) timeout_list;
@@ Lines 228-236 @@ APR_RING_HEAD(timeout_head_t, event_conn_state_t);
 
 struct timeout_queue {
     struct timeout_head_t head;
-    int count, *total;
     apr_interval_time_t timeout;
-    struct timeout_queue *next;
+    apr_uint32_t count;         /* for this queue */
+    apr_uint32_t *total;        /* for all chained/related queues */
+    struct timeout_queue *next; /* chaining */
 };
 /*
  * Several timeout queues that use different timeouts, so that we always can
@@ Lines 244-295 @@ static struct timeout_queue *write_completion_q,
                             *keepalive_q,
                             *linger_q,
                             *short_linger_q;
+static volatile apr_time_t  queues_next_expiry;
 
-static apr_pollfd_t *listener_pollfd;
+/* Prevent extra poll/wakeup calls for timeouts close in the future (queues
+ * have the granularity of a second anyway).
+ * XXX: Wouldn't 0.5s (instead of 0.1s) be "enough"?
+ */
+#define TIMEOUT_FUDGE_FACTOR apr_time_from_msec(100)
 
 /*
  * Macros for accessing struct timeout_queue.
  * For TO_QUEUE_APPEND and TO_QUEUE_REMOVE, timeout_mutex must be held.
  */
-#define TO_QUEUE_APPEND(q, el)                                                \
-    do {                                                                      \
-        APR_RING_INSERT_TAIL(&(q)->head, el, event_conn_state_t,              \
-                             timeout_list);                                   \
-        ++*(q)->total;                                                        \
-        ++(q)->count;                                                         \
-    } while (0)
+static void TO_QUEUE_APPEND(struct timeout_queue *q, event_conn_state_t *el)
+{
+    apr_time_t q_expiry;
+    apr_time_t next_expiry;
 
-#define TO_QUEUE_REMOVE(q, el)                                                \
-    do {                                                                      \
-        APR_RING_REMOVE(el, timeout_list);                                    \
-        --*(q)->total;                                                        \
-        --(q)->count;                                                         \
-    } while (0)
+    APR_RING_INSERT_TAIL(&q->head, el, event_conn_state_t, timeout_list);
+    apr_atomic_inc32(q->total);
+    ++q->count;
 
-#define TO_QUEUE_INIT(q, p, t, v)                                             \
-    do {                                                                      \
-        struct timeout_queue *b = (v);                                        \
-        (q) = apr_palloc((p), sizeof *(q));                                   \
-        APR_RING_INIT(&(q)->head, event_conn_state_t, timeout_list);          \
-        (q)->total = (b) ? (b)->total : apr_pcalloc((p), sizeof *(q)->total); \
-        (q)->count = 0;                                                       \
-        (q)->timeout = (t);                                                   \
-        (q)->next = NULL;                                                     \
-    } while (0)
+    /* Cheaply update the overall queues' next expiry according to the
+     * first entry of this queue (oldest), if necessary.
+     */
+    el = APR_RING_FIRST(&q->head);
+    q_expiry = el->queue_timestamp + q->timeout;
+    next_expiry = queues_next_expiry;
+    if (!next_expiry || next_expiry > q_expiry + TIMEOUT_FUDGE_FACTOR) {
+        queues_next_expiry = q_expiry;
+        /* Unblock the poll()ing listener for it to update its timeout. */
+        if (listener_is_wakeable) {
+            apr_pollset_wakeup(event_pollset);
+        }
+    }
+}
 
-#define TO_QUEUE_ELEM_INIT(el) APR_RING_ELEM_INIT(el, timeout_list)
+static void TO_QUEUE_REMOVE(struct timeout_queue *q, event_conn_state_t *el)
+{
+    APR_RING_REMOVE(el, timeout_list);
+    apr_atomic_dec32(q->total);
+    --q->count;
+}
 
-/*
- * The pollset for sockets that are in any of the timeout queues. Currently
- * we use the timeout_mutex to make sure that connections are added/removed
- * atomically to/from both event_pollset and a timeout queue. Otherwise
- * some confusion can happen under high load if timeout queues and pollset
- * get out of sync.
- * XXX: It should be possible to make the lock unnecessary in many or even all
- * XXX: cases.
- */
-static apr_pollset_t *event_pollset;
+static struct timeout_queue *TO_QUEUE_MAKE(apr_pool_t *p, apr_time_t t,
+                                           struct timeout_queue *ref)
+{
+    struct timeout_queue *q;
+
+    q = apr_pcalloc(p, sizeof *q);
+    APR_RING_INIT(&q->head, event_conn_state_t, timeout_list);
+    q->total = (ref) ? ref->total : apr_pcalloc(p, sizeof *q->total);
+    q->timeout = t;
+
+    return q;
+}
+
+#define TO_QUEUE_ELEM_INIT(el) \
+    APR_RING_ELEM_INIT((el), timeout_list)
 
 /* The structure used to pass unique initialization info to each thread */
 typedef struct
 {
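
Note: TO_QUEUE_MAKE(p, t, ref) shares the atomic "total" counter with the
reference queue, which is how event_post_config() below chains the per-vhost
queues. A minimal usage sketch (hypothetical pool and connection state, not
part of the patch):

    struct timeout_queue *ka5, *ka10;
    ka5  = TO_QUEUE_MAKE(pconf, apr_time_from_sec(5), NULL);
    ka10 = TO_QUEUE_MAKE(pconf, apr_time_from_sec(10), ka5); /* shared total */
    ka5->next = ka10;                 /* chained for process_timeout_queue() */

    apr_thread_mutex_lock(timeout_mutex);
    TO_QUEUE_APPEND(ka5, cs);         /* may lower queues_next_expiry and wake
                                       * the listener via apr_pollset_wakeup() */
    apr_thread_mutex_unlock(timeout_mutex);
    /* apr_atomic_read32(ka5->total) == 1; ka10 sees the same counter. */
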
@@ Lines 474-479 @@ static void wakeup_listener(void)
         return;
     }
 
+    /* Unblock the listener if it's poll()ing */
+    if (listener_is_wakeable) {
+        apr_pollset_wakeup(event_pollset);
+    }
+
     /* unblock the listener if it's waiting for a worker */
     ap_queue_info_term(worker_queue_info);
 
@@ Lines 647-653 @@ static apr_status_t decrement_connection_count(voi
         default:
             break;
     }
-    apr_atomic_dec32(&connection_count);
+    /* Unblock the listener if it's waiting for connection_count = 0 */
+    if (!apr_atomic_dec32(&connection_count)
+             && listener_is_wakeable && listener_may_exit) {
+        apr_pollset_wakeup(event_pollset);
+    }
     return APR_SUCCESS;
 }
 
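
Note: this relies on apr_atomic_dec32() returning zero only when the counter
reaches zero, so exactly one caller (the last terminating connection) issues
the wakeup while an exiting listener waits for connection_count to drain.
Distilled (standalone sketch, not part of the patch):

    apr_uint32_t refs = 1;             /* last remaining connection */
    if (!apr_atomic_dec32(&refs)) {
        /* counter hit zero: we were last, safe to wake the poll()er */
    }
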
@@ Lines 842-855 @@ static int start_lingering_close_common(event_conn
     else {
         cs->c->sbh = NULL;
     }
-    apr_thread_mutex_lock(timeout_mutex);
-    TO_QUEUE_APPEND(q, cs);
     cs->pfd.reqevents = (
             cs->pub.sense == CONN_SENSE_WANT_WRITE ? APR_POLLOUT :
                     APR_POLLIN) | APR_POLLHUP | APR_POLLERR;
     cs->pub.sense = CONN_SENSE_DEFAULT;
+    apr_thread_mutex_lock(timeout_mutex);
+    TO_QUEUE_APPEND(q, cs);
+    apr_thread_mutex_unlock(timeout_mutex);
     rv = apr_pollset_add(event_pollset, &cs->pfd);
-    apr_thread_mutex_unlock(timeout_mutex);
     if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03092)
                      "start_lingering_close: apr_pollset_add failure");
@@ Lines 1126-1139 @@ read_request:
              */
             cs->queue_timestamp = apr_time_now();
             notify_suspend(cs);
-            apr_thread_mutex_lock(timeout_mutex);
-            TO_QUEUE_APPEND(cs->sc->wc_q, cs);
             cs->pfd.reqevents = (
                     cs->pub.sense == CONN_SENSE_WANT_READ ? APR_POLLIN :
                             APR_POLLOUT) | APR_POLLHUP | APR_POLLERR;
             cs->pub.sense = CONN_SENSE_DEFAULT;
+            apr_thread_mutex_lock(timeout_mutex);
+            TO_QUEUE_APPEND(cs->sc->wc_q, cs);
+            apr_thread_mutex_unlock(timeout_mutex);
             rc = apr_pollset_add(event_pollset, &cs->pfd);
-            apr_thread_mutex_unlock(timeout_mutex);
+            if (rc != APR_SUCCESS) {
+                ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(03465)
+                             "process_socket: apr_pollset_add failure for "
+                             "write completion");
+                apr_thread_mutex_lock(timeout_mutex);
+                TO_QUEUE_REMOVE(cs->sc->wc_q, cs);
+                apr_thread_mutex_unlock(timeout_mutex);
+                apr_socket_close(cs->pfd.desc.s);
+                ap_push_pool(worker_queue_info, cs->p);
+            }
             return;
         }
         else if (c->keepalive != AP_CONN_KEEPALIVE || c->aborted ||
@@ Lines 1163-1180 @@ read_request:
          */
         cs->queue_timestamp = apr_time_now();
         notify_suspend(cs);
-        apr_thread_mutex_lock(timeout_mutex);
-        TO_QUEUE_APPEND(cs->sc->ka_q, cs);
 
         /* Add work to pollset. */
         cs->pfd.reqevents = APR_POLLIN;
-        rc = apr_pollset_add(event_pollset, &cs->pfd);
+        apr_thread_mutex_lock(timeout_mutex);
+        TO_QUEUE_APPEND(cs->sc->ka_q, cs);
         apr_thread_mutex_unlock(timeout_mutex);
 
+        rc = apr_pollset_add(event_pollset, &cs->pfd);
         if (rc != APR_SUCCESS) {
             ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf, APLOGNO(03093)
-                         "process_socket: apr_pollset_add failure");
-            AP_DEBUG_ASSERT(rc == APR_SUCCESS);
+                         "process_socket: apr_pollset_add failure for "
+                         "keep alive");
+            apr_thread_mutex_lock(timeout_mutex);
+            TO_QUEUE_REMOVE(cs->sc->ka_q, cs);
+            apr_thread_mutex_unlock(timeout_mutex);
+            apr_socket_close(cs->pfd.desc.s);
+            ap_push_pool(worker_queue_info, cs->p);
+            return;
         }
     }
     else if (cs->pub.state == CONN_STATE_SUSPENDED) {
@@ Lines 1345-1351 @@ static void get_worker(int *have_idle_worker_p, in
 static APR_RING_HEAD(timer_free_ring_t, timer_event_t) timer_free_ring;
 
 static apr_skiplist *timer_skiplist;
+static volatile apr_time_t timers_next_expiry;
 
+/* Same goal as for TIMEOUT_FUDGE_FACTOR (avoid extra poll calls), but applied
+ * to timers. Since their timeouts are custom (user defined), we can't be too
+ * approximative here (hence using 0.01s).
+ */
+#define EVENT_FUDGE_FACTOR apr_time_from_msec(10)
+
 /* The following compare function is used by apr_skiplist_insert() to keep the
  * elements (timers) sorted and provide O(log n) complexity (this is also true
  * for apr_skiplist_{find,remove}(), but those are not used in MPM event where
@@ Lines 1391-1399 @@ static apr_status_t event_register_timed_callback(
     /* XXXXX: optimize */
     te->when = t + apr_time_now();
 
-    /* Okay, add sorted by when.. */
-    apr_skiplist_insert(timer_skiplist, te);
+    {
+        apr_time_t next_expiry;
 
+        /* Okay, add sorted by when.. */
+        apr_skiplist_insert(timer_skiplist, te);
+
+        /* Cheaply update the overall timers' next expiry according to
+         * this event, if necessary.
+         */
+        next_expiry = timers_next_expiry;
+        if (!next_expiry || next_expiry > te->when + EVENT_FUDGE_FACTOR) {
+            timers_next_expiry = te->when;
+            /* Unblock the poll()ing listener for it to update its timeout. */
+            if (listener_is_wakeable) {
+                apr_pollset_wakeup(event_pollset);
+            }
+        }
+    }
+
     apr_thread_mutex_unlock(g_timer_skiplist_mtx);
 
     return APR_SUCCESS;
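
Note: the timer path above and TO_QUEUE_APPEND use the same idiom: publish a
sooner "next expiry", then kick the listener out of poll() so it recomputes
its timeout. Distilled into a hypothetical helper (the patch inlines this
logic at both sites rather than sharing a function):

    static void publish_expiry(volatile apr_time_t *next_expiry,
                               apr_time_t when, apr_interval_time_t fudge)
    {
        apr_time_t cur = *next_expiry;  /* caller holds the relevant lock */
        if (!cur || cur > when + fudge) {
            *next_expiry = when;
            if (listener_is_wakeable) {
                apr_pollset_wakeup(event_pollset);
            }
        }
    }

The fudge factor (100 ms for queues, 10 ms for timers) avoids a wakeup when
the already-published expiry is close enough to the new deadline.
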
@@ Lines 1425-1431 @@ static void process_lingering_close(event_conn_sta
         return;
     }
 
-    apr_thread_mutex_lock(timeout_mutex);
     rv = apr_pollset_remove(event_pollset, pfd);
     AP_DEBUG_ASSERT(rv == APR_SUCCESS);
 
@@ Lines 1432-1437 @@ static void process_lingering_close(event_conn_sta
     rv = apr_socket_close(csd);
     AP_DEBUG_ASSERT(rv == APR_SUCCESS);
 
+    apr_thread_mutex_lock(timeout_mutex);
     TO_QUEUE_REMOVE(q, cs);
     apr_thread_mutex_unlock(timeout_mutex);
     TO_QUEUE_ELEM_INIT(cs);
@@ Lines 1449-1461 @@ static void process_timeout_queue(struct timeout_q
                                   apr_time_t timeout_time,
                                   int (*func)(event_conn_state_t *))
 {
-    int total = 0, count;
+    apr_uint32_t total = 0, count;
     event_conn_state_t *first, *cs, *last;
     struct timeout_head_t trash;
     struct timeout_queue *qp;
     apr_status_t rv;
 
-    if (!*q->total) {
+    if (!apr_atomic_read32(q->total)) {
         return;
     }
 
@@ Lines 1464-1483 @@ static void process_timeout_queue(struct timeout_q
         count = 0;
         cs = first = last = APR_RING_FIRST(&qp->head);
         while (cs != APR_RING_SENTINEL(&qp->head, event_conn_state_t,
-                                       timeout_list)
-               /* Trash the entry if:
-                * - no timeout_time was given (asked for all), or
-                * - it expired (according to the queue timeout), or
-                * - the system clock skewed in the past: no entry should be
-                *   registered above the given timeout_time (~now) + the queue
-                *   timeout, we won't keep any here (eg. for centuries).
-                * Stop otherwise, no following entry will match thanks to the
-                * single timeout per queue (entries are added to the end!).
-                * This allows maintenance in O(1).
-                */
-               && (!timeout_time
-                   || cs->queue_timestamp + qp->timeout < timeout_time
-                   || cs->queue_timestamp > timeout_time + qp->timeout)) {
+                                       timeout_list)) {
+            /* Trash the entry if:
+             * - no timeout_time was given (asked for all), or
+             * - it expired (according to the queue timeout), or
+             * - the system clock skewed in the past: no entry should be
+             *   registered above the given timeout_time (~now) + the queue
+             *   timeout, we won't keep any here (eg. for centuries).
+             *
+             * Otherwise stop, no following entry will match thanks to the
+             * single timeout per queue (entries are added to the end!).
+             * This allows maintenance in O(1).
+             */
+            if (timeout_time
+                    && cs->queue_timestamp + qp->timeout > timeout_time
+                    && cs->queue_timestamp < timeout_time + qp->timeout) {
+                /* Since this is the next expiring of this queue, update the
+                 * overall queues' next expiry if it's later than this one.
+                 */
+                apr_time_t q_expiry = cs->queue_timestamp + qp->timeout;
+                apr_time_t next_expiry = queues_next_expiry;
+                if (!next_expiry || next_expiry > q_expiry) {
+                    queues_next_expiry = q_expiry;
+                }
+                break;
+            }
+
             last = cs;
             rv = apr_pollset_remove(event_pollset, &cs->pfd);
             if (rv != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rv)) {
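
Note: with the loop restructured, the keep/stop test reads as "entry not yet
expired and its timestamp is sane". Worked through with a 5-second queue and
timeout_time == T (illustrative numbers, not from the patch):

  - queue_timestamp = T-3: expires at T+2 > T, and T-3 < T+5, so the scan
    stops here and queues_next_expiry is lowered to T+2;
  - queue_timestamp = T-7: expired at T-2, trashed;
  - queue_timestamp = T+60: fails queue_timestamp < T+5 (clock skew), trashed.

Since entries are appended in timestamp order, everything before the first
surviving entry is expired, which keeps maintenance O(1) per queue.
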
@@ Lines 1493-1498 @@ static void process_timeout_queue(struct timeout_q
         APR_RING_UNSPLICE(first, last, timeout_list);
         APR_RING_SPLICE_TAIL(&trash, first, last, event_conn_state_t,
                              timeout_list);
+        AP_DEBUG_ASSERT(apr_atomic_read32(q->total) >= count);
+        apr_atomic_sub32(q->total, count);
         qp->count -= count;
         total += count;
     }
@@ Lines 1499-1506 @@ static void process_timeout_queue(struct timeout_q
     if (!total)
         return;
 
-    AP_DEBUG_ASSERT(*q->total >= total);
-    *q->total -= total;
     apr_thread_mutex_unlock(timeout_mutex);
     first = APR_RING_FIRST(&trash);
     do {
@@ Lines 1512-1517 @@ static void process_timeout_queue(struct timeout_q
     apr_thread_mutex_lock(timeout_mutex);
 }
 
+static void process_keepalive_queue(apr_time_t timeout_time)
+{
+    /* If all workers are busy, we kill older keep-alive connections so
+     * that they may connect to another process.
+     */
+    if (!timeout_time) {
+        ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
+                     "All workers are busy or dying, will close %u "
+                     "keep-alive connections",
+                     apr_atomic_read32(keepalive_q->total));
+    }
+    process_timeout_queue(keepalive_q, timeout_time,
+                          start_lingering_close_nonblocking);
+}
+
 static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
 {
     timer_event_t *ep;
@@ Lines 1519-1524 @@ static void * APR_THREAD_FUNC listener_thread(apr_
     apr_status_t rc;
     proc_info *ti = dummy;
     int process_slot = ti->pslot;
+    struct process_score *ps = ap_get_scoreboard_process(process_slot);
     apr_pool_t *tpool = apr_thread_pool_get(thd);
     void *csd = NULL;
     apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
@@ Lines 1534-1547 @@ static void * APR_THREAD_FUNC listener_thread(apr_
     last_log = apr_time_now();
     free(ti);
 
-    /* the following times out events that are really close in the future
-     *   to prevent extra poll calls
-     *
-     * current value is .1 second
-     */
-#define TIMEOUT_FUDGE_FACTOR 100000
-#define EVENT_FUDGE_FACTOR 10000
-
     rc = init_pollset(tpool);
     if (rc != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
1559
1655
1560
    for (;;) {
1656
    for (;;) {
1561
        int workers_were_busy = 0;
1657
        int workers_were_busy = 0;
1658
1562
        if (listener_may_exit) {
1659
        if (listener_may_exit) {
1563
            close_listeners(process_slot, &closed);
1660
            close_listeners(process_slot, &closed);
1564
            if (terminate_mode == ST_UNGRACEFUL
1661
            if (terminate_mode == ST_UNGRACEFUL
Lines 1572-1578 static void * APR_THREAD_FUNC listener_thread(apr_ Link Here
1572
        now = apr_time_now();
1669
        now = apr_time_now();
1573
        if (APLOGtrace6(ap_server_conf)) {
1670
        if (APLOGtrace6(ap_server_conf)) {
1574
            /* trace log status every second */
1671
            /* trace log status every second */
1575
            if (now - last_log > apr_time_from_msec(1000)) {
1672
            if (now - last_log > apr_time_from_sec(1)) {
1576
                last_log = now;
1673
                last_log = now;
1577
                apr_thread_mutex_lock(timeout_mutex);
1674
                apr_thread_mutex_lock(timeout_mutex);
1578
                ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
1675
                ap_log_error(APLOG_MARK, APLOG_TRACE6, 0, ap_server_conf,
@@ Lines 1580-1587 @@ static void * APR_THREAD_FUNC listener_thread(apr_
                              "keep-alive: %d lingering: %d suspended: %u)",
                              apr_atomic_read32(&connection_count),
                              apr_atomic_read32(&clogged_count),
-                             *write_completion_q->total,
-                             *keepalive_q->total,
+                             apr_atomic_read32(write_completion_q->total),
+                             apr_atomic_read32(keepalive_q->total),
                              apr_atomic_read32(&lingering_count),
                              apr_atomic_read32(&suspended_count));
                 if (dying) {
1594
            }
1691
            }
1595
        }
1692
        }
1596
1693
1597
        apr_thread_mutex_lock(g_timer_skiplist_mtx);
1694
        /* Start with an infinite poll() timeout and update it according to
1598
        te = apr_skiplist_peek(timer_skiplist);
1695
         * the next expiring timer or queue entry. If there are none, either
1599
        if (te) {
1696
         * the listener is wakeable and it can poll() indefinitely until a wake
1600
            if (te->when > now) {
1697
         * up occurs, otherwise periodic checks (maintenance, shutdown, ...)
1601
                timeout_interval = te->when - now;
1698
         * must be performed.
1699
         */
1700
        timeout_interval = -1;
1701
1702
        /* Push expired timers to a worker, the first remaining one determines
1703
         * the maximum time to poll() below, if any.
1704
         */
1705
        timeout_time = timers_next_expiry;
1706
        if (timeout_time && timeout_time < now + EVENT_FUDGE_FACTOR) {
1707
            apr_thread_mutex_lock(g_timer_skiplist_mtx);
1708
            while ((te = apr_skiplist_peek(timer_skiplist))) {
1709
                if (te->when > now + EVENT_FUDGE_FACTOR) {
1710
                    timers_next_expiry = te->when;
1711
                    timeout_interval = te->when - now;
1712
                    break;
1713
                }
1714
                apr_skiplist_pop(timer_skiplist, NULL);
1715
                push_timer2worker(te);
1602
            }
1716
            }
1603
            else {
1717
            if (!te) {
1604
                timeout_interval = 1;
1718
                timers_next_expiry = 0;
1605
            }
1719
            }
1720
            apr_thread_mutex_unlock(g_timer_skiplist_mtx);
1606
        }
1721
        }
1607
        else {
1722
1608
            timeout_interval = apr_time_from_msec(100);
1723
        /* Same for queues, use their next expiry, if any. */
1724
        timeout_time = queues_next_expiry;
1725
        if (timeout_time
1726
                && (timeout_interval < 0
1727
                    || timeout_time <= now
1728
                    || timeout_interval > timeout_time - now)) {
1729
            timeout_interval = timeout_time > now ? timeout_time - now : 1;
1609
        }
1730
        }
1610
        apr_thread_mutex_unlock(g_timer_skiplist_mtx);
1611
1731
1732
        /* When non-wakeable, don't wait more than 100 ms, in any case. */
1733
#define NON_WAKEABLE_POLL_TIMEOUT apr_time_from_msec(100)
1734
        if (!listener_is_wakeable
1735
                && (timeout_interval < 0
1736
                    || timeout_interval > NON_WAKEABLE_POLL_TIMEOUT)) {
1737
            timeout_interval = NON_WAKEABLE_POLL_TIMEOUT;
1738
        }
1739
1612
        rc = apr_pollset_poll(event_pollset, timeout_interval, &num, &out_pfd);
1740
        rc = apr_pollset_poll(event_pollset, timeout_interval, &num, &out_pfd);
1613
        if (rc != APR_SUCCESS) {
1741
        if (rc != APR_SUCCESS) {
1614
            if (APR_STATUS_IS_EINTR(rc)) {
1742
            if (APR_STATUS_IS_EINTR(rc)) {
1615
                continue;
1743
                /* Woken up, if we are exiting we must fall through to kill
1744
                 * kept-alive connections, otherwise we only need to update
1745
                 * timeouts (logic is above, so restart the loop).
1746
                 */
1747
                if (!listener_may_exit) {
1748
                    continue;
1749
                }
1750
                timeout_time = 0;
1616
            }
1751
            }
1617
            if (!APR_STATUS_IS_TIMEUP(rc)) {
1752
            else if (!APR_STATUS_IS_TIMEUP(rc)) {
1618
                ap_log_error(APLOG_MARK, APLOG_CRIT, rc, ap_server_conf,
1753
                ap_log_error(APLOG_MARK, APLOG_CRIT, rc, ap_server_conf,
1619
                             "apr_pollset_poll failed.  Attempting to "
1754
                             "apr_pollset_poll failed.  Attempting to "
1620
                             "shutdown process gracefully");
1755
                             "shutdown process gracefully");
1621
                signal_threads(ST_GRACEFUL);
1756
                signal_threads(ST_GRACEFUL);
1622
            }
1757
            }
1758
            num = 0;
1623
        }
1759
        }
1624
1760
1625
        if (listener_may_exit) {
1761
        if (listener_may_exit) {
Lines 1664-1670 static void * APR_THREAD_FUNC listener_thread(apr_ Link Here
1664
                               &workers_were_busy);
1800
                               &workers_were_busy);
1665
                    apr_thread_mutex_lock(timeout_mutex);
1801
                    apr_thread_mutex_lock(timeout_mutex);
1666
                    TO_QUEUE_REMOVE(remove_from_q, cs);
1802
                    TO_QUEUE_REMOVE(remove_from_q, cs);
1667
                    rc = apr_pollset_remove(event_pollset, &cs->pfd);
1668
                    apr_thread_mutex_unlock(timeout_mutex);
1803
                    apr_thread_mutex_unlock(timeout_mutex);
1669
1804
1670
                    /*
1805
                    /*
@@ Lines 1673-1678 @@ static void * APR_THREAD_FUNC listener_thread(apr_
                      * therefore, we can accept _SUCCESS or _NOTFOUND,
                      * and we still want to keep going
                      */
+                    rc = apr_pollset_remove(event_pollset, &cs->pfd);
                     if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) {
                         ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
                                      APLOGNO(03094) "pollset remove failed");
1808
        /* XXX possible optimization: stash the current time for use as
1944
        /* XXX possible optimization: stash the current time for use as
1809
         * r->request_time for new requests
1945
         * r->request_time for new requests
1810
         */
1946
         */
1811
        now = apr_time_now();
1947
        /* We process the timeout queues here only when their overall next
1812
        /* We only do this once per 0.1s (TIMEOUT_FUDGE_FACTOR), or on a clock
1948
         * expiry (read once above) is over. This happens accurately since
1813
         * skew (if the system time is set back in the meantime, timeout_time
1949
         * adding to the queues (in workers) can only decrease this expiry,
1814
         * will exceed now + TIMEOUT_FUDGE_FACTOR, can't happen otherwise).
1950
         * while latest ones are only taken into account here (in listener)
1951
         * during queues' processing, with the lock held. This works both
1952
         * with and without wake-ability.
1815
         */
1953
         */
1816
        if (now > timeout_time || now + TIMEOUT_FUDGE_FACTOR < timeout_time ) {
1954
        if (timeout_time && timeout_time < (now = apr_time_now())) {
1817
            struct process_score *ps;
1818
            timeout_time = now + TIMEOUT_FUDGE_FACTOR;
1955
            timeout_time = now + TIMEOUT_FUDGE_FACTOR;
1819
1956
1820
            /* handle timed out sockets */
1957
            /* handle timed out sockets */
1821
            apr_thread_mutex_lock(timeout_mutex);
1958
            apr_thread_mutex_lock(timeout_mutex);
1822
1959
1960
            /* Processing all the queues below will recompute this. */
1961
            queues_next_expiry = 0;
1962
1823
            /* Step 1: keepalive timeouts */
1963
            /* Step 1: keepalive timeouts */
1824
            /* If all workers are busy, we kill older keep-alive connections so that they
1964
            if (workers_were_busy || dying) {
1825
             * may connect to another process.
1965
                process_keepalive_queue(0); /* kill'em all \m/ */
1826
             */
1827
            if ((workers_were_busy || dying) && *keepalive_q->total) {
1828
                if (!dying)
1829
                    ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
1830
                                 "All workers are busy, will close %d keep-alive "
1831
                                 "connections",
1832
                                 *keepalive_q->total);
1833
                process_timeout_queue(keepalive_q, 0,
1834
                                      start_lingering_close_nonblocking);
1835
            }
1966
            }
1836
            else {
1967
            else {
1837
                process_timeout_queue(keepalive_q, timeout_time,
1968
                process_keepalive_queue(timeout_time);
1838
                                      start_lingering_close_nonblocking);
1839
            }
1969
            }
1840
            /* Step 2: write completion timeouts */
1970
            /* Step 2: write completion timeouts */
1841
            process_timeout_queue(write_completion_q, timeout_time,
1971
            process_timeout_queue(write_completion_q, timeout_time,
1842
                                  start_lingering_close_nonblocking);
1972
                                  start_lingering_close_nonblocking);
1843
            /* Step 3: (normal) lingering close completion timeouts */
1973
            /* Step 3: (normal) lingering close completion timeouts */
1844
            process_timeout_queue(linger_q, timeout_time, stop_lingering_close);
1974
            process_timeout_queue(linger_q, timeout_time,
1975
                                  stop_lingering_close);
1845
            /* Step 4: (short) lingering close completion timeouts */
1976
            /* Step 4: (short) lingering close completion timeouts */
1846
            process_timeout_queue(short_linger_q, timeout_time, stop_lingering_close);
1977
            process_timeout_queue(short_linger_q, timeout_time,
1978
                                  stop_lingering_close);
1847
1979
1848
            ps = ap_get_scoreboard_process(process_slot);
1849
            ps->write_completion = *write_completion_q->total;
1850
            ps->keep_alive = *keepalive_q->total;
1851
            apr_thread_mutex_unlock(timeout_mutex);
1980
            apr_thread_mutex_unlock(timeout_mutex);
1852
1981
1982
            ps->keep_alive = apr_atomic_read32(keepalive_q->total);
1983
            ps->write_completion = apr_atomic_read32(write_completion_q->total);
1853
            ps->connections = apr_atomic_read32(&connection_count);
1984
            ps->connections = apr_atomic_read32(&connection_count);
1854
            ps->suspended = apr_atomic_read32(&suspended_count);
1985
            ps->suspended = apr_atomic_read32(&suspended_count);
1855
            ps->lingering_close = apr_atomic_read32(&lingering_count);
1986
            ps->lingering_close = apr_atomic_read32(&lingering_count);
1856
        }
1987
        }
1988
        else if ((workers_were_busy || dying)
1989
                 && apr_atomic_read32(keepalive_q->total)) {
1990
            apr_thread_mutex_lock(timeout_mutex);
1991
            process_keepalive_queue(0); /* kill'em all \m/ */
1992
            apr_thread_mutex_unlock(timeout_mutex);
1993
            ps->keep_alive = 0;
1994
        }
1995
1857
        if (listeners_disabled && !workers_were_busy
1996
        if (listeners_disabled && !workers_were_busy
1858
            && (int)apr_atomic_read32(&connection_count)
1997
            && (int)apr_atomic_read32(&connection_count)
1859
               - (int)apr_atomic_read32(&lingering_count)
1998
               - (int)apr_atomic_read32(&lingering_count)
Lines 2064-2069 static void *APR_THREAD_FUNC start_threads(apr_thr Link Here
2064
    int prev_threads_created;
2203
    int prev_threads_created;
2065
    int max_recycled_pools = -1;
2204
    int max_recycled_pools = -1;
2066
    int good_methods[] = {APR_POLLSET_KQUEUE, APR_POLLSET_PORT, APR_POLLSET_EPOLL};
2205
    int good_methods[] = {APR_POLLSET_KQUEUE, APR_POLLSET_PORT, APR_POLLSET_EPOLL};
2206
    /* XXX don't we need more to handle K-A or lingering close? */
2207
    const apr_uint32_t pollset_size = threads_per_child * 2;
2067
2208
2068
    /* We must create the fd queues before we start up the listener
2209
    /* We must create the fd queues before we start up the listener
2069
     * and worker threads. */
2210
     * and worker threads. */
@@ Lines 2103-2126 @@ static void *APR_THREAD_FUNC start_threads(apr_thr
 
     /* Create the main pollset */
     for (i = 0; i < sizeof(good_methods) / sizeof(good_methods[0]); i++) {
-        rv = apr_pollset_create_ex(&event_pollset,
-                            threads_per_child*2, /* XXX don't we need more, to handle
-                                                * connections in K-A or lingering
-                                                * close?
-                                                */
-                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY | APR_POLLSET_NODEFAULT,
-                            good_methods[i]);
+        apr_uint32_t flags = APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY |
+                             APR_POLLSET_NODEFAULT | APR_POLLSET_WAKEABLE;
+        rv = apr_pollset_create_ex(&event_pollset, pollset_size, pchild, flags,
+                                   good_methods[i]);
         if (rv == APR_SUCCESS) {
+            listener_is_wakeable = 1;
             break;
         }
+        flags &= ~APR_POLLSET_WAKEABLE;
+        rv = apr_pollset_create_ex(&event_pollset, pollset_size, pchild, flags,
+                                   good_methods[i]);
+        if (rv == APR_SUCCESS) {
+            break;
+        }
     }
     if (rv != APR_SUCCESS) {
-        rv = apr_pollset_create(&event_pollset,
-                               threads_per_child*2, /* XXX don't we need more, to handle
-                                                     * connections in K-A or lingering
-                                                     * close?
-                                                     */
-                               pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
+        rv = apr_pollset_create(&event_pollset, pollset_size, pchild,
+                                APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
     }
     if (rv != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03103)
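
Note: the creation logic above implements graceful degradation: try each
method with APR_POLLSET_WAKEABLE first, retry without it, and only then fall
back to the default method. A standalone sketch of the wakeable round trip
(assumes APR 1.4+ for apr_pollset_wakeup(); pool setup omitted):

    apr_pollset_t *ps;
    apr_uint32_t flags = APR_POLLSET_THREADSAFE | APR_POLLSET_WAKEABLE;
    int wakeable = 1;
    if (apr_pollset_create(&ps, 64, pool, flags) != APR_SUCCESS) {
        wakeable = 0;                   /* method lacks wakeup support */
        apr_pollset_create(&ps, 64, pool, flags & ~APR_POLLSET_WAKEABLE);
    }
    /* From another thread: if (wakeable) apr_pollset_wakeup(ps);
     * apr_pollset_poll() then returns APR_EINTR, which is exactly what the
     * listener's EINTR branch above handles. */
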
@@ Lines 2129-2135 @@ static void *APR_THREAD_FUNC start_threads(apr_thr
     }
 
     ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02471)
-                 "start_threads: Using %s", apr_pollset_method_name(event_pollset));
+                 "start_threads: Using %s (%swakeable)",
+                 apr_pollset_method_name(event_pollset),
+                 listener_is_wakeable ? "" : "not ");
     worker_sockets = apr_pcalloc(pchild, threads_per_child
                                  * sizeof(apr_socket_t *));
 
3262
    wc.hash = apr_hash_make(ptemp);
3405
    wc.hash = apr_hash_make(ptemp);
3263
    ka.hash = apr_hash_make(ptemp);
3406
    ka.hash = apr_hash_make(ptemp);
3264
3407
3265
    TO_QUEUE_INIT(linger_q, pconf,
3408
    linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(MAX_SECS_TO_LINGER),
3266
                  apr_time_from_sec(MAX_SECS_TO_LINGER), NULL);
3409
                             NULL);
3267
    TO_QUEUE_INIT(short_linger_q, pconf,
3410
    short_linger_q = TO_QUEUE_MAKE(pconf, apr_time_from_sec(SECONDS_TO_LINGER),
3268
                  apr_time_from_sec(SECONDS_TO_LINGER), NULL);
3411
                                   NULL);
3269
3412
3270
    for (; s; s = s->next) {
3413
    for (; s; s = s->next) {
3271
        event_srv_cfg *sc = apr_pcalloc(pconf, sizeof *sc);
3414
        event_srv_cfg *sc = apr_pcalloc(pconf, sizeof *sc);
@@ Lines 3273-3283 @@ static int event_post_config(apr_pool_t *pconf, ap
         ap_set_module_config(s->module_config, &mpm_event_module, sc);
         if (!wc.tail) {
             /* The main server uses the global queues */
-            TO_QUEUE_INIT(wc.q, pconf, s->timeout, NULL);
+            wc.q = TO_QUEUE_MAKE(pconf, s->timeout, NULL);
             apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
             wc.tail = write_completion_q = wc.q;
 
-            TO_QUEUE_INIT(ka.q, pconf, s->keep_alive_timeout, NULL);
+            ka.q = TO_QUEUE_MAKE(pconf, s->keep_alive_timeout, NULL);
             apr_hash_set(ka.hash, &s->keep_alive_timeout,
                          sizeof s->keep_alive_timeout, ka.q);
             ka.tail = keepalive_q = ka.q;
3287
             * or their own queue(s) if there isn't */
3430
             * or their own queue(s) if there isn't */
3288
            wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout);
3431
            wc.q = apr_hash_get(wc.hash, &s->timeout, sizeof s->timeout);
3289
            if (!wc.q) {
3432
            if (!wc.q) {
3290
                TO_QUEUE_INIT(wc.q, pconf, s->timeout, wc.tail);
3433
                wc.q = TO_QUEUE_MAKE(pconf, s->timeout, wc.tail);
3291
                apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
3434
                apr_hash_set(wc.hash, &s->timeout, sizeof s->timeout, wc.q);
3292
                wc.tail = wc.tail->next = wc.q;
3435
                wc.tail = wc.tail->next = wc.q;
3293
            }
3436
            }
Lines 3295-3301 static int event_post_config(apr_pool_t *pconf, ap Link Here
3295
            ka.q = apr_hash_get(ka.hash, &s->keep_alive_timeout,
3438
            ka.q = apr_hash_get(ka.hash, &s->keep_alive_timeout,
3296
                                sizeof s->keep_alive_timeout);
3439
                                sizeof s->keep_alive_timeout);
3297
            if (!ka.q) {
3440
            if (!ka.q) {
3298
                TO_QUEUE_INIT(ka.q, pconf, s->keep_alive_timeout, ka.tail);
3441
                ka.q = TO_QUEUE_MAKE(pconf, s->keep_alive_timeout, ka.tail);
3299
                apr_hash_set(ka.hash, &s->keep_alive_timeout,
3442
                apr_hash_set(ka.hash, &s->keep_alive_timeout,
3300
                             sizeof s->keep_alive_timeout, ka.q);
3443
                             sizeof s->keep_alive_timeout, ka.q);
3301
                ka.tail = ka.tail->next = ka.q;
3444
                ka.tail = ka.tail->next = ka.q;
