View | Details | Raw Unified | Return to bug 42806
Collapse All | Expand All

(-)apr/atomic/unix/mutex.c (+192 lines)
Line 0 Link Here
1
/* Licensed to the Apache Software Foundation (ASF) under one or more
2
 * contributor license agreements.  See the NOTICE file distributed with
3
 * this work for additional information regarding copyright ownership.
4
 * The ASF licenses this file to You under the Apache License, Version 2.0
5
 * (the "License"); you may not use this file except in compliance with
6
 * the License.  You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#include "apr_arch_atomic.h"
18
19
#ifdef USE_ATOMICS_GENERIC
20
21
#include <stdlib.h>
22
23
#if APR_HAS_THREADS
24
#   define DECLARE_MUTEX_LOCKED(name, mem)  \
25
        apr_thread_mutex_t *name = mutex_hash(mem)
26
#   define MUTEX_UNLOCK(name)                                   \
27
        do {                                                    \
28
            if (apr_thread_mutex_unlock(name) != APR_SUCCESS)   \
29
                abort();                                        \
30
        } while (0)
31
#else
32
#   define DECLARE_MUTEX_LOCKED(name, mem)
33
#   define MUTEX_UNLOCK(name)
34
#   warning Be warned: using stubs for all atomic operations
35
#endif
36
37
#if APR_HAS_THREADS
38
39
static apr_thread_mutex_t **hash_mutex;
40
41
#define NUM_ATOMIC_HASH 7
42
/* shift by 2 to get rid of alignment issues */
43
#define ATOMIC_HASH(x) (unsigned int)(((unsigned long)(x)>>2)%(unsigned int)NUM_ATOMIC_HASH)
44
45
static apr_status_t atomic_cleanup(void *data)
46
{
47
    if (hash_mutex == data)
48
        hash_mutex = NULL;
49
50
    return APR_SUCCESS;
51
}
52
53
/* Set up the hash table of mutexes that backs the generic (mutex-based)
 * atomic operations.  Idempotent: a second call while the table exists
 * is a no-op.  Returns APR_SUCCESS, or the error from mutex creation.
 */
APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
{
    int i;
    apr_status_t rv;

    if (hash_mutex != NULL)
        return APR_SUCCESS;

    hash_mutex = apr_palloc(p, sizeof(apr_thread_mutex_t*) * NUM_ATOMIC_HASH);
    apr_pool_cleanup_register(p, hash_mutex, atomic_cleanup,
                              apr_pool_cleanup_null);

    for (i = 0; i < NUM_ATOMIC_HASH; i++) {
        rv = apr_thread_mutex_create(&(hash_mutex[i]),
                                     APR_THREAD_MUTEX_DEFAULT, p);
        if (rv != APR_SUCCESS) {
            /* Fix: don't leave hash_mutex pointing at a partially
             * initialized table.  Previously a failed init left the
             * pointer non-NULL, so a retry would return APR_SUCCESS
             * with some buckets still uncreated.  Running the cleanup
             * both resets hash_mutex to NULL and unregisters it. */
            apr_pool_cleanup_run(p, hash_mutex, atomic_cleanup);
            return rv;
        }
    }

    return APR_SUCCESS;
}
75
76
/* Select the bucket mutex guarding the given address and return it
 * already locked; the caller releases it with MUTEX_UNLOCK().  A lock
 * failure here is unrecoverable for atomic semantics, so abort(). */
static APR_INLINE apr_thread_mutex_t *mutex_hash(volatile apr_uint32_t *mem)
{
    apr_thread_mutex_t *m = hash_mutex[ATOMIC_HASH(mem)];

    if (apr_thread_mutex_lock(m) != APR_SUCCESS)
        abort();

    return m;
}
86
87
#else
88
89
APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
90
{
91
    return APR_SUCCESS;
92
}
93
94
#endif /* APR_HAS_THREADS */
95
96
/* Plain load with no mutex taken — unlike every other operation in this
 * file.  NOTE(review): presumably relies on aligned 32-bit loads being
 * indivisible on all supported platforms; confirm that assumption. */
APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
{
    return *mem;
}
100
101
/* Store val into *mem under the bucket mutex, so the store cannot
 * interleave with a locked read-modify-write on the same word. */
APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    DECLARE_MUTEX_LOCKED(lock, mem);
    *mem = val;
    MUTEX_UNLOCK(lock);
}
109
110
/* Atomically add val to *mem; returns the value *mem held before the
 * addition (fetch-and-add semantics). */
APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    apr_uint32_t prev;
    DECLARE_MUTEX_LOCKED(lock, mem);

    prev = *mem;
    *mem = prev + val;

    MUTEX_UNLOCK(lock);
    return prev;
}
122
123
/* Atomically subtract val from *mem.  No return value, by contract. */
APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    DECLARE_MUTEX_LOCKED(lock, mem);

    *mem = *mem - val;

    MUTEX_UNLOCK(lock);
}
129
130
/* Atomically increment *mem; returns the pre-increment value
 * (delegates to the locked fetch-and-add). */
APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
{
    apr_uint32_t prev = apr_atomic_add32(mem, 1);
    return prev;
}
134
135
/* Atomically decrement *mem; returns the post-decrement value, i.e.
 * nonzero until the counter reaches zero.  (Local renamed from `new`,
 * which is a C++ keyword and trips C++ translation units.) */
APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
{
    apr_uint32_t remaining;
    DECLARE_MUTEX_LOCKED(lock, mem);

    remaining = --(*mem);

    MUTEX_UNLOCK(lock);
    return remaining;
}
147
148
/* Compare-and-swap: if *mem equals cmp, store with; either way return
 * the value *mem held on entry, so the caller can tell whether the
 * swap happened (prev == cmp). */
APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
                              apr_uint32_t cmp)
{
    apr_uint32_t prev;
    DECLARE_MUTEX_LOCKED(lock, mem);

    prev = *mem;
    if (prev == cmp)
        *mem = with;

    MUTEX_UNLOCK(lock);
    return prev;
}
163
164
/* Atomically exchange: store val into *mem and return the value that
 * was there before. */
APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    apr_uint32_t old_value;
    DECLARE_MUTEX_LOCKED(lock, mem);

    old_value = *mem;
    *mem = val;

    MUTEX_UNLOCK(lock);
    return old_value;
}
176
177
/* Pointer compare-and-swap: if *mem equals cmp, store with; returns the
 * pointer *mem held on entry.
 */
APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
{
    void *prev;
    /* Fix: hash on the ADDRESS of the slot, as every other operation in
     * this file does — not on the value currently stored in it.  The
     * old DECLARE_MUTEX_LOCKED(mutex, *mem) selected a bucket from the
     * stored pointer value, so two threads CASing the same location
     * could lock different mutexes once the value changed, breaking
     * mutual exclusion entirely. */
    DECLARE_MUTEX_LOCKED(mutex, (volatile apr_uint32_t *)mem);

    prev = *(void **)mem;
    if (prev == cmp) {
        *mem = with;
    }

    MUTEX_UNLOCK(mutex);

    return prev;
}
191
192
#endif /* USE_ATOMICS_GENERIC */
(-)apr/atomic/unix/apr_atomic.c (-464 lines)
Lines 1-464 Link Here
1
/* Licensed to the Apache Software Foundation (ASF) under one or more
2
 * contributor license agreements.  See the NOTICE file distributed with
3
 * this work for additional information regarding copyright ownership.
4
 * The ASF licenses this file to You under the Apache License, Version 2.0
5
 * (the "License"); you may not use this file except in compliance with
6
 * the License.  You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#include "apr.h"
18
#include "apr_atomic.h"
19
#include "apr_thread_mutex.h"
20
21
#include "apr_private.h"
22
23
#include <stdlib.h>
24
#if (defined(SOLARIS2) && SOLARIS2 >= 10)
25
#include <atomic.h>
26
#endif
27
28
#if defined(__GNUC__) && defined(__STRICT_ANSI__) && !defined(USE_GENERIC_ATOMICS)
29
/* force use of generic atomics if building e.g. with -std=c89, which
30
 * doesn't allow inline asm */
31
#define USE_GENERIC_ATOMICS
32
#endif
33
34
#if (defined(__i386__) || defined(__x86_64__)) \
35
    && defined(__GNUC__) && !defined(USE_GENERIC_ATOMICS)
36
37
APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, 
38
                                           apr_uint32_t with,
39
                                           apr_uint32_t cmp)
40
{
41
    apr_uint32_t prev;
42
43
    asm volatile ("lock; cmpxchgl %1, %2"             
44
                  : "=a" (prev)               
45
                  : "r" (with), "m" (*(mem)), "0"(cmp) 
46
                  : "memory", "cc");
47
    return prev;
48
}
49
#define APR_OVERRIDE_ATOMIC_CAS32
50
51
static apr_uint32_t inline intel_atomic_add32(volatile apr_uint32_t *mem, 
52
                                              apr_uint32_t val)
53
{
54
    asm volatile ("lock; xaddl %0,%1"
55
                  : "=r"(val), "=m"(*mem) /* outputs */
56
                  : "0"(val), "m"(*mem)   /* inputs */
57
                  : "memory", "cc");
58
    return val;
59
}
60
61
APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, 
62
                                           apr_uint32_t val)
63
{
64
    return intel_atomic_add32(mem, val);
65
}
66
#define APR_OVERRIDE_ATOMIC_ADD32
67
68
APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
69
{
70
    asm volatile ("lock; subl %1, %0"
71
                  :
72
                  : "m" (*(mem)), "r" (val)
73
                  : "memory", "cc");
74
}
75
#define APR_OVERRIDE_ATOMIC_SUB32
76
77
APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
78
{
79
    unsigned char prev;
80
81
    asm volatile ("lock; decl %1;\n\t"
82
                  "setnz %%al"
83
                  : "=a" (prev)
84
                  : "m" (*(mem))
85
                  : "memory", "cc");
86
    return prev;
87
}
88
#define APR_OVERRIDE_ATOMIC_DEC32
89
90
APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
91
{
92
    return intel_atomic_add32(mem, 1);
93
}
94
#define APR_OVERRIDE_ATOMIC_INC32
95
96
APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
97
{
98
    *mem = val;
99
}
100
#define APR_OVERRIDE_ATOMIC_SET32
101
102
APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
103
{
104
    apr_uint32_t prev = val;
105
106
    asm volatile ("lock; xchgl %0, %1"
107
                  : "=r" (prev)
108
                  : "m" (*(mem)), "0"(prev)
109
                  : "memory");
110
    return prev;
111
}
112
#define APR_OVERRIDE_ATOMIC_XCHG32
113
114
/*#define apr_atomic_init(pool)        APR_SUCCESS*/
115
116
#endif /* (__i386__ || __x86_64__) && __GNUC__ && !USE_GENERIC_ATOMICS */
117
118
#if (defined(__PPC__) || defined(__ppc__)) && defined(__GNUC__) \
119
    && !defined(USE_GENERIC_ATOMICS)
120
121
APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem,
122
                                           apr_uint32_t swap,
123
                                           apr_uint32_t cmp)
124
{
125
    apr_uint32_t prev;
126
                                                                                
127
    asm volatile ("0:\n\t"                   /* retry local label     */
128
                  "lwarx  %0,0,%1\n\t"       /* load prev and reserve */
129
                  "cmpw   %0,%3\n\t"         /* does it match cmp?    */
130
                  "bne-   1f\n\t"            /* ...no, bail out       */
131
                  "stwcx. %2,0,%1\n\t"       /* ...yes, conditionally
132
                                                store swap            */
133
                  "bne-   0b\n\t"            /* start over if we lost
134
                                                the reservation       */
135
                  "1:"                       /* exit local label      */
136
137
                  : "=&r"(prev)                        /* output      */
138
                  : "b" (mem), "r" (swap), "r"(cmp)    /* inputs      */
139
                  : "memory", "cc");                   /* clobbered   */
140
    return prev;
141
}
142
#define APR_OVERRIDE_ATOMIC_CAS32
143
144
APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem,
145
                                           apr_uint32_t delta)
146
{
147
    apr_uint32_t prev, temp;
148
                                                                                
149
    asm volatile ("0:\n\t"                   /* retry local label     */
150
                  "lwarx  %0,0,%2\n\t"       /* load prev and reserve */
151
                  "add    %1,%0,%3\n\t"      /* temp = prev + delta   */
152
                  "stwcx. %1,0,%2\n\t"       /* conditionally store   */
153
                  "bne-   0b"                /* start over if we lost
154
                                                the reservation       */
155
156
                  /*XXX find a cleaner way to define the temp         
157
                   *    it's not an output
158
                   */
159
                  : "=&r" (prev), "=&r" (temp)        /* output, temp */
160
                  : "b" (mem), "r" (delta)            /* inputs       */
161
                  : "memory", "cc");                  /* clobbered    */
162
    return prev;
163
}
164
#define APR_OVERRIDE_ATOMIC_ADD32
165
166
#endif /* __PPC__ && __GNUC__ */
167
168
#if (defined(SOLARIS2) && SOLARIS2 >= 10) \
169
    && !defined(USE_GENERIC_ATOMICS)
170
171
#if !defined(APR_OVERRIDE_ATOMIC_CAS32)
172
APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem,
173
                                           apr_uint32_t with,
174
                                           apr_uint32_t cmp)
175
{
176
    return atomic_cas_32(mem, cmp, with);
177
}
178
#define APR_OVERRIDE_ATOMIC_CAS32
179
#endif /* APR_OVERRIDE_ATOMIC_CAS32 */
180
181
#if !defined(APR_OVERRIDE_ATOMIC_DEC32)
182
/* Solaris native implementation.  Returns nonzero until the counter
 * hits zero. */
APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
{
    /* Fix: the old read-then-decrement (prev = *mem; atomic_dec_32(mem);
     * return prev != 1;) raced — another thread could modify *mem
     * between the plain load and the atomic decrement, making the
     * "reached zero" indication wrong.  atomic_dec_32_nv() decrements
     * and returns the new value in one atomic step. */
    return atomic_dec_32_nv(mem) != 0;
}
188
#define APR_OVERRIDE_ATOMIC_DEC32
189
#endif /* APR_OVERRIDE_ATOMIC_DEC32 */
190
191
#if !defined(APR_OVERRIDE_ATOMIC_INC32)
192
/* Solaris native implementation.  Returns the pre-increment value. */
APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
{
    /* Fix: the old sequence (prev = *mem; atomic_inc_32(mem);
     * return prev;) raced — *mem could change between the plain load
     * and the atomic increment.  atomic_inc_32_nv() returns the new
     * value atomically; subtract 1 to recover the old value. */
    return atomic_inc_32_nv(mem) - 1;
}
198
#define APR_OVERRIDE_ATOMIC_INC32
199
#endif /* APR_OVERRIDE_ATOMIC_INC32 */
200
201
#if !defined(APR_OVERRIDE_ATOMIC_SET32)
202
APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
203
{
204
    *mem = val;
205
}
206
#define APR_OVERRIDE_ATOMIC_SET32
207
#endif /* APR_OVERRIDE_ATOMIC_SET32 */
208
209
#if !defined(APR_OVERRIDE_ATOMIC_XCHG32)
210
APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem,
211
                                            apr_uint32_t val) 
212
{
213
    return atomic_swap_32(mem, val);
214
}
215
#define APR_OVERRIDE_ATOMIC_XCHG32
216
#endif /* APR_OVERRIDE_ATOMIC_XCHG32 */
217
218
#endif /* SOLARIS2 && SOLARIS2 >= 10 */
219
220
#if !defined(APR_OVERRIDE_ATOMIC_INIT)
221
222
#if APR_HAS_THREADS
223
#define NUM_ATOMIC_HASH 7
224
/* shift by 2 to get rid of alignment issues */
225
#define ATOMIC_HASH(x) (unsigned int)(((unsigned long)(x)>>2)%(unsigned int)NUM_ATOMIC_HASH)
226
static apr_thread_mutex_t **hash_mutex;
227
#endif /* APR_HAS_THREADS */
228
229
#if APR_HAS_THREADS
230
static apr_status_t atomic_cleanup(void *data)
231
{
232
    if (hash_mutex == data)
233
        hash_mutex = NULL;
234
235
    return APR_SUCCESS;
236
}
237
#endif
238
239
apr_status_t apr_atomic_init(apr_pool_t *p)
240
{
241
#if APR_HAS_THREADS
242
    int i;
243
    apr_status_t rv;
244
245
    if (hash_mutex != NULL)
246
        return APR_SUCCESS;
247
248
    hash_mutex = apr_palloc(p, sizeof(apr_thread_mutex_t*) * NUM_ATOMIC_HASH);
249
    apr_pool_cleanup_register(p, hash_mutex, atomic_cleanup,
250
                              apr_pool_cleanup_null);
251
252
    for (i = 0; i < NUM_ATOMIC_HASH; i++) {
253
        rv = apr_thread_mutex_create(&(hash_mutex[i]),
254
                                     APR_THREAD_MUTEX_DEFAULT, p);
255
        if (rv != APR_SUCCESS) {
256
           return rv;
257
        }
258
    }
259
#endif /* APR_HAS_THREADS */
260
    return APR_SUCCESS;
261
}
262
#endif /* !defined(APR_OVERRIDE_ATOMIC_INIT) */
263
264
/* abort() if 'x' does not evaluate to APR_SUCCESS. */
265
#define CHECK(x) do { if ((x) != APR_SUCCESS) abort(); } while (0)
266
267
#if !defined(APR_OVERRIDE_ATOMIC_ADD32)
268
#if defined(APR_OVERRIDE_ATOMIC_CAS32)
269
apr_uint32_t apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
270
{
271
    apr_uint32_t old_value, new_value;
272
    
273
    do {
274
        old_value = *mem;
275
        new_value = old_value + val;
276
    } while (apr_atomic_cas32(mem, new_value, old_value) != old_value);
277
    return old_value;
278
}
279
#else
280
apr_uint32_t apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
281
{
282
    apr_uint32_t old_value;
283
284
#if APR_HAS_THREADS
285
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
286
       
287
    CHECK(apr_thread_mutex_lock(lock));
288
    old_value = *mem;
289
    *mem += val;
290
    CHECK(apr_thread_mutex_unlock(lock));
291
#else
292
    old_value = *mem;
293
    *mem += val;
294
#endif /* APR_HAS_THREADS */
295
    return old_value;
296
}
297
#endif /* defined(APR_OVERRIDE_ATOMIC_CAS32) */
298
#endif /* !defined(APR_OVERRIDE_ATOMIC_ADD32) */
299
300
#if !defined(APR_OVERRIDE_ATOMIC_SUB32)
301
#if defined(APR_OVERRIDE_ATOMIC_CAS32)
302
void apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
303
{
304
    apr_uint32_t old_value, new_value;
305
    
306
    do {
307
        old_value = *mem;
308
        new_value = old_value - val;
309
    } while (apr_atomic_cas32(mem, new_value, old_value) != old_value);
310
}
311
#else
312
void apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val) 
313
{
314
#if APR_HAS_THREADS
315
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
316
       
317
    CHECK(apr_thread_mutex_lock(lock));
318
    *mem -= val;
319
    CHECK(apr_thread_mutex_unlock(lock));
320
#else
321
    *mem -= val;
322
#endif /* APR_HAS_THREADS */
323
}
324
#endif /* defined(APR_OVERRIDE_ATOMIC_CAS32) */
325
#endif /* !defined(APR_OVERRIDE_ATOMIC_SUB32) */
326
327
#if !defined(APR_OVERRIDE_ATOMIC_SET32)
328
void apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val) 
329
{
330
#if APR_HAS_THREADS
331
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
332
333
    CHECK(apr_thread_mutex_lock(lock));
334
    *mem = val;
335
    CHECK(apr_thread_mutex_unlock(lock));
336
#else
337
    *mem = val;
338
#endif /* APR_HAS_THREADS */
339
}
340
#endif /* !defined(APR_OVERRIDE_ATOMIC_SET32) */
341
342
#if !defined(APR_OVERRIDE_ATOMIC_INC32)
343
apr_uint32_t apr_atomic_inc32(volatile apr_uint32_t *mem) 
344
{
345
    return apr_atomic_add32(mem, 1);
346
}
347
#endif /* !defined(APR_OVERRIDE_ATOMIC_INC32) */
348
349
#if !defined(APR_OVERRIDE_ATOMIC_DEC32)
350
#if defined(APR_OVERRIDE_ATOMIC_CAS32)
351
int apr_atomic_dec32(volatile apr_uint32_t *mem)
352
{
353
    apr_uint32_t old_value, new_value;
354
    
355
    do {
356
        old_value = *mem;
357
        new_value = old_value - 1;
358
    } while (apr_atomic_cas32(mem, new_value, old_value) != old_value);
359
    return old_value != 1;
360
}
361
#else
362
int apr_atomic_dec32(volatile apr_uint32_t *mem) 
363
{
364
#if APR_HAS_THREADS
365
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
366
    apr_uint32_t new;
367
368
    CHECK(apr_thread_mutex_lock(lock));
369
    (*mem)--;
370
    new = *mem;
371
    CHECK(apr_thread_mutex_unlock(lock));
372
    return new;
373
#else
374
    (*mem)--;
375
    return *mem; 
376
#endif /* APR_HAS_THREADS */
377
}
378
#endif /* defined(APR_OVERRIDE_ATOMIC_CAS32) */
379
#endif /* !defined(APR_OVERRIDE_ATOMIC_DEC32) */
380
381
#if !defined(APR_OVERRIDE_ATOMIC_CAS32)
382
apr_uint32_t apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
383
			      apr_uint32_t cmp)
384
{
385
    apr_uint32_t prev;
386
#if APR_HAS_THREADS
387
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
388
389
    CHECK(apr_thread_mutex_lock(lock));
390
    prev = *mem;
391
    if (prev == cmp) {
392
        *mem = with;
393
    }
394
    CHECK(apr_thread_mutex_unlock(lock));
395
#else
396
    prev = *mem;
397
    if (prev == cmp) {
398
        *mem = with;
399
    }
400
#endif /* APR_HAS_THREADS */
401
    return prev;
402
}
403
#endif /* !defined(APR_OVERRIDE_ATOMIC_CAS32) */
404
405
#if !defined(APR_OVERRIDE_ATOMIC_XCHG32)
406
#if defined(APR_OVERRIDE_ATOMIC_CAS32)
407
apr_uint32_t apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
408
{
409
    apr_uint32_t prev;
410
    do {
411
        prev = *mem;
412
    } while (apr_atomic_cas32(mem, val, prev) != prev);
413
    return prev;
414
}
415
#else
416
apr_uint32_t apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
417
{
418
    apr_uint32_t prev;
419
#if APR_HAS_THREADS
420
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
421
422
    CHECK(apr_thread_mutex_lock(lock));
423
    prev = *mem;
424
    *mem = val;
425
    CHECK(apr_thread_mutex_unlock(lock));
426
#else
427
    prev = *mem;
428
    *mem = val;
429
#endif /* APR_HAS_THREADS */
430
    return prev;
431
}
432
#endif /* defined(APR_OVERRIDE_ATOMIC_CAS32) */
433
#endif /* !defined(APR_OVERRIDE_ATOMIC_XCHG32) */
434
435
#if !defined(APR_OVERRIDE_ATOMIC_CASPTR)
436
void *apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
437
{
438
    void *prev;
439
#if APR_HAS_THREADS
440
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
441
442
    CHECK(apr_thread_mutex_lock(lock));
443
    prev = *(void **)mem;
444
    if (prev == cmp) {
445
        *mem = with;
446
    }
447
    CHECK(apr_thread_mutex_unlock(lock));
448
#else
449
    prev = *(void **)mem;
450
    if (prev == cmp) {
451
        *mem = with;
452
    }
453
#endif /* APR_HAS_THREADS */
454
    return prev;
455
}
456
#endif /* !defined(APR_OVERRIDE_ATOMIC_CASPTR) */
457
458
#if !defined(APR_OVERRIDE_ATOMIC_READ32)
459
APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
460
{
461
    return *mem;
462
}
463
#endif
464
(-)apr/include/arch/unix/apr_arch_atomic.h (+35 lines)
Line 0 Link Here
1
/* Licensed to the Apache Software Foundation (ASF) under one or more
2
 * contributor license agreements.  See the NOTICE file distributed with
3
 * this work for additional information regarding copyright ownership.
4
 * The ASF licenses this file to You under the Apache License, Version 2.0
5
 * (the "License"); you may not use this file except in compliance with
6
 * the License.  You may obtain a copy of the License at
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#ifndef ATOMIC_H
#define ATOMIC_H

#include "apr.h"
#include "apr_private.h"
#include "apr_atomic.h"
#include "apr_thread_mutex.h"

/* Select which atomics implementation the .c files build.
 * NOTE(review): as written, both the #elif and the #else branch define
 * USE_ATOMICS_GENERIC, so the generic mutex-based implementation is
 * always chosen regardless of platform — presumably platform-specific
 * selections are to be inserted before the final #else; confirm. */
#if defined(USE_ATOMICS_GENERIC)
/* noop */
#elif defined(__GNUC__) && defined(__STRICT_ANSI__)
/* force use of generic atomics if building e.g. with -std=c89, which
 * doesn't allow inline asm */
#   define USE_ATOMICS_GENERIC
#else
#   define USE_ATOMICS_GENERIC
#endif

#endif /* ATOMIC_H */
(-)apr/configure.in (-1 / +1 lines)
Lines 495-501 esac Link Here
495
])
495
])
496
496
497
if test $force_generic_atomics = yes; then
497
if test $force_generic_atomics = yes; then
498
   AC_DEFINE([USE_GENERIC_ATOMICS], 1,
498
   AC_DEFINE([USE_ATOMICS_GENERIC], 1,
499
             [Define if use of generic atomics is requested])
499
             [Define if use of generic atomics is requested])
500
fi
500
fi
501
501

Return to bug 42806