Remove the volatile qualifiers and issue compiler memory barriers where
appropriate to prevent reordering.  volatile constrains only the compiler
and gives no cross-CPU ordering guarantees; on IA-32 the lock-prefixed
instructions already act as full hardware barriers, so compiler barriers
are all that the plain load/store operations need.

Index: apr/atomic/unix/ia32.c
===================================================================
--- apr.orig/atomic/unix/ia32.c
+++ apr/atomic/unix/ia32.c
@@ -18,22 +18,32 @@
 
 #ifdef USE_ATOMICS_IA32
 
+#ifdef __INTEL_COMPILER
+# define barrier() __memory_barrier()
+#else
+# define barrier() asm volatile ("": : :"memory")
+#endif
+
 APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
 {
     return APR_SUCCESS;
 }
 
-APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
+APR_DECLARE(apr_uint32_t) apr_atomic_read32(apr_uint32_t *mem)
 {
+    barrier();
+
     return *mem;
 }
 
-APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
+APR_DECLARE(void) apr_atomic_set32(apr_uint32_t *mem, apr_uint32_t val)
 {
     *mem = val;
+
+    barrier();
 }
 
-APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
+APR_DECLARE(apr_uint32_t) apr_atomic_add32(apr_uint32_t *mem, apr_uint32_t val)
 {
     asm volatile ("lock; xaddl %0,%1"
                   : "=r" (val), "=m" (*mem)
@@ -42,7 +52,7 @@ APR_DECLARE(apr_uint32_t) apr_atomic_add
     return val;
 }
 
-APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
+APR_DECLARE(void) apr_atomic_sub32(apr_uint32_t *mem, apr_uint32_t val)
 {
     asm volatile ("lock; subl %1, %0"
                   : /* no output */
@@ -50,12 +60,12 @@ APR_DECLARE(void) apr_atomic_sub32(volat
                   : "memory", "cc");
 }
 
-APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
+APR_DECLARE(apr_uint32_t) apr_atomic_inc32(apr_uint32_t *mem)
 {
     return apr_atomic_add32(mem, 1);
 }
 
-APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
+APR_DECLARE(int) apr_atomic_dec32(apr_uint32_t *mem)
 {
     unsigned char prev;
 
@@ -67,7 +77,7 @@ APR_DECLARE(int) apr_atomic_dec32(volati
     return prev;
 }
 
-APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
+APR_DECLARE(apr_uint32_t) apr_atomic_cas32(apr_uint32_t *mem, apr_uint32_t with,
                                            apr_uint32_t cmp)
 {
     apr_uint32_t prev;
@@ -79,7 +89,7 @@ APR_DECLARE(apr_uint32_t) apr_atomic_cas
     return prev;
 }
 
-APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
+APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(apr_uint32_t *mem, apr_uint32_t val)
 {
     apr_uint32_t prev = val;
 
@@ -90,18 +100,20 @@ APR_DECLARE(apr_uint32_t) apr_atomic_xch
     return prev;
 }
 
-APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
+APR_DECLARE(void*) apr_atomic_casptr(void **mem, void *with, void *cmp)
 {
     void *prev;
 #if APR_SIZEOF_VOIDP == 4
     asm volatile ("lock; cmpxchgl %2, %1"
                   : "=a" (prev), "=m" (*mem)
-                  : "r" (with), "m" (*mem), "0" (cmp));
+                  : "r" (with), "m" (*mem), "0" (cmp)
+                  : "memory");
 #elif APR_SIZEOF_VOIDP == 8
     asm volatile ("lock; cmpxchgq %q2, %1"
                   : "=a" (prev), "=m" (*mem)
                   : "r" ((unsigned long)with), "m" (*mem),
-                    "0" ((unsigned long)cmp));
+                    "0" ((unsigned long)cmp)
+                  : "memory");
 #else
 #error APR_SIZEOF_VOIDP value not supported
 #endif
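
For context (not part of the patch): apr_atomic_read32() can lose its
volatile qualifier because the "memory" clobber in barrier() already tells
the compiler that all of memory may have changed, so a value cannot stay
cached in a register across the barrier.  A minimal sketch of the polling
pattern this protects, assuming the patched headers (the ready flag and
wait_for_ready() are hypothetical names, not APR API):

#include "apr_atomic.h"

static apr_uint32_t ready;

void wait_for_ready(void)
{
    /* Each call runs barrier() before the load; the "memory" clobber
     * forces *mem to be reloaded on every iteration instead of being
     * cached in a register, which is the job volatile used to do.
     * With neither volatile nor the barrier, an inlined plain load
     * could legally be hoisted out of the loop, spinning forever. */
    while (apr_atomic_read32(&ready) == 0)
        ; /* spin until another thread calls apr_atomic_set32(&ready, 1) */
}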
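
The lock-prefixed operations (xaddl, subl, cmpxchgl, xchgl) need no
barrier() of their own, since a locked instruction is itself a full
hardware barrier on IA-32.  A sketch of the classic reference-counting
use, again assuming the patched headers (the object type and helpers are
hypothetical, not APR API):

#include <stdlib.h>
#include "apr_atomic.h"

typedef struct object {
    apr_uint32_t refcount;      /* touched only through the atomics */
    /* ... payload ... */
} object;

static object *object_new(void)
{
    object *obj = calloc(1, sizeof(*obj));
    if (obj)
        apr_atomic_set32(&obj->refcount, 1);   /* creator holds one ref */
    return obj;
}

static void object_ref(object *obj)
{
    apr_atomic_inc32(&obj->refcount);          /* lock; xaddl */
}

static void object_unref(object *obj)
{
    /* apr_atomic_dec32() returns zero only when the decremented value
     * reaches zero; the locked decrement also orders all earlier
     * accesses to *obj, so the last owner can safely free it. */
    if (apr_atomic_dec32(&obj->refcount) == 0)
        free(obj);
}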