Remove volatile qualifiers: the Win32 Interlocked* functions by default provide full memory barrier semantics. Note that apr_atomic_casptr() additionally drops the const qualifier from its cmp argument, matching the signature of InterlockedCompareExchangePointer(). Index: apr/atomic/win32/apr_atomic.c =================================================================== --- apr.orig/atomic/win32/apr_atomic.c +++ apr/atomic/win32/apr_atomic.c @@ -28,18 +28,18 @@ APR_DECLARE(apr_status_t) apr_atomic_ini * as the arguments for as our apr_atomic_foo32 Functions */ typedef WINBASEAPI apr_uint32_t (WINAPI * apr_atomic_win32_ptr_fn) - (apr_uint32_t volatile *); + (apr_uint32_t *); typedef WINBASEAPI apr_uint32_t (WINAPI * apr_atomic_win32_ptr_val_fn) - (apr_uint32_t volatile *, + (apr_uint32_t *, apr_uint32_t); typedef WINBASEAPI apr_uint32_t (WINAPI * apr_atomic_win32_ptr_val_val_fn) - (apr_uint32_t volatile *, + (apr_uint32_t *, apr_uint32_t, apr_uint32_t); typedef WINBASEAPI void * (WINAPI * apr_atomic_win32_ptr_ptr_ptr_fn) - (volatile void **, + (void **, void *, const void *); -APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val) +APR_DECLARE(apr_uint32_t) apr_atomic_add32(apr_uint32_t *mem, apr_uint32_t val) { #if (defined(_M_IA64) || defined(_M_AMD64)) return InterlockedExchangeAdd(mem, val); @@ -53,7 +53,7 @@ APR_DECLARE(apr_uint32_t) apr_atomic_add /* Of course we want the 2's compliment of the unsigned value, val */ #pragma warning(disable: 4146) -APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val) +APR_DECLARE(void) apr_atomic_sub32(apr_uint32_t *mem, apr_uint32_t val) { #if (defined(_M_IA64) || defined(_M_AMD64)) InterlockedExchangeAdd(mem, -val); @@ -64,7 +64,7 @@ APR_DECLARE(void) apr_atomic_sub32(volat #endif } -APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem) +APR_DECLARE(apr_uint32_t) apr_atomic_inc32(apr_uint32_t *mem) { /* we return old value, win32 returns new value :( */ #if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED) @@ -76,7 +76,7 @@ APR_DECLARE(apr_uint32_t) apr_atomic_inc #endif } 
-APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem) +APR_DECLARE(int) apr_atomic_dec32(apr_uint32_t *mem) { #if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED) return InterlockedDecrement(mem); @@ -87,7 +87,7 @@ APR_DECLARE(int) apr_atomic_dec32(volati #endif } -APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val) +APR_DECLARE(void) apr_atomic_set32(apr_uint32_t *mem, apr_uint32_t val) { #if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED) InterlockedExchange(mem, val); @@ -98,12 +98,12 @@ APR_DECLARE(void) apr_atomic_set32(volat #endif } -APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem) +APR_DECLARE(apr_uint32_t) apr_atomic_read32(apr_uint32_t *mem) { return *mem; } -APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with, +APR_DECLARE(apr_uint32_t) apr_atomic_cas32(apr_uint32_t *mem, apr_uint32_t with, apr_uint32_t cmp) { #if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED) @@ -115,7 +115,7 @@ APR_DECLARE(apr_uint32_t) apr_atomic_cas #endif } -APR_DECLARE(void *) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp) +APR_DECLARE(void *) apr_atomic_casptr(void **mem, void *with, void *cmp) { #if (defined(_M_IA64) || defined(_M_AMD64) || defined(__MINGW32__)) && !defined(RC_INVOKED) return InterlockedCompareExchangePointer((void**)mem, with, (void*)cmp); @@ -125,7 +125,7 @@ APR_DECLARE(void *) apr_atomic_casptr(vo #endif } -APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val) +APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(apr_uint32_t *mem, apr_uint32_t val) { #if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED) return InterlockedExchange(mem, val);