
Add protections around 32- and 64-bit code. The 32-bit guard is not really useful, but I want to keep both of them similar. Anyway, some architectures do not provide 64-bit atomic operations, so those functions will never get compiled. Let's just hope nobody uses them ...

- remove all unsigned from the */atomic.h files
- export defines depending on the architecture

This commit was SVN r3408.
This commit is contained in:
George Bosilca 2004-10-28 20:32:12 +00:00
parent 79c9eca6b7
commit a267e2671a
8 changed files with 277 additions and 257 deletions
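To make the scheme concrete: each architecture header advertises the compare-and-set widths it implements, and the generic header compiles the derived operations only when the primitive exists. A minimal sketch (illustrative, not the literal file contents):

/* In an architecture's atomic.h: */
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32
/* #define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64   only if the CPU has it */

/* In the generic include/sys/atomic.h: without the 64-bit primitive the
 * derived functions are never emitted, so they cannot fail to compile. */
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64)
#if !defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_ADD_64)
static inline int64_t ompi_atomic_add_64(volatile int64_t *addr, int delta)
{
    int64_t oldval;
    do {
        oldval = *addr;
    } while (0 == ompi_atomic_cmpset_64(addr, oldval, oldval + delta));
    return (oldval + delta);
}
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_ADD_64 */
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64 */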

View file

@@ -41,14 +41,13 @@ static inline void ompi_atomic_wmb(void)
WMB();
}
static inline int ompi_atomic_cmpset_32(volatile uint32_t *addr,
uint32_t oldval,
uint32_t newval)
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32
static inline int ompi_atomic_cmpset_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
uint32_t ret;
__asm __volatile__ (
int32_t ret;
__asm __volatile__ (
"1: ldl_l %0, %1 // load oldval value \n\
cmpeq %0, %2, %0 // compare \n\
beq %0, 2f // exit if not equal \n\
@@ -66,9 +65,9 @@ static inline int ompi_atomic_cmpset_32(volatile uint32_t *addr,
}
static inline int ompi_atomic_cmpset_acq_32(volatile uint32_t *addr,
uint32_t oldval,
uint32_t newval)
static inline int ompi_atomic_cmpset_acq_32(volatile int32_t *addr,
int32_t oldval,
int32_t newval)
{
int rc;
@@ -79,20 +78,19 @@ static inline int ompi_atomic_cmpset_acq_32(volatile uint32_t *addr,
}
static inline int ompi_atomic_cmpset_rel_32(volatile uint32_t *addr,
uint32_t oldval,
uint32_t newval)
static inline int ompi_atomic_cmpset_rel_32(volatile int32_t *addr,
int32_t oldval,
int32_t newval)
{
ompi_atomic_wmb();
return ompi_atomic_cmpset_32(addr, oldval, newval);
}
static inline int ompi_atomic_cmpset_64(volatile uint64_t *addr,
uint64_t oldval,
uint64_t newval)
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64
static inline int ompi_atomic_cmpset_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
uint32_t ret;
int32_t ret;
__asm__ __volatile__ (
"1: ldq_l %0, %1 // load oldval value \n\
@@ -112,9 +110,9 @@ static inline int ompi_atomic_cmpset_64(volatile uint64_t *addr,
}
static inline int ompi_atomic_cmpset_acq_64(volatile uint64_t *addr,
uint64_t oldval,
uint64_t newval)
static inline int ompi_atomic_cmpset_acq_64(volatile int64_t *addr,
int64_t oldval,
int64_t newval)
{
int rc;
@@ -125,13 +123,12 @@ static inline int ompi_atomic_cmpset_acq_64(volatile uint64_t *addr,
}
static inline int ompi_atomic_cmpset_rel_64(volatile uint64_t *addr,
uint64_t oldval,
uint64_t newval)
static inline int ompi_atomic_cmpset_rel_64(volatile int64_t *addr,
int64_t oldval,
int64_t newval)
{
ompi_atomic_wmb();
return ompi_atomic_cmpset_64(addr, oldval, newval);
}
#endif /* ! OMPI_SYS_ARCH_ATOMIC_H */

View file

@@ -36,9 +36,9 @@ static inline void ompi_atomic_wmb(void)
MB();
}
static inline int ompi_atomic_cmpset_32(volatile uint32_t *addr,
uint32_t oldval,
uint32_t newval)
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32
static inline int ompi_atomic_cmpset_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
unsigned long prev;
__asm__ __volatile__(SMPLOCK "cmpxchgl %k1,%2"
@@ -51,11 +51,11 @@ static inline int ompi_atomic_cmpset_32(volatile uint32_t *addr,
#define ompi_atomic_cmpset_acq_32 ompi_atomic_cmpset_32
#define ompi_atomic_cmpset_rel_32 ompi_atomic_cmpset_32
static inline int ompi_atomic_cmpset_64(volatile uint64_t *addr,
uint64_t oldval,
uint64_t newval)
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64
static inline int ompi_atomic_cmpset_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
uint64_t ret = oldval;
int64_t ret = oldval;
__asm__ __volatile (
SMPLOCK "cmpxchgq %1,%2 \n\t"

View file

@@ -149,6 +149,7 @@ static inline void ompi_atomic_unlock(ompi_lock_t *lock);
*/
#if !defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_ADD_32)
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32)
static inline int32_t ompi_atomic_add_32(volatile int32_t *addr, int delta)
{
int32_t oldval;
@@ -158,9 +159,11 @@ static inline int32_t ompi_atomic_add_32(volatile int32_t *addr, int delta)
} while (0 == ompi_atomic_cmpset_32(addr, oldval, oldval + delta));
return (oldval + delta);
}
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32 */
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_ADD_32 */
#if !defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_SUB_32)
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32)
static inline int32_t ompi_atomic_sub_32(volatile int32_t *addr, int delta)
{
int32_t oldval;
@@ -170,8 +173,14 @@ static inline int32_t ompi_atomic_sub_32(volatile int32_t *addr, int delta)
} while (0 == ompi_atomic_cmpset_32(addr, oldval, oldval - delta));
return (oldval - delta);
}
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32 */
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_SUB_32 */
/* Some architectures do not provide support for the 64-bit
* atomic operations. Until we find a better solution let's just
* undefine all those functions.
*/
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64)
#if !defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_ADD_64)
static inline int64_t ompi_atomic_add_64(volatile int64_t *addr, int delta)
{
@@ -195,17 +204,23 @@ static inline int64_t ompi_atomic_sub_64(volatile int64_t *addr, int delta)
return (oldval - delta);
}
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_SUB_64 */
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64 */
static inline int ompi_atomic_cmpset_xx( volatile void* addr, int64_t oldval,
int64_t newval, size_t length )
{
switch( length ) {
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32)
case 4:
return ompi_atomic_cmpset_32( (volatile int32_t*)addr,
(int32_t)oldval, (int32_t)newval );
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32 */
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64)
case 8:
return ompi_atomic_cmpset_64( (volatile int64_t*)addr,
(int64_t)oldval, (int64_t)newval );
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64 */
default:
*(int*)(NULL) = 0;
}
@@ -230,12 +245,17 @@ static inline int ompi_atomic_cmpset_acq_xx( volatile void* addr, int64_t oldval
int64_t newval, size_t length )
{
switch( length ) {
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32)
case 4:
return ompi_atomic_cmpset_acq_32( (volatile int32_t*)addr,
(int32_t)oldval, (int32_t)newval );
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32 */
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64)
case 8:
return ompi_atomic_cmpset_acq_64( (volatile int64_t*)addr,
(int64_t)oldval, (int64_t)newval );
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64 */
default:
*(int*)(NULL) = 0;
}
@@ -260,12 +280,17 @@ static inline int ompi_atomic_cmpset_rel_xx( volatile void* addr, int64_t oldval
int64_t newval, size_t length )
{
switch( length ) {
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32)
case 4:
return ompi_atomic_cmpset_rel_32( (volatile int32_t*)addr,
(int32_t)oldval, (int32_t)newval );
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32 */
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64)
case 8:
return ompi_atomic_cmpset_rel_64( (volatile int64_t*)addr,
(int64_t)oldval, (int64_t)newval );
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64 */
default:
*(int*)(NULL) = 0;
}
@@ -289,12 +314,17 @@ static inline int ompi_atomic_cmpset_rel_xx( volatile void* addr, int64_t oldval
static inline void ompi_atomic_add_xx( volatile void* addr, int32_t value, size_t length )
{
switch( length ) {
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32)
case 4:
ompi_atomic_add_32( (volatile int32_t*)addr, (int32_t)value );
break;
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32 */
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64)
case 8:
ompi_atomic_add_64( (volatile int64_t*)addr, (int64_t)value );
break;
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64 */
default:
*(int*)(NULL) = 0;
}
@@ -314,12 +344,17 @@ static inline void ompi_atomic_add_xx( volatile void* addr, int32_t value, size_
static inline void ompi_atomic_sub_xx( volatile void* addr, int32_t value, size_t length )
{
switch( length ) {
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32)
case 4:
ompi_atomic_sub_32( (volatile int32_t*)addr, (int32_t)value );
break;
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32 */
#if defined(OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64)
case 8:
ompi_atomic_sub_64( (volatile int64_t*)addr, (int64_t)value );
break;
#endif /* OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64 */
default:
*(int*)(NULL) = 0;
}
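Since the _xx dispatchers take an explicit length, a natural front end is a macro that passes sizeof(*(ADDR)); the definition below is a plausible sketch, not necessarily the header's exact code. On an architecture that does not export OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64, a 64-bit call compiles down to the 'default' branch and its deliberate crash.

#define ompi_atomic_cmpset( ADDR, OLDVAL, NEWVAL )                       \
    ompi_atomic_cmpset_xx( (volatile void*)(ADDR), (int64_t)(OLDVAL),    \
                           (int64_t)(NEWVAL), sizeof(*(ADDR)) )

static int example(void)
{
    volatile int32_t counter = 0;
    /* sizeof(*(&counter)) == 4, so this reaches ompi_atomic_cmpset_32 */
    return ompi_atomic_cmpset(&counter, 0, 1);  /* 1 on success */
}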

View file

@@ -36,7 +36,7 @@ static inline void ompi_atomic_wmb(void)
MB();
}
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32
static inline int ompi_atomic_cmpset_32(volatile int32_t *addr,
int32_t oldval,
int32_t newval)
@@ -55,6 +55,7 @@ static inline int ompi_atomic_cmpset_32(volatile int32_t *addr,
#define ompi_atomic_cmpset_acq_32 ompi_atomic_cmpset_32
#define ompi_atomic_cmpset_rel_32 ompi_atomic_cmpset_32
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64
typedef struct {
uint32_t lo;
uint32_t hi;
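The lo/hi pair mirrors how ia32's CMPXCHG8B instruction handles a 64-bit value as two 32-bit register halves: EDX:EAX holds the expected value and ECX:EBX the replacement. A hedged sketch of such an implementation (my_cmpset_64 is an illustrative name; "lock;" stands in for SMPLOCK):

static inline int my_cmpset_64(volatile int64_t *addr,
                               int64_t oldval, int64_t newval)
{
    int64_t prev = oldval;                       /* in EDX:EAX via "A" */
    __asm__ __volatile__("lock; cmpxchg8b %1"
                         : "+A" (prev), "+m" (*addr)
                         : "b" ((uint32_t) newval),        /* low word  */
                           "c" ((uint32_t)(newval >> 32))  /* high word */
                         : "cc", "memory");
    return (prev == oldval);
}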

View file

@@ -34,9 +34,9 @@ static inline void ompi_atomic_wmb(void)
MB();
}
static inline int ompi_atomic_cmpset_acq_32(volatile uint32_t *addr,
uint32_t oldval,
uint32_t newval)
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32
static inline int ompi_atomic_cmpset_acq_32( volatile uint32_t *addr,
uint32_t oldval, uint32_t newval)
{
uint32_t ret;
@@ -51,9 +51,8 @@ static inline int ompi_atomic_cmpset_acq_32(volatile uint32_t *addr,
}
static inline int ompi_atomic_cmpset_rel_32(volatile uint32_t *addr,
uint32_t oldval,
uint32_t newval)
static inline int ompi_atomic_cmpset_rel_32( volatile uint32_t *addr,
uint32_t oldval, uint32_t newval)
{
uint32_t ret;
@@ -70,9 +69,9 @@ static inline int ompi_atomic_cmpset_rel_32(volatile uint32_t *addr,
#define ompi_atomic_cmpset_32 ompi_atomic_cmpset_acq_32
static inline int ompi_atomic_cmpset_acq_64(volatile uint64_t *addr,
uint64_t oldval,
uint64_t newval)
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64
static inline int ompi_atomic_cmpset_acq_64( volatile uint64_t *addr,
uint64_t oldval, uint64_t newval)
{
uint64_t ret;
@@ -87,9 +86,8 @@ static inline int ompi_atomic_cmpset_acq_64(volatile uint64_t *addr,
}
static inline int ompi_atomic_cmpset_rel_64(volatile uint64_t *addr,
uint64_t oldval,
uint64_t newval)
static inline int ompi_atomic_cmpset_rel_64( volatile uint64_t *addr,
uint64_t oldval, uint64_t newval)
{
uint64_t ret;

View file

@@ -14,12 +14,16 @@
#define MB() __asm__ __volatile__ ("sync" : : : "memory")
#define RMB() __asm__ __volatile__ ("lwsync" : : : "memory")
#define WMB() __asm__ __volatile__ ("eieio" : : : "memory")
#define SMP_SYNC "sync \n\t"
#define SMP_ISYNC "\n\tisync"
#else
#define MB()
#define RMB()
#define WMB()
#define SMP_SYNC ""
#define SMP_ISYNC
#endif
@@ -41,30 +45,30 @@ static inline void ompi_atomic_wmb(void)
WMB();
}
static inline int ompi_atomic_cmpset_32(volatile int32_t *addr,
int32_t oldval,
int32_t newval)
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32
static inline int ompi_atomic_cmpset_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
int32_t ret;
int32_t ret;
__asm__ __volatile__ (
"1: lwarx %0, 0, %2 \n\
cmpw 0, %0, %3 \n\
bne- 2f \n\
stwcx. %4, 0, %2 \n\
bne- 1b \n\
2:"
: "=&r" (ret), "=m" (*addr)
: "r" (addr), "r" (oldval), "r" (newval), "m" (*addr)
: "cc", "memory");
__asm__ __volatile__ (
"1: lwarx %0, 0, %2 \n\t"
" cmpw 0, %0, %3 \n\t"
" bne- 2f \n\t"
" stwcx. %4, 0, %2 \n\t"
" bne- 1b \n\t"
SMP_SYNC
"2:"
: "=&r" (ret), "=m" (*addr)
: "r" (addr), "r" (oldval), "r" (newval), "m" (*addr)
: "cc", "memory");
return (ret == oldval);
return (ret == oldval);
}
static inline int ompi_atomic_cmpset_acq_32(volatile int32_t *addr,
int32_t oldval,
int32_t newval)
static inline int ompi_atomic_cmpset_acq_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
int rc;
@@ -75,39 +79,37 @@ static inline int ompi_atomic_cmpset_acq_32(volatile int32_t *addr,
}
static inline int ompi_atomic_cmpset_rel_32(volatile int32_t *addr,
int32_t oldval,
int32_t newval)
static inline int ompi_atomic_cmpset_rel_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
ompi_atomic_wmb();
return ompi_atomic_cmpset_32(addr, oldval, newval);
}
static inline int ompi_atomic_cmpset_64(volatile int64_t *addr,
int64_t oldval,
int64_t newval)
#if HOW_TO_DECIDE_IF_THE_ARCHI_SUPPORT_64_BITS_ATOMICS
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64
static inline int ompi_atomic_cmpset_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
int64_t ret;
int64_t ret;
__asm__ __volatile__ (
"1: ldarx %0, 0, %2 \n\
cmpd 0, %0, %3 \n\
bne- 2f \n\
stdcx. %4, 0, %2 \n\
bne- 1b \n\
2:"
: "=&r" (ret), "=m" (*addr)
: "r" (addr), "r" (oldval), "r" (newval), "m" (*addr)
: "cc", "memory");
return (ret == oldval);
__asm__ __volatile__ (
"1: ldarx %0, 0, %2 \n\t"
" cmpd 0, %0, %3 \n\t"
" bne- 2f \n\t"
" stdcx. %4, 0, %2 \n\t"
" bne- 1b \n\t"
"2:"
: "=&r" (ret), "=m" (*addr)
: "r" (addr), "r" (oldval), "r" (newval), "m" (*addr)
: "cc", "memory");
return (ret == oldval);
}
static inline int ompi_atomic_cmpset_acq_64(volatile int64_t *addr,
int64_t oldval,
int64_t newval)
static inline int ompi_atomic_cmpset_acq_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
int rc;
@@ -118,37 +120,45 @@ static inline int ompi_atomic_cmpset_acq_64(volatile int64_t *addr,
}
static inline int ompi_atomic_cmpset_rel_64(volatile int64_t *addr,
int64_t oldval,
int64_t newval)
static inline int ompi_atomic_cmpset_rel_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
ompi_atomic_wmb();
return ompi_atomic_cmpset_64(addr, oldval, newval);
}
#endif /* HOW_TO_DECIDE_IF_THE_ARCHI_SUPPORT_64_BITS_ATOMICS */
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_ADD_32
static inline int ompi_atomic_add_32(volatile int32_t* v, int i)
static inline int ompi_atomic_add_32(volatile int32_t* v, int inc)
{
__asm__ volatile("top1:\tlwarx r4, 0, %0\n\t" \
"addi r4, r4, 1\n\t" \
"stwcx. r4, 0, %0\n\t" \
"bne cr0, top1"
:
: "r" (v)
: "r4");
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%3 # atomic_add\n\t"
" add %0,%2,%0 \n\t"
" stwcx. %0,0,%3 \n\t"
" bne- 1b \n\t"
: "=&r" (t), "=m" (*v)
: "r" (inc), "r" (&v), "m" (*v)
: "cc");
return *v;
}
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_SUB_32
static inline int ompi_atomic_sub_32(volatile int32_t* v, int i)
static inline int ompi_atomic_sub_32(volatile int32_t* v, int dec)
{
__asm__ volatile("top2:\tlwarx r4, 0, %0\n\t" \
"subi r4, r4, 1\n\t" \
"stwcx. r4, 0, %0\n\t" \
"bne cr0, top2"
:
: "r" (v)
: "r4");
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%3 # atomic_add\n\t"
" subf %0,%2,%0 \n\t"
" stwcx. %0,0,%3 \n\t"
" bne- 1b \n\t"
: "=&r" (t), "=m" (*v)
: "r" (dec), "r" (&v), "m" (*v)
: "cc");
return *v;
}
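The acquire/release variants exist so higher-level primitives can order memory correctly. As an illustration (hedged: ompi_atomic_lock/ompi_atomic_unlock are only declared in the generic header, and the lock type and bodies below are assumptions, not this commit's code), a test-and-set spinlock over these primitives could look like:

typedef struct { volatile int32_t u_lock; } my_lock_t;   /* hypothetical */

static inline void my_lock(my_lock_t *lock)
{
    /* acquire semantics: later accesses may not be hoisted above this */
    while (0 == ompi_atomic_cmpset_acq_32(&lock->u_lock, 0, 1)) {
        /* spin until the lock is observed free and won */
    }
}

static inline void my_unlock(my_lock_t *lock)
{
    /* release semantics: earlier stores drain before the lock is freed */
    ompi_atomic_cmpset_rel_32(&lock->u_lock, 1, 0);
}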

View file

@@ -35,74 +35,67 @@ static inline void ompi_atomic_wmb(void)
MEMBAR("#StoreStore");
}
static inline int ompi_atomic_cmpset_32(volatile uint32_t *addr,
uint32_t oldval,
uint32_t newval)
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32
static inline int ompi_atomic_cmpset_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
uint32_t ret = oldval;
int32_t ret = oldval;
__asm__ __volatile("casa [%1] " ASI_P ", %2, %0"
: "+r" (ret)
: "r" (addr), "r" (newval));
return (ret == oldval);
__asm__ __volatile("casa [%1] " ASI_P ", %2, %0"
: "+r" (ret)
: "r" (addr), "r" (newval));
return (ret == oldval);
}
static inline int ompi_atomic_cmpset_acq_32(volatile uint32_t *addr,
uint32_t oldval,
uint32_t newval)
static inline int ompi_atomic_cmpset_acq_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
int rc;
int rc;
rc = ompi_atomic_cmpset_32(addr, oldval, newval);
ompi_atomic_rmb();
rc = ompi_atomic_cmpset_32(addr, oldval, newval);
ompi_atomic_rmb();
return rc;
return rc;
}
static inline int ompi_atomic_cmpset_rel_32(volatile uint32_t *addr,
uint32_t oldval,
uint32_t newval)
static inline int ompi_atomic_cmpset_rel_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
ompi_atomic_wmb();
return ompi_atomic_cmpset_32(addr, oldval, newval);
ompi_atomic_wmb();
return ompi_atomic_cmpset_32(addr, oldval, newval);
}
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64
static inline int ompi_atomic_cmpset_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
int64_t ret = oldval;
__asm__ __volatile("casxa [%1] " ASI_P ", %2, %0"
: "+r" (ret)
: "r" (addr), "r" (newval));
return (ret == oldval);
}
static inline int ompi_atomic_cmpset_64(volatile uint64_t *addr,
uint64_t oldval,
uint64_t newval)
static inline int ompi_atomic_cmpset_acq_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
uint64_t ret = oldval;
__asm__ __volatile("casxa [%1] " ASI_P ", %2, %0"
: "+r" (ret)
: "r" (addr), "r" (newval));
return (ret == oldval);
int rc;
rc = ompi_atomic_cmpset_64(addr, oldval, newval);
ompi_atomic_rmb();
return rc;
}
static inline int ompi_atomic_cmpset_acq_64(volatile uint64_t *addr,
uint64_t oldval,
uint64_t newval)
static inline int ompi_atomic_cmpset_rel_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
int rc;
rc = ompi_atomic_cmpset_64(addr, oldval, newval);
ompi_atomic_rmb();
return rc;
}
static inline int ompi_atomic_cmpset_rel_64(volatile uint64_t *addr,
uint64_t oldval,
uint64_t newval)
{
ompi_atomic_wmb();
return ompi_atomic_cmpset_64(addr, oldval, newval);
ompi_atomic_wmb();
return ompi_atomic_cmpset_64(addr, oldval, newval);
}

View file

@@ -9,156 +9,142 @@
* On ia64, we use cmpxchg, which supports acquire/release semantics natively.
*/
static inline void ompi_atomic_mb(void) {
static inline void ompi_atomic_mb(void)
{
#if 0
return KeMemoryBarrier();
return KeMemoryBarrier();
#endif
}
static inline void ompi_atomic_rmb(void) {
static inline void ompi_atomic_rmb(void)
{
#if 0
return KeMemoryBarrier();
return KeMemoryBarrier();
#endif
}
static inline void ompi_atomic_wmb(void) {
static inline void ompi_atomic_wmb(void)
{
#if 0
return KeMemoryBarrier();
return KeMemoryBarrier();
#endif
}
static inline int ompi_atomic_cmpset_acq_32(volatile int32_t *addr,
int32_t oldval,
int32_t newval) {
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_32
static inline int ompi_atomic_cmpset_acq_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
#if 0
LONG ret = InterlockedCompareExchangeAcquire ((LONG volatile*) addr,
(LONG) newval,
(LONG) oldval);
return (oldval == ret) ? 1: 0;
int32_t ret = InterlockedCompareExchangeAcquire ((int32_t volatile*) addr,
(int32_t) newval, (int32_t) oldval);
return (oldval == ret) ? 1: 0;
#else
return 0;
return 0;
#endif
}
static inline int ompi_atomic_cmpset_rel_32(volatile int32_t *addr,
int32_t oldval,
int32_t newval) {
static inline int ompi_atomic_cmpset_rel_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
#if 0
LONG ret = InterlockedCompareExchangeRelease ((LONG volatile*) addr,
(LONG) newval,
(LONG) oldval);
return (oldval == ret) ? 1: 0;
int32_t ret = InterlockedCompareExchangeRelease ((int32_t volatile*) addr,
(int32_t) newval, (int32_t) oldval);
return (oldval == ret) ? 1: 0;
#else
return 0;
return 0;
#endif
}
static inline int ompi_atomic_cmpset_32( volatile int32_t *addr,
int32_t oldval, int32_t newval)
{
#if 0
int32_t ret = InterlockedCompareExchange ((int32_t volatile*) addr,
(int32_t) newval, (int32_t) oldval);
return (oldval == ret) ? 1: 0;
#else
return 0;
#endif
}
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_CMPSET_64
static inline int ompi_atomic_cmpset_acq_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
#if 0
int64_t ret = InterlockedCompareExchangeAcquire64 ((int64_t volatile*) addr,
(int64_t) newval, (int64_t) oldval);
return (oldval == ret) ? 1: 0;
#else
return 0;
#endif
}
static inline int ompi_atomic_cmpset_rel_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
#if 0
int64_t ret = InterlockedCompareExchangeRelease64 ((int64_t volatile*) addr,
(int64_t) newval, (int64_t) oldval);
return (oldval == ret) ? 1: 0;
#else
return 0;
#endif
}
static inline int ompi_atomic_cmpset_32(volatile int32_t *addr,
int32_t oldval,
int32_t newval) {
static inline int ompi_atomic_cmpset_64( volatile int64_t *addr,
int64_t oldval, int64_t newval)
{
#if 0
LONG ret = InterlockedCompareExchange ((LONG volatile*) addr,
(LONG) newval,
(LONG) oldval);
return (oldval == ret) ? 1: 0;
int64_t ret = InterlockedCompareExchange64 ((int64_t volatile*) addr,
(int64_t) newval, (int64_t) oldval);
return (oldval == ret) ? 1: 0;
#else
return 0;
#endif
}
static inline int ompi_atomic_cmpset_acq_64(volatile int64_t *addr,
int64_t oldval,
int64_t newval) {
#if 0
LONGLONG ret = InterlockedCompareExchangeAcquire64 ((LONGLONG volatile*) addr,
(LONGLONG) newval,
(LONGLONG) oldval);
return (oldval == ret) ? 1: 0;
#else
return 0;
#endif
}
static inline int ompi_atomic_cmpset_rel_64(volatile int64_t *addr,
int64_t oldval,
int64_t newval) {
#if 0
LONGLONG ret = InterlockedCompareExchangeRelease64 ((LONGLONG volatile*) addr,
(LONGLONG) newval,
(LONGLONG) oldval);
return (oldval == ret) ? 1: 0;
#else
return 0;
#endif
}
static inline int ompi_atomic_cmpset_64(volatile int64_t *addr,
int64_t oldval,
int64_t newval) {
#if 0
LONGLONG ret = InterlockedCompareExchange64 ((LONGLONG volatile*) addr,
(LONGLONG) newval,
(LONGLONG) oldval);
return (oldval == ret) ? 1: 0;
#else
return 0;
return 0;
#endif
}
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_ADD_32
static inline int32_t ompi_atomic_add_32(volatile int32_t *addr, int32_t delta) {
return InterlockedExchangeAdd ((LONG volatile *) addr,
(LONG) delta);
static inline int32_t ompi_atomic_add_32(volatile int32_t *addr, int32_t delta)
{
return InterlockedExchangeAdd ((int32_t volatile *) addr,
(int32_t) delta);
}
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_ADD_64
static inline int64_t ompi_atomic_add_64(volatile int64_t *addr, int64_t delta) {
static inline int64_t ompi_atomic_add_64(volatile int64_t *addr, int64_t delta)
{
#if 0
return InterlockedExchangeAdd64 ((LONGLONG volatile *) addr,
(LONGLONG) delta);
return InterlockedExchangeAdd64 ((int64_t volatile *) addr,
(int64_t) delta);
#else
return 0;
return 0;
#endif
}
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_SUB_32
static inline int32_t ompi_atomic_sub_32(volatile int32_t *addr, int32_t delta) {
return InterlockedExchangeAdd ((LONG volatile *) addr,
(LONG) (-delta));
static inline int32_t ompi_atomic_sub_32(volatile int32_t *addr, int32_t delta)
{
return InterlockedExchangeAdd( (int32_t volatile *) addr,
(int32_t) (-delta));
}
#define OMPI_ARCHITECTURE_DEFINE_ATOMIC_SUB_64
static inline int64_t ompi_atomic_sub_64(volatile int64_t *addr, int64_t delta) {
static inline int64_t ompi_atomic_sub_64(volatile int64_t *addr, int64_t delta)
{
#if 0
return InterlockedExchangeAdd64 ((LONGLONG volatile *) addr,
(LONGLONG) (-delta));
return InterlockedExchangeAdd64 ((int64_t volatile *) addr,
(int64_t) (-delta));
#else
return 0;
return 0;
#endif
}
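One convention worth spelling out for when the #if 0 stubs get enabled: the Win32 Interlocked* calls return the value previously stored at the address, while the ompi_atomic_cmpset_* wrappers normalize that into a 0/1 success flag, as the '(oldval == ret) ? 1 : 0' lines above show. A hedged illustration (as committed, the stubs still return 0 unconditionally):

static int example(void)
{
    volatile int32_t x = 5;
    int ok;
    ok = ompi_atomic_cmpset_32(&x, 5, 7);  /* x was 5: x becomes 7, ok == 1 */
    ok = ompi_atomic_cmpset_32(&x, 5, 9);  /* x is 7, not 5: unchanged, ok == 0 */
    return ok;
}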