
Adding more atomic stuff before I vanish...

This commit was SVN r1116.
This commit is contained in:
David Daniel 2004-05-05 23:19:32 +00:00
parent aa36122c66
commit c174b2ca7c
6 changed files with 495 additions and 99 deletions

src/include/sys/alpha/atomic.h (new file)

@@ -0,0 +1,137 @@
/*
 * $HEADER$
 */

#ifndef LAM_SYS_ATOMIC_H_INCLUDED
#define LAM_SYS_ATOMIC_H_INCLUDED

/*
 * On alpha, everything is load-locked, store-conditional...
 */

#ifdef HAVE_SMP
#define MB()  __asm__ __volatile__ ("mb"  : : : "memory")
#define RMB() __asm__ __volatile__ ("mb"  : : : "memory")
#define WMB() __asm__ __volatile__ ("wmb" : : : "memory")
#else
#define MB()
#define RMB()
#define WMB()
#endif
static inline void lam_atomic_mb(void)
{
    MB();
}

static inline void lam_atomic_rmb(void)
{
    RMB();
}

static inline void lam_atomic_wmb(void)
{
    WMB();
}
static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    uint32_t ret;

    __asm__ __volatile__ (
        "1:  ldl_l %0, %1           // load old value           \n\
             cmpeq %0, %2, %0       // compare                  \n\
             beq %0, 2f             // exit if not equal        \n\
             mov %3, %0             // value to store           \n\
             stl_c %0, %1           // attempt to store         \n\
             beq %0, 3f             // if failed, try again     \n\
         2:                         // done                     \n\
         .section .text3, \"ax\"    // out-of-line retry path   \n\
         3:  br 1b                  // try again                \n\
         .previous                  \n"
        : "=&r" (ret), "+m" (*addr)
        : "r" (old), "r" (new)
        : "memory");

    return ret;
}
static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    int rc;

    rc = lam_atomic_cmpset_32(addr, old, new);
    lam_atomic_rmb();

    return rc;
}

static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    lam_atomic_wmb();
    return lam_atomic_cmpset_32(addr, old, new);
}
static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    uint64_t ret;

    __asm__ __volatile__ (
        "1:  ldq_l %0, %1           // load old value           \n\
             cmpeq %0, %2, %0       // compare                  \n\
             beq %0, 2f             // exit if not equal        \n\
             mov %3, %0             // value to store           \n\
             stq_c %0, %1           // attempt to store         \n\
             beq %0, 3f             // if failed, try again     \n\
         2:                         // done                     \n\
         .section .text3, \"ax\"    // out-of-line retry path   \n\
         3:  br 1b                  // try again                \n\
         .previous                  \n"
        : "=&r" (ret), "+m" (*addr)
        : "r" (old), "r" (new)
        : "memory");

    return ret;
}
static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    int rc;

    rc = lam_atomic_cmpset_64(addr, old, new);
    lam_atomic_rmb();

    return rc;
}

static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    lam_atomic_wmb();
    return lam_atomic_cmpset_64(addr, old, new);
}
#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */
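Usage note (illustration only, not part of this commit): the acquire/release variants are the building blocks for a lock. The sketch below assumes this header is included and uses a hypothetical lam_lock_t type; lam_lock_acquire() spins on the acquire-flavored compare-and-set and lam_lock_release() clears the word with the release flavor.

/* Hypothetical usage sketch -- not part of the commit. */
typedef volatile uint32_t lam_lock_t;   /* 0 = unlocked, 1 = locked */

static inline void lam_lock_acquire(lam_lock_t *lock)
{
    /* Spin until the 0 -> 1 compare-and-set succeeds; the acquire
       variant keeps later loads from moving above the lock. */
    while (!lam_atomic_cmpset_acq_32(lock, 0, 1)) {
        /* busy wait */
    }
}

static inline void lam_lock_release(lam_lock_t *lock)
{
    /* The release variant makes earlier stores visible before the
       lock word is cleared. */
    lam_atomic_cmpset_rel_32(lock, 1, 0);
}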

src/include/sys/amd64/atomic.h

@@ -5,65 +5,103 @@
#ifndef LAM_SYS_ATOMIC_H_INCLUDED
#define LAM_SYS_ATOMIC_H_INCLUDED

/*
 * On amd64, we use cmpxchg.
 */

#ifdef HAVE_SMP
#define LOCK "lock; "
#define MB() __asm__ __volatile__ ("" : : : "memory")
#else
#define LOCK
#define MB()
#endif

static inline void lam_atomic_mb(void)
{
    MB();
}

static inline void lam_atomic_rmb(void)
{
    MB();
}

static inline void lam_atomic_wmb(void)
{
    MB();
}

static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    uint32_t ret = old;

    /* cmpxchg compares EAX (ret) with *addr; on a match it stores new,
       otherwise it loads the current value of *addr into EAX. */
    __asm__ __volatile__ (
        LOCK "cmpxchgl %1,%2"
        : "+a" (ret)
        : "r" (new), "m" (*addr)
        : "memory");

    return (ret == old);
}

static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    return lam_atomic_cmpset_32(addr, old, new);
}

static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    return lam_atomic_cmpset_32(addr, old, new);
}

static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    uint64_t ret = old;

    /* cmpxchg compares RAX (ret) with *addr; on a match it stores new,
       otherwise it loads the current value of *addr into RAX. */
    __asm__ __volatile__ (
        LOCK "cmpxchgq %1,%2"
        : "+a" (ret)
        : "r" (new), "m" (*addr)
        : "memory");

    return (ret == old);
}

static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    return lam_atomic_cmpset_64(addr, old, new);
}

static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    return lam_atomic_cmpset_64(addr, old, new);
}
#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */
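Usage note (illustration only, not part of this commit): higher-level atomics can be layered on the compare-and-set primitive with the usual retry loop. A hypothetical fetch-and-add:

/* Hypothetical helper built on lam_atomic_cmpset_32 -- a sketch only. */
static inline uint32_t lam_atomic_add_32(volatile uint32_t *addr, uint32_t delta)
{
    uint32_t oldval;

    /* Re-read and retry until no other thread updated *addr between
       the load and the LOCK cmpxchg issued by lam_atomic_cmpset_32(). */
    do {
        oldval = *addr;
    } while (!lam_atomic_cmpset_32(addr, oldval, oldval + delta));

    return oldval + delta;
}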

src/include/sys/ia32/atomic.h

@@ -5,69 +5,107 @@
#ifndef LAM_SYS_ATOMIC_H_INCLUDED
#define LAM_SYS_ATOMIC_H_INCLUDED

/*
 * On ia32, we use cmpxchg.
 */

#ifdef HAVE_SMP
#define LOCK "lock; "
#define MB() __asm__ __volatile__ ("" : : : "memory")
#else
#define LOCK
#define MB()
#endif

static inline void lam_atomic_mb(void)
{
    MB();
}

static inline void lam_atomic_rmb(void)
{
    MB();
}

static inline void lam_atomic_wmb(void)
{
    MB();
}

static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    uint32_t ret = old;

    /* cmpxchg compares EAX (ret) with *addr; on a match it stores new,
       otherwise it loads the current value of *addr into EAX. */
    __asm__ __volatile__ (
        LOCK "cmpxchgl %1,%2"
        : "+a" (ret)
        : "r" (new), "m" (*addr)
        : "memory");

    return (ret == old);
}

static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    return lam_atomic_cmpset_32(addr, old, new);
}

static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    return lam_atomic_cmpset_32(addr, old, new);
}

static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    /*
     * Compare EDX:EAX with m64.  If equal, set ZF and load ECX:EBX into
     * m64.  Else, clear ZF and load m64 into EDX:EAX.
     */
    uint64_t ret = old;
    struct lwords { uint32_t lo; uint32_t hi; };
    struct lwords *p = (struct lwords *) &new;

    __asm__ __volatile__ (
        LOCK "cmpxchg8b %1\n"
        : "+A" (ret)
        : "m" (*addr), "b" (p->lo), "c" (p->hi)
        : "memory");

    return (ret == old);
}

static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    return lam_atomic_cmpset_64(addr, old, new);
}

static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    return lam_atomic_cmpset_64(addr, old, new);
}
#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */
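Usage note (illustration only, not part of this commit): the 64-bit primitive is used the same way on ia32; each attempt is one LOCK cmpxchg8b as implemented above. A hypothetical atomic 64-bit exchange:

/* Hypothetical atomic exchange layered on lam_atomic_cmpset_64 -- sketch only. */
static inline uint64_t lam_atomic_swap_64(volatile uint64_t *addr, uint64_t value)
{
    uint64_t oldval;

    do {
        oldval = *addr;   /* snapshot current value, retry on interference */
    } while (!lam_atomic_cmpset_64(addr, oldval, value));

    return oldval;
}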

src/include/sys/ia64/atomic.h

@@ -9,64 +9,113 @@
 * On ia64, we use cmpxchg, which supports acquire/release semantics natively.
 */

#ifdef HAVE_SMP
#define MB() __asm__ __volatile__ ("" : : : "memory")
#else
#define MB()
#endif

static inline void lam_atomic_mb(void)
{
    MB();
}

static inline void lam_atomic_rmb(void)
{
    MB();
}

static inline void lam_atomic_wmb(void)
{
    MB();
}
static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    uint32_t ret;

    __asm__ __volatile__ (
        "   mov ar.ccv=%2;;              \n\
            cmpxchg4.acq %0=%4,%3,ar.ccv \n"
        : "=r" (ret), "=m" (*addr)
        : "r" (old), "r" (new), "m" (*addr)
        : "memory");

    return (ret == old);
}
static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    uint32_t ret;

    __asm__ __volatile__ (
        "   mov ar.ccv=%2;;              \n\
            cmpxchg4.rel %0=%4,%3,ar.ccv \n"
        : "=r" (ret), "=m" (*addr)
        : "r" (old), "r" (new), "m" (*addr)
        : "memory");

    return (ret == old);
}

static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    return lam_atomic_cmpset_acq_32(addr, old, new);
}
static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    uint64_t ret;

    __asm__ __volatile__ (
        "   mov ar.ccv=%2;;              \n\
            cmpxchg8.acq %0=%4,%3,ar.ccv \n"
        : "=r" (ret), "=m" (*addr)
        : "r" (old), "r" (new), "m" (*addr)
        : "memory");

    return (ret == old);
}
static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    uint64_t ret;

    __asm__ __volatile__ (
        "   mov ar.ccv=%2;;              \n\
            cmpxchg8.rel %0=%4,%3,ar.ccv \n"
        : "=r" (ret), "=m" (*addr)
        : "r" (old), "r" (new), "m" (*addr)
        : "memory");

    return (ret == old);
}

static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    return lam_atomic_cmpset_acq_64(addr, old, new);
}
#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */

src/include/sys/powerpc/atomic.h

@@ -9,9 +9,37 @@
 * On powerpc ...
 */

#ifdef HAVE_SMP
#define MB()  __asm__ __volatile__ ("sync"   : : : "memory")
#define RMB() __asm__ __volatile__ ("lwsync" : : : "memory")
#define WMB() __asm__ __volatile__ ("eieio"  : : : "memory")
#else
#define MB()
#define RMB()
#define WMB()
#endif

static inline void lam_atomic_mb(void)
{
    MB();
}

static inline void lam_atomic_rmb(void)
{
    RMB();
}

static inline void lam_atomic_wmb(void)
{
    WMB();
}

static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
@@ -57,7 +85,6 @@ static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
}

static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)

src/include/sys/sparc64/atomic.h (new file)

@@ -0,0 +1,107 @@
/*
 * $HEADER$
 */

#ifndef LAM_SYS_ATOMIC_H_INCLUDED
#define LAM_SYS_ATOMIC_H_INCLUDED

/*
 * On sparc64, use casa and casxa (compare and swap) instructions.
 */

#ifdef HAVE_SMP
#define MEMBAR(type) __asm__ __volatile__ ("membar " type : : : "memory")
#else
#define MEMBAR(type)
#endif
static inline void lam_atomic_mb(void)
{
    MEMBAR("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad");
}

static inline void lam_atomic_rmb(void)
{
    MEMBAR("#LoadLoad");
}

static inline void lam_atomic_wmb(void)
{
    MEMBAR("#StoreStore");
}
static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    uint32_t ret = new;

    /* casa compares *addr with %2 (old); on a match it swaps in the
       value held in %0, and %0 always receives the previous *addr. */
    __asm__ __volatile__ ("casa [%1] ASI_P, %2, %0"
                          : "+r" (ret)
                          : "r" (addr), "r" (old)
                          : "memory");

    return (ret == old);
}
static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    int rc;

    rc = lam_atomic_cmpset_32(addr, old, new);
    lam_atomic_rmb();

    return rc;
}

static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    lam_atomic_wmb();
    return lam_atomic_cmpset_32(addr, old, new);
}
static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    uint64_t ret = new;

    /* casxa compares *addr with %2 (old); on a match it swaps in the
       value held in %0, and %0 always receives the previous *addr. */
    __asm__ __volatile__ ("casxa [%1] ASI_P, %2, %0"
                          : "+r" (ret)
                          : "r" (addr), "r" (old)
                          : "memory");

    return (ret == old);
}
static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    int rc;

    rc = lam_atomic_cmpset_64(addr, old, new);
    lam_atomic_rmb();

    return rc;
}

static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    lam_atomic_wmb();
    return lam_atomic_cmpset_64(addr, old, new);
}
#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */
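Usage note (illustration only, not part of this commit): the explicit barriers pair up in the usual producer/consumer pattern; lam_atomic_wmb() publishes the data before the flag, and lam_atomic_rmb() orders the flag check before the data read.

/* Hypothetical producer/consumer handshake -- illustration only. */
static volatile uint32_t flag = 0;
static volatile uint32_t payload = 0;

static void producer(uint32_t value)
{
    payload = value;        /* write the data first           */
    lam_atomic_wmb();       /* make it visible...             */
    flag = 1;               /* ...before raising the flag     */
}

static uint32_t consumer(void)
{
    while (flag == 0) {     /* wait for the flag              */
        /* spin */
    }
    lam_atomic_rmb();       /* order the flag read before...  */
    return payload;         /* ...the payload read            */
}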