
Adding more atomic stuff before I vanish...

This commit was SVN r1116.
David Daniel 2004-05-05 23:19:32 +00:00
parent aa36122c66
commit c174b2ca7c
6 changed files with 495 additions and 99 deletions

src/include/sys/alpha/atomic.h  (new file, 137 lines added)

@@ -0,0 +1,137 @@
/*
 * $HEADER$
 */

#ifndef LAM_SYS_ATOMIC_H_INCLUDED
#define LAM_SYS_ATOMIC_H_INCLUDED

/*
 * On alpha, everything is load-locked, store-conditional...
 */

#ifdef HAVE_SMP
#define MB() __asm__ __volatile__ ("mb");
#define RMB() __asm__ __volatile__ ("mb");
#define WMB() __asm__ __volatile__ ("wmb");
#else
#define MB()
#define RMB()
#define WMB()
#endif


static inline void lam_atomic_mb(void)
{
    MB();
}


static inline void lam_atomic_rmb(void)
{
    RMB();
}


static inline void lam_atomic_wmb(void)
{
    WMB();
}


static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    uint32_t ret;

    __asm__ __volatile__ (
"1:  ldl_l %0, %1              // load old value          \n\
     cmpeq %0, %2, %0          // compare                 \n\
     beq %0, 2f                // exit if not equal       \n\
     mov %3, %0                // value to store          \n\
     stl_c %0, %1              // attempt to store        \n\
     beq %0, 3f                // if failed, try again    \n\
2:                             // done                    \n\
     .section .text3, \"ax\"   // out-of-line retry path  \n\
3:   br 1b                     // try again               \n\
     .previous                 \n"
    : "=&r" (ret), "+m" (*addr)
    : "r" (old), "r" (new)
    : "memory");

    return ret;
}


static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    int rc;

    rc = lam_atomic_cmpset_32(addr, old, new);
    lam_atomic_rmb();

    return rc;
}


static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    lam_atomic_wmb();
    return lam_atomic_cmpset_32(addr, old, new);
}


static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    uint64_t ret;

    __asm__ __volatile__ (
"1:  ldq_l %0, %1              // load old value          \n\
     cmpeq %0, %2, %0          // compare                 \n\
     beq %0, 2f                // exit if not equal       \n\
     mov %3, %0                // value to store          \n\
     stq_c %0, %1              // attempt to store        \n\
     beq %0, 3f                // if failed, try again    \n\
2:                             // done                    \n\
     .section .text3, \"ax\"   // out-of-line retry path  \n\
3:   br 1b                     // try again               \n\
     .previous                 \n"
    : "=&r" (ret), "+m" (*addr)
    : "r" (old), "r" (new)
    : "memory");

    return ret;
}


static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    int rc;

    rc = lam_atomic_cmpset_64(addr, old, new);
    lam_atomic_rmb();

    return rc;
}


static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    lam_atomic_wmb();
    return lam_atomic_cmpset_64(addr, old, new);
}

#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */
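
The header exports only compare-and-set plus memory barriers, so read-modify-write operations have to be built on top of it with a retry loop. A minimal sketch of that pattern (not part of this commit; lam_atomic_fetch_add_32 is a hypothetical helper name), using only the lam_atomic_cmpset_32() defined above:

#include <stdint.h>

/* Hypothetical helper: atomically add delta to *addr and return the
   previous value.  Retries whenever another thread updates the word
   between the snapshot and the compare-and-set. */
static inline uint32_t lam_atomic_fetch_add_32(volatile uint32_t *addr,
                                               uint32_t delta)
{
    uint32_t oldval;

    do {
        oldval = *addr;                      /* snapshot current value */
    } while (!lam_atomic_cmpset_32(addr, oldval, oldval + delta));

    return oldval;
}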

src/include/sys/amd64/atomic.h

@@ -5,65 +5,103 @@
#ifndef LAM_SYS_ATOMIC_H_INCLUDED
#define LAM_SYS_ATOMIC_H_INCLUDED

/*
 * On amd64, we use cmpxchg.
 */

#ifdef HAVE_SMP
#define LOCK "lock; "
#define MB() __asm__ __volatile__("": : :"memory")
#else
#define LOCK
#define MB()
#endif


static inline void lam_atomic_mb(void)
{
    MB();
}


static inline void lam_atomic_rmb(void)
{
    MB();
}


static inline void lam_atomic_wmb(void)
{
    MB();
}


static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    uint32_t ret = old;

    __asm__ __volatile__ (
LOCK "cmpxchgl %1,%2   \n\
      setz     %%al    \n\
      movzbl   %%al,%0 \n"
    : "+a" (ret)
    : "r" (new), "m" (*(addr))
    : "memory");

    return (int) ret;    /* setz/movzbl leave 1 on success, 0 on failure */
}


static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    return lam_atomic_cmpset_32(addr, old, new);
}


static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    return lam_atomic_cmpset_32(addr, old, new);
}


static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    uint64_t ret = old;

    __asm__ __volatile__ (
LOCK "cmpxchgq %1,%2   \n\
      setz     %%al    \n\
      movzbl   %%al,%0 \n"
    : "+a" (ret)
    : "r" (new), "m" (*(addr))
    : "memory");

    return (int) ret;    /* setz/movzbl leave 1 on success, 0 on failure */
}


static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    return lam_atomic_cmpset_64(addr, old, new);
}


static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    return lam_atomic_cmpset_64(addr, old, new);
}

#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */

src/include/sys/ia32/atomic.h

@@ -5,44 +5,74 @@
#ifndef LAM_SYS_ATOMIC_H_INCLUDED
#define LAM_SYS_ATOMIC_H_INCLUDED

/*
 * On ia32, we use cmpxchg.
 */

#ifdef HAVE_SMP
#define LOCK "lock; "
#define MB() __asm__ __volatile__("": : :"memory")
#else
#define LOCK
#define MB()
#endif


static inline void lam_atomic_mb(void)
{
    MB();
}


static inline void lam_atomic_rmb(void)
{
    MB();
}


static inline void lam_atomic_wmb(void)
{
    MB();
}


static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    uint32_t ret = old;

    __asm__ __volatile__ (
LOCK "cmpxchgl %1,%2   \n\
      setz     %%al    \n\
      movzbl   %%al,%0 \n"
    : "+a" (ret)
    : "r" (new), "m" (*addr)
    : "memory");

    return (int) ret;    /* setz/movzbl leave 1 on success, 0 on failure */
}


static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    return lam_atomic_cmpset_32(addr, old, new);
}


static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    return lam_atomic_cmpset_32(addr, old, new);
}


static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    /*
@@ -50,24 +80,32 @@ static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
     * m64. Else, clear ZF and load m64 into EDX:EAX.
     */

    uint64_t ret = old;
    struct { uint32_t lo; uint32_t hi; } *p = (void *) &new;

    __asm__ __volatile__(
LOCK "cmpxchg8b %1\n"
    : "+A" (ret)
    : "m" (*addr), "b" (p->lo), "c" (p->hi)
    : "memory");

    return (ret == old);
}


static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    return lam_atomic_cmpset_64(addr, old, new);
}


static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    return lam_atomic_cmpset_64(addr, old, new);
}

#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */

src/include/sys/ia64/atomic.h

@@ -9,64 +9,113 @@
/*
 * On ia64, we use cmpxchg, which supports acquire/release semantics natively.
 */

#ifdef HAVE_SMP
#define MB() __asm__ __volatile__("": : :"memory")
#else
#define MB()
#endif


static inline void lam_atomic_mb(void)
{
    MB();
}


static inline void lam_atomic_rmb(void)
{
    MB();
}


static inline void lam_atomic_wmb(void)
{
    MB();
}


static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    uint32_t ret;

    __asm__ __volatile__(
"        mov ar.ccv=%2 ;;                  \n\
         cmpxchg4.acq %0=%4,%3,ar.ccv      \n"
    : "=r"(ret), "=m"(*addr)
    : "r"(old), "r"(new), "m"(*addr)
    : "memory");

    return (ret == old);
}


static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    uint32_t ret;

    __asm__ __volatile__(
"        mov ar.ccv=%2 ;;                  \n\
         cmpxchg4.rel %0=%4,%3,ar.ccv      \n"
    : "=r"(ret), "=m"(*addr)
    : "r"(old), "r"(new), "m"(*addr)
    : "memory");

    return (ret == old);
}


static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    return lam_atomic_cmpset_acq_32(addr, old, new);
}


static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    uint64_t ret;

    __asm__ __volatile__(
"        mov ar.ccv=%2 ;;                  \n\
         cmpxchg8.acq %0=%4,%3,ar.ccv      \n"
    : "=r"(ret), "=m"(*addr)
    : "r"(old), "r"(new), "m"(*addr)
    : "memory");

    return (ret == old);
}


static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    uint64_t ret;

    __asm__ __volatile__(
"        mov ar.ccv=%2 ;;                  \n\
         cmpxchg8.rel %0=%4,%3,ar.ccv      \n"
    : "=r"(ret), "=m"(*addr)
    : "r"(old), "r"(new), "m"(*addr)
    : "memory");

    return (ret == old);
}


static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    return lam_atomic_cmpset_acq_64(addr, old, new);
}

#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */
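
The acquire/release variants are what lock code is expected to call, so no extra full barrier is needed on architectures such as ia64 where cmpxchg carries the ordering itself. A sketch of that usage (not part of this commit; lam_spinlock_t, lam_spin_lock and lam_spin_unlock are hypothetical names), built only on the interface defined in these headers:

#include <stdint.h>

/* Hypothetical spinlock layered on the cmpset interface. */
typedef struct {
    volatile uint32_t value;                 /* 0 = unlocked, 1 = locked */
} lam_spinlock_t;

static inline void lam_spin_lock(lam_spinlock_t *lock)
{
    /* The acquire variant keeps the critical section from being
       reordered before the successful compare-and-set. */
    while (!lam_atomic_cmpset_acq_32(&lock->value, 0, 1)) {
        while (lock->value != 0) {
            ;                                /* spin until it looks free */
        }
    }
}

static inline void lam_spin_unlock(lam_spinlock_t *lock)
{
    /* Make the critical section's stores visible before releasing. */
    lam_atomic_wmb();
    lock->value = 0;
}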

src/include/sys/powerpc/atomic.h

@@ -9,9 +9,37 @@
/*
 * On powerpc ...
 */

#ifdef HAVE_SMP
#define MB() __asm__ __volatile__ ("sync" : : : "memory")
#define RMB() __asm__ __volatile__ ("lwsync" : : : "memory")
#define WMB() __asm__ __volatile__ ("eieio" : : : "memory")
#else
#define MB()
#define RMB()
#define WMB()
#endif


static inline void lam_atomic_mb(void)
{
    MB();
}


static inline void lam_atomic_rmb(void)
{
    RMB();
}


static inline void lam_atomic_wmb(void)
{
    WMB();
}


static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
@@ -57,7 +85,6 @@ static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
}


static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)

src/include/sys/sparc64/atomic.h  (new file, 107 lines added)

@@ -0,0 +1,107 @@
/*
 * $HEADER$
 */

#ifndef LAM_SYS_ATOMIC_H_INCLUDED
#define LAM_SYS_ATOMIC_H_INCLUDED

/*
 * On sparc64, use casa and casxa (compare and swap) instructions.
 */

#ifdef HAVE_SMP
#define MEMBAR(type) __asm__ __volatile__ ("membar " type : : : "memory")
#else
#define MEMBAR(type)
#endif


static inline void lam_atomic_mb(void)
{
    MEMBAR("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad");
}


static inline void lam_atomic_rmb(void)
{
    MEMBAR("#LoadLoad");
}


static inline void lam_atomic_wmb(void)
{
    MEMBAR("#StoreStore");
}


static inline int lam_atomic_cmpset_32(volatile uint32_t *addr,
                                       uint32_t old,
                                       uint32_t new)
{
    /* casa [rs1] asi, rs2, rd: compare *rs1 with rs2; if equal, swap rd
       with *rs1.  rd always receives the previous memory value, so the
       operation succeeded iff rd == old afterwards. */
    uint32_t ret = new;

    __asm__ __volatile__("casa [%1] ASI_P, %2, %0"
                         : "+r" (ret)
                         : "r" (addr), "r" (old)
                         : "memory");

    return (ret == old);
}


static inline int lam_atomic_cmpset_acq_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    int rc;

    rc = lam_atomic_cmpset_32(addr, old, new);
    lam_atomic_rmb();

    return rc;
}


static inline int lam_atomic_cmpset_rel_32(volatile uint32_t *addr,
                                           uint32_t old,
                                           uint32_t new)
{
    lam_atomic_wmb();
    return lam_atomic_cmpset_32(addr, old, new);
}


static inline int lam_atomic_cmpset_64(volatile uint64_t *addr,
                                       uint64_t old,
                                       uint64_t new)
{
    /* casxa is the 64-bit form of casa; same operand roles. */
    uint64_t ret = new;

    __asm__ __volatile__("casxa [%1] ASI_P, %2, %0"
                         : "+r" (ret)
                         : "r" (addr), "r" (old)
                         : "memory");

    return (ret == old);
}


static inline int lam_atomic_cmpset_acq_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    int rc;

    rc = lam_atomic_cmpset_64(addr, old, new);
    lam_atomic_rmb();

    return rc;
}


static inline int lam_atomic_cmpset_rel_64(volatile uint64_t *addr,
                                           uint64_t old,
                                           uint64_t new)
{
    lam_atomic_wmb();
    return lam_atomic_cmpset_64(addr, old, new);
}

#endif /* ! LAM_SYS_ATOMIC_H_INCLUDED */
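
All six ports implement the same contract: cmpset installs the new value and returns non-zero only when the word currently holds old, and otherwise leaves memory untouched and returns zero. A single-threaded smoke test of that contract (hypothetical, not part of the commit; it assumes the per-architecture atomic.h above has been included):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    volatile uint64_t word = 42;

    /* mismatch: the compare-and-set must fail and leave the word alone */
    assert(lam_atomic_cmpset_64(&word, 7, 99) == 0);
    assert(word == 42);

    /* match: it must succeed and install the new value */
    assert(lam_atomic_cmpset_acq_64(&word, 42, 99) != 0);
    assert(word == 99);

    return 0;
}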