
Remove the outdated src/os tree -- all this atomic stuff is now in src/include/sys (and has been for a long time).

This commit was SVN r3809.
This commit is contained in:
Jeff Squyres 2004-12-14 16:15:15 +00:00
parent 2b9f9f67a4
commit 7abc1f0f3d
27 changed files with 0 additions and 2290 deletions
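For context, every per-platform header deleted below exports the same small API: an ompi_lock_data_t spin lock (spinlock/spintrylock/spinunlock), fetch-and-op helpers (fetchNadd, fetchNset, and their *Long variants), and the ATOMIC_* convenience macros from os/atomic.h. A minimal usage sketch follows; the caller code is hypothetical and only the names come from the removed headers (the replacement API under src/include/sys is not shown here and may differ):

#include "os/atomic.h"   /* dispatch header removed by this commit; include path is assumed */

static ompi_lock_data_t counter_lock;
static volatile int counter = 0;

static void counter_init(void)
{
    /* ATOMIC_LOCK_INIT() expands to spinunlock(), i.e. it marks the lock free */
    ATOMIC_LOCK_INIT(&counter_lock);
}

static int counter_bump(void)
{
    int old;

    ATOMIC_LOCK(&counter_lock);
    /* fetchNadd() already adds atomically and returns the previous value;
       the lock here only stands in for whatever extra bookkeeping a real
       caller would protect alongside the update */
    old = fetchNadd(&counter, 1);
    ATOMIC_UNLOCK(&counter_lock);

    return old;
}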

@@ -1059,19 +1059,6 @@ AC_CONFIG_FILES([
src/include/sys/sparc64/Makefile
src/class/Makefile
src/os/Makefile
src/os/cygwin/Makefile
src/os/darwin/Makefile
src/os/darwin/ppc_32/Makefile
src/os/darwin/ppc_64/Makefile
src/os/irix/Makefile
src/os/irix/sn0/Makefile
src/os/linux/Makefile
src/os/linux/alpha/Makefile
src/os/linux/i686/Makefile
src/os/linux/ia64/Makefile
src/os/linux/x86_64/Makefile
src/os/tru64/Makefile
src/runtime/Makefile
src/threads/Makefile
src/util/Makefile

@@ -88,11 +88,6 @@ SUBDIRS = \
dynamic-mca \
tools
# The "os" subdir only has header files, and nothing is generated. So
# it's safe to just include it in DIST_SUBDIRS.
DIST_SUBDIRS = $(SUBDIRS) os
# Build the main MPI library
lib_LTLIBRARIES = libmpi.la

@@ -1,30 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
SUBDIRS = cygwin darwin irix linux tru64
headers = \
atomic.h
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,83 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef OMPI_ATOMIC_H_INCLUDED
#define OMPI_ATOMIC_H_INCLUDED
#include "ompi_config.h"
/*
* Atomic functions
*/
#if defined(__alpha)
# if defined(__GNUC__)
# include "os/linux/alpha/atomic.h"
# else
# include "os/tru64/atomic.h"
# endif /* __GNUC__ */
#endif /* __alpha */
#if defined(__linux__) && defined(__i386)
#include "os/linux/i686/atomic.h"
#endif /* defined(__linux__) && defined(__i386) */
#if defined(__CYGWIN__)
#include "os/cygwin/atomic.h"
#endif /* defined(__CYGWIN__) */
#if defined(__ia64)
#include "os/linux/ia64/atomic.h"
#endif /* defined(__ia64) */
#if defined(__x86_64)
#include "os/linux/x86_64/atomic.h"
#endif /* defined(__x86_64) */
#if defined(__mips)
#include "os/irix/atomic.h"
#endif /* defined(__mips) */
#if defined(__APPLE__)
/* check if PowerPC 970 (G5) */
#if defined(__ppc_64__)
#include "os/darwin/ppc_64/atomic.h"
#else
#include "os/darwin/ppc_32/atomic.h"
#endif /* defined(__ppc_64__) */
#endif /* defined(__APPLE__) */
#ifndef mb
#define mb()
#endif
#ifndef rmb
#define rmb()
#endif
#ifndef wmb
#define wmb()
#endif
/*
* macros
*/
#define ATOMIC_LOCK_INIT(LOCKPTR) spinunlock(LOCKPTR)
#define ATOMIC_LOCK(LOCKPTR) spinlock(LOCKPTR)
#define ATOMIC_UNLOCK(LOCKPTR) spinunlock(LOCKPTR)
#define ATOMIC_TRYLOCK(LOCKPTR) spintrylock(LOCKPTR)
#endif /* OMPI_ATOMIC_H_INCLUDED */

@@ -1,28 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
headers = \
atomic.h
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os/cygwin
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,201 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef CYGWIN_I686_ATOMIC_H_INCLUDED
#define CYGWIN_I686_ATOMIC_H_INCLUDED
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 1 };
typedef struct {
union {
volatile int lockData_m;
char padding[4];
} data;
} ompi_lock_data_t;
/*
* 64 bit integer
*/
typedef struct {
ompi_lock_data_t lock;
volatile unsigned long long data;
} bigAtomicUnsignedInt;
/*
#ifdef __INTEL_COMPILER
#if defined(c_plusplus) || defined(__cplusplus)
extern "C"
{
#endif
void spinlock(ompi_lock_data_t *lockData);
int spintrylock(ompi_lock_data_t *lockData);
int fetchNadd(volatile int *addr, int inc);
int fetchNset(volatile int *addr, int setValue);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#else
*/
/*
* Spin until I can get the lock
*/
static inline void spinlock(ompi_lock_data_t *lockData)
{
__asm__ __volatile__(
"cmp $1, %0\n"
"jl 2f\n"
"\n1: "
"lock ; decl %0\n"
"jz 3f\n"
"2:\n"
"cmp $1, %0\n"
"jl 2b\n"
"jmp 1b\n"
"3:\n"
: "=m" (lockData->data.lockData_m) : : "memory");
}
/*
* This routine tries once to obtain the lock
*/
static inline int spintrylock(ompi_lock_data_t *lockData)
{
int gotLock;
__asm__ __volatile__(
"mov %2, %1\n"
"cmp $1, %0\n"
"jl 1f\n"
"lock ; decl %0\n"
"js 1f\n"
"mov $1, %1\n"
"jmp 2f\n"
"1:\n"
"mov $0, %1\n"
"2:"
: "=m" (lockData->data.lockData_m),
#ifdef __INTEL_COMPILER
"=&r" (gotLock) : "r" (0) : "memory");
#else
"=r" (gotLock) : "r" (0) : "memory");
#endif
return gotLock;
}
/*
* atomically add a constant to the input integer returning the
* previous value
*/
static inline int fetchNadd(volatile int *addr, int inc)
{
int inputValue;
__asm__ __volatile__(
" mov %2, %1\n" \
"lock ; xadd %1, %0\n"
#ifdef __INTEL_COMPILER
: "=m" (*addr), "=&r" (inputValue) : "r" (inc) : "memory");
#else
: "=m" (*addr), "=r" (inputValue) : "r" (inc) : "memory");
#endif
return (inputValue);
}
static inline int fetchNset(volatile int *addr, int setValue)
{
int inputValue;
__asm__ __volatile__(
" mov %2, %1\n" \
"lock ; xchg %1, %0\n"
#ifdef __INTEL_COMPILER
: "=m" (*addr), "=&r" (inputValue) : "r" (setValue) : "memory");
#else
: "=m" (*addr), "=r" (inputValue) : "r" (setValue) : "memory");
#endif
return (inputValue);
}
/*
* Clear the lock
*/
static inline void spinunlock(ompi_lock_data_t *lockData)
{
lockData->data.lockData_m = 1;
}
static inline unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
(addr->data) += inc;
spinunlock(&(addr->lock));
return returnValue;
}
static inline unsigned long long fetchNsetLong(bigAtomicUnsignedInt *addr,
unsigned long long val)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
addr->data = val;
spinunlock(&(addr->lock));
return returnValue;
}
static inline unsigned long long fetchNaddLongNoLock(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
returnValue = addr->data;
addr->data += inc;
return returnValue;
}
static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}
#endif /* CYGWIN_I686_ATOMIC_H_INCLUDED */

@@ -1,19 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
SUBDIRS = ppc_32 ppc_64

@@ -1,21 +0,0 @@
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os/darwin/ppc_32
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,196 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef DARWIN_POWERPC_ATOMIC_H_INCLUDED
#define DARWIN_POWERPC_ATOMIC_H_INCLUDED
/*
* The following atomic operations were adapted from the examples
* provided in the PowerPC programming manual available at
* http://www-3.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600719DF2
*/
#define mb() __asm__ __volatile__("sync")
#define rmb() __asm__ __volatile__("sync")
#define wmb() __asm__ __volatile__("sync")
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 0 };
typedef struct {
union {
volatile int lockData_m;
char padding[4];
} data;
} ompi_lock_data_t;
/*
* 64 bit integer
*/
typedef struct {
ompi_lock_data_t lock;
volatile unsigned long long data;
} bigAtomicUnsignedInt;
/*
* Spin until I can get the lock
*/
static inline void spinlock(ompi_lock_data_t *lockData)
{
volatile int *lockptr = &(lockData->data.lockData_m);
__asm__ __volatile__(
"mr r6, %0\n" /* save the address of the lock. */
"li r4,1\n"
"1:\n"
"lwarx r5,0,r6\n" /* Get current lock value. */
"cmpwi r5,0x0\n" /* Is it unlocked. if not, keep checking. */
"bne- 1b\n"
"stwcx. r4,0,r6\n" /* Try to atomically set the lock */
"bne- 1b\n"
"isync\n"
: : "r" (lockptr)
: "memory", "r4", "r5", "r6");
}
/*
* This routine tries once to obtain the lock
*/
static inline int spintrylock(ompi_lock_data_t *lockData)
{
volatile int *lockptr = &(lockData->data.lockData_m);
int gotLock = 0;
__asm__ __volatile__(
"mr r6, %1\n" /* save the address of the lock. */
"li r4,0x1\n"
"1:\n"
"lwarx r5,0,r6\n"
"cmpwi r5,0x0\n" /* Is it locked? */
"bne- 2f\n" /* Yes, return 0 */
"stwcx. r4,0,r6\n" /* Try to atomically set the lock */
"bne- 1b\n"
"addi %0,0,1\n"
"isync\n"
"b 3f\n"
"2: addi %0,0,0x0\n"
"3:"
: "=&r" (gotLock) : "r" (lockptr)
: "memory", "r4", "r5", "r6" );
return gotLock;
}
/*
* Clear the lock
*/
static inline void spinunlock(ompi_lock_data_t *lockData)
{
lockData->data.lockData_m = LOCK_UNLOCKED;
}
/*
* atomically add a constant to the input integer returning the
* previous value
*/
static inline int fetchNadd(volatile int *addr, int inc)
{
int inputValue;
__asm__ __volatile__(
"mr r5,%2\n" /* Save the increment */
"1:\n"
"lwarx %0, 0, %1\n" /* Grab the area value */
"add r6, %0, r5\n" /* Add the value */
"stwcx. r6, 0, %1\n" /* Try to save the new value */
"bne- 1b\n" /* Didn't get it, try again... */
"isync\n"
: "=&r" (inputValue) : "r" (addr), "r" (inc) :
"memory", "r5", "r6");
return inputValue;
}
static inline int fetchNset(volatile int *addr, int setValue)
{
int inputValue;
__asm__ __volatile__(
"mr r5,%2\n" /* Save the value to store */
"1:\n"
"lwarx %0, 0, %1\n" /* Grab the area value */
"stwcx. r5, 0, %1\n" /* Try to save the new value */
"bne- 1b\n" /* Didn't get it, try again... */
"isync\n"
: "=&r" (inputValue) : "r" (addr), "r" (setValue) :
"memory", "r5");
return inputValue;
}
static inline unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
(addr->data) += inc;
spinunlock(&(addr->lock));
return returnValue;
}
static inline unsigned long long fetchNsetLong(bigAtomicUnsignedInt *addr,
unsigned long long val)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
addr->data = val;
spinunlock(&(addr->lock));
return returnValue;
}
static inline unsigned long long fetchNaddLongNoLock(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
returnValue = addr->data;
addr->data += inc;
return returnValue;
}
static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}
#endif /* DARWIN_POWERPC_ATOMIC_H_INCLUDED */

@@ -1,28 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
headers = \
atomic.h
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os/darwin/ppc_64
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,212 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef DARWIN_PPC_64_ATOMIC_H_INCLUDED
#define DARWIN_PPC_64_ATOMIC_H_INCLUDED
/*
* The following atomic operations were adapted from the examples
* provided in the PowerPC programming manual available at
* http://www-3.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600719DF2
*/
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
#define mb() __asm__ __volatile__("sync")
#define rmb() __asm__ __volatile__("sync")
#define wmb() __asm__ __volatile__("sync")
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 0 };
typedef struct {
union {
volatile int lockData_m;
char padding[4];
} data;
} ompi_lock_data_t;
/*
* 64 bit integer
*/
typedef volatile unsigned long long bigAtomicUnsignedInt;
/*
* Spin until I can get the lock
*/
static inline void spinlock(ompi_lock_data_t *lockData)
{
volatile int *lockptr = &(lockData->data.lockData_m);
__asm__ __volatile__(
"mr r6, %0\n" /* save the address of the lock. */
"li r4,1\n"
"1:\n"
"lwarx r5,0,r6\n" /* Get current lock value. */
"cmpwi r5,0x0\n" /* Is it unlocked. if not, keep checking. */
"bne- 1b\n"
"stwcx. r4,0,r6\n" /* Try to atomically set the lock */
"bne- 1b\n"
"isync\n"
: : "r" (lockptr)
: "memory", "r4", "r5", "r6");
}
/*
* This routine tries once to obtain the lock
*/
static inline int spintrylock(ompi_lock_data_t *lockData)
{
volatile int *lockptr = &(lockData->data.lockData_m);
int gotLock = 0;
__asm__ __volatile__(
"mr r6, %1\n" /* save the address of the lock. */
"li r4,0x1\n"
"1:\n"
"lwarx r5,0,r6\n"
"cmpwi r5,0x0\n" /* Is it locked? */
"bne- 2f\n" /* Yes, return 0 */
"stwcx. r4,0,r6\n" /* Try to atomically set the lock */
"bne- 1b\n"
"addi %0,0,1\n"
"isync\n"
"b 3f\n"
"2: addi %0,0,0x0\n"
"3:"
: "=&r" (gotLock) : "r" (lockptr)
: "memory", "r4", "r5", "r6" );
return gotLock;
}
/*
* Clear the lock
*/
static inline void spinunlock(ompi_lock_data_t *lockData)
{
lockData->data.lockData_m = LOCK_UNLOCKED;
}
/*
* atomically add a constant to the input integer returning the
* previous value
*/
static inline int fetchNadd(volatile int *addr, int inc)
{
int inputValue;
__asm__ __volatile__(
"mr r5,%2\n" /* Save the increment */
"1:\n"
"lwarx %0, 0, %1\n" /* Grab the value */
"add r6, %0, r5\n" /* Add the value */
"stwcx. r6, 0, %1\n" /* Try to save the new value */
"bne- 1b\n" /* Didn't get it, try again... */
"isync\n"
: "=&r" (inputValue) : "r" (addr), "r" (inc) :
"memory", "r5", "r6");
return inputValue;
}
static inline int fetchNset(volatile int *addr, int setValue)
{
int inputValue;
__asm__ __volatile__(
"mr r5,%2\n" /* Save the value to store */
"1:\n"
"lwarx %0, 0, %1\n" /* Grab the area value */
"stwcx. r5, 0, %1\n" /* Try to save the new value */
"bne- 1b\n" /* Didn't get it, try again... */
"isync\n"
: "=&r" (inputValue) : "r" (addr), "r" (setValue) :
"memory", "r5");
return inputValue;
}
static inline unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
__asm__ __volatile__(
"mr r5,%2\n" /* Save the increment */
"1:\n"
"ldarx %0, 0, %1\n" /* Grab the value */
"add r6, %0, r5\n" /* Add the value */
"stdcx. r6, 0, %1\n" /* Try to save the new value */
"bne- 1b\n" /* Didn't get it, try again... */
"isync\n"
: "=&r" (returnValue) : "r" (addr), "r" (inc) :
"memory", "r5", "r6");
return returnValue;
}
static inline unsigned long long fetchNsetLong(bigAtomicUnsignedInt *addr,
unsigned long long val)
{
unsigned long long returnValue;
__asm__ __volatile__(
"mr r5,%2\n" /* Save the value to store */
"1:\n"
"ldarx %0, 0, %1\n" /* Grab the area value */
"stdcx. r5, 0, %1\n" /* Try to save the new value */
"bne- 1b\n" /* Didn't get it, try again... */
"isync\n"
: "=&r" (returnValue) : "r" (addr), "r" (val) :
"memory", "r5");
return returnValue;
}
static inline unsigned long long fetchNaddLongNoLock(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
returnValue = *addr;
*addr += inc;
return returnValue;
}
static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
*addr = value;
}
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif /* DARWIN_PPC_64_ATOMIC_H_INCLUDED */

@@ -1,30 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
SUBDIRS = sn0
headers = \
atomic.h
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os/irix
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,57 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef IRIX_ATOMIC_H_INCLUDED
#define IRIX_ATOMIC_H_INCLUDED
#include "internal/linkage.h"
/*
* 64 bit integer
*/
typedef volatile unsigned long long bigAtomicUnsignedInt;
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 0 };
typedef struct {
union {
volatile int lockData_m;
char padding[4];
} data;
} ompi_lock_data_t;
CDECL_BEGIN
static inline void spinunlock(ompi_lock_data_t *ctlData_m)
{
ctlData_m->data.lockData_m = LOCK_UNLOCKED;
}
void spinlock(ompi_lock_data_t *);
int spintrylock(ompi_lock_data_t *);
int fetchNadd(volatile int *addr, int inc);
int fetchNset(volatile int *addr, int val);
unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr, int inc);
unsigned long long fetchNsetLong(bigAtomicUnsignedInt *addr,
unsigned long long val);
void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long val);
unsigned long long fetchNaddLongNoLock(bigAtomicUnsignedInt *addr,
int inc);
CDECL_END
#endif /* IRIX_ATOMIC_H_INCLUDED */

@@ -1,16 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options

@@ -1,18 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
SUBDIRS = alpha i686 ia64 x86_64

@@ -1,28 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
headers = \
atomic.h
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os/linux/alpha
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,234 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef LINUX_ALPHA_ATOMIC_H_INCLUDED
#define LINUX_ALPHA_ATOMIC_H_INCLUDED
/*
* 64 bit integer
*/
typedef volatile unsigned long long bigAtomicUnsignedInt;
#include "internal/linkage.h"
CDECL_BEGIN
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 0 };
typedef struct {
union {
volatile int lockData_m;
char padding[4];
} data;
} ompi_lock_data_t;
#define mb() \
__asm__ __volatile__("mb": : :"memory")
#define rmb() \
__asm__ __volatile__("mb": : :"memory")
#define wmb() \
__asm__ __volatile__("wmb": : :"memory")
/*
 * This routine spins until the lock is obtained.
 * A value of 0 indicates that the lock is available;
 * 1 or more means the lock is held by someone.
*/
inline void spinlock(ompi_lock_data_t *lock)
{
/*** sungeun *** ref: alpha-linux spinlock sources ***/
int tmp = 0;
/* Use sub-sections to put the actual loop at the end
of this object file's text section so as to perfect
branch prediction. */
__asm__ __volatile__(
"1: ldl %0,%1\n"
" blbs %0,2f\n"
" ldl_l %0,%1\n"
" blbs %0,2f\n"
" or %0,1,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
" mb\n"
".subsection 2\n"
"2: ldl %0,%1\n"
" blbs %0,2b\n"
" br 1b\n"
".previous"
: "=&r" (tmp), "=m" (lock->data.lockData_m)
: "m"(lock->data.lockData_m) : "memory");
}
/*
* This routine tries once to obtain the lock
*/
inline int spintrylock(ompi_lock_data_t *lock)
{
int got_lock = 0;
int tmp = 0;
__asm__ __volatile__(
" ldl %0,%2\n"
" blbs %0,1f\n"
" ldl_l %0,%2\n"
" blbs %0,1f\n"
" or %0,1,%0\n"
" stl_c %0,%2\n"
" beq %0,1f\n"
" mov 1,%1\n"
"1: mb\n"
: "=&r" (tmp), "=&r" (got_lock), "=m" (lock->data.lockData_m)
: "m"(lock->data.lockData_m) : "memory");
return got_lock;
}
/*
* Clear the lock - alpha specific - need memory barrier
*/
inline void spinunlock(ompi_lock_data_t *lock)
{
mb();
lock->data.lockData_m = 0;
}
inline int fetchNadd(volatile int *addr, int inc)
{
int oldval = 0;
int tmp = 0;
__asm__ __volatile__(
"1: ldl_l %1, %0\n"
" addl %1, %2, %3\n"
" stl_c %3, %0\n"
" beq %3, 2f\n"
" br 3f\n"
"2:\n"
" br 1b\n"
"3:\n"
" mb\n"
: "=m" (*addr), "=r" (oldval)
: "r" (inc), "r" (tmp)
: "memory");
return oldval;
}
inline int fetchNset(volatile int *addr, int val)
{
int oldval = 0;
int tmp = 0;
__asm__ __volatile__(
"1: ldl_l %1, %0\n"
" mov %2, %3\n"
" stl_c %3, %0\n"
" beq %3, 2f\n"
" br 3f\n"
"2:\n"
" br 1b\n"
"3:\n"
" mb\n"
: "=m" (*addr), "=r" (oldval)
: "r" (val), "r" (tmp)
: "memory");
return oldval;
}
inline unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr, int inc)
{
unsigned long long oldval = 0;
unsigned long long tmp = 0;
__asm__ __volatile__(
/* load the contents of addr */
"1: ldq_l %1, %0\n"
/* increment count */
" addq %1, %2, %3\n"
/* conditional store */
" stq_c %3, %0\n"
/* store conditional failed - loop again */
" beq %3, 1b\n"
/* store conditional passed - go to memory barrier */
" br 3f\n"
/* loop again */
"2: br 1b\n"
/* memory barrier and exit */
"3: mb\n"
: "=m" (*addr), "=r" (oldval)
: "r" (inc), "r" (tmp)
: "memory");
return oldval;
}
inline unsigned long long fetchNaddLongNoLock(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long val;
val = *addr;
*addr += inc;
return val;
}
inline unsigned long long fetchNsetLong(volatile unsigned long long *addr,
unsigned long long val)
{
unsigned long long oldval = 0;
unsigned long long tmp = 0;
__asm__ __volatile__(
"1: ldq_l %1, %0\n"
" mov %2, %3\n"
" stq_c %3, %0\n"
" beq %3, 2f\n"
" br 3f\n"
"2:\n"
" br 1b\n"
"3:\n"
" mb\n"
: "=m" (*addr), "=r" (oldval)
: "r" (val), "r" (tmp)
: "memory");
return oldval;
}
inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long val)
{
*addr = val;
}
CDECL_END
#endif /* LINUX_ALPHA_ATOMIC_H_INCLUDED */

@@ -1,29 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
headers = \
atomic.h \
lock.s
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os/linux/i686
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,201 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef LINUX_I686_ATOMIC_H_INCLUDED
#define LINUX_I686_ATOMIC_H_INCLUDED
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 1 };
typedef struct {
union {
volatile int lockData_m;
char padding[4];
} data;
} ompi_lock_data_t;
/*
* 64 bit integer
*/
typedef struct {
ompi_lock_data_t lock;
volatile uint64_t data;
} bigAtomicUnsignedInt;
/* JMS This section is commented out */
/*
#ifdef __INTEL_COMPILER
#if defined(c_plusplus) || defined(__cplusplus)
extern "C"
{
#endif
void spinlock(ompi_lock_data_t *lockData);
int spintrylock(ompi_lock_data_t *lockData);
int fetchNadd(volatile int *addr, int inc);
int fetchNset(volatile int *addr, int setValue);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#else
*/
/*
* Spin until I can get the lock
*/
static inline void spinlock(ompi_lock_data_t *lockData)
{
__asm__ __volatile__(
"cmp $1, %0\n"
"jl 2f\n"
"\n1: "
"lock ; decl %0\n"
"jz 3f\n"
"2:\n"
"cmp $1, %0\n"
"jl 2b\n"
"jmp 1b\n"
"3:\n"
: "=m" (lockData->data.lockData_m) : : "memory");
}
/*
* This routine tries once to obtain the lock
*/
static inline int spintrylock(ompi_lock_data_t *lockData)
{
int gotLock;
__asm__ __volatile__(
"mov %2, %1\n"
"cmp $1, %0\n"
"jl 1f\n"
"lock ; decl %0\n"
"js 1f\n"
"mov $1, %1\n"
"jmp 2f\n"
"1:\n"
"mov $0, %1\n"
"2:"
: "=m" (lockData->data.lockData_m),
#ifdef __INTEL_COMPILER
"=&r" (gotLock) : "r" (0) : "memory");
#else
"=r" (gotLock) : "r" (0) : "memory");
#endif
return gotLock;
}
/*
* atomically add a constant to the input integer returning the
* previous value
*/
static inline int fetchNadd(volatile int *addr, int inc)
{
int inputValue;
__asm__ __volatile__(
" mov %2, %1\n" \
"lock ; xadd %1, %0\n"
#ifdef __INTEL_COMPILER
: "=m" (*addr), "=&r" (inputValue) : "r" (inc) : "memory");
#else
: "=m" (*addr), "=r" (inputValue) : "r" (inc) : "memory");
#endif
return (inputValue);
}
static inline int fetchNset(volatile int *addr, int setValue)
{
int inputValue;
__asm__ __volatile__(
" mov %2, %1\n" \
"lock ; xchg %1, %0\n"
#ifdef __INTEL_COMPILER
: "=m" (*addr), "=&r" (inputValue) : "r" (setValue) : "memory");
#else
: "=m" (*addr), "=r" (inputValue) : "r" (setValue) : "memory");
#endif
return (inputValue);
}
/*
* Clear the lock
*/
static inline void spinunlock(ompi_lock_data_t *lockData)
{
lockData->data.lockData_m = 1;
}
static inline unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
(addr->data) += inc;
spinunlock(&(addr->lock));
return returnValue;
}
static inline unsigned long long fetchNsetLong(bigAtomicUnsignedInt *addr,
unsigned long long val)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
addr->data = val;
spinunlock(&(addr->lock));
return returnValue;
}
static inline unsigned long long fetchNaddLongNoLock(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
returnValue = addr->data;
addr->data += inc;
return returnValue;
}
static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}
#endif /* LINUX_I686_ATOMIC_H_INCLUDED */

@@ -1,85 +0,0 @@
;;
;; Copyright (c) 2004-2005 The Trustees of Indiana University.
;; All rights reserved.
;; Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
;; All rights reserved.
;; Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
;; University of Stuttgart. All rights reserved.
;; $COPYRIGHT$
;;
;; Additional copyrights may follow
;;
;; $HEADER$
;;
#include <asm.h>
.text
.global spinlock
spinlock:
push %ebp
mov %esp,%ebp
mov 0x8(%ebp),%eax
cmpl $0x1,(%eax)
jl 2f
1:
lock decl (%eax)
jz 3f
2:
cmpl $0x1,(%eax)
jl 2b
jmp 1b
3:
pop %ebp
ret
.global spintrylock
spintrylock:
push %ebp
mov %esp,%ebp
sub $0x4,%esp
mov 0x8(%ebp),%edx
mov $0x0,%eax
cmpl $0x1,(%edx)
jl 1f
lock decl (%edx)
js 1f
mov $0x1,%eax
jmp 2f
1:
mov $0x0,%eax
2:
mov %eax,0xfffffffc(%ebp)
leave
ret
.global fetchNadd
fetchNadd:
push %ebp
mov %esp,%ebp
sub $0x4,%esp
mov 0x8(%ebp),%edx
mov 0xc(%ebp),%eax
lock xadd %eax,(%edx)
mov %eax,0xfffffffc(%ebp)
leave
ret
.global fetchNset
fetchNset:
push %ebp
mov %esp,%ebp
sub $0x4,%esp
mov 0x8(%ebp),%edx
mov 0xc(%ebp),%eax
lock xchg %eax,(%edx)
mov %eax,0xfffffffc(%ebp)
leave
ret

@@ -1,29 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
headers = \
atomic.h \
lock.s
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os/linux/ia64
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,183 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef LINUX_IA64_ATOMIC_H_INCLUDED
#define LINUX_IA64_ATOMIC_H_INCLUDED
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 1 };
typedef struct {
union {
volatile int lockData_m;
char padding[4];
} data;
} ompi_lock_data_t;
/*
* 64 bit integer
*/
typedef struct {
ompi_lock_data_t lock;
volatile unsigned long long data;
} bigAtomicUnsignedInt;
#ifdef __INTEL_COMPILER
#if defined(c_plusplus) || defined(__cplusplus)
extern "C"
{
#endif
void spinlock(ompi_lock_data_t *lockData);
int spintrylock(ompi_lock_data_t *lockData);
int fetchNadd(volatile int *addr, int inc);
int fetchNset(volatile int *addr, int setValue);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#else
/*
* Spin until I can get the lock
*/
inline void spinlock(ompi_lock_data_t *lockData)
{
__asm__ __volatile__ (
"mov r30=1\n"
"mov ar.ccv=r0\n"
";;\n"
"cmpxchg4.acq r30=[%0],r30,ar.ccv\n"
";;\n"
"cmp.ne p15,p0=r30,r0\n"
"(p15) br.call.spnt.few b7=ia64_spinlock_contention\n"
";;\n"
"1:\n" /* force a new bundle */
:: "r"(*lockData)
: "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory");
}
/*
* This routine tries once to obtain the lock
*/
inline int spintrylock(ompi_lock_data_t *lockData)
{
int gotLock;
__asm__ __volatile__ (
"mov ar.ccv=r0\n"
";;\n"
"cmpxchg4.acq %0=[%2],%1,ar.ccv\n"
: "=r"(gotLock) : "r"(1), "r"(&(lockData)->data.lockData_m) : "ar.ccv", "memory");
return gotLock;
}
/*
* atomically add a constant to the input integer returning the
* previous value
*/
inline int fetchNadd(volatile int *addr, int inc)
{
int inputValue;
__asm__ __volatile__ (
"fetchadd4.rel %0=[%1],%2" \
: "=r"(*addr) : "r"(inputValue), "i"(inc) : "memory");
return (inputValue);
}
inline int fetchNset(volatile int *addr, int setValue)
{
int inputValue;
__asm__ __volatile__ (
"xchg4 %0=[%1],%2" \
: "=r"(*addr) : "r"(inputValue), "i"(setValue) : "memory");
return (inputValue);
}
#endif /* __INTEL_COMPILER */
/*
* Clear the lock
*/
inline void spinunlock(ompi_lock_data_t *lockData)
{
lockData->data.lockData_m = LOCK_UNLOCKED;
}
inline unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
(addr->data) += inc;
spinunlock(&(addr->lock));
return returnValue;
}
inline unsigned long long fetchNsetLong(bigAtomicUnsignedInt *addr,
unsigned long long val)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
addr->data = val;
spinunlock(&(addr->lock));
return returnValue;
}
inline unsigned long long fetchNaddLongNoLock(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
returnValue = addr->data;
addr->data += inc;
return returnValue;
}
inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}
#endif /* LINUX_IA64_ATOMIC_H_INCLUDED */

@@ -1,84 +0,0 @@
;;
;; Copyright (c) 2004-2005 The Trustees of Indiana University.
;; All rights reserved.
;; Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
;; All rights reserved.
;; Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
;; University of Stuttgart. All rights reserved.
;; $COPYRIGHT$
;;
;; Additional copyrights may follow
;;
;; $HEADER$
;;
#include <asm.h>
.text
.global spinlock
spinlock:
push %ebp
mov %esp,%ebp
mov 0x8(%ebp),%eax
cmpl $0x1,(%eax)
jl 2f
1:
lock decl (%eax)
jz 3f
2:
cmpl $0x1,(%eax)
jl 2b
jmp 1b
3:
pop %ebp
ret
.global spintrylock
spintrylock:
push %ebp
mov %esp,%ebp
sub $0x4,%esp
mov 0x8(%ebp),%edx
mov $0x0,%eax
cmpl $0x1,(%edx)
jl 1f
lock decl (%edx)
js 1f
mov $0x1,%eax
jmp 2f
1:
mov $0x0,%eax
2:
mov %eax,0xfffffffc(%ebp)
leave
ret
.global fetchNadd
fetchNadd:
push %ebp
mov %esp,%ebp
sub $0x4,%esp
mov 0x8(%ebp),%edx
mov 0xc(%ebp),%eax
lock xadd %eax,(%edx)
mov %eax,0xfffffffc(%ebp)
leave
ret
.global fetchNset
fetchNset:
push %ebp
mov %esp,%ebp
sub $0x4,%esp
mov 0x8(%ebp),%edx
mov 0xc(%ebp),%eax
lock xchg %eax,(%edx)
mov %eax,0xfffffffc(%ebp)
leave
ret

@@ -1,28 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
headers = \
atomic.h
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os/linux/x86_64
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,200 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef LINUX_X86_64_ATOMIC_H_INCLUDED
#define LINUX_X86_64_ATOMIC_H_INCLUDED
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 1 };
typedef struct {
union {
volatile int lockData_m;
char padding[4];
} data;
} ompi_lock_data_t;
/*
* 64 bit integer
*/
typedef struct {
ompi_lock_data_t lock;
volatile unsigned long long data;
} bigAtomicUnsignedInt;
/*
#ifdef __INTEL_COMPILER
#if defined(c_plusplus) || defined(__cplusplus)
extern "C"
{
#endif
void spinlock(ompi_lock_data_t *lockData);
int spintrylock(ompi_lock_data_t *lockData);
int fetchNadd(volatile int *addr, int inc);
int fetchNset(volatile int *addr, int setValue);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#else
*/
/*
* Spin until I can get the lock
*/
static inline void spinlock(ompi_lock_data_t *lockData)
{
__asm__ __volatile__(
"cmp $1, %0\n"
"jl 2f\n"
"\n1: "
"lock ; decl %0\n"
"jz 3f\n"
"2:\n"
"cmp $1, %0\n"
"jl 2b\n"
"jmp 1b\n"
"3:\n"
: "=m" (lockData->data.lockData_m) : : "memory");
}
/*
* This routine tries once to obtain the lock
*/
static inline int spintrylock(ompi_lock_data_t *lockData)
{
int gotLock;
__asm__ __volatile__(
"mov %2, %1\n"
"cmp $1, %0\n"
"jl 1f\n"
"lock ; decl %0\n"
"js 1f\n"
"mov $1, %1\n"
"jmp 2f\n"
"1:\n"
"mov $0, %1\n"
"2:"
: "=m" (lockData->data.lockData_m),
#ifdef __INTEL_COMPILER
"=&r" (gotLock) : "r" (0) : "memory");
#else
"=r" (gotLock) : "r" (0) : "memory");
#endif
return gotLock;
}
/*
* atomically add a constant to the input integer returning the
* previous value
*/
static inline int fetchNadd(volatile int *addr, int inc)
{
int inputValue;
__asm__ __volatile__(
" mov %2, %1\n" \
"lock ; xadd %1, %0\n"
#ifdef __INTEL_COMPILER
: "=m" (*addr), "=&r" (inputValue) : "r" (inc) : "memory");
#else
: "=m" (*addr), "=r" (inputValue) : "r" (inc) : "memory");
#endif
return (inputValue);
}
static inline int fetchNset(volatile int *addr, int setValue)
{
int inputValue;
__asm__ __volatile__(
" mov %2, %1\n" \
"lock ; xchg %1, %0\n"
#ifdef __INTEL_COMPILER
: "=m" (*addr), "=&r" (inputValue) : "r" (setValue) : "memory");
#else
: "=m" (*addr), "=r" (inputValue) : "r" (setValue) : "memory");
#endif
return (inputValue);
}
/*
* Clear the lock
*/
static inline void spinunlock(ompi_lock_data_t *lockData)
{
lockData->data.lockData_m = 1;
}
static inline unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
(addr->data) += inc;
spinunlock(&(addr->lock));
return returnValue;
}
static inline unsigned long long fetchNsetLong(bigAtomicUnsignedInt *addr,
unsigned long long val)
{
unsigned long long returnValue;
spinlock(&(addr->lock));
returnValue = addr->data;
addr->data = val;
spinunlock(&(addr->lock));
return returnValue;
}
static inline unsigned long long fetchNaddLongNoLock(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long returnValue;
returnValue = addr->data;
addr->data += inc;
return returnValue;
}
static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long value)
{
addr->data = value;
addr->lock.data.lockData_m = LOCK_UNLOCKED;
}
#endif /* LINUX_X86_64_ATOMIC_H_INCLUDED */

@@ -1,28 +0,0 @@
# -*- makefile -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
headers = \
atomic.h
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/openmpi/os/tru64
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif

@@ -1,204 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef TRU64_ATOMIC_H_INCLUDED
#define TRU64_ATOMIC_H_INCLUDED
#include <c_asm.h>
#include <regdef.h>
#define mb() asm("mb\n");
#define rmb() asm("mb\n");
#define wmb() asm("wmb\n");
/*
* Lock structure
*/
enum { LOCK_UNLOCKED = 0 };
typedef struct {
union {
volatile int lockData_m;
char padding[4];
} data;
} ompi_lock_data_t;
/*
* 64 bit integer
*/
typedef volatile unsigned long long bigAtomicUnsignedInt;
static inline void spinlock(ompi_lock_data_t *lock)
{
asm("loop:\n"
" ldl %t1, (%a0)\n"
" blbs %t1, already_set\n"
" ldl_l %t1, (%a0)\n"
" blbs %t1, already_set\n"
" or %t1, 1, %t2\n"
" stl_c %t2, (%a0)\n"
" beq %t2, stl_c_failed\n"
" br lock_set\n"
"already_set:\n"
"stl_c_failed:\n"
" br loop\n"
"lock_set:\n"
" mb\n",
&(lock->data.lockData_m));
}
/*
* locked load - store conditional pair can fail for
* any number of implementation dependent reasons, so
* we try up to 4 times before declaring failure to
* obtain the lock...
*/
static inline int spintrylock(ompi_lock_data_t *lock)
{
int result = asm("mov %zero, %t3\n"
"loop:\n"
" ldl %t1, (%a0)\n"
" blbs %t1, already_set\n"
" ldl_l %t1, (%a0)\n"
" blbs %t1, already_set\n"
" or %t1, 1, %t2\n"
" stl_c %t2, (%a0)\n"
" beq %t2, stl_c_failed\n"
" mov 1, %v0\n"
" br lock_set\n"
"stl_c_failed:\n"
" addl %t3, 1, %t3\n"
" mov %t3, %t4\n"
" subl %t4, 3, %t4\n"
" ble %t4, loop\n"
"already_set:\n"
" mov %zero, %v0\n"
"lock_set:\n"
" mb\n",
&(lock->data.lockData_m));
return result;
}
/* alpha specific unlock function - need the memory barrier */
static inline void spinunlock(ompi_lock_data_t *lock)
{
asm("mb");
lock->data.lockData_m = LOCK_UNLOCKED;
}
static inline int fetchNadd(volatile int *addr, int inc)
{
int oldval;
oldval = asm("try_again:\n"
" ldl_l %v0, (%a0)\n"
" addl %v0, %a1, %t1\n"
" stl_c %t1, (%a0)\n"
" beq %t1, no_store\n"
" br store\n"
"no_store:\n"
" br try_again\n"
"store:\n"
" mb\n",
addr, inc);
return oldval;
}
static inline unsigned long long fetchNaddLong(bigAtomicUnsignedInt *addr,
int inc)
{
unsigned long long oldval;
oldval = asm("try_again:\n"
" ldq_l %v0, (%a0)\n"
" addq %v0, %a1, %t1\n"
" stq_c %t1, (%a0)\n"
" beq %t1, no_store\n"
" br store\n"
"no_store:\n"
" br try_again\n"
"store:\n"
" mb\n",
addr, inc);
return oldval;
}
static inline unsigned long long
fetchNaddLongNoLock(bigAtomicUnsignedInt *addr, int inc)
{
unsigned long long oldval;
oldval = *addr;
*addr += inc;
return oldval;
}
static inline int fetchNset(volatile int *addr, int val)
{
int oldval;
oldval = asm("try_again:\n"
" ldl_l %v0, (%a0)\n"
" mov %a1, %t1\n"
" stl_c %t1, (%a0)\n"
" beq %t1, no_store\n"
" br store\n"
"no_store:\n"
" br try_again\n"
"store:\n"
" mb\n",
addr, val);
return oldval;
}
static inline unsigned long long fetchNsetLong(bigAtomicUnsignedInt *addr,
unsigned long long val)
{
unsigned long long oldval;
oldval = asm("try_again:\n"
" ldq_l %v0, (%a0)\n"
" mov %a1, %t1\n"
" stq_c %t1, (%a0)\n"
" beq %t1, no_store\n"
" br store\n"
"no_store:\n"
" br try_again\n"
"store:\n"
" mb\n",
addr, val);
return oldval;
}
static inline void setBigAtomicUnsignedInt(bigAtomicUnsignedInt *addr,
unsigned long long val)
{
*addr = val;
}
#endif /* TRU64_ATOMIC_H_INCLUDED */