Refs: 3763

This commit was SVN r29756.
Mike Dubman 2013-11-26 12:46:56 +00:00
parent aa98b0333b
commit 0ddc2bc214
18 changed files with 173 additions and 177 deletions
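Note: this commit renames OSHMEM-internal static helpers from a double-underscore prefix (__foo) to a single underscore (_foo) across the atomic, spml, memheap, scoll and shmem runtime code, and drops a few now-unneeded casts along the way. The commit message does not spell out the motivation, but C reserves every identifier that begins with two underscores (or an underscore followed by an uppercase letter) for the implementation, so the old names risked colliding with compiler or libc symbols. A minimal sketch of the rename pattern, using a made-up component name:

/* before (a name formally reserved for the implementation, C99/C11 7.1.3):
 *     static int __example_open(void);
 * after, as used throughout this commit; a single leading underscore before
 * a lowercase letter is far less likely to clash in practice, even though
 * file-scope identifiers starting with "_" are technically still reserved */
static int _example_open(void);

static int _example_open(void)
{
    return 0;   /* the real components return OSHMEM_SUCCESS here */
}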

View file

@ -29,7 +29,7 @@ int mca_atomic_basic_priority_param = -1;
/*
* Local function
*/
static int __basic_open(void);
static int _basic_open(void);
/*
* Instantiate the public struct with all of our public information
@ -51,7 +51,7 @@ mca_atomic_base_component_t mca_atomic_basic_component = {
OSHMEM_RELEASE_VERSION,
/* Component open and close functions */
__basic_open,
_basic_open,
NULL
},
{
@ -66,7 +66,7 @@ mca_atomic_base_component_t mca_atomic_basic_component = {
mca_atomic_basic_query
};
static int __basic_open(void)
static int _basic_open(void)
{
mca_atomic_basic_priority_param = 75;
(void) mca_base_component_var_register(&mca_atomic_basic_component.atomic_version,

View file

@ -33,7 +33,7 @@ mca_spml_ikrit_t *mca_spml_self = NULL;
/*
* Local function
*/
static int __mxm_open(void);
static int _mxm_open(void);
/*
* Instantiate the public struct with all of our public information
@ -55,7 +55,7 @@ mca_atomic_base_component_t mca_atomic_mxm_component = {
OSHMEM_RELEASE_VERSION,
/* Component open and close functions */
__mxm_open,
_mxm_open,
NULL
},
{
@ -70,7 +70,7 @@ mca_atomic_base_component_t mca_atomic_mxm_component = {
mca_atomic_mxm_query
};
static int __mxm_open(void)
static int _mxm_open(void)
{
/*
* This component is able to work using spml:ikrit component only

View file

@ -28,18 +28,18 @@
extern char* mca_memheap_base_param_hca_name;
static int __shm_attach(map_segment_t *, size_t, int, int);
static void __shm_detach(map_segment_t *);
static int _shm_attach(map_segment_t *, size_t, int, int);
static void _shm_detach(map_segment_t *);
static int __mmap_attach(map_segment_t *, size_t);
static void __mmap_detach(map_segment_t *);
static int _mmap_attach(map_segment_t *, size_t);
static void _mmap_detach(map_segment_t *);
#if defined(MPAGE_ENABLE) && (MPAGE_ENABLE > 0)
static int __ibv_attach(map_segment_t *, size_t);
static void __ibv_detach(map_segment_t *);
static int _ibv_attach(map_segment_t *, size_t);
static void _ibv_detach(map_segment_t *);
#endif /* MPAGE_ENABLE */
static int __adaptive_attach(map_segment_t *, size_t);
static int _adaptive_attach(map_segment_t *, size_t);
int mca_memheap_base_alloc_init(mca_memheap_map_t *map, size_t size)
{
@ -66,18 +66,18 @@ int mca_memheap_base_alloc_init(mca_memheap_map_t *map, size_t size)
switch (value) {
case 0:
/* use sysv alloc without hugepages */
ret = __shm_attach(s, size, 0, 1);
ret = _shm_attach(s, size, 0, 1);
break;
case 1:
ret = __shm_attach(s, size, 1, 1);
ret = _shm_attach(s, size, 1, 1);
if (OSHMEM_SUCCESS != ret)
ret = __shm_attach(s, size, 0, 1);
ret = _shm_attach(s, size, 0, 1);
break;
case 2:
/* huge pages only */
ret = __shm_attach(s, size, 1, 1);
ret = _shm_attach(s, size, 1, 1);
if (OSHMEM_SUCCESS != ret)
MEMHEAP_ERROR("FAILED to allocated symmetric heap using hugepages fallback is disabled, errno=%d",
errno);
@ -85,7 +85,7 @@ int mca_memheap_base_alloc_init(mca_memheap_map_t *map, size_t size)
case 3:
/* huge pages only + cleanup shmid */
ret = __shm_attach(s, size, 1, 0);
ret = _shm_attach(s, size, 1, 0);
if (OSHMEM_SUCCESS != ret)
MEMHEAP_ERROR("FAILED to allocated symmetric heap using hugepages fallback is disabled, errno=%d",
errno);
@ -93,31 +93,31 @@ int mca_memheap_base_alloc_init(mca_memheap_map_t *map, size_t size)
case 4:
/* use sysv alloc without hugepages */
ret = __shm_attach(s, size, 0, 0);
ret = _shm_attach(s, size, 0, 0);
break;
#if defined(MPAGE_ENABLE) && (MPAGE_ENABLE > 0)
case 5:
/* use shared memory registration (mpages) */
ret = __ibv_attach(s, size);
ret = _ibv_attach(s, size);
if (OSHMEM_SUCCESS != ret)
ret = __shm_attach(s, size, 0, 1);
ret = _shm_attach(s, size, 0, 1);
break;
#endif /* MPAGE_ENABLE */
case 100:
/* use mmap. It will severaly impact performance of intra node communication */
ret = __mmap_attach(s, size);
ret = _mmap_attach(s, size);
MEMHEAP_VERBOSE(1,
"mmap() memheap allocation will severely impact performance of intra node communication");
break;
case 101:
ret = __shm_attach(s, size, 1, 1);
ret = _shm_attach(s, size, 1, 1);
if (OSHMEM_SUCCESS != ret) {
MEMHEAP_ERROR("Failed to allocate hugepages. Falling back on regular allocation");
ret = __mmap_attach(s, size);
ret = _mmap_attach(s, size);
} else {
s->shmid = MEMHEAP_SHM_INVALID;
}
@ -125,7 +125,7 @@ int mca_memheap_base_alloc_init(mca_memheap_map_t *map, size_t size)
break;
case 102:
ret = __shm_attach(s, size, 1, 1);
ret = _shm_attach(s, size, 1, 1);
if (OSHMEM_SUCCESS != ret) {
MEMHEAP_ERROR("FAILED to allocated symmetric heap using hugepages fallback is disabled, errno=%d",
errno);
@ -135,7 +135,7 @@ int mca_memheap_base_alloc_init(mca_memheap_map_t *map, size_t size)
break;
default:
ret = __adaptive_attach(s, size);
ret = _adaptive_attach(s, size);
}
if (OSHMEM_SUCCESS == ret) {
@ -157,16 +157,16 @@ void mca_memheap_base_alloc_exit(mca_memheap_map_t *map)
switch (s->type) {
case MAP_SEGMENT_ALLOC_SHM:
__shm_detach(s);
_shm_detach(s);
break;
case MAP_SEGMENT_ALLOC_MMAP:
__mmap_detach(s);
_mmap_detach(s);
break;
#if defined(MPAGE_ENABLE) && (MPAGE_ENABLE > 0)
case MAP_SEGMENT_ALLOC_IBV:
__ibv_detach(s);
_ibv_detach(s);
break;
#endif /* MPAGE_ENABLE */
@ -176,34 +176,34 @@ void mca_memheap_base_alloc_exit(mca_memheap_map_t *map)
}
}
static int __adaptive_attach(map_segment_t *s, size_t size)
static int _adaptive_attach(map_segment_t *s, size_t size)
{
int rc = OSHMEM_SUCCESS;
#if defined(MPAGE_ENABLE) && (MPAGE_ENABLE > 0)
rc = __ibv_attach(s, size);
rc = _ibv_attach(s, size);
#endif /* MPAGE_ENABLE */
if (rc) {
rc = __shm_attach(s, size, 1, 1);
rc = _shm_attach(s, size, 1, 1);
}
if (rc) {
rc = __shm_attach(s, size, 0, 1);
rc = _shm_attach(s, size, 0, 1);
}
if (rc) {
rc = __shm_attach(s, size, 0, 0);
rc = _shm_attach(s, size, 0, 0);
}
if (rc) {
rc = __mmap_attach(s, size);
rc = _mmap_attach(s, size);
}
return rc;
}
static int __shm_attach(map_segment_t *s, size_t size, int use_hp, int do_rmid)
static int _shm_attach(map_segment_t *s, size_t size, int use_hp, int do_rmid)
{
static int shm_context = 0;
;
@ -245,13 +245,13 @@ static int __shm_attach(map_segment_t *s, size_t size, int use_hp, int do_rmid)
s->shmid = shmid;
s->start = addr;
s->size = size;
s->end = (void *) (((unsigned char *)s->start) + s->size);
s->end = s->start + s->size;
s->context = &shm_context;
return OSHMEM_SUCCESS;
}
static void __shm_detach(map_segment_t *s)
static void _shm_detach(map_segment_t *s)
{
assert(s);
@ -268,7 +268,7 @@ static void __shm_detach(map_segment_t *s)
}
}
static int __mmap_attach(map_segment_t *s, size_t size)
static int _mmap_attach(map_segment_t *s, size_t size)
{
void *addr = NULL;
@ -297,13 +297,13 @@ MAP_ANONYMOUS |
s->shmid = MEMHEAP_SHM_INVALID;
s->start = addr;
s->size = size;
s->end = (void *) (((unsigned char *)s->start) + s->size);
s->end = s->start + s->size;
s->context = NULL;
return OSHMEM_SUCCESS;
}
static void __mmap_detach(map_segment_t *s)
static void _mmap_detach(map_segment_t *s)
{
assert(s);
@ -312,7 +312,7 @@ static void __mmap_detach(map_segment_t *s)
#if defined(MPAGE_ENABLE) && (MPAGE_ENABLE > 0)
static int __ibv_attach(map_segment_t *s, size_t size)
static int _ibv_attach(map_segment_t *s, size_t size)
{
int rc = OSHMEM_SUCCESS;
static openib_device_t memheap_device;
@ -482,7 +482,7 @@ static int __ibv_attach(map_segment_t *s, size_t size)
return rc;
}
static void __ibv_detach(map_segment_t *s)
static void _ibv_detach(map_segment_t *s)
{
int rc = OSHMEM_SUCCESS;
openib_device_t *device = NULL;
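Besides the renames, the hunks in this file drop the explicit casts when computing a segment's end address and now write simply s->end = s->start + s->size. That form is only portable if the segment's start/end fields are character-typed pointers (arithmetic on void * is a GCC extension), so the change presumably goes together with a matching type for map_segment_t. A minimal sketch under that assumption, with a hypothetical struct:

#include <stddef.h>

/* hypothetical segment descriptor; the field types are an assumption made
 * for this sketch, not taken from the real map_segment_t */
typedef struct {
    unsigned char *start;
    unsigned char *end;
    size_t         size;
} seg_sketch_t;

void seg_set_end(seg_sketch_t *s)
{
    /* old style, needed when start was effectively untyped:
     *     s->end = (void *) (((unsigned char *) s->start) + s->size);
     * new style, valid because start is a byte-typed pointer: */
    s->end = s->start + s->size;
}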

View file

@ -71,7 +71,7 @@ static inline void* __seg2base_va(int seg)
return memheap_map->mem_segs[seg].start;
}
static int __seg_cmp(const void *k, const void *v)
static int _seg_cmp(const void *k, const void *v)
{
uintptr_t va = (uintptr_t) k;
map_segment_t *s = (map_segment_t *) v;
@ -96,7 +96,7 @@ static inline map_segment_t *__find_va(const void* va)
&memheap_map->mem_segs[SYMB_SEG_INDEX],
memheap_map->n_segments - 1,
sizeof(*s),
__seg_cmp);
_seg_cmp);
}
#if MEMHEAP_BASE_DEBUG == 1
@ -528,8 +528,8 @@ static inline void* va2rva(void* va,
void* local_base,
void* remote_base)
{
return (void*) (remote_base > local_base ? (uintptr_t)va + (((uintptr_t)remote_base) - ((uintptr_t)local_base)) :
(uintptr_t)va - (((uintptr_t)remote_base) - ((uintptr_t)local_base)));
return (void*) (remote_base > local_base ? (uintptr_t)va + (remote_base - local_base) :
(uintptr_t)va - (local_base - remote_base));
}
mca_spml_mkey_t * mca_memheap_base_get_cached_mkey(int pe,
@ -599,8 +599,7 @@ uint64_t mca_memheap_base_find_offset(int pe,
s = __find_va(va);
return ((s && s->is_active) ?
(uint64_t) ((uintptr_t)rva - (uintptr_t)s->mkeys_cache[pe][tr_id].va_base) : 0);
return ((s && s->is_active) ? (rva - s->mkeys_cache[pe][tr_id].va_base) : 0);
}
int mca_memheap_base_is_symmetric_addr(const void* va)
@ -619,11 +618,10 @@ int mca_memheap_base_detect_addr_type(void* va)
if (s->type == MAP_SEGMENT_STATIC) {
addr_type = ADDR_STATIC;
} else if ((uintptr_t)va >= (uintptr_t) s->start
&& (uintptr_t)va < (uintptr_t) (((size_t)s->start) + mca_memheap.memheap_size)) {
&& (uintptr_t)va < (uintptr_t) (s->start + mca_memheap.memheap_size)) {
addr_type = ADDR_USER;
} else {
assert( (uintptr_t)va >= (uintptr_t)(((size_t)s->start) + mca_memheap.memheap_size)
&& (uintptr_t)va < (uintptr_t)s->end);
assert( (uintptr_t)va >= (uintptr_t)(s->start + mca_memheap.memheap_size) && (uintptr_t)va < (uintptr_t)s->end);
addr_type = ADDR_PRIVATE;
}
}
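The va2rva() change above keeps the same address translation but drops the redundant casts: a symmetric-heap address on the local PE maps into a remote PE's address space by shifting it by the difference between the two segment base addresses (for example, if the segment starts at 0x1000 locally and at 0x3000 on the peer, local 0x1234 maps to remote 0x3234). A hedged stand-alone sketch of the same computation, with invented names:

#include <stdint.h>

/* translate a local virtual address into a peer's address space given the
 * local and remote base addresses of the same segment; mirrors the logic
 * of va2rva() above but is not the project's code */
void *local_to_remote(void *va, void *local_base, void *remote_base)
{
    if ((uintptr_t) remote_base > (uintptr_t) local_base) {
        return (void *) ((uintptr_t) va +
                         ((uintptr_t) remote_base - (uintptr_t) local_base));
    }
    return (void *) ((uintptr_t) va -
                     ((uintptr_t) local_base - (uintptr_t) remote_base));
}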

View file

@ -15,8 +15,8 @@
#include <stdio.h>
static int __dereg_segment(map_segment_t *s);
static int __reg_segment(map_segment_t *s, int *num_btl);
static int _dereg_segment(map_segment_t *s);
static int _reg_segment(map_segment_t *s, int *num_btl);
int mca_memheap_base_reg(mca_memheap_map_t *memheap_map)
{
@ -31,10 +31,10 @@ int mca_memheap_base_reg(mca_memheap_map_t *memheap_map)
i,
s->start,
s->end,
(long long)(((uintptr_t)s->end) - ((uintptr_t)s->start)),
(long long)(s->end - s->start),
s->type,
s->shmid);
ret = __reg_segment(s, &memheap_map->num_transports);
ret = _reg_segment(s, &memheap_map->num_transports);
}
return ret;
@ -56,14 +56,14 @@ int mca_memheap_base_dereg(mca_memheap_map_t *memheap_map)
i,
s->start,
s->end,
(long long)(((uintptr_t)s->end) - ((uintptr_t)s->start))),
ret = __dereg_segment(s);
(long long)(s->end - s->start));
ret = _dereg_segment(s);
}
return ret;
}
static int __dereg_segment(map_segment_t *s)
static int _dereg_segment(map_segment_t *s)
{
int rc = OSHMEM_SUCCESS;
int j;
@ -92,7 +92,7 @@ static int __dereg_segment(map_segment_t *s)
return rc;
}
static int __reg_segment(map_segment_t *s, int *num_btl)
static int _reg_segment(map_segment_t *s, int *num_btl)
{
int rc = OSHMEM_SUCCESS;
int my_pe;
@ -110,7 +110,7 @@ static int __reg_segment(map_segment_t *s, int *num_btl)
if (!rc) {
s->mkeys = MCA_SPML_CALL(register((void *)(unsigned long)s->start,
(long long)((uintptr_t)s->end) - ((uintptr_t)s->start),
s->end - s->start,
MEMHEAP_SHM_CODE(s->type, s->shmid),
num_btl));
if (NULL == s->mkeys) {

View file

@ -33,7 +33,7 @@ mca_memheap_base_module_t mca_memheap;
* be pointed to by mca_memheap_base_module_t.
*/
static memheap_context_t* __memheap_create(void);
static memheap_context_t* _memheap_create(void);
/**
* Choose to init one component with the highest priority.
@ -54,7 +54,7 @@ int mca_memheap_base_select()
char** include = opal_argv_split(mca_memheap_base_include, ',');
char** exclude = opal_argv_split(mca_memheap_base_exclude, ',');
context = __memheap_create();
context = _memheap_create();
if (!context) {
return OSHMEM_ERROR;
}
@ -203,7 +203,7 @@ static size_t memheap_size(void)
return (size_t) memheap_align(size * factor);
}
static memheap_context_t* __memheap_create(void)
static memheap_context_t* _memheap_create(void)
{
int rc = OSHMEM_SUCCESS;
static memheap_context_t context;

View file

@ -35,10 +35,10 @@ typedef struct memheap_static_context {
static memheap_static_context_t memheap_context;
static int __load_segments(void);
static int __check_perms(struct map_segment_desc *seg);
static int __check_address(struct map_segment_desc *seg);
static int __check_pathname(struct map_segment_desc *seg);
static int _load_segments(void);
static int _check_perms(struct map_segment_desc *seg);
static int _check_address(struct map_segment_desc *seg);
static int _check_pathname(struct map_segment_desc *seg);
int mca_memheap_base_static_init(mca_memheap_map_t *map)
{
@ -48,7 +48,7 @@ int mca_memheap_base_static_init(mca_memheap_map_t *map)
assert(map);
assert(SYMB_SEG_INDEX <= map->n_segments);
ret = __load_segments();
ret = _load_segments();
if (OSHMEM_SUCCESS == ret) {
int i;
@ -62,12 +62,12 @@ int mca_memheap_base_static_init(mca_memheap_map_t *map)
s->shmid = MEMHEAP_SHM_INVALID;
s->start = memheap_context.mem_segs[i].start;
s->end = memheap_context.mem_segs[i].end;
s->size = (size_t) (((uintptr_t)s->end) - ((uintptr_t)s->start));
s->size = s->end - s->start;
s->type = MAP_SEGMENT_STATIC;
s->context = NULL;
map->n_segments++;
total_mem += (size_t) (((uintptr_t)s->end) - ((uintptr_t)s->start));
total_mem += s->end - s->start;
}
MEMHEAP_VERBOSE(1,
"Memheap static memory: %llu byte(s), %d segments",
@ -82,7 +82,7 @@ void mca_memheap_base_static_exit(mca_memheap_map_t *map)
assert(map);
}
static int __check_perms(struct map_segment_desc *seg)
static int _check_perms(struct map_segment_desc *seg)
{
if (!strcmp(seg->perms, "rw-p") || !strcmp(seg->perms, "rwxp"))
return OSHMEM_SUCCESS;
@ -90,7 +90,7 @@ static int __check_perms(struct map_segment_desc *seg)
return OSHMEM_ERROR;
}
static int __check_address(struct map_segment_desc *seg)
static int _check_address(struct map_segment_desc *seg)
{
extern unsigned _end;
void* data_end = &_end;
@ -113,7 +113,7 @@ static int __check_address(struct map_segment_desc *seg)
return OSHMEM_SUCCESS;
}
static int __check_pathname(struct map_segment_desc *seg)
static int _check_pathname(struct map_segment_desc *seg)
{
/* Probably we need to check found path but
* To press check coverity issue following code is disabled
@ -156,7 +156,7 @@ static int __check_pathname(struct map_segment_desc *seg)
return OSHMEM_SUCCESS;
}
static int __load_segments(void)
static int _load_segments(void)
{
FILE *fp;
char line[1024];
@ -182,13 +182,13 @@ static int __load_segments(void)
(long long *) &seg.inode,
seg.pathname);
if (OSHMEM_ERROR == __check_address(&seg))
if (OSHMEM_ERROR == _check_address(&seg))
continue;
if (OSHMEM_ERROR == __check_pathname(&seg))
if (OSHMEM_ERROR == _check_pathname(&seg))
continue;
if (OSHMEM_ERROR == __check_perms(&seg))
if (OSHMEM_ERROR == _check_perms(&seg))
continue;
MEMHEAP_VERBOSE(5, "add: %s", line);
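For context, _load_segments() above scans /proc/self/maps and keeps only the mappings that pass the address, pathname and permission checks (the perms check accepts "rw-p" and "rwxp"). A minimal, hedged sketch of parsing one maps line; the field layout follows the documented /proc format, but the helper name and filter are made up for the example:

#include <stdio.h>

/* one /proc/self/maps line looks like:
 *   start-end perms offset dev inode pathname
 * return 1 for a private writable mapping, 0 otherwise */
int maps_line_is_private_rw(const char *line)
{
    unsigned long start, end, offset, inode;
    char perms[8], dev[16], pathname[1024];

    pathname[0] = '\0';
    if (sscanf(line, "%lx-%lx %7s %lx %15s %lu %1023s",
               &start, &end, perms, &offset, dev, &inode, pathname) < 6) {
        return 0;   /* malformed or truncated line */
    }
    return perms[0] == 'r' && perms[1] == 'w' && perms[3] == 'p';
}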

View file

@ -18,7 +18,7 @@ static int mca_memheap_buddy_component_close(void);
static mca_memheap_base_module_t* mca_memheap_buddy_component_init(memheap_context_t *,
int *);
static int __basic_open(void);
static int _basic_open(void);
mca_memheap_base_component_t mca_memheap_buddy_component = {
{
@ -29,7 +29,7 @@ mca_memheap_base_component_t mca_memheap_buddy_component = {
OSHMEM_MINOR_VERSION, /* MCA component minor version */
OSHMEM_RELEASE_VERSION, /* MCA component release version */
__basic_open,
_basic_open,
mca_memheap_buddy_component_close,
NULL
},
@ -41,7 +41,7 @@ mca_memheap_base_component_t mca_memheap_buddy_component = {
};
/* Open component */
static int __basic_open(void)
static int _basic_open(void)
{
return OSHMEM_SUCCESS;
}

View file

@ -18,7 +18,7 @@ static int mca_memheap_ptmalloc_component_close(void);
static mca_memheap_base_module_t* mca_memheap_ptmalloc_component_init(memheap_context_t *,
int *);
static int __basic_open(void);
static int _basic_open(void);
mca_memheap_base_component_t mca_memheap_ptmalloc_component = {
{
@ -29,7 +29,7 @@ mca_memheap_base_component_t mca_memheap_ptmalloc_component = {
OSHMEM_MINOR_VERSION, /* MCA component minor version */
OSHMEM_RELEASE_VERSION, /* MCA component release version */
__basic_open,
_basic_open,
mca_memheap_ptmalloc_component_close,
NULL
},
@ -41,7 +41,7 @@ mca_memheap_base_component_t mca_memheap_ptmalloc_component = {
};
/* Open component */
static int __basic_open(void)
static int _basic_open(void)
{
return OSHMEM_SUCCESS;
}

View file

@ -21,14 +21,14 @@
#include "oshmem/proc/proc.h"
#include "scoll_basic.h"
static int __algorithm_central_counter(struct oshmem_group_t *group,
static int _algorithm_central_counter(struct oshmem_group_t *group,
long *pSync);
static int __algorithm_tournament(struct oshmem_group_t *group, long *pSync);
static int __algorithm_recursive_doubling(struct oshmem_group_t *group,
static int _algorithm_tournament(struct oshmem_group_t *group, long *pSync);
static int _algorithm_recursive_doubling(struct oshmem_group_t *group,
long *pSync);
static int __algorithm_dissemination(struct oshmem_group_t *group, long *pSync);
static int __algorithm_basic(struct oshmem_group_t *group, long *pSync);
static int __algorithm_adaptive(struct oshmem_group_t *group, long *pSync);
static int _algorithm_dissemination(struct oshmem_group_t *group, long *pSync);
static int _algorithm_basic(struct oshmem_group_t *group, long *pSync);
static int _algorithm_adaptive(struct oshmem_group_t *group, long *pSync);
int mca_scoll_basic_barrier(struct oshmem_group_t *group, long *pSync, int alg)
{
@ -47,37 +47,37 @@ int mca_scoll_basic_barrier(struct oshmem_group_t *group, long *pSync, int alg)
switch (alg) {
case SCOLL_ALG_BARRIER_CENTRAL_COUNTER:
{
rc = __algorithm_central_counter(group, pSync);
rc = _algorithm_central_counter(group, pSync);
break;
}
case SCOLL_ALG_BARRIER_TOURNAMENT:
{
rc = __algorithm_tournament(group, pSync);
rc = _algorithm_tournament(group, pSync);
break;
}
case SCOLL_ALG_BARRIER_RECURSIVE_DOUBLING:
{
rc = __algorithm_recursive_doubling(group, pSync);
rc = _algorithm_recursive_doubling(group, pSync);
break;
}
case SCOLL_ALG_BARRIER_DISSEMINATION:
{
rc = __algorithm_dissemination(group, pSync);
rc = _algorithm_dissemination(group, pSync);
break;
}
case SCOLL_ALG_BARRIER_BASIC:
{
rc = __algorithm_basic(group, pSync);
rc = _algorithm_basic(group, pSync);
break;
}
case SCOLL_ALG_BARRIER_ADAPTIVE:
{
rc = __algorithm_adaptive(group, pSync);
rc = _algorithm_adaptive(group, pSync);
break;
}
default:
{
rc = __algorithm_recursive_doubling(group, pSync);
rc = _algorithm_recursive_doubling(group, pSync);
}
}
} else {
@ -90,14 +90,14 @@ int mca_scoll_basic_barrier(struct oshmem_group_t *group, long *pSync, int alg)
}
/*
This algorithm is quite simple and straightforward. But because of its obvious simplicity and
the naive prove for correctness it is implemented quite often. One node asks peers if they are
achieve barrier state. When all processors are ready it signals to go ahead.
Outlay:
NP-1 competing network transfers are needed to implement the counter
The memory usage is constant (1 byte) per node.
*/
static int __algorithm_central_counter(struct oshmem_group_t *group,
static int _algorithm_central_counter(struct oshmem_group_t *group,
long *pSync)
{
int rc = OSHMEM_SUCCESS;
@ -203,7 +203,7 @@ static int __algorithm_central_counter(struct oshmem_group_t *group,
Outlay:
The game scales with log2(NP) and uses 1 byte of memory.
*/
static int __algorithm_tournament(struct oshmem_group_t *group, long *pSync)
static int _algorithm_tournament(struct oshmem_group_t *group, long *pSync)
{
int rc = OSHMEM_SUCCESS;
int round = 0;
@ -303,7 +303,7 @@ static int __algorithm_tournament(struct oshmem_group_t *group, long *pSync)
Outlay:
The algorithm uses a maximum of log2(NP) + 2 network writes and P bytes memory per node.
*/
static int __algorithm_recursive_doubling(struct oshmem_group_t *group,
static int _algorithm_recursive_doubling(struct oshmem_group_t *group,
long *pSync)
{
int rc = OSHMEM_SUCCESS;
@ -441,7 +441,7 @@ static int __algorithm_recursive_doubling(struct oshmem_group_t *group,
Outlay:
The game scales with log2(NP) and uses 1 byte of memory.
*/
static int __algorithm_dissemination(struct oshmem_group_t *group, long *pSync)
static int _algorithm_dissemination(struct oshmem_group_t *group, long *pSync)
{
int rc = OSHMEM_SUCCESS;
int round = 0;
@ -499,7 +499,7 @@ static int __algorithm_dissemination(struct oshmem_group_t *group, long *pSync)
return rc;
}
static int __algorithm_basic(struct oshmem_group_t *group, long *pSync)
static int _algorithm_basic(struct oshmem_group_t *group, long *pSync)
{
int rc = OSHMEM_SUCCESS;
int root_id = 0;
@ -549,7 +549,7 @@ static int __algorithm_basic(struct oshmem_group_t *group, long *pSync)
return rc;
}
static int __algorithm_adaptive(struct oshmem_group_t *group, long *pSync)
static int _algorithm_adaptive(struct oshmem_group_t *group, long *pSync)
{
int rc = OSHMEM_SUCCESS;
bool local_peers_only = true;
@ -576,9 +576,9 @@ static int __algorithm_adaptive(struct oshmem_group_t *group, long *pSync)
* otherwise use put/get way
*/
if (local_peers_only || (group->proc_count < 32)) {
rc = __algorithm_basic(group, pSync);
rc = _algorithm_basic(group, pSync);
} else {
rc = __algorithm_recursive_doubling(group, pSync);
rc = _algorithm_recursive_doubling(group, pSync);
}
return rc;
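The comment blocks above describe the barrier variants this component implements (central counter, tournament, recursive doubling, dissemination, basic, adaptive); recursive doubling is also the default case. For illustration only, here is a hedged sketch of a recursive-doubling barrier written against the public OpenSHMEM point-to-point API rather than the internal SPML calls the component actually uses; it assumes a power-of-two number of PEs and a symmetric flag array:

#include <shmem.h>

static int round_flag[32];   /* symmetric; one flag per round */

/* recursive doubling: log2(NP) rounds, in round r each PE signals its
 * partner (pe XOR 2^r) and waits for the matching signal */
void rd_barrier_sketch(void)
{
    int me   = shmem_my_pe();
    int npes = shmem_n_pes();   /* assumed to be a power of two */
    int r;

    for (r = 0; (1 << r) < npes; r++) {
        int peer = me ^ (1 << r);
        shmem_int_p(&round_flag[r], 1, peer);                 /* signal partner */
        shmem_int_wait_until(&round_flag[r], SHMEM_CMP_EQ, 1);
        round_flag[r] = 0;   /* naive reset; real code would use sense
                                reversal to stay safe across repeated calls */
    }
}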

View file

@ -22,13 +22,13 @@
#include "oshmem/mca/scoll/base/base.h"
#include "scoll_basic.h"
static int __algorithm_central_counter(struct oshmem_group_t *group,
static int _algorithm_central_counter(struct oshmem_group_t *group,
int PE_root,
void *target,
const void *source,
size_t nlong,
long *pSync);
static int __algorithm_binomial_tree(struct oshmem_group_t *group,
static int _algorithm_binomial_tree(struct oshmem_group_t *group,
int PE_root,
void *target,
const void *source,
@ -61,7 +61,7 @@ int mca_scoll_basic_broadcast(struct oshmem_group_t *group,
switch (alg) {
case SCOLL_ALG_BROADCAST_CENTRAL_COUNTER:
{
rc = __algorithm_central_counter(group,
rc = _algorithm_central_counter(group,
PE_root,
target,
source,
@ -71,7 +71,7 @@ int mca_scoll_basic_broadcast(struct oshmem_group_t *group,
}
case SCOLL_ALG_BROADCAST_BINOMIAL:
{
rc = __algorithm_binomial_tree(group,
rc = _algorithm_binomial_tree(group,
PE_root,
target,
source,
@ -81,7 +81,7 @@ int mca_scoll_basic_broadcast(struct oshmem_group_t *group,
}
default:
{
rc = __algorithm_binomial_tree(group,
rc = _algorithm_binomial_tree(group,
PE_root,
target,
source,
@ -107,13 +107,13 @@ int mca_scoll_basic_broadcast(struct oshmem_group_t *group,
}
/*
This algorithm is quite simple and straightforward. But because of its obvious simplicity and
the naive prove for correctness it is implemented quite often. The root send data to all.
Outlay:
NP-1 competing network transfers are needed to implement the counter
The memory usage is constant (1 byte) per node.
*/
static int __algorithm_central_counter(struct oshmem_group_t *group,
static int _algorithm_central_counter(struct oshmem_group_t *group,
int PE_root,
void *target,
const void *source,
@ -164,7 +164,7 @@ static int __algorithm_central_counter(struct oshmem_group_t *group,
Outlay:
The game scales with log2(NP) and uses 1 byte of memory.
*/
static int __algorithm_binomial_tree(struct oshmem_group_t *group,
static int _algorithm_binomial_tree(struct oshmem_group_t *group,
int PE_root,
void *target,
const void *source,
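As with the barrier file, the broadcast comments above describe a central-counter variant and a binomial tree that completes in log2(NP) rounds. A hedged sketch of the binomial tree against the public OpenSHMEM API (root fixed at PE 0, buf assumed to be a symmetric address; names invented for the example):

#include <stddef.h>
#include <shmem.h>

static long bcast_flag = 0;   /* symmetric "payload has arrived" flag */

/* binomial tree: in round r every PE that already holds the data forwards
 * it to PE (me + 2^r); finishes in ceil(log2(NP)) rounds */
void binomial_bcast_sketch(void *buf, size_t nbytes)
{
    int me   = shmem_my_pe();
    int npes = shmem_n_pes();
    int r, have = (me == 0);

    for (r = 0; (1 << r) < npes; r++) {
        if (have && me + (1 << r) < npes) {
            shmem_putmem(buf, buf, nbytes, me + (1 << r));   /* push the data   */
            shmem_fence();                                   /* order data ...  */
            shmem_long_p(&bcast_flag, 1, me + (1 << r));     /* ... before flag */
        } else if (!have && me < (1 << (r + 1))) {
            shmem_long_wait_until(&bcast_flag, SHMEM_CMP_EQ, 1);
            have = 1;
        }
    }
    bcast_flag = 0;   /* naive reset; reuse across calls needs care */
}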

View file

@ -20,27 +20,27 @@
#include "oshmem/mca/scoll/base/base.h"
#include "scoll_basic.h"
static int __algorithm_central_collector(struct oshmem_group_t *group,
static int _algorithm_central_collector(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,
long *pSync);
static int __algorithm_f_central_counter(struct oshmem_group_t *group,
static int _algorithm_f_central_counter(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,
long *pSync);
static int __algorithm_f_tournament(struct oshmem_group_t *group,
static int _algorithm_f_tournament(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,
long *pSync);
static int __algorithm_f_recursive_doubling(struct oshmem_group_t *group,
static int _algorithm_f_recursive_doubling(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,
long *pSync);
static int __algorithm_f_ring(struct oshmem_group_t *group,
static int _algorithm_f_ring(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,
@ -72,7 +72,7 @@ int mca_scoll_basic_collect(struct oshmem_group_t *group,
switch (alg) {
case SCOLL_ALG_COLLECT_CENTRAL_COUNTER:
{
rc = __algorithm_f_central_counter(group,
rc = _algorithm_f_central_counter(group,
target,
source,
nlong,
@ -81,7 +81,7 @@ int mca_scoll_basic_collect(struct oshmem_group_t *group,
}
case SCOLL_ALG_COLLECT_TOURNAMENT:
{
rc = __algorithm_f_tournament(group,
rc = _algorithm_f_tournament(group,
target,
source,
nlong,
@ -90,7 +90,7 @@ int mca_scoll_basic_collect(struct oshmem_group_t *group,
}
case SCOLL_ALG_COLLECT_RECURSIVE_DOUBLING:
{
rc = __algorithm_f_recursive_doubling(group,
rc = _algorithm_f_recursive_doubling(group,
target,
source,
nlong,
@ -99,7 +99,7 @@ int mca_scoll_basic_collect(struct oshmem_group_t *group,
}
case SCOLL_ALG_COLLECT_RING:
{
rc = __algorithm_f_ring(group,
rc = _algorithm_f_ring(group,
target,
source,
nlong,
@ -108,7 +108,7 @@ int mca_scoll_basic_collect(struct oshmem_group_t *group,
}
default:
{
rc = __algorithm_f_central_counter(group,
rc = _algorithm_f_central_counter(group,
target,
source,
nlong,
@ -116,7 +116,7 @@ int mca_scoll_basic_collect(struct oshmem_group_t *group,
}
}
} else {
rc = __algorithm_central_collector(group,
rc = _algorithm_central_collector(group,
target,
source,
nlong,
@ -141,7 +141,7 @@ int mca_scoll_basic_collect(struct oshmem_group_t *group,
Outlay:
NP-1 competing network transfers are needed.
*/
static int __algorithm_f_central_counter(struct oshmem_group_t *group,
static int _algorithm_f_central_counter(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,
@ -201,7 +201,7 @@ static int __algorithm_f_central_counter(struct oshmem_group_t *group,
return rc;
}
static int __algorithm_f_tournament(struct oshmem_group_t *group,
static int _algorithm_f_tournament(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,
@ -311,7 +311,7 @@ static int __algorithm_f_tournament(struct oshmem_group_t *group,
return rc;
}
static int __algorithm_f_ring(struct oshmem_group_t *group,
static int _algorithm_f_ring(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,
@ -370,7 +370,7 @@ static int __algorithm_f_ring(struct oshmem_group_t *group,
return rc;
}
static int __algorithm_f_recursive_doubling(struct oshmem_group_t *group,
static int _algorithm_f_recursive_doubling(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,
@ -525,7 +525,7 @@ static int __algorithm_f_recursive_doubling(struct oshmem_group_t *group,
Outlay:
NP-1 competing network transfers are needed.
*/
static int __algorithm_central_collector(struct oshmem_group_t *group,
static int _algorithm_central_collector(struct oshmem_group_t *group,
void *target,
const void *source,
size_t nlong,

View file

@ -21,35 +21,35 @@
#include "oshmem/mca/scoll/base/base.h"
#include "scoll_basic.h"
static int __algorithm_central_counter(struct oshmem_group_t *group,
static int _algorithm_central_counter(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,
size_t nlong,
long *pSync,
void *pWrk);
static int __algorithm_tournament(struct oshmem_group_t *group,
static int _algorithm_tournament(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,
size_t nlong,
long *pSync,
void *pWrk);
static int __algorithm_recursive_doubling(struct oshmem_group_t *group,
static int _algorithm_recursive_doubling(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,
size_t nlong,
long *pSync,
void *pWrk);
static int __algorithm_linear(struct oshmem_group_t *group,
static int _algorithm_linear(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,
size_t nlong,
long *pSync,
void *pWrk);
static int __algorithm_log(struct oshmem_group_t *group,
static int _algorithm_log(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,
@ -84,7 +84,7 @@ int mca_scoll_basic_reduce(struct oshmem_group_t *group,
switch (alg) {
case SCOLL_ALG_REDUCE_CENTRAL_COUNTER:
{
rc = __algorithm_central_counter(group,
rc = _algorithm_central_counter(group,
op,
target,
source,
@ -95,7 +95,7 @@ int mca_scoll_basic_reduce(struct oshmem_group_t *group,
}
case SCOLL_ALG_REDUCE_TOURNAMENT:
{
rc = __algorithm_tournament(group,
rc = _algorithm_tournament(group,
op,
target,
source,
@ -106,7 +106,7 @@ int mca_scoll_basic_reduce(struct oshmem_group_t *group,
}
case SCOLL_ALG_REDUCE_RECURSIVE_DOUBLING:
{
rc = __algorithm_recursive_doubling(group,
rc = _algorithm_recursive_doubling(group,
op,
target,
source,
@ -117,7 +117,7 @@ int mca_scoll_basic_reduce(struct oshmem_group_t *group,
}
case SCOLL_ALG_REDUCE_LEGACY_LINEAR:
{
rc = __algorithm_linear(group,
rc = _algorithm_linear(group,
op,
target,
source,
@ -128,7 +128,7 @@ int mca_scoll_basic_reduce(struct oshmem_group_t *group,
}
case SCOLL_ALG_REDUCE_LEGACY_LOG:
{
rc = __algorithm_log(group,
rc = _algorithm_log(group,
op,
target,
source,
@ -139,7 +139,7 @@ int mca_scoll_basic_reduce(struct oshmem_group_t *group,
}
default:
{
rc = __algorithm_central_counter(group,
rc = _algorithm_central_counter(group,
op,
target,
source,
@ -171,7 +171,7 @@ int mca_scoll_basic_reduce(struct oshmem_group_t *group,
Outlay:
NP-1 competing network transfers are needed.
*/
static int __algorithm_central_counter(struct oshmem_group_t *group,
static int _algorithm_central_counter(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,
@ -243,7 +243,7 @@ static int __algorithm_central_counter(struct oshmem_group_t *group,
return rc;
}
static int __algorithm_tournament(struct oshmem_group_t *group,
static int _algorithm_tournament(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,
@ -365,7 +365,7 @@ static int __algorithm_tournament(struct oshmem_group_t *group,
return rc;
}
static int __algorithm_recursive_doubling(struct oshmem_group_t *group,
static int _algorithm_recursive_doubling(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,
@ -539,7 +539,7 @@ static int __algorithm_recursive_doubling(struct oshmem_group_t *group,
return rc;
}
static int __algorithm_linear(struct oshmem_group_t *group,
static int _algorithm_linear(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,
@ -641,7 +641,7 @@ static int __algorithm_linear(struct oshmem_group_t *group,
return rc;
}
static int __algorithm_log(struct oshmem_group_t *group,
static int _algorithm_log(struct oshmem_group_t *group,
struct oshmem_op_t *op,
void *target,
const void *source,

View file

@ -27,7 +27,7 @@
static const int root_id = 0;
#define __INTERNAL_BARRIER_FROM_SCOLL_BASIC 1
static int __internal_barrier(mca_scoll_fca_module_t *fca_module)
static int _internal_barrier(mca_scoll_fca_module_t *fca_module)
{
#if !__INTERNAL_BARRIER_FROM_SCOLL_BASIC
struct oshmem_group_t *group = fca_module->comm;
@ -121,7 +121,7 @@ static int have_remote_peers(struct oshmem_group_t *group,
* * Fills local rank information in fca_module.
* */
static int __get_local_ranks(mca_scoll_fca_module_t *fca_module)
static int _get_local_ranks(mca_scoll_fca_module_t *fca_module)
{
struct oshmem_group_t *comm = fca_module->comm;
oshmem_proc_t* proc;
@ -163,7 +163,7 @@ static int __get_local_ranks(mca_scoll_fca_module_t *fca_module)
return OSHMEM_SUCCESS;
}
static int __fca_comm_new(mca_scoll_fca_module_t *fca_module)
static int _fca_comm_new(mca_scoll_fca_module_t *fca_module)
{
struct oshmem_group_t *comm = fca_module->comm;
fca_comm_new_spec_t spec;
@ -191,7 +191,7 @@ static int __fca_comm_new(mca_scoll_fca_module_t *fca_module)
for (i = 0; i < comm->proc_count; i++) {
mca_scoll_fca_component.rcounts[i] = -1;
}
__internal_barrier(fca_module);
_internal_barrier(fca_module);
MCA_SPML_CALL(put((void *)&mca_scoll_fca_component.rcounts[my_id], (size_t)sizeof(info_size), (void *)&info_size, root_pe));
if (root_pe == comm->my_pe) {
@ -223,7 +223,7 @@ static int __fca_comm_new(mca_scoll_fca_module_t *fca_module)
my_info,
info_size);
}
__internal_barrier(fca_module);
_internal_barrier(fca_module);
if (root_pe == comm->my_pe) {
for (i = 0; i < comm->proc_count; i++) {
if (mca_scoll_fca_component.rcounts[i] > 0) {
@ -260,14 +260,14 @@ static int __fca_comm_new(mca_scoll_fca_module_t *fca_module)
free(all_info);
}
__internal_barrier(fca_module);
_internal_barrier(fca_module);
if (root_pe != comm->my_pe) {
MCA_SPML_CALL(get((void *)mca_scoll_fca_component.ret,sizeof(int), (void *)mca_scoll_fca_component.ret, root_pe));
}
/* Examine comm_new return value */
__internal_barrier(fca_module);
_internal_barrier(fca_module);
if (*mca_scoll_fca_component.ret < 0) {
FCA_ERROR("rank %i: COMM_NEW failed: %s",
fca_module->rank, fca_strerror(*mca_scoll_fca_component.ret));
@ -286,12 +286,12 @@ static int __fca_comm_new(mca_scoll_fca_module_t *fca_module)
sizeof(fca_module->fca_comm_desc));
}
__internal_barrier(fca_module);
_internal_barrier(fca_module);
if (root_pe != comm->my_pe) {
MCA_SPML_CALL(get((void *)mca_scoll_fca_component.fca_comm_desc_exchangeable, sizeof(fca_module->fca_comm_desc), (void *)&fca_module->fca_comm_desc, root_pe));
}
__internal_barrier(fca_module);
_internal_barrier(fca_module);
}
FCA_MODULE_VERBOSE(fca_module,
@ -301,12 +301,12 @@ static int __fca_comm_new(mca_scoll_fca_module_t *fca_module)
return OSHMEM_SUCCESS;
}
static int __create_fca_comm(mca_scoll_fca_module_t *fca_module)
static int _create_fca_comm(mca_scoll_fca_module_t *fca_module)
{
int comm_size;
int rc, ret;
rc = __fca_comm_new(fca_module);
rc = _fca_comm_new(fca_module);
if (rc != OSHMEM_SUCCESS)
return rc;
@ -346,7 +346,7 @@ static int __create_fca_comm(mca_scoll_fca_module_t *fca_module)
return OSHMEM_SUCCESS;
}
static void __destroy_fca_comm(mca_scoll_fca_module_t *fca_module)
static void _destroy_fca_comm(mca_scoll_fca_module_t *fca_module)
{
int ret;
struct oshmem_group_t *comm = fca_module->comm;
@ -377,7 +377,7 @@ static void __destroy_fca_comm(mca_scoll_fca_module_t *fca_module)
OBJ_RETAIN(fca_module->previous_ ## __api ## _module);\
} while(0)
static int __save_coll_handlers(mca_scoll_fca_module_t *fca_module)
static int _save_coll_handlers(mca_scoll_fca_module_t *fca_module)
{
struct oshmem_group_t *comm = fca_module->comm;
@ -406,15 +406,15 @@ static int mca_scoll_fca_module_enable(mca_scoll_base_module_t *module,
if (rc != OSHMEM_SUCCESS)
goto exit_fatal;
rc = __save_coll_handlers(fca_module);
rc = _save_coll_handlers(fca_module);
if (rc != OSHMEM_SUCCESS)
goto exit_fatal;
rc = __get_local_ranks(fca_module);
rc = _get_local_ranks(fca_module);
if (rc != OSHMEM_SUCCESS)
goto exit_fatal;
rc = __create_fca_comm(fca_module);
rc = _create_fca_comm(fca_module);
if (rc != OSHMEM_SUCCESS)
goto exit_fatal;
@ -456,7 +456,7 @@ static void mca_scoll_fca_module_destruct(mca_scoll_fca_module_t *fca_module)
OBJ_RELEASE(fca_module->previous_collect_module);
OBJ_RELEASE(fca_module->previous_reduce_module);
if (fca_module->fca_comm)
__destroy_fca_comm(fca_module);
_destroy_fca_comm(fca_module);
free(fca_module->local_ranks);
mca_scoll_fca_module_clear(fca_module);
}

View file

@ -58,7 +58,7 @@
#include "oshmem/shmem/shmem_lock.h"
#include "oshmem/runtime/oshmem_shmem_preconnect.h"
static int __shmem_finalize(void);
static int _shmem_finalize(void);
int oshmem_shmem_finalize(void)
{
@ -68,7 +68,7 @@ int oshmem_shmem_finalize(void)
if (opal_atomic_cmpset_32(&finalize_has_already_started, 0, 1)
&& oshmem_shmem_initialized && !oshmem_shmem_aborted) {
/* Should be called first because ompi_mpi_finalize makes orte and opal finalization */
ret = __shmem_finalize();
ret = _shmem_finalize();
if ((OSHMEM_SUCCESS == ret) && ompi_mpi_initialized
&& !ompi_mpi_finalized) {
@ -83,7 +83,7 @@ int oshmem_shmem_finalize(void)
return ret;
}
static int __shmem_finalize(void)
static int _shmem_finalize(void)
{
int ret = OSHMEM_SUCCESS;

View file

@ -179,7 +179,7 @@ opal_hash_table_t ompi_mpi_f90_integer_hashtable;
opal_hash_table_t ompi_mpi_f90_real_hashtable;
opal_hash_table_t ompi_mpi_f90_complex_hashtable;
static int __shmem_init(int argc, char **argv, int requested, int *provided);
static int _shmem_init(int argc, char **argv, int requested, int *provided);
#if OSHMEM_OPAL_THREAD_ENABLE
static void* shmem_opal_thread(void* argc)
@ -220,11 +220,9 @@ int oshmem_shmem_init(int argc, char **argv, int requested, int *provided)
ret = ompi_mpi_init(argc, argv, requested, provided);
}
if (OSHMEM_SUCCESS == ret) {
ret = __shmem_init(argc, argv, requested, provided);
ret = _shmem_init(argc, argv, requested, provided);
}
MPI_Barrier(MPI_COMM_WORLD);
if (OSHMEM_SUCCESS == ret) {
oshmem_shmem_initialized = true;
@ -310,7 +308,7 @@ int oshmem_shmem_preconnect_all_finalize(void)
return OSHMEM_SUCCESS;
}
static int __shmem_init(int argc, char **argv, int requested, int *provided)
static int _shmem_init(int argc, char **argv, int requested, int *provided)
{
int ret = OSHMEM_SUCCESS;
char *error = NULL;

View file

@ -21,7 +21,7 @@
#include "oshmem/proc/proc.h"
#include "oshmem/proc/proc_group_cache.h"
static void __shmem_broadcast(void *target,
static void _shmem_broadcast(void *target,
const void *source,
size_t nbytes,
int PE_root,
@ -44,12 +44,12 @@ static void __shmem_broadcast(void *target,
RUNTIME_CHECK_ADDR(target); \
RUNTIME_CHECK_ADDR(source); \
\
__shmem_broadcast( target, source, nelems * element_size, \
_shmem_broadcast( target, source, nelems * element_size, \
PE_root, PE_start, logPE_stride, PE_size, \
pSync); \
}
static void __shmem_broadcast(void *target,
static void _shmem_broadcast(void *target,
const void *source,
size_t nbytes,
int PE_root,

View file

@ -21,7 +21,7 @@
#include "oshmem/proc/proc.h"
#include "oshmem/proc/proc_group_cache.h"
static void __shmem_collect(void *target,
static void _shmem_collect(void *target,
const void *source,
size_t nbytes,
int PE_start,
@ -43,13 +43,13 @@ static void __shmem_collect(void *target,
RUNTIME_CHECK_ADDR(target); \
RUNTIME_CHECK_ADDR(source); \
\
__shmem_collect( target, source, nelems * element_size, \
_shmem_collect( target, source, nelems * element_size, \
PE_start, logPE_stride, PE_size, \
pSync, \
nelems_type); \
}
static void __shmem_collect(void *target,
static void _shmem_collect(void *target,
const void *source,
size_t nbytes,
int PE_start,