
Merge pull request #908 from igor-ivanov/pr/oshmem-check

Recovering oshmem functionality
This commit is contained in:
Mike Dubman 2015-09-21 19:50:24 +03:00
parents 60c2b0df48 7de0537a1d
commit 23c41a0320
15 changed files with 116 additions and 725 deletions

View File

@@ -42,7 +42,7 @@
#include "ompi/runtime/mpiruntime.h"
#include "ompi/runtime/params.h"
static opal_list_t ompi_proc_list;
opal_list_t ompi_proc_list;
static opal_mutex_t ompi_proc_lock;
static opal_hash_table_t ompi_proc_hash;

View File

@@ -68,6 +68,8 @@ struct ompi_proc_t {
/* endpoint data */
void *proc_endpoints[OMPI_PROC_ENDPOINT_TAG_MAX];
char padding[16]; /* for future extensions (OSHMEM uses this area also)*/
};
typedef struct ompi_proc_t ompi_proc_t;
OBJ_CLASS_DECLARATION(ompi_proc_t);
@@ -83,7 +85,7 @@ OBJ_CLASS_DECLARATION(ompi_proc_t);
* Please use ompi_proc_local() instead.
*/
OMPI_DECLSPEC extern ompi_proc_t* ompi_proc_local_proc;
OMPI_DECLSPEC extern opal_list_t ompi_proc_list;
/* ******************************************************************** */
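
This hunk drops the static qualifier so ompi_proc_list is exported, letting the OSHMEM layer reuse OMPI's process list instead of maintaining a duplicate. A minimal sketch of what the exported symbol enables on the OSHMEM side (the helper name is illustrative, not part of the commit):

#include "opal/class/opal_list.h"
#include "ompi/proc/proc.h"   /* now declares: OMPI_DECLSPEC extern opal_list_t ompi_proc_list */

/* Illustrative only: with the list exported, OSHMEM code can ask OMPI how
 * many procs it already tracks instead of keeping a second list in sync. */
static inline int example_oshmem_world_size(void)
{
    return (int) opal_list_get_size(&ompi_proc_list);
}

This is the same fallback that the reworked oshmem_num_procs() uses in the proc.h hunk further down.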

View File

@@ -1,7 +1,7 @@
# -*- shell-script -*-
#
#
# Copyright (c) 2013 Mellanox Technologies, Inc.
# Copyright (c) 2013-2015 Mellanox Technologies, Inc.
# All rights reserved.
# $COPYRIGHT$
#
@@ -10,6 +10,10 @@
# $HEADER$
#
#
dist_oshmemdata_DATA = \
help-oshmem-scoll-fca.txt
AM_CPPFLAGS = $(coll_fca_CPPFLAGS) -DCOLL_FCA_HOME=\"$(coll_fca_HOME)\" $(coll_fca_extra_CPPFLAGS)
scoll_fca_sources = \
scoll_fca.h \

View File

@@ -0,0 +1,16 @@
#
# Copyright (c) 2015 Mellanox Technologies, Inc.
# All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
[module_enable:fatal]
scoll:fca module reports issue during module enabling phase.
Try to use scoll:fca component with another one
for example scoll:basic
Error: %s
#

View File

@@ -14,6 +14,8 @@
#include "scoll_fca.h"
#include <stdio.h>
#include <unistd.h>
#include "opal/util/show_help.h"
#include "oshmem/constants.h"
#include "oshmem/mca/scoll/scoll.h"
#include "oshmem/mca/scoll/base/base.h"
@@ -430,6 +432,10 @@ static int mca_scoll_fca_module_enable(mca_scoll_base_module_t *module,
* So different frameworks will be used for collective ops
*/
FCA_ERROR("FCA module enable failed - aborting to prevent inconsistent application state");
/* There's no modules available */
opal_show_help("help-oshmem-scoll-fca.txt",
"module_enable:fatal", true,
"FCA module enable failed - aborting to prevent inconsistent application state");
oshmem_shmem_abort(-1);
return OMPI_ERROR;
}
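
The help file added above is consumed through opal_show_help(): the base file name (installed via dist_oshmemdata_DATA in the Makefile.am hunk) plus the [module_enable:fatal] topic select the message block, the bool requests the standard error header, and the trailing string fills the %s placeholder. A minimal sketch of the call shape, mirroring the call added in this hunk:

#include "opal/util/show_help.h"

/* file base name, topic inside that file, want_error_header, then one
 * vararg per format specifier in the selected template (%s here) */
opal_show_help("help-oshmem-scoll-fca.txt", "module_enable:fatal", true,
               "FCA module enable failed - aborting to prevent inconsistent application state");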

View File

@@ -1,4 +1,4 @@
# Copyright (c) 2013 Mellanox Technologies, Inc.
# Copyright (c) 2013-2015 Mellanox Technologies, Inc.
# All rights reserved.
# $COPYRIGHT$
#
@@ -7,6 +7,10 @@
# $HEADER$
#
#
dist_oshmemdata_DATA = \
help-oshmem-scoll-mpi.txt
scoll_mpi_sources = \
scoll_mpi.h \
scoll_mpi_debug.h \

View File

@@ -0,0 +1,16 @@
#
# Copyright (c) 2015 Mellanox Technologies, Inc.
# All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
[module_enable:fatal]
scoll:mpi module reports issue during module enabling phase.
Try to use scoll:mpi component with another one
for example scoll:basic
Error: %s
#

View File

@@ -12,7 +12,10 @@
#include "ompi_config.h"
#include "scoll_mpi.h"
#include "opal/util/show_help.h"
#include "oshmem/proc/proc.h"
#include "oshmem/runtime/runtime.h"
#include "ompi/mca/coll/base/base.h"
int mca_scoll_mpi_init_query(bool enable_progress_threads, bool enable_mpi_threads)
@@ -76,7 +79,13 @@ static int mca_scoll_mpi_module_enable(mca_scoll_base_module_t *module,
{
if (OSHMEM_SUCCESS != mca_scoll_mpi_save_coll_handlers(module, osh_group)){
MPI_COLL_ERROR("scoll_mpi: mca_coll_mpi_save_coll_handlers failed");
MPI_COLL_ERROR("MPI module enable failed - aborting to prevent inconsistent application state");
/* There's no modules available */
opal_show_help("help-oshmem-scoll-mpi.txt",
"module_enable:fatal", true,
"MPI module enable failed - aborting to prevent inconsistent application state");
oshmem_shmem_abort(-1);
return OSHMEM_ERROR;
}
@@ -113,8 +122,6 @@ mca_scoll_mpi_comm_query(oshmem_group_t *osh_group, int *priority)
if (NULL == oshmem_group_all) {
osh_group->ompi_comm = &(ompi_mpi_comm_world.comm);
} else {
int my_rank = MPI_UNDEFINED;
err = ompi_comm_group(&(ompi_mpi_comm_world.comm), &parent_group);
if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
return NULL;
@@ -134,10 +141,6 @@ mca_scoll_mpi_comm_query(oshmem_group_t *osh_group, int *priority)
break;
}
}
/* NTH: keep track of my rank in the new group for the workaround below */
if (ranks[i] == ompi_comm_rank (&ompi_mpi_comm_world.comm)) {
my_rank = i;
}
}
err = ompi_group_incl(parent_group, osh_group->proc_count, ranks, &new_group);
@@ -145,15 +148,6 @@ mca_scoll_mpi_comm_query(oshmem_group_t *osh_group, int *priority)
free(ranks);
return NULL;
}
/* NTH: XXX -- WORKAROUND -- The oshmem code overwrites ompi_proc_local_proc with its
* own proc but does not update the proc list in comm world or comm self. This causes
* the code in ompi_group_incl that updates grp_my_rank to fail. This will cause failures
* here and when an application attempts to mix oshmem and mpi so it will really need to
* be fixed in oshmem/proc and not here. For now we need to work around a new jenkins
* failure so set my group ranking so we do not crash when running ompi_comm_create_group. */
new_group->grp_my_rank = my_rank;
err = ompi_comm_create_group(&(ompi_mpi_comm_world.comm), new_group, tag, &newcomm);
if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
free(ranks);
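
With ompi_proc_local_proc no longer overwritten by an OSHMEM proc (that overwrite is removed from _shmem_init() near the end of this commit), ompi_group_incl() resolves grp_my_rank on its own, so the NTH workaround and the my_rank bookkeeping can be dropped. Reassembled from this hunk, the group construction reduces to the sketch below (error handling abbreviated):

err = ompi_group_incl(parent_group, osh_group->proc_count, ranks, &new_group);
if (OPAL_UNLIKELY(OMPI_SUCCESS != err)) {
    free(ranks);
    return NULL;
}
/* grp_my_rank is now valid here without the manual fix-up */
err = ompi_comm_create_group(&(ompi_mpi_comm_world.comm), new_group, tag, &newcomm);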

View File

@@ -660,7 +660,7 @@ sshmem_mkey_t *mca_spml_ikrit_register(void* addr,
}
SPML_VERBOSE(5,
"rank %d ptl %d addr %p size %llu %s",
oshmem_proc_local_proc->super.proc_name.vpid, i, addr, (unsigned long long)size,
oshmem_proc_pe(oshmem_proc_local()), i, addr, (unsigned long long)size,
mca_spml_base_mkey2str(&mkeys[i]));
}

View File

@@ -166,15 +166,6 @@ extern int mca_spml_ikrit_del_procs(oshmem_proc_t** procs, size_t nprocs);
extern int mca_spml_ikrit_fence(void);
extern int spml_ikrit_progress(void);
static inline oshmem_proc_t *mca_spml_ikrit_proc_find(int dst)
{
orte_process_name_t name;
name.jobid = ORTE_PROC_MY_NAME->jobid;
name.vpid = dst;
return oshmem_proc_find(&name);
}
END_C_DECLS
#endif
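
The component-local mca_spml_ikrit_proc_find() wrapper is removed; the same jobid/vpid lookup is now provided by the generic oshmem_proc_find(int pe) inline added to oshmem/proc/proc.h later in this commit. A hedged sketch of how a former call site reads after the change (the helper name below is illustrative):

#include "oshmem/proc/proc.h"

/* Illustrative replacement for the removed ikrit-local wrapper: the shared
 * oshmem_proc_find() builds the process name and performs the lookup. */
static inline oshmem_proc_t *example_ikrit_peer(int dst)
{
    return oshmem_proc_find(dst);
}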

View File

@@ -452,7 +452,7 @@ sshmem_mkey_t *mca_spml_yoda_register(void* addr,
SPML_VERBOSE(5,
"rank %d btl %s va_base: 0x%p len: %d key %llx size %llu",
OSHMEM_PROC_VPID(oshmem_proc_local_proc), btl_type2str(ybtl->btl_type),
oshmem_proc_pe(oshmem_proc_local()), btl_type2str(ybtl->btl_type),
mkeys[i].va_base, mkeys[i].len, (unsigned long long)mkeys[i].u.key, (unsigned long long)size);
}
*count = mca_spml_yoda.n_btls;
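
Both SPML register paths (ikrit above, yoda here) now obtain the local PE through accessors rather than dereferencing the removed oshmem_proc_local_proc global. A minimal sketch of the new pattern (the helper name is illustrative):

#include "oshmem/proc/proc.h"

/* Illustrative: OSHMEM_PROC_VPID(oshmem_proc_local_proc) becomes the
 * accessor pair below, which routes through the shared OMPI proc object. */
static inline int example_my_pe(void)
{
    return oshmem_proc_pe(oshmem_proc_local());
}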

View File

@@ -33,518 +33,25 @@
#include "opal/util/arch.h"
#include "opal/class/opal_list.h"
#include "ompi/proc/proc.h"
opal_convertor_t* oshmem_shmem_local_convertor = NULL;
opal_list_t oshmem_proc_list = {{0}};
static opal_mutex_t oshmem_proc_lock;
oshmem_proc_t* oshmem_proc_local_proc = NULL;
static void oshmem_proc_construct(oshmem_proc_t* proc);
static void oshmem_proc_destruct(oshmem_proc_t* proc);
OBJ_CLASS_INSTANCE( oshmem_proc_t,
opal_list_item_t,
oshmem_proc_construct,
oshmem_proc_destruct);
void oshmem_proc_construct(oshmem_proc_t* proc)
{
memset(proc->proc_endpoints, 0, sizeof(proc->proc_endpoints));
/* By default all processors are supposedly having the same architecture as me. Thus,
* by default we run in a homogeneous environment. Later, when the RTE can tell us
* the arch of the remote nodes, we will have to set the convertors to the correct
* architecture.
*/
proc->super.proc_arch = opal_local_arch;
proc->super.proc_convertor = oshmem_shmem_local_convertor;
OBJ_RETAIN( oshmem_shmem_local_convertor);
proc->super.proc_flags = 0;
proc->num_transports = 0;
/* initialize this pointer to NULL */
proc->super.proc_hostname = NULL;
}
void oshmem_proc_destruct(oshmem_proc_t* proc)
{
/* As all the convertors are created with OBJ_NEW we can just call OBJ_RELEASE. All, except
* the local convertor, will get destroyed at some point here. If the reference count is correct
* the local convertor (who has the reference count increased in the datatype) will not get
* destroyed here. It will be destroyed later when the ompi_datatype_finalize is called.
*/
OBJ_RELEASE(proc->super.proc_convertor);
/* DO NOT FREE THE HOSTNAME FIELD AS THIS POINTS
* TO AN AREA ALLOCATED/FREE'D ELSEWHERE
*/
OPAL_THREAD_LOCK(&oshmem_proc_lock);
opal_list_remove_item(&oshmem_proc_list, (opal_list_item_t*) proc);
OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
}
int oshmem_proc_init(void)
{
orte_vpid_t i;
OBJ_CONSTRUCT(&oshmem_proc_list, opal_list_t);
OBJ_CONSTRUCT(&oshmem_proc_lock, opal_mutex_t);
oshmem_shmem_local_convertor = opal_convertor_create(opal_local_arch, 0);
size_t ompi_num_procs;
ompi_proc_t **ompi_procs = ompi_proc_world(&ompi_num_procs);
/* create proc structures and find self */
for (i = 0; i < orte_process_info.num_procs; i++) {
oshmem_proc_t *proc = OBJ_NEW(oshmem_proc_t);
opal_list_append(&oshmem_proc_list, (opal_list_item_t*)proc);
proc->super.proc_name = ompi_procs[i]->super.proc_name;
proc->super.proc_arch = ompi_procs[i]->super.proc_arch;
proc->super.proc_flags = ompi_procs[i]->super.proc_flags;
proc->super.proc_hostname = ompi_procs[i]->super.proc_hostname;
if (i == ORTE_PROC_MY_NAME->vpid) {
oshmem_proc_local_proc = proc;
}
}
if (ompi_procs)
free(ompi_procs);
assert(sizeof(ompi_proc_t) >= sizeof(oshmem_proc_t));
return OSHMEM_SUCCESS;
}
/* in some cases, all PE procs are required to do a modex so they
* can (at the least) exchange their architecture. Since we cannot
* know in advance if this was required, we provide a separate function
* to set the arch (instead of doing it inside of oshmem_proc_init) that
* can be called after the modex completes in oshmem_shmem_init. Thus, we
* know that - regardless of how the arch is known, whether via modex
* or dropped in from a local daemon - the arch can be set correctly
* at this time
*/
int oshmem_proc_set_arch(void)
{
oshmem_proc_t *proc = NULL;
opal_list_item_t *item = NULL;
int ret = OSHMEM_SUCCESS;
OPAL_THREAD_LOCK(&oshmem_proc_lock);
for (item = opal_list_get_first(&oshmem_proc_list);
item != opal_list_get_end(&oshmem_proc_list);
item = opal_list_get_next(item)) {
proc = (oshmem_proc_t*) item;
if (OSHMEM_PROC_VPID(proc) != ORTE_PROC_MY_NAME->vpid) {
/* if arch is different than mine, create a new convertor for this proc */
if (proc->super.proc_arch != opal_local_arch) {
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
OBJ_RELEASE(proc->super.proc_convertor);
proc->super.proc_convertor = opal_convertor_create(proc->super.proc_arch, 0);
#else
orte_show_help("help-shmem-runtime.txt",
"heterogeneous-support-unavailable",
true,
orte_process_info.nodename,
proc->super.proc_hostname == NULL ?
"<hostname unavailable>" :
proc->super.proc_hostname);
OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
return OSHMEM_ERR_NOT_SUPPORTED;
#endif
}
}
}
/* Set predefined groups */
ret = oshmem_proc_group_init();
OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
return ret;
}
int oshmem_proc_finalize(void)
{
opal_list_item_t *item;
/* Destroy all groups */
oshmem_proc_group_finalize();
/* remove all items from list and destroy them. Since we cannot know
* the reference count of the procs for certain, it is possible that
* a single OBJ_RELEASE won't drive the count to zero, and hence will
* not release the memory. Accordingly, we cycle through the list here,
* calling release on each item.
*
* This will cycle until it forces the reference count of each item
* to zero, thus causing the destructor to run - which will remove
* the item from the list!
*
* We cannot do this under the thread lock as the destructor will
* call it when removing the item from the list. However, this function
* is ONLY called from MPI_Finalize, and all threads are prohibited from
* calling an MPI function once ANY thread has called MPI_Finalize. Of
* course, multiple threads are allowed to call MPI_Finalize, so this
* function may get called multiple times by various threads. We believe
* it is thread safe to do so...though it may not -appear- to be so
* without walking through the entire list/destructor sequence.
*/
while (opal_list_get_end(&oshmem_proc_list)
!= (item = opal_list_get_first(&oshmem_proc_list))) {
OBJ_RELEASE(item);
}
OBJ_RELEASE( oshmem_shmem_local_convertor);
/* now destruct the list and thread lock */
OBJ_DESTRUCT(&oshmem_proc_list);
OBJ_DESTRUCT(&oshmem_proc_lock);
return OSHMEM_SUCCESS;
}
oshmem_proc_t** oshmem_proc_world(size_t *size)
{
oshmem_proc_t **procs;
oshmem_proc_t *proc;
size_t count = 0;
orte_ns_cmp_bitmask_t mask;
orte_process_name_t my_name;
/* check bozo case */
if (NULL == oshmem_proc_local_proc) {
return NULL ;
}
mask = ORTE_NS_CMP_JOBID;
my_name = *(orte_process_name_t*)&oshmem_proc_local_proc->super.proc_name;
/* First count how many match this jobid */
OPAL_THREAD_LOCK(&oshmem_proc_lock);
for (proc = (oshmem_proc_t*) opal_list_get_first(&oshmem_proc_list);
proc != (oshmem_proc_t*) opal_list_get_end(&oshmem_proc_list);
proc = (oshmem_proc_t*) opal_list_get_next(proc)) {
if (OPAL_EQUAL
== orte_util_compare_name_fields(mask,
(orte_process_name_t*)&proc->super.proc_name,
&my_name)) {
++count;
}
}
/* allocate an array */
procs = (oshmem_proc_t**) malloc(count * sizeof(oshmem_proc_t*));
if (NULL == procs) {
OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
return NULL ;
}
/* now save only the procs that match this jobid */
count = 0;
for (proc = (oshmem_proc_t*) opal_list_get_first(&oshmem_proc_list);
proc != (oshmem_proc_t*) opal_list_get_end(&oshmem_proc_list);
proc = (oshmem_proc_t*) opal_list_get_next(proc)) {
if (OPAL_EQUAL
== orte_util_compare_name_fields(mask,
(orte_process_name_t*)&proc->super.proc_name,
&my_name)) {
/* DO NOT RETAIN THIS OBJECT - the reference count on this
* object will be adjusted by external callers. The intent
* here is to allow the reference count to drop to zero if
* the app no longer desires to communicate with this proc.
* For example, the proc may call comm_disconnect on all
* communicators involving this proc. In such cases, we want
* the proc object to be removed from the list. By not incrementing
* the reference count here, we allow this to occur.
*
* We don't implement that yet, but we are still safe for now as
* the OBJ_NEW in oshmem_proc_init owns the initial reference
* count which cannot be released until oshmem_proc_finalize is
* called.
*/
procs[count++] = proc;
}
} OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
*size = count;
return procs;
}
oshmem_proc_t** oshmem_proc_all(size_t* size)
{
oshmem_proc_t **procs =
(oshmem_proc_t**) malloc(opal_list_get_size(&oshmem_proc_list)
* sizeof(oshmem_proc_t*));
oshmem_proc_t *proc;
size_t count = 0;
if (NULL == procs) {
return NULL ;
}
OPAL_THREAD_LOCK(&oshmem_proc_lock);
for (proc = (oshmem_proc_t*) opal_list_get_first(&oshmem_proc_list);
proc && (proc != (oshmem_proc_t*) opal_list_get_end(&oshmem_proc_list));
proc = (oshmem_proc_t*)opal_list_get_next(proc)) {
/* We know this isn't consistent with the behavior in oshmem_proc_world,
* but we are leaving the RETAIN for now because the code using this function
* assumes that the results need to be released when done. It will
* be cleaned up later as the "fix" will impact other places in
* the code
*/
OBJ_RETAIN(proc);
procs[count++] = proc;
}
OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
*size = count;
return procs;
}
oshmem_proc_t** oshmem_proc_self(size_t* size)
{
oshmem_proc_t **procs = (oshmem_proc_t**) malloc(sizeof(oshmem_proc_t*));
if (NULL == procs) {
return NULL ;
}
/* We know this isn't consistent with the behavior in oshmem_proc_world,
* but we are leaving the RETAIN for now because the code using this function
* assumes that the results need to be released when done. It will
* be cleaned up later as the "fix" will impact other places in
* the code
*/
OBJ_RETAIN(oshmem_proc_local_proc);
*procs = oshmem_proc_local_proc;
*size = 1;
return procs;
}
oshmem_proc_t * oshmem_proc_find(const orte_process_name_t * name)
{
oshmem_proc_t *proc, *rproc = NULL;
orte_ns_cmp_bitmask_t mask;
/* return the proc-struct which matches this jobid+process id */
mask = ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
OPAL_THREAD_LOCK(&oshmem_proc_lock);
for (proc = (oshmem_proc_t*) opal_list_get_first(&oshmem_proc_list);
proc != (oshmem_proc_t*) opal_list_get_end(&oshmem_proc_list);
proc = (oshmem_proc_t*) opal_list_get_next(proc)) {
if (OPAL_EQUAL
== orte_util_compare_name_fields(mask,
(orte_process_name_t*)&proc->super.proc_name,
name)) {
rproc = proc;
break;
}
} OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
return rproc;
}
int oshmem_proc_pack(oshmem_proc_t **proclist,
int proclistsize,
opal_buffer_t* buf)
{
int i, rc;
OPAL_THREAD_LOCK(&oshmem_proc_lock);
/* cycle through the provided array, packing the OSHMEM level
* data for each proc. This data may or may not be included
* in any subsequent modex operation, so we include it here
* to ensure completion of a connect/accept handshake. See
* the ompi/mca/dpm framework for an example of where and how
* this info is used.
*
* Eventually, we will review the procedures that call this
* function to see if duplication of communication can be
* reduced. For now, just go ahead and pack the info so it
* can be sent.
*/
for (i = 0; i < proclistsize; i++) {
rc = opal_dss.pack(buf, &(proclist[i]->super.proc_name), 1, ORTE_NAME);
if (rc != ORTE_SUCCESS) {
ORTE_ERROR_LOG(rc);
OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
return rc;
}
rc = opal_dss.pack(buf, &(proclist[i]->super.proc_arch), 1, OPAL_UINT32);
if (rc != ORTE_SUCCESS) {
ORTE_ERROR_LOG(rc);
OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
return rc;
}
rc = opal_dss.pack(buf, &(proclist[i]->super.proc_hostname), 1, OPAL_STRING);
if (rc != ORTE_SUCCESS) {
ORTE_ERROR_LOG(rc);
OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
return rc;
}
} OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
return OSHMEM_SUCCESS;
}
static oshmem_proc_t *
oshmem_proc_find_and_add(const orte_process_name_t * name, bool* isnew)
{
oshmem_proc_t *proc, *rproc = NULL;
orte_ns_cmp_bitmask_t mask;
/* return the proc-struct which matches this jobid+process id */
mask = ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
OPAL_THREAD_LOCK(&oshmem_proc_lock);
for (proc = (oshmem_proc_t*) opal_list_get_first(&oshmem_proc_list);
proc != (oshmem_proc_t*) opal_list_get_end(&oshmem_proc_list);
proc = (oshmem_proc_t*) opal_list_get_next(proc)) {
if (OPAL_EQUAL
== orte_util_compare_name_fields(mask,
(orte_process_name_t*)&proc->super.proc_name,
name)) {
rproc = proc;
*isnew = false;
break;
}
}
/* if we didn't find this proc in the list, create a new
* proc_t and append it to the list
*/
if (NULL == rproc) {
*isnew = true;
rproc = OBJ_NEW(oshmem_proc_t);
if (NULL != rproc) {
opal_list_append(&oshmem_proc_list, (opal_list_item_t*)rproc);
rproc->super.proc_name = *(opal_process_name_t*)name;
}
/* caller had better fill in the rest of the proc, or there's
going to be pain later... */
}
OPAL_THREAD_UNLOCK(&oshmem_proc_lock);
return rproc;
}
int oshmem_proc_unpack(opal_buffer_t* buf,
int proclistsize,
oshmem_proc_t ***proclist,
int *newproclistsize,
oshmem_proc_t ***newproclist)
{
int i;
size_t newprocs_len = 0;
oshmem_proc_t **plist = NULL, **newprocs = NULL;
/* do not free plist *ever*, since it is used in the remote group
structure of a communicator */
plist = (oshmem_proc_t **) calloc(proclistsize, sizeof(oshmem_proc_t *));
if (NULL == plist) {
return OSHMEM_ERR_OUT_OF_RESOURCE;
}
/* free this on the way out */
newprocs = (oshmem_proc_t **) calloc(proclistsize, sizeof(oshmem_proc_t *));
if (NULL == newprocs) {
free(plist);
return OSHMEM_ERR_OUT_OF_RESOURCE;
}
/* cycle through the array of provided procs and unpack
* their info - as packed by oshmem_proc_pack
*/
for (i = 0; i < proclistsize; i++) {
orte_std_cntr_t count = 1;
orte_process_name_t new_name;
uint32_t new_arch;
char *new_hostname;
bool isnew = false;
int rc;
rc = opal_dss.unpack(buf, &new_name, &count, ORTE_NAME);
if (rc != ORTE_SUCCESS) {
ORTE_ERROR_LOG(rc);
free(plist);
free(newprocs);
return rc;
}
rc = opal_dss.unpack(buf, &new_arch, &count, OPAL_UINT32);
if (rc != ORTE_SUCCESS) {
ORTE_ERROR_LOG(rc);
free(plist);
free(newprocs);
return rc;
}
rc = opal_dss.unpack(buf, &new_hostname, &count, OPAL_STRING);
if (rc != ORTE_SUCCESS) {
ORTE_ERROR_LOG(rc);
free(plist);
free(newprocs);
return rc;
}
/* see if this proc is already on our oshmem_proc_list */
plist[i] = oshmem_proc_find_and_add(&new_name, &isnew);
if (isnew) {
/* if not, then it was added, so update the values
* in the proc_t struct with the info that was passed
* to us
*/
newprocs[newprocs_len++] = plist[i];
/* update all the values */
plist[i]->super.proc_arch = new_arch;
/* if arch is different than mine, create a new convertor for this proc */
if (plist[i]->super.proc_arch != opal_local_arch) {
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
OBJ_RELEASE(plist[i]->super.proc_convertor);
plist[i]->super.proc_convertor = opal_convertor_create(plist[i]->super.proc_arch, 0);
#else
orte_show_help("help-shmem-runtime.txt",
"heterogeneous-support-unavailable",
true,
orte_process_info.nodename,
new_hostname == NULL ? "<hostname unavailable>" :
new_hostname);
free(plist);
free(newprocs);
return OSHMEM_ERR_NOT_SUPPORTED;
#endif
}
if (0
== strcmp(oshmem_proc_local_proc->super.proc_hostname,
new_hostname)) {
plist[i]->super.proc_flags |= (OPAL_PROC_ON_NODE | OPAL_PROC_ON_CU
| OPAL_PROC_ON_CLUSTER);
}
/* Save the hostname */
plist[i]->super.proc_hostname = new_hostname;
/* eventually, we will update the orte/mca/ess framework's data
* to contain the info for the new proc. For now, we ignore
* this step since the MPI layer already has all the info
* it requires
*/
}
}
if (NULL != newproclistsize)
*newproclistsize = newprocs_len;
if (NULL != newproclist) {
*newproclist = newprocs;
} else if (newprocs != NULL ) {
free(newprocs);
}
*proclist = plist;
return OSHMEM_SUCCESS;
}
opal_pointer_array_t oshmem_group_array = {{0}};
oshmem_group_t* oshmem_group_all = NULL;
@@ -553,8 +60,15 @@ oshmem_group_t* oshmem_group_null = NULL;
OBJ_CLASS_INSTANCE(oshmem_group_t, opal_object_t, NULL, NULL);
OSHMEM_DECLSPEC int oshmem_proc_group_init(void)
int oshmem_proc_group_init(void)
{
if (orte_process_info.num_procs != opal_list_get_size(&ompi_proc_list)) {
opal_output(0,
"Error: oshmem_group_all is not created: orte_process_info.num_procs = %d ompi_proc_list = %d",
orte_process_info.num_procs,
opal_list_get_size(&ompi_proc_list));
return OSHMEM_ERROR;
}
/* Setup communicator array */
OBJ_CONSTRUCT(&oshmem_group_array, opal_pointer_array_t);
@@ -571,14 +85,14 @@ OSHMEM_DECLSPEC int oshmem_proc_group_init(void)
== (oshmem_group_all =
oshmem_proc_group_create(0,
1,
opal_list_get_size(&oshmem_proc_list)))) {
oshmem_num_procs()))) {
oshmem_proc_group_destroy(oshmem_group_all);
return OSHMEM_ERROR;
}
/* Setup SHMEM_GROUP_SELF */
if (NULL
== (oshmem_group_self = oshmem_proc_group_create(OSHMEM_PROC_VPID(oshmem_proc_local()),
== (oshmem_group_self = oshmem_proc_group_create(oshmem_proc_pe(oshmem_proc_local()),
0,
1))) {
oshmem_proc_group_destroy(oshmem_group_self);
@@ -591,7 +105,7 @@ OSHMEM_DECLSPEC int oshmem_proc_group_init(void)
return OSHMEM_SUCCESS;
}
OSHMEM_DECLSPEC int oshmem_proc_group_finalize(void)
int oshmem_proc_group_finalize(void)
{
int max, i;
oshmem_group_t *group;
@@ -613,9 +127,9 @@ OSHMEM_DECLSPEC int oshmem_proc_group_finalize(void)
return OSHMEM_SUCCESS;
}
OSHMEM_DECLSPEC oshmem_group_t* oshmem_proc_group_create(int pe_start,
int pe_stride,
size_t pe_size)
oshmem_group_t* oshmem_proc_group_create(int pe_start,
int pe_stride,
size_t pe_size)
{
int cur_pe, count_pe;
int i;
@@ -623,6 +137,8 @@ OSHMEM_DECLSPEC oshmem_group_t* oshmem_proc_group_create(int pe_start,
oshmem_proc_t** proc_array = NULL;
oshmem_proc_t* proc = NULL;
assert(oshmem_proc_local());
group = OBJ_NEW(oshmem_group_t);
if (group) {
@@ -638,12 +154,15 @@ OSHMEM_DECLSPEC oshmem_group_t* oshmem_proc_group_create(int pe_start,
return NULL ;
}
group->my_pe = OSHMEM_PROC_VPID(oshmem_proc_local());
group->my_pe = oshmem_proc_pe(oshmem_proc_local());
group->is_member = 0;
/* now save only the procs that match this jobid */
for (proc = (oshmem_proc_t*) opal_list_get_first(&oshmem_proc_list);
proc != (oshmem_proc_t*) opal_list_get_end(&oshmem_proc_list);
proc = (oshmem_proc_t*) opal_list_get_next(proc)) {
for (i = 0 ; i < oshmem_num_procs() ; i++) {
proc = oshmem_proc_find(i);
if (NULL == proc) {
opal_output(0,
"Error: Can not find proc object for pe = %d", i);
return NULL;
}
if (count_pe >= (int) pe_size) {
break;
} else if ((cur_pe >= pe_start)
@@ -687,7 +206,7 @@ OSHMEM_DECLSPEC oshmem_group_t* oshmem_proc_group_create(int pe_start,
return group;
}
OSHMEM_DECLSPEC void oshmem_proc_group_destroy(oshmem_group_t* group)
void oshmem_proc_group_destroy(oshmem_group_t* group)
{
if (group) {
mca_scoll_base_group_unselect(group);

View File

@@ -24,6 +24,7 @@
#include "orte/types.h"
#include "orte/runtime/orte_globals.h"
#include "ompi/proc/proc.h"
#include "ompi/communicator/communicator.h"
BEGIN_C_DECLS
@@ -85,18 +86,6 @@ OSHMEM_DECLSPEC extern oshmem_group_t* oshmem_group_all;
OSHMEM_DECLSPEC extern oshmem_group_t* oshmem_group_self;
OSHMEM_DECLSPEC extern oshmem_group_t* oshmem_group_null;
/**
* @private
*
* Pointer to the oshmem_proc_t structure for the local process
*
* Pointer to the oshmem_proc_t structure for the local process.
*
* @note This pointer is declared here to allow inline functions
* within this header file to access the local process quickly.
* Please use oshmem_proc_local() instead.
*/
OSHMEM_DECLSPEC extern oshmem_proc_t* oshmem_proc_local_proc;
/* ******************************************************************** */
@@ -120,21 +109,6 @@ OSHMEM_DECLSPEC extern oshmem_proc_t* oshmem_proc_local_proc;
*/
OSHMEM_DECLSPEC int oshmem_proc_init(void);
/**
* Set the arch of each proc in the oshmem_proc_list
*
* In some environments, SHMEM procs are required to exchange their
* arch via a modex operation during mpi_init. In other environments,
* the arch is determined by other mechanisms and provided to the
* proc directly. To support both mechanisms, we provide a separate
* function to set the arch of the procs -after- the modex operation
* has completed in mpi_init.
*
* @retval OSHMEM_SUCCESS Archs successfully set
* @retval OSHMEM_ERROR Archs could not be initialized
*/
OSHMEM_DECLSPEC int oshmem_proc_set_arch(void);
/**
* Finalize the OSHMEM Process subsystem
*
@@ -146,64 +120,6 @@ OSHMEM_DECLSPEC int oshmem_proc_set_arch(void);
*/
OSHMEM_DECLSPEC int oshmem_proc_finalize(void);
/**
* Returns the list of proc instances associated with this job.
*
* Returns the list of proc instances associated with this job. Given
* the current association between a job and an pe set, this
* function provides the process instances for the current
* pe set.
*
* @note The reference count of each process in the array is
* NOT incremented - the caller is responsible for ensuring the
* correctness of the reference count once they are done with
* the array.
*
* @param[in] size Number of processes in the oshmem_proc_t array
*
* @return Array of pointers to proc instances in the current
* pe set, or NULL if there is an internal failure.
*/
OSHMEM_DECLSPEC oshmem_proc_t** oshmem_proc_world(size_t* size);
/**
* Returns the list of all known proc instances.
*
* Returns the list of all known proc instances, including those in
* other pe sets. It is possible that we may no longer be
* connected to some of the procs returned (in the SHMEM sense of the
* word connected). In a strictly SHMEM-1 application, this function
* will return the same information as oshmem_proc_world().
*
* @note The reference count of each process in the array is
* incremented and the caller is responsible for releasing each
* process in the array, as well as freeing the array.
*
* @param[in] size Number of processes in the oshmem_proc_t array
*
* @return Array of pointers to proc instances in the current
* known universe, or NULL if there is an internal failure.
*/
OSHMEM_DECLSPEC oshmem_proc_t** oshmem_proc_all(size_t* size);
/**
* Returns a list of the local process
*
* Returns a list containing the local process (and only the local
* process). Has calling semantics similar to oshmem_proc_world() and
* oshmem_proc_all().
*
* @note The reference count of each process in the array is
* incremented and the caller is responsible for releasing each
* process in the array, as well as freeing the array.
*
* @param[in] size Number of processes in the oshmem_proc_t array
*
* @return Array of pointers to proc instances in the current
* known universe, or NULL if there is an internal failure.
*/
OSHMEM_DECLSPEC oshmem_proc_t** oshmem_proc_self(size_t* size);
/**
* Returns a pointer to the local process
*
@@ -213,9 +129,9 @@ OSHMEM_DECLSPEC oshmem_proc_t** oshmem_proc_self(size_t* size);
*
* @return Pointer to the local process structure
*/
static inline oshmem_proc_t* oshmem_proc_local(void)
static inline oshmem_proc_t *oshmem_proc_local(void)
{
return oshmem_proc_local_proc;
return (oshmem_proc_t *)ompi_proc_local_proc;
}
/**
@@ -229,75 +145,19 @@ static inline oshmem_proc_t* oshmem_proc_local(void)
*
* @return Pointer to the process instance for \c name
*/
OSHMEM_DECLSPEC oshmem_proc_t * oshmem_proc_find(const orte_process_name_t* name);
static inline oshmem_proc_t *oshmem_proc_for_find(const orte_process_name_t name)
{
return (oshmem_proc_t *)ompi_proc_for_name(name);
}
/**
* Pack proc list into portable buffer
*
* This function takes a list of oshmem_proc_t pointers (e.g. as given
* in groups) and returns a orte buffer containing all information
* needed to add the proc to a remote list. This includes the ORTE
* process name, the architecture, and the hostname. Ordering is
* maintained. The buffer is packed to be sent to a remote node with
* different architecture (endian or word size). The buffer can be
* dss unloaded to be sent using SHMEM or send using rml_send_packed().
*
* @param[in] proclist List of process pointers
* @param[in] proclistsize Length of the proclist array
* @param[in,out] buf An orte_buffer containing the packed names.
* The buffer must be constructed but empty when
* passed to this function
* @retval OSHMEM_SUCCESS Success
* @retval OSHMEM_ERROR Unspecified error
*/
OSHMEM_DECLSPEC int oshmem_proc_pack(oshmem_proc_t **proclist,
int proclistsize,
opal_buffer_t *buf);
static inline oshmem_proc_t *oshmem_proc_find(int pe)
{
orte_process_name_t name;
/**
* Unpack a portable buffer of procs
*
* This function unpacks a packed list of oshmem_proc_t structures and
* returns the ordered list of proc structures. If the given proc is
* already "known", the architecture and hostname information in the
* buffer is ignored. If the proc is "new" to this process, it will
* be added to the global list of known procs, with information
* provided in the buffer. The lookup actions are always entirely
* local. The proclist returned is a list of pointers to all procs in
* the buffer, whether they were previously known or are new to this
* process.
*
* @note In previous versions of this function, The PML's add_procs()
* function was called for any new processes discovered as a result of
* this operation. That is no longer the case -- the caller must use
* the newproclist information to call add_procs() if necessary.
*
* @note The reference count for procs created as a result of this
* operation will be set to 1. Existing procs will not have their
* reference count changed. The reference count of a proc at the
* return of this function is the same regardless of whether NULL is
* provided for newproclist. The user is responsible for freeing the
* newproclist array.
*
* @param[in] buf orte_buffer containing the packed names
* @param[in] proclistsize number of expected proc-pointres
* @param[out] proclist list of process pointers
* @param[out] newproclistsize Number of new procs added as a result
* of the unpack operation. NULL may be
* provided if information is not needed.
* @param[out] newproclist List of new procs added as a result of
* the unpack operation. NULL may be
* provided if informationis not needed.
*
* Return value:
* OSHMEM_SUCCESS on success
* OSHMEM_ERROR else
*/
OSHMEM_DECLSPEC int oshmem_proc_unpack(opal_buffer_t *buf,
int proclistsize,
oshmem_proc_t ***proclist,
int *newproclistsize,
oshmem_proc_t ***newproclist);
name.jobid = ORTE_PROC_MY_NAME->jobid;
name.vpid = pe;
return oshmem_proc_for_find(name);
}
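
Because the diff interleaves the removed pack/unpack documentation with the added code, the new lookup helpers are easier to read consolidated. Reassembled from the hunks above (a sketch, not verbatim file contents), the pair reads:

/* Name-based lookup is delegated to the OMPI proc layer. */
static inline oshmem_proc_t *oshmem_proc_for_find(const orte_process_name_t name)
{
    return (oshmem_proc_t *) ompi_proc_for_name(name);
}

/* PE-based lookup: build a process name in this job with vpid == pe. */
static inline oshmem_proc_t *oshmem_proc_find(int pe)
{
    orte_process_name_t name;

    name.jobid = ORTE_PROC_MY_NAME->jobid;
    name.vpid  = pe;
    return oshmem_proc_for_find(name);
}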
static inline int oshmem_proc_pe(oshmem_proc_t *proc)
{
@@ -352,7 +212,7 @@ OSHMEM_DECLSPEC int oshmem_proc_group_finalize(void);
* @return Array of pointers to proc instances in the current
* known universe, or NULL if there is an internal failure.
*/
OSHMEM_DECLSPEC oshmem_group_t* oshmem_proc_group_create(int pe_start,
OSHMEM_DECLSPEC oshmem_group_t *oshmem_proc_group_create(int pe_start,
int pe_stride,
size_t pe_size);
@@ -367,7 +227,7 @@ static inline oshmem_proc_t *oshmem_proc_group_all(int pe)
return oshmem_group_all->proc_array[pe];
}
static inline oshmem_proc_t* oshmem_proc_group_find(oshmem_group_t* group,
static inline oshmem_proc_t *oshmem_proc_group_find(oshmem_group_t* group,
int pe)
{
int i = 0;
@@ -390,7 +250,7 @@ static inline oshmem_proc_t* oshmem_proc_group_find(oshmem_group_t* group,
name.jobid = ORTE_PROC_MY_NAME->jobid;
name.vpid = pe;
proc = oshmem_proc_find(&name);
proc = oshmem_proc_for_find(name);
}
return proc;
@@ -420,12 +280,8 @@ static inline int oshmem_proc_group_is_member(oshmem_group_t *group)
static inline int oshmem_num_procs(void)
{
extern opal_list_t oshmem_proc_list;
if (!oshmem_group_all)
return opal_list_get_size(&oshmem_proc_list);
return oshmem_group_all->proc_count;
return (oshmem_group_all ?
oshmem_group_all->proc_count : (int)opal_list_get_size(&ompi_proc_list));
}
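
Consolidated, oshmem_num_procs() after this hunk no longer needs its own extern declaration of the removed oshmem_proc_list; it falls back to OMPI's list until the all-PE group exists (a sketch reassembled from the lines above):

static inline int oshmem_num_procs(void)
{
    /* Prefer the all-PE group once oshmem_proc_group_init() has run;
     * before that, report the size of OMPI's shared proc list. */
    return (oshmem_group_all ?
            oshmem_group_all->proc_count :
            (int) opal_list_get_size(&ompi_proc_list));
}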
static inline int oshmem_my_proc_id(void)

View File

@@ -148,6 +148,11 @@ static int _shmem_finalize(void)
return ret;
}
/* free proc_group resources */
if (OSHMEM_SUCCESS != (ret = oshmem_proc_group_finalize())) {
return ret;
}
/* free proc resources */
if (OSHMEM_SUCCESS != (ret = oshmem_proc_finalize())) {
return ret;

View File

@@ -238,11 +238,6 @@ static int _shmem_init(int argc, char **argv, int requested, int *provided)
goto error;
}
/* We need to do this anyway.
* This place requires to be reviewed and more elegant way is expected
*/
ompi_proc_local_proc = (ompi_proc_t*) oshmem_proc_local_proc;
/* Register the OSHMEM layer's MCA parameters */
if (OSHMEM_SUCCESS != (ret = oshmem_shmem_register_params())) {
error = "oshmem_info_register: oshmem_register_params failed";
@@ -297,11 +292,8 @@ static int _shmem_init(int argc, char **argv, int requested, int *provided)
goto error;
}
/* identify the architectures of remote procs and setup
* their datatype convertors, if required
*/
if (OSHMEM_SUCCESS != (ret = oshmem_proc_set_arch())) {
error = "oshmem_proc_set_arch failed";
if (OSHMEM_SUCCESS != (ret = oshmem_proc_group_init())) {
error = "oshmem_proc_group_init() failed";
goto error;
}
@@ -312,20 +304,6 @@ static int _shmem_init(int argc, char **argv, int requested, int *provided)
goto error;
}
/* There is issue with call add_proc twice so
* we need to use btl info got from PML add_procs() before call of SPML add_procs()
*/
{
ompi_proc_t** procs = NULL;
size_t nprocs = 0;
procs = ompi_proc_world(&nprocs);
while (nprocs--) {
oshmem_group_all->proc_array[nprocs]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_BML] =
procs[nprocs]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_BML];
}
free(procs);
}
ret =
MCA_SPML_CALL(add_procs(oshmem_group_all->proc_array, oshmem_group_all->proc_count));
if (OSHMEM_SUCCESS != ret) {