/*
 * Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2006 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2006 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006      Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include <string.h>

#include "opal/threads/mutex.h"
#include "opal/util/output.h"
#include "orte/util/sys_info.h"
#include "orte/dss/dss.h"
#include "orte/mca/oob/oob.h"
#include "orte/mca/ns/ns.h"
#include "orte/mca/gpr/gpr.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/util/proc_info.h"
#include "ompi/proc/proc.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/datatype/dt_arch.h"
#include "ompi/datatype/convertor.h"
#include "ompi/runtime/params.h"

static opal_list_t  ompi_proc_list;
static opal_mutex_t ompi_proc_lock;
ompi_proc_t* ompi_proc_local_proc = NULL;

static void ompi_proc_construct(ompi_proc_t* proc);
static void ompi_proc_destruct(ompi_proc_t* proc);
static int setup_registry_callback(void);
static void callback(orte_gpr_notify_data_t *data, void *cbdata);

OBJ_CLASS_INSTANCE(
    ompi_proc_t,
    opal_list_item_t,
    ompi_proc_construct,
    ompi_proc_destruct
);

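/*
 * Class constructor: initialize the fields of a newly created ompi_proc_t
 * to their defaults and append the proc to the global process list.
 */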
void ompi_proc_construct(ompi_proc_t* proc)
{
    proc->proc_bml = NULL;
    proc->proc_pml = NULL;
    proc->proc_modex = NULL;
    OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);

    /* By default, assume every processor has the same architecture as this
     * one, i.e. that we are running in a homogeneous environment.  Later,
     * when the registry callback fires, the convertors are reset to the
     * correct remote architectures.
     */
    proc->proc_convertor = ompi_mpi_local_convertor;
    OBJ_RETAIN( ompi_mpi_local_convertor );
    proc->proc_arch = ompi_mpi_local_arch;

    proc->proc_flags = 0;

    /* By default, put NULL in the hostname.  It may or may not get
       filled in later -- consumer of this field beware! */
    proc->proc_hostname = NULL;

    OPAL_THREAD_LOCK(&ompi_proc_lock);
    opal_list_append(&ompi_proc_list, (opal_list_item_t*)proc);
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
}

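/*
 * Class destructor: release the modex data and convertor, free the saved
 * hostname (if any), and remove the proc from the global process list.
 */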
void ompi_proc_destruct(ompi_proc_t* proc)
{
    if (proc->proc_modex != NULL) {
        OBJ_RELEASE(proc->proc_modex);
    }
    /* All convertors are created with OBJ_NEW, so we can simply call
     * OBJ_RELEASE.  Every convertor except the local one will eventually be
     * destroyed here.  If the reference counts are correct, the local
     * convertor (whose reference count was increased by the datatype engine)
     * will not be destroyed here; it is destroyed later, when
     * ompi_ddt_finalize is called.
     */
    OBJ_RELEASE( proc->proc_convertor );
    if (NULL != proc->proc_hostname) {
        free(proc->proc_hostname);
    }
    OPAL_THREAD_LOCK(&ompi_proc_lock);
    opal_list_remove_item(&ompi_proc_list, (opal_list_item_t*)proc);
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
    OBJ_DESTRUCT(&proc->proc_lock);
}

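/*
 * Build the initial process list: create an ompi_proc_t for every peer in
 * this job, mark the local proc, register a subscription that will later
 * fill in per-node information, and publish our architecture on the GPR.
 */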
int ompi_proc_init(void)
{
    orte_process_name_t *peers;
    size_t i, npeers, self, num_tokens;
    orte_jobid_t jobid;
    char *segment, **tokens;
    orte_data_value_t value = { {OBJ_CLASS(orte_data_value_t),0}, ORTE_NULL, NULL};
    uint32_t ui32;
    int rc;

    OBJ_CONSTRUCT(&ompi_proc_list, opal_list_t);
    OBJ_CONSTRUCT(&ompi_proc_lock, opal_mutex_t);

    /* get all peers in this job */
    if (ORTE_SUCCESS != (rc = orte_ns.get_peers(&peers, &npeers, &self))) {
        opal_output(0, "ompi_proc_init: get_peers failed with errno=%d", rc);
        return rc;
    }

    /* create a proc for each peer and mark ourselves */
    for( i = 0; i < npeers; i++ ) {
        ompi_proc_t *proc = OBJ_NEW(ompi_proc_t);
        proc->proc_name = peers[i];
        if( i == self ) {
            ompi_proc_local_proc = proc;
            proc->proc_flags |= OMPI_PROC_FLAG_LOCAL;
        }
    }
    free(peers);

    /* setup registry callback to find everyone on my local node.
       Can't do a GPR get because we're in the middle of MPI_INIT,
       and we're setup for the GPR compound command -- so create a
       subscription which will be serviced later, at the end of the
       compound command. */
    if (ORTE_SUCCESS != (rc = setup_registry_callback())) {
        return rc;
    }

    /* add the local architecture information to the GPR */
    if (OMPI_SUCCESS != (rc = ompi_arch_compute_local_id(&ui32))) {
        return rc;
    }
    if (ORTE_SUCCESS != (rc = orte_dss.set(&value, &ui32, ORTE_UINT32))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    if (ORTE_SUCCESS != (rc = orte_ns.get_jobid(&jobid, orte_process_info.my_name))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* find the job segment on the registry */
    if (ORTE_SUCCESS != (rc = orte_schema.get_job_segment_name(&segment, jobid))) {
        return rc;
    }

    /* get the registry tokens for this node */
    if (ORTE_SUCCESS != (rc = orte_schema.get_proc_tokens(&tokens, &num_tokens,
                                                          orte_process_info.my_name))) {
        ORTE_ERROR_LOG(rc);
        free(segment);
        return rc;
    }

    /* put the arch info on the registry */
    if (ORTE_SUCCESS != (rc = orte_gpr.put_1(ORTE_GPR_TOKENS_OR | ORTE_GPR_KEYS_OR,
                                             segment, tokens,
                                             OMPI_PROC_ARCH, &value))) {
        ORTE_ERROR_LOG(rc);
    }
    free(segment);
    for (i=0; i < num_tokens; i++) {
        free(tokens[i]);
        tokens[i] = NULL;
    }
    if (NULL != tokens) free(tokens);

    return OMPI_SUCCESS;
}

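/*
 * Release every proc on the global list and tear the list down.
 */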
int ompi_proc_finalize (void)
{
    ompi_proc_t *proc, *nextproc, *endproc;

    proc      = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
    nextproc  = (ompi_proc_t*)opal_list_get_next(proc);
    endproc   = (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);

    OBJ_RELEASE(proc);
    while ( nextproc != endproc ) {
        proc = nextproc;
        nextproc = (ompi_proc_t *)opal_list_get_next(proc);
        OBJ_RELEASE(proc);
    }
    OBJ_DESTRUCT(&ompi_proc_list);

    return OMPI_SUCCESS;
}

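/*
 * Return a malloc'ed array of pointers to every proc that shares the local
 * proc's jobid; *size is set to the array length.  The procs themselves are
 * not retained -- only the array belongs to the caller.
 */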
ompi_proc_t** ompi_proc_world(size_t *size)
{
    ompi_proc_t **procs;
    ompi_proc_t *proc;
    size_t count = 0;
    orte_ns_cmp_bitmask_t mask;
    orte_process_name_t my_name;

    /* check bozo case */
    if (NULL == ompi_proc_local_proc) {
        return NULL;
    }
    mask = ORTE_NS_CMP_JOBID;
    my_name = ompi_proc_local_proc->proc_name;

    /* First count how many match this jobid */
    OPAL_THREAD_LOCK(&ompi_proc_lock);
    for (proc =  (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
         proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
         proc =  (ompi_proc_t*)opal_list_get_next(proc)) {
        if (0 == orte_ns.compare(mask, &proc->proc_name, &my_name)) {
            ++count;
        }
    }

    /* allocate an array */
    procs = (ompi_proc_t**) malloc(count * sizeof(ompi_proc_t*));
    if (NULL == procs) {
        /* don't return with the lock still held */
        OPAL_THREAD_UNLOCK(&ompi_proc_lock);
        return NULL;
    }

    /* now save only the procs that match this jobid */
    count = 0;
    for (proc =  (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
         proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
         proc =  (ompi_proc_t*)opal_list_get_next(proc)) {
        if (0 == orte_ns.compare(mask, &proc->proc_name, &my_name)) {
            procs[count++] = proc;
        }
    }
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);

    *size = count;
    return procs;
}

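/*
 * Return a malloc'ed array containing every proc on the global list.
 * Each returned proc is OBJ_RETAINed, so the caller must release them.
 */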
ompi_proc_t** ompi_proc_all(size_t* size)
{
    ompi_proc_t **procs =
        (ompi_proc_t**) malloc(opal_list_get_size(&ompi_proc_list) * sizeof(ompi_proc_t*));
    ompi_proc_t *proc;
    size_t count = 0;

    if (NULL == procs) {
        return NULL;
    }

    OPAL_THREAD_LOCK(&ompi_proc_lock);
    for(proc =  (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
        proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
        proc =  (ompi_proc_t*)opal_list_get_next(proc)) {
        OBJ_RETAIN(proc);
        procs[count++] = proc;
    }
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
    *size = count;
    return procs;
}

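/*
 * Return a malloc'ed single-element array holding a retained reference to
 * the local proc; *size is set to 1.
 */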
ompi_proc_t** ompi_proc_self(size_t* size)
{
    ompi_proc_t **procs = (ompi_proc_t**) malloc(sizeof(ompi_proc_t*));
    if (NULL == procs) {
        return NULL;
    }
    OBJ_RETAIN(ompi_proc_local_proc);
    *procs = ompi_proc_local_proc;
    *size = 1;
    return procs;
}

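/*
 * Look up an existing proc by its full process name (cellid+jobid+vpid).
 * Returns NULL if no matching proc is on the list.
 */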
ompi_proc_t * ompi_proc_find ( const orte_process_name_t * name )
{
    ompi_proc_t *proc, *rproc=NULL;
    orte_ns_cmp_bitmask_t mask;

    /* return the proc-struct which matches this jobid+process id */
    mask = ORTE_NS_CMP_CELLID | ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
    OPAL_THREAD_LOCK(&ompi_proc_lock);
    for(proc =  (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
        proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
        proc =  (ompi_proc_t*)opal_list_get_next(proc)) {
        if (0 == orte_ns.compare(mask, &proc->proc_name, name)) {
            rproc = proc;
            break;
        }
    }
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
    return rproc;
}

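/*
 * Like ompi_proc_find(), but if no matching proc exists, create one with
 * the given name and add it to the list; *isnew reports which case occurred.
 */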
ompi_proc_t * ompi_proc_find_and_add ( const orte_process_name_t * name, bool* isnew )
{
    ompi_proc_t *proc, *rproc=NULL;
    orte_ns_cmp_bitmask_t mask;

    /* return the proc-struct which matches this jobid+process id */
    mask = ORTE_NS_CMP_CELLID | ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
    OPAL_THREAD_LOCK(&ompi_proc_lock);
    for(proc =  (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
        proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
        proc =  (ompi_proc_t*)opal_list_get_next(proc)) {
        if (0 == orte_ns.compare(mask, &proc->proc_name, name)) {
            *isnew = false;
            rproc = proc;
            break;
        }
    }
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);

    if ( NULL == rproc ) {
        ompi_proc_t *tproc = OBJ_NEW(ompi_proc_t);
        rproc = tproc;
        rproc->proc_name = *name;
        *isnew = true;
    }
    return rproc;
}

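/*
 * Pack the process names of the given procs into an ORTE buffer so they
 * can be sent to another process.
 */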
int ompi_proc_get_namebuf ( ompi_proc_t **proclist, int proclistsize, orte_buffer_t* buf)
{
    int i;
    OPAL_THREAD_LOCK(&ompi_proc_lock);
    for (i=0; i<proclistsize; i++) {
        int rc = orte_dss.pack(buf, &(proclist[i]->proc_name), 1, ORTE_NAME);
        if (rc != ORTE_SUCCESS) {
            OPAL_THREAD_UNLOCK(&ompi_proc_lock);
            return rc;
        }
    }
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
    return OMPI_SUCCESS;
}

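/*
 * Unpack a list of process names from an ORTE buffer and translate them
 * into an array of proc pointers, creating (and registering with the PML)
 * any procs we have not seen before.
 */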
int ompi_proc_get_proclist (orte_buffer_t* buf, int proclistsize, ompi_proc_t ***proclist)
{
    int i;
    ompi_proc_t **plist=NULL;
    orte_process_name_t name;
    bool isnew = false;

    /* do not free plist *ever*, since it is used in the remote group
       structure of a communicator */
    plist = (ompi_proc_t **) calloc (proclistsize, sizeof (ompi_proc_t *));
    if ( NULL == plist ) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    for ( i=0; i<proclistsize; i++ ){
        size_t count=1;
        int rc = orte_dss.unpack(buf, &name, &count, ORTE_NAME);
        if (rc != ORTE_SUCCESS) {
            return rc;
        }
        plist[i] = ompi_proc_find_and_add ( &name, &isnew );
        if (isnew) {
            MCA_PML_CALL(add_procs(&plist[i], 1));
        }
    }
    *proclist = plist;
    return OMPI_SUCCESS;
}

/*
 * As described above, we cannot do a simple GPR get because we're in
 * the middle of the GPR compound command in MPI_INIT.  So setup a
 * subscription that will be fulfilled later in MPI_INIT.
 */
static int setup_registry_callback(void)
{
    int rc;
    char *segment, *sub_name, *trig_name, *keys[3];
    ompi_proc_t *local = ompi_proc_local();
    orte_gpr_subscription_id_t id;
    orte_jobid_t jobid;

    if (ORTE_SUCCESS != (rc = orte_ns.get_jobid(&jobid, &local->proc_name))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* find the job segment on the registry */
    if (ORTE_SUCCESS !=
        (rc = orte_schema.get_job_segment_name(&segment, jobid))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* indicate that this is a standard subscription.  This indicates
       that the subscription will be common to all processes.  Thus,
       the resulting data can be consolidated into a
       process-independent message and broadcast to all processes */
    if (ORTE_SUCCESS !=
        (rc = orte_schema.get_std_subscription_name(&sub_name,
                                                    OMPI_PROC_SUBSCRIPTION, jobid))) {
        ORTE_ERROR_LOG(rc);
        free(segment);
        return rc;
    }

    /* define the keys to be returned */
    keys[0] = strdup(ORTE_PROC_NAME_KEY);
    keys[1] = strdup(ORTE_NODE_NAME_KEY);
    keys[2] = strdup(OMPI_PROC_ARCH);

    /* Here we have to add another key to the registry to be able to get
     * the information about the remote architectures.
     * TODO: George.
     */

    /* attach ourselves to the standard stage-1 trigger */
    if (ORTE_SUCCESS !=
        (rc = orte_schema.get_std_trigger_name(&trig_name,
                                               ORTE_STG1_TRIGGER, jobid))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }

    if (ORTE_SUCCESS != (rc = orte_gpr.subscribe_N(&id, trig_name, sub_name,
                                                   ORTE_GPR_NOTIFY_DELETE_AFTER_TRIG,
                                                   ORTE_GPR_TOKENS_OR | ORTE_GPR_KEYS_OR,
                                                   segment,
                                                   NULL,  /* wildcard - look at all containers */
                                                   3, keys,
                                                   callback, NULL))) {
        ORTE_ERROR_LOG(rc);
    }
    free(trig_name);

CLEANUP:
    free(segment);
    free(sub_name);
    free(keys[0]);
    free(keys[1]);
    free(keys[2]);

    return rc;
}

/*
 * This callback is invoked by a subscription during MPI_INIT to let us
 * know what procs are on what hosts and what their architectures are.
 * For each proc that is on the same host as the local proc, we set that
 * proc's OMPI_PROC_FLAG_LOCAL flag.  We also record each proc's
 * architecture (creating a new convertor if it differs from ours) and,
 * optionally, its hostname.
 */
static void callback(orte_gpr_notify_data_t *data, void *cbdata)
{
    size_t i, j, k;
    char *str = NULL;
    uint32_t arch = 0, *ui32;
    bool found_name, found_arch;
    orte_ns_cmp_bitmask_t mask;
    orte_process_name_t name, *nptr;
    orte_gpr_value_t **value;
    orte_gpr_keyval_t **keyval;
    ompi_proc_t *proc;
    int rc;

    /* check bozo case */
    if (0 == data->cnt) {
        return;
    }

    /* locks are probably not necessary here, but just be safe anyway */
    OPAL_THREAD_LOCK(&ompi_proc_lock);

    /* loop over the data returned in the subscription */
    mask = ORTE_NS_CMP_CELLID | ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
    value = (orte_gpr_value_t**)(data->values)->addr;
    for (i = 0, k = 0; k < data->cnt &&
                       i < (data->values)->size; ++i) {
        if (NULL != value[i]) {
            k++;
            str = NULL;
            found_name = false;
            found_arch = false;
            keyval = value[i]->keyvals;

            /* find the 3 keys that we're looking for */
            for (j = 0; j < value[i]->cnt; ++j) {
                if (strcmp(keyval[j]->key, ORTE_PROC_NAME_KEY) == 0) {
                    if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&nptr, keyval[j]->value, ORTE_NAME))) {
                        ORTE_ERROR_LOG(rc);
                        OPAL_THREAD_UNLOCK(&ompi_proc_lock);
                        return;
                    }
                    orte_ns.get_proc_name_string(&str, nptr);
                    name = *nptr;
                    found_name = true;
                } else if (strcmp(keyval[j]->key, ORTE_NODE_NAME_KEY) == 0) {
                    if (NULL != str) {
                        free(str);
                    }
                    str = strdup(keyval[j]->value->data);
                } else if (strcmp(keyval[j]->key, OMPI_PROC_ARCH) == 0) {
                    if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&ui32, keyval[j]->value, ORTE_UINT32))) {
                        ORTE_ERROR_LOG(rc);
                        OPAL_THREAD_UNLOCK(&ompi_proc_lock);
                        return;
                    }
                    arch = *ui32;
                    found_arch = true;
                }
            }

            /* if we found all the keys, find the corresponding entry in
               the master proc list and update it */
            if (NULL != str && found_name && found_arch) {
                for (proc = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
                     proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
                     proc =  (ompi_proc_t*)opal_list_get_next(proc)) {

                    /* find the associated proc entry and update its
                       arch.  If the nodename of this info is
                       my local host, also set the LOCAL flag. */
                    if (0 == orte_ns.compare(mask, &name, &proc->proc_name)) {
                        proc->proc_arch = arch;
                        if (0 == strcmp(str, orte_system_info.nodename)) {
                            proc->proc_flags |= OMPI_PROC_FLAG_LOCAL;
                        }
                        /* if arch is different than mine, create a new convertor for this proc */
                        if (proc->proc_arch != ompi_mpi_local_arch) {
                            OBJ_RELEASE(proc->proc_convertor);
                            proc->proc_convertor = ompi_convertor_create(proc->proc_arch, 0);
                        }

                        /* Save the hostname */
                        if (ompi_mpi_keep_peer_hostnames) {
                            proc->proc_hostname = str;
                            str = NULL;
                        }
                    }
                }
            }
        }
    }

    if (NULL != str) {
        free(str);
    }

    /* unlock */
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
}