2004-07-08 18:48:34 +04:00
|
|
|
/*
|
2006-03-05 14:18:19 +03:00
|
|
|
* Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
|
2005-11-05 22:57:48 +03:00
|
|
|
* University Research and Technology
|
|
|
|
* Corporation. All rights reserved.
|
2006-03-05 14:18:19 +03:00
|
|
|
* Copyright (c) 2004-2006 The University of Tennessee and The University
|
2005-11-05 22:57:48 +03:00
|
|
|
* of Tennessee Research Foundation. All rights
|
|
|
|
* reserved.
|
2006-03-05 14:18:19 +03:00
|
|
|
* Copyright (c) 2004-2006 High Performance Computing Center Stuttgart,
|
2004-11-28 23:09:25 +03:00
|
|
|
* University of Stuttgart. All rights reserved.
|
2006-03-05 14:18:19 +03:00
|
|
|
* Copyright (c) 2004-2006 The Regents of the University of California.
|
2005-03-24 15:43:37 +03:00
|
|
|
* All rights reserved.
|
2007-01-27 16:44:03 +03:00
|
|
|
* Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
|
2004-11-22 04:38:40 +03:00
|
|
|
* $COPYRIGHT$
|
2005-09-01 05:07:30 +04:00
|
|
|
*
|
2004-11-22 04:38:40 +03:00
|
|
|
* Additional copyrights may follow
|
2005-09-01 05:07:30 +04:00
|
|
|
*
|
2004-11-22 03:37:56 +03:00
|
|
|
* $HEADER$
|
2004-07-08 18:48:34 +04:00
|
|
|
*/
|
|
|
|
|
2004-08-19 03:24:27 +04:00
|
|
|
#include "ompi_config.h"
|
|
|
|
|
2004-02-13 16:56:55 +03:00
|
|
|
#include <string.h>
|
2004-07-08 18:48:34 +04:00
|
|
|
|
2005-07-04 02:45:48 +04:00
|
|
|
#include "opal/threads/mutex.h"
|
2005-07-04 03:31:27 +04:00
|
|
|
#include "opal/util/output.h"
|
2007-01-02 11:04:34 +03:00
|
|
|
#include "opal/util/show_help.h"
|
2005-08-27 01:03:41 +04:00
|
|
|
#include "orte/util/sys_info.h"
|
2006-02-07 06:32:36 +03:00
|
|
|
#include "orte/dss/dss.h"
|
2005-07-15 02:43:01 +04:00
|
|
|
#include "orte/mca/oob/oob.h"
|
|
|
|
#include "orte/mca/ns/ns.h"
|
|
|
|
#include "orte/mca/gpr/gpr.h"
|
2005-09-01 05:07:30 +04:00
|
|
|
#include "orte/mca/errmgr/errmgr.h"
|
2005-07-15 02:43:01 +04:00
|
|
|
#include "orte/util/proc_info.h"
|
|
|
|
#include "ompi/proc/proc.h"
|
|
|
|
#include "ompi/mca/pml/pml.h"
|
2005-08-05 22:03:30 +04:00
|
|
|
#include "ompi/datatype/dt_arch.h"
|
2005-07-15 02:43:01 +04:00
|
|
|
#include "ompi/datatype/convertor.h"
|
2006-05-11 23:46:21 +04:00
|
|
|
#include "ompi/runtime/params.h"
|
2007-01-02 11:04:34 +03:00
|
|
|
#include "ompi/runtime/mpiruntime.h"
|
2004-01-29 18:34:47 +03:00
|
|
|
|
2005-07-03 20:22:16 +04:00
|
|
|
static opal_list_t ompi_proc_list;
|
2005-07-04 02:45:48 +04:00
|
|
|
static opal_mutex_t ompi_proc_lock;
|
2004-06-07 19:33:53 +04:00
|
|
|
ompi_proc_t* ompi_proc_local_proc = NULL;
|
2004-02-13 16:56:55 +03:00
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
static void ompi_proc_construct(ompi_proc_t* proc);
|
|
|
|
static void ompi_proc_destruct(ompi_proc_t* proc);
|
2005-07-15 02:43:01 +04:00
|
|
|
static int setup_registry_callback(void);
|
|
|
|
static void callback(orte_gpr_notify_data_t *data, void *cbdata);
|
2004-01-16 03:31:58 +03:00
|
|
|
|
2004-10-26 15:39:16 +04:00
|
|
|
OBJ_CLASS_INSTANCE(
|
2005-09-01 05:07:30 +04:00
|
|
|
ompi_proc_t,
|
2005-07-03 20:22:16 +04:00
|
|
|
opal_list_item_t,
|
2005-09-01 05:07:30 +04:00
|
|
|
ompi_proc_construct,
|
2004-10-26 15:39:16 +04:00
|
|
|
ompi_proc_destruct
|
|
|
|
);
|
|
|
|
|
2004-01-16 03:31:58 +03:00
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
/* Constructor for ompi_proc_t: initialize all fields to their defaults
 * and append the new proc to the global list (under the list lock). */
void ompi_proc_construct(ompi_proc_t* proc)
{
    /* No PML/BML/modex data yet. */
    proc->proc_bml = NULL;
    proc->proc_pml = NULL;
    proc->proc_modex = NULL;

    OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);

    /* Start out assuming the peer has the same architecture as us (i.e. a
     * homogeneous run).  When the registry callback fires we may replace
     * the convertor with one for the peer's real architecture. */
    proc->proc_convertor = ompi_mpi_local_convertor;
    OBJ_RETAIN( ompi_mpi_local_convertor );
    proc->proc_arch = ompi_mpi_local_arch;

    proc->proc_flags = 0;

    /* Hostname starts out NULL; it may or may not be filled in later --
     * consumers of this field beware. */
    proc->proc_hostname = NULL;

    /* Publish the new proc on the global list. */
    OPAL_THREAD_LOCK(&ompi_proc_lock);
    opal_list_append(&ompi_proc_list, (opal_list_item_t*)proc);
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);
}
|
|
|
|
|
2004-01-29 18:34:47 +03:00
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
/* Destructor for ompi_proc_t: release held references, free owned memory,
 * and unlink the proc from the global list. */
void ompi_proc_destruct(ompi_proc_t* proc)
{
    if (proc->proc_modex != NULL) {
        OBJ_RELEASE(proc->proc_modex);
    }

    /* All convertors are created with OBJ_NEW, so OBJ_RELEASE is always
     * correct here.  The shared local convertor holds an extra reference
     * (taken in the constructor / by the datatype engine), so it survives
     * until ompi_ddt_finalize; per-peer convertors die here. */
    OBJ_RELEASE( proc->proc_convertor );

    if (NULL != proc->proc_hostname) {
        free(proc->proc_hostname);
    }

    /* Unlink from the global list before tearing down our own lock. */
    OPAL_THREAD_LOCK(&ompi_proc_lock);
    opal_list_remove_item(&ompi_proc_list, (opal_list_item_t*)proc);
    OPAL_THREAD_UNLOCK(&ompi_proc_lock);

    OBJ_DESTRUCT(&proc->proc_lock);
}
|
|
|
|
|
2004-02-13 16:56:55 +03:00
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
int ompi_proc_init(void)
|
2004-02-13 16:56:55 +03:00
|
|
|
{
|
2005-03-14 23:57:21 +03:00
|
|
|
orte_process_name_t *peers;
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
orte_std_cntr_t i, npeers, num_tokens;
|
2005-09-01 19:05:03 +04:00
|
|
|
orte_jobid_t jobid;
|
|
|
|
char *segment, **tokens;
|
2007-01-27 16:44:03 +03:00
|
|
|
orte_data_value_t value = ORTE_DATA_VALUE_EMPTY;
|
2006-02-07 06:32:36 +03:00
|
|
|
uint32_t ui32;
|
2004-02-13 16:56:55 +03:00
|
|
|
int rc;
|
|
|
|
|
2005-07-03 20:22:16 +04:00
|
|
|
OBJ_CONSTRUCT(&ompi_proc_list, opal_list_t);
|
2005-07-04 02:45:48 +04:00
|
|
|
OBJ_CONSTRUCT(&ompi_proc_lock, opal_mutex_t);
|
2004-10-15 01:04:45 +04:00
|
|
|
|
2005-07-15 02:43:01 +04:00
|
|
|
/* get all peers in this job */
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
if(ORTE_SUCCESS != (rc = orte_ns.get_peers(&peers, &npeers, NULL))) {
|
2005-07-04 03:31:27 +04:00
|
|
|
opal_output(0, "ompi_proc_init: get_peers failed with errno=%d", rc);
|
2004-02-13 16:56:55 +03:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2005-07-15 02:43:01 +04:00
|
|
|
/* find self */
|
2005-08-05 22:03:30 +04:00
|
|
|
for( i = 0; i < npeers; i++ ) {
|
2004-06-07 19:33:53 +04:00
|
|
|
ompi_proc_t *proc = OBJ_NEW(ompi_proc_t);
|
2004-07-01 18:49:54 +04:00
|
|
|
proc->proc_name = peers[i];
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
if( i == ORTE_PROC_MY_NAME->vpid ) {
|
2004-06-07 19:33:53 +04:00
|
|
|
ompi_proc_local_proc = proc;
|
2005-07-15 02:43:01 +04:00
|
|
|
proc->proc_flags |= OMPI_PROC_FLAG_LOCAL;
|
2004-03-03 19:44:41 +03:00
|
|
|
}
|
2004-02-13 16:56:55 +03:00
|
|
|
}
|
2005-03-14 23:57:21 +03:00
|
|
|
free(peers);
|
2005-07-15 02:43:01 +04:00
|
|
|
|
|
|
|
/* setup registry callback to find everyone on my local node.
|
|
|
|
Can't do a GPR get because we're in the middle of MPI_INIT,
|
|
|
|
and we're setup for the GPR compound command -- so create a
|
|
|
|
subscription which will be serviced later, at the end of the
|
|
|
|
compound command. */
|
|
|
|
if (ORTE_SUCCESS != (rc = setup_registry_callback())) {
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2005-08-05 22:03:30 +04:00
|
|
|
/* Here we have to add to the GPR the information about the current architecture.
|
|
|
|
*/
|
2006-02-07 06:32:36 +03:00
|
|
|
if (OMPI_SUCCESS != (rc = ompi_arch_compute_local_id(&ui32))) {
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_dss.set(&value, &ui32, ORTE_UINT32))) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
2005-09-01 19:05:03 +04:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
jobid = ORTE_PROC_MY_NAME->jobid;
|
2005-09-01 19:05:03 +04:00
|
|
|
|
|
|
|
/* find the job segment on the registry */
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_schema.get_job_segment_name(&segment, jobid))) {
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* get the registry tokens for this node */
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_schema.get_proc_tokens(&tokens, &num_tokens,
|
|
|
|
orte_process_info.my_name))) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
free(segment);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* put the arch info on the registry */
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_gpr.put_1(ORTE_GPR_TOKENS_OR | ORTE_GPR_KEYS_OR,
|
|
|
|
segment, tokens,
|
2006-02-07 06:32:36 +03:00
|
|
|
OMPI_PROC_ARCH, &value))) {
|
2005-09-01 19:05:03 +04:00
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
}
|
|
|
|
free(segment);
|
|
|
|
for (i=0; i < num_tokens; i++) {
|
|
|
|
free(tokens[i]);
|
|
|
|
tokens[i] = NULL;
|
|
|
|
}
|
|
|
|
if (NULL != tokens) free(tokens);
|
2005-08-05 22:03:30 +04:00
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
return OMPI_SUCCESS;
|
2004-02-13 16:56:55 +03:00
|
|
|
}
|
|
|
|
|
2004-12-02 16:28:10 +03:00
|
|
|
int ompi_proc_finalize (void)
|
|
|
|
{
|
|
|
|
ompi_proc_t *proc, *nextproc, *endproc;
|
|
|
|
|
2005-07-03 20:22:16 +04:00
|
|
|
proc = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
|
|
|
|
nextproc = (ompi_proc_t*)opal_list_get_next(proc);
|
|
|
|
endproc = (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
|
2004-12-02 16:28:10 +03:00
|
|
|
|
|
|
|
OBJ_RELEASE(proc);
|
|
|
|
while ( nextproc != endproc ) {
|
2005-07-13 00:25:47 +04:00
|
|
|
proc = nextproc;
|
|
|
|
nextproc = (ompi_proc_t *)opal_list_get_next(proc);
|
|
|
|
OBJ_RELEASE(proc);
|
2004-12-02 16:28:10 +03:00
|
|
|
}
|
|
|
|
OBJ_DESTRUCT(&ompi_proc_list);
|
|
|
|
|
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
2004-02-13 16:56:55 +03:00
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
ompi_proc_t** ompi_proc_world(size_t *size)
|
2004-02-13 16:56:55 +03:00
|
|
|
{
|
2005-07-15 02:43:01 +04:00
|
|
|
ompi_proc_t **procs;
|
2004-06-07 19:33:53 +04:00
|
|
|
ompi_proc_t *proc;
|
2004-02-13 16:56:55 +03:00
|
|
|
size_t count = 0;
|
2005-07-15 02:43:01 +04:00
|
|
|
orte_ns_cmp_bitmask_t mask;
|
|
|
|
orte_process_name_t my_name;
|
2004-02-13 16:56:55 +03:00
|
|
|
|
2005-07-15 02:43:01 +04:00
|
|
|
/* check bozo case */
|
|
|
|
if (NULL == ompi_proc_local_proc) {
|
2004-02-13 16:56:55 +03:00
|
|
|
return NULL;
|
2005-07-15 02:43:01 +04:00
|
|
|
}
|
|
|
|
mask = ORTE_NS_CMP_JOBID;
|
|
|
|
my_name = ompi_proc_local_proc->proc_name;
|
2004-02-13 16:56:55 +03:00
|
|
|
|
2005-07-15 02:43:01 +04:00
|
|
|
/* First count how many match this jobid */
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_LOCK(&ompi_proc_lock);
|
2005-09-01 05:07:30 +04:00
|
|
|
for (proc = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
|
2005-07-15 02:43:01 +04:00
|
|
|
proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
|
|
|
|
proc = (ompi_proc_t*)opal_list_get_next(proc)) {
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
if (ORTE_EQUAL == orte_ns.compare_fields(mask, &proc->proc_name, &my_name)) {
|
2005-07-15 02:43:01 +04:00
|
|
|
++count;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* allocate an array */
|
|
|
|
procs = (ompi_proc_t**) malloc(count * sizeof(ompi_proc_t*));
|
|
|
|
if (NULL == procs) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* now save only the procs that match this jobid */
|
|
|
|
count = 0;
|
2005-09-01 05:07:30 +04:00
|
|
|
for (proc = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
|
2005-07-15 02:43:01 +04:00
|
|
|
proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
|
|
|
|
proc = (ompi_proc_t*)opal_list_get_next(proc)) {
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
if (ORTE_EQUAL == orte_ns.compare_fields(mask, &proc->proc_name, &my_name)) {
|
2005-07-15 02:43:01 +04:00
|
|
|
procs[count++] = proc;
|
|
|
|
}
|
2004-02-13 16:56:55 +03:00
|
|
|
}
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
|
2005-07-15 02:43:01 +04:00
|
|
|
|
2004-02-13 16:56:55 +03:00
|
|
|
*size = count;
|
|
|
|
return procs;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
ompi_proc_t** ompi_proc_all(size_t* size)
|
2004-02-13 16:56:55 +03:00
|
|
|
{
|
2005-09-01 05:07:30 +04:00
|
|
|
ompi_proc_t **procs =
|
2005-07-03 20:22:16 +04:00
|
|
|
(ompi_proc_t**) malloc(opal_list_get_size(&ompi_proc_list) * sizeof(ompi_proc_t*));
|
2004-06-07 19:33:53 +04:00
|
|
|
ompi_proc_t *proc;
|
2004-02-13 16:56:55 +03:00
|
|
|
size_t count = 0;
|
|
|
|
|
2005-07-15 02:43:01 +04:00
|
|
|
if (NULL == procs) {
|
2004-02-13 16:56:55 +03:00
|
|
|
return NULL;
|
2005-07-15 02:43:01 +04:00
|
|
|
}
|
2004-02-13 16:56:55 +03:00
|
|
|
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_LOCK(&ompi_proc_lock);
|
2005-09-01 05:07:30 +04:00
|
|
|
for(proc = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
|
2005-07-03 20:22:16 +04:00
|
|
|
proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
|
|
|
|
proc = (ompi_proc_t*)opal_list_get_next(proc)) {
|
2004-02-13 16:56:55 +03:00
|
|
|
OBJ_RETAIN(proc);
|
|
|
|
procs[count++] = proc;
|
|
|
|
}
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
|
2004-02-13 16:56:55 +03:00
|
|
|
*size = count;
|
|
|
|
return procs;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
ompi_proc_t** ompi_proc_self(size_t* size)
|
2004-02-13 16:56:55 +03:00
|
|
|
{
|
2004-10-18 20:11:14 +04:00
|
|
|
ompi_proc_t **procs = (ompi_proc_t**) malloc(sizeof(ompi_proc_t*));
|
2005-07-15 02:43:01 +04:00
|
|
|
if (NULL == procs) {
|
2004-02-13 16:56:55 +03:00
|
|
|
return NULL;
|
2005-07-15 02:43:01 +04:00
|
|
|
}
|
2004-06-07 19:33:53 +04:00
|
|
|
OBJ_RETAIN(ompi_proc_local_proc);
|
|
|
|
*procs = ompi_proc_local_proc;
|
2004-02-13 16:56:55 +03:00
|
|
|
*size = 1;
|
|
|
|
return procs;
|
|
|
|
}
|
|
|
|
|
2005-03-14 23:57:21 +03:00
|
|
|
ompi_proc_t * ompi_proc_find ( const orte_process_name_t * name )
|
2004-05-18 01:28:32 +04:00
|
|
|
{
|
2004-09-17 14:10:24 +04:00
|
|
|
ompi_proc_t *proc, *rproc=NULL;
|
2005-03-14 23:57:21 +03:00
|
|
|
orte_ns_cmp_bitmask_t mask;
|
2004-05-18 01:28:32 +04:00
|
|
|
|
|
|
|
/* return the proc-struct which matches this jobid+process id */
|
2005-03-14 23:57:21 +03:00
|
|
|
mask = ORTE_NS_CMP_CELLID | ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_LOCK(&ompi_proc_lock);
|
2005-09-01 05:07:30 +04:00
|
|
|
for(proc = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
|
2005-07-03 20:22:16 +04:00
|
|
|
proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
|
|
|
|
proc = (ompi_proc_t*)opal_list_get_next(proc)) {
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
if (ORTE_EQUAL == orte_ns.compare_fields(mask, &proc->proc_name, name)) {
|
2005-03-14 23:57:21 +03:00
|
|
|
rproc = proc;
|
|
|
|
break;
|
|
|
|
}
|
2004-05-18 01:28:32 +04:00
|
|
|
}
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
|
2004-09-17 14:10:24 +04:00
|
|
|
return rproc;
|
2004-05-18 01:28:32 +04:00
|
|
|
}
|
2004-07-01 18:49:54 +04:00
|
|
|
|
2004-08-04 21:05:22 +04:00
|
|
|
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
static ompi_proc_t *
|
|
|
|
ompi_proc_find_and_add(const orte_process_name_t * name, bool* isnew)
|
2004-09-17 14:10:24 +04:00
|
|
|
{
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
ompi_proc_t *proc, *rproc = NULL;
|
2005-03-14 23:57:21 +03:00
|
|
|
orte_ns_cmp_bitmask_t mask;
|
2004-09-17 14:10:24 +04:00
|
|
|
|
|
|
|
/* return the proc-struct which matches this jobid+process id */
|
2005-03-14 23:57:21 +03:00
|
|
|
mask = ORTE_NS_CMP_CELLID | ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_LOCK(&ompi_proc_lock);
|
2005-09-01 05:07:30 +04:00
|
|
|
for(proc = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
|
2005-07-03 20:22:16 +04:00
|
|
|
proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
|
|
|
|
proc = (ompi_proc_t*)opal_list_get_next(proc)) {
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
if (ORTE_EQUAL == orte_ns.compare_fields(mask, &proc->proc_name, name)) {
|
2005-03-14 23:57:21 +03:00
|
|
|
rproc = proc;
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
*isnew = false;
|
2005-03-14 23:57:21 +03:00
|
|
|
break;
|
|
|
|
}
|
2004-09-17 14:10:24 +04:00
|
|
|
}
|
|
|
|
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
if (NULL == rproc) {
|
2004-10-25 23:52:37 +04:00
|
|
|
*isnew = true;
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
rproc = OBJ_NEW(ompi_proc_t);
|
|
|
|
if (NULL != rproc) {
|
|
|
|
rproc->proc_name = *name;
|
|
|
|
}
|
|
|
|
/* caller had better fill in the rest of the proc, or there's
|
|
|
|
going to be pain later... */
|
2004-09-17 14:10:24 +04:00
|
|
|
}
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
|
|
|
|
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
|
|
|
|
|
2004-09-17 14:10:24 +04:00
|
|
|
return rproc;
|
|
|
|
}
|
|
|
|
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
|
|
|
|
int
|
|
|
|
ompi_proc_pack(ompi_proc_t **proclist, int proclistsize, orte_buffer_t* buf)
|
2004-08-04 21:05:22 +04:00
|
|
|
{
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
int i, rc;
|
|
|
|
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_LOCK(&ompi_proc_lock);
|
2004-08-04 21:05:22 +04:00
|
|
|
for (i=0; i<proclistsize; i++) {
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
rc = orte_dss.pack(buf, &(proclist[i]->proc_name), 1, ORTE_NAME);
|
|
|
|
if(rc != ORTE_SUCCESS) {
|
|
|
|
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
rc = orte_dss.pack(buf, &(proclist[i]->proc_arch), 1, ORTE_UINT32);
|
|
|
|
if(rc != ORTE_SUCCESS) {
|
|
|
|
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
rc = orte_dss.pack(buf, &(proclist[i]->proc_hostname), 1, ORTE_STRING);
|
2006-03-05 14:18:19 +03:00
|
|
|
if(rc != ORTE_SUCCESS) {
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
|
2005-03-14 23:57:21 +03:00
|
|
|
return rc;
|
|
|
|
}
|
2004-08-04 21:05:22 +04:00
|
|
|
}
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
|
2004-08-04 21:05:22 +04:00
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
int
|
|
|
|
ompi_proc_unpack(orte_buffer_t* buf, int proclistsize, ompi_proc_t ***proclist)
|
2004-08-04 21:05:22 +04:00
|
|
|
{
|
|
|
|
int i;
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
size_t newprocs_len = 0;
|
|
|
|
ompi_proc_t **plist=NULL, **newprocs = NULL;
|
2004-10-25 23:52:37 +04:00
|
|
|
|
2005-07-15 02:43:01 +04:00
|
|
|
/* do not free plist *ever*, since it is used in the remote group
|
|
|
|
structure of a communicator */
|
2004-09-16 14:07:14 +04:00
|
|
|
plist = (ompi_proc_t **) calloc (proclistsize, sizeof (ompi_proc_t *));
|
2004-08-04 21:05:22 +04:00
|
|
|
if ( NULL == plist ) {
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
}
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
/* free this on the way out */
|
|
|
|
newprocs = (ompi_proc_t **) calloc (proclistsize, sizeof (ompi_proc_t *));
|
|
|
|
if (NULL == newprocs) {
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
}
|
2004-08-04 21:05:22 +04:00
|
|
|
|
|
|
|
for ( i=0; i<proclistsize; i++ ){
|
2006-08-15 23:54:10 +04:00
|
|
|
orte_std_cntr_t count=1;
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
orte_process_name_t new_name;
|
|
|
|
uint32_t new_arch;
|
|
|
|
char *new_hostname;
|
|
|
|
bool isnew = false;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = orte_dss.unpack(buf, &new_name, &count, ORTE_NAME);
|
|
|
|
if (rc != ORTE_SUCCESS) {
|
2005-03-14 23:57:21 +03:00
|
|
|
return rc;
|
2005-07-15 02:43:01 +04:00
|
|
|
}
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
rc = orte_dss.unpack(buf, &new_arch, &count, ORTE_UINT32);
|
|
|
|
if (rc != ORTE_SUCCESS) {
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
rc = orte_dss.unpack(buf, &new_hostname, &count, ORTE_STRING);
|
|
|
|
if (rc != ORTE_SUCCESS) {
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
plist[i] = ompi_proc_find_and_add(&new_name, &isnew);
|
|
|
|
if (isnew) {
|
|
|
|
newprocs[newprocs_len++] = plist[i];
|
|
|
|
|
|
|
|
plist[i]->proc_arch = new_arch;
|
|
|
|
|
|
|
|
/* if arch is different than mine, create a new convertor for this proc */
|
|
|
|
if (plist[i]->proc_arch != ompi_mpi_local_arch) {
|
2006-12-30 20:13:18 +03:00
|
|
|
#if OMPI_ENABLE_HETEROGENEOUS_SUPPORT
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
OBJ_RELEASE(plist[i]->proc_convertor);
|
|
|
|
plist[i]->proc_convertor = ompi_convertor_create(plist[i]->proc_arch, 0);
|
2006-12-30 20:13:18 +03:00
|
|
|
#else
|
|
|
|
opal_show_help("help-mpi-runtime",
|
|
|
|
"heterogeneous-support-unavailable",
|
|
|
|
true, orte_system_info.nodename,
|
|
|
|
new_hostname == NULL ? "<hostname unavailable>" :
|
|
|
|
new_hostname);
|
|
|
|
return OMPI_ERR_NOT_SUPPORTED;
|
|
|
|
#endif
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Save the hostname */
|
|
|
|
if (ompi_mpi_keep_peer_hostnames) {
|
|
|
|
plist[i]->proc_hostname = new_hostname;
|
|
|
|
}
|
2004-10-25 23:52:37 +04:00
|
|
|
}
|
2004-08-04 21:05:22 +04:00
|
|
|
}
|
Clean up the way procs are added to the global process list after MPI_INIT:
* Do not add new procs to the global list during modex callback or
when sharing orte names during accept/connect. For modex, we
cache the modex info for later, in case that proc ever does get
added to the global proc list. For accept/connect orte name
exchange between the roots, we only need the orte name, so no
need to add a proc structure anyway. The procs will be added
to the global process list during the proc exchange later in
the wireup process
* Rename proc_get_namebuf and proc_get_proclist to proc_pack
and proc_unpack and extend them to include all information
needed to build that proc struct on a remote node (which
includes ORTE name, architecture, and hostname). Change
unpack to call pml_add_procs for the entire list of new
procs at once, rather than one at a time.
* Remove ompi_proc_find_and_add from the public proc
interface and make it a private function. This function
would add a half-created proc to the global proc list, so
making it harder to call is a good thing.
This means that there's only two ways to add new procs into the global proc list at this time: During MPI_INIT via the call to ompi_proc_init, where my job is added to the list and via ompi_proc_unpack using a buffer from a packed proc list sent to us by someone else. Currently, this is enough to implement MPI semantics. We can extend the interface more if we like, but that may require HNP communication to get the remote proc information and I wanted to avoid that if at all possible.
Refs trac:564
This commit was SVN r12798.
The following Trac tickets were found above:
Ticket 564 --> https://svn.open-mpi.org/trac/ompi/ticket/564
2006-12-07 22:56:54 +03:00
|
|
|
|
|
|
|
if (newprocs_len > 0) MCA_PML_CALL(add_procs(newprocs, newprocs_len));
|
|
|
|
if (newprocs != NULL) free(newprocs);
|
|
|
|
|
2004-08-04 21:05:22 +04:00
|
|
|
*proclist = plist;
|
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2005-07-15 02:43:01 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* As described above, we cannot do a simple GPR get because we're in
|
|
|
|
* the middle of the GPR compound command in MPI_INIT. So setup a
|
|
|
|
* subscription that will be fulfilled later in MPI_INIT.
|
|
|
|
*/
|
|
|
|
static int setup_registry_callback(void)
|
|
|
|
{
|
|
|
|
int rc;
|
2005-09-01 19:05:03 +04:00
|
|
|
char *segment, *sub_name, *trig_name, *keys[3];
|
2005-07-15 02:43:01 +04:00
|
|
|
ompi_proc_t *local = ompi_proc_local();
|
2005-09-01 05:07:30 +04:00
|
|
|
orte_gpr_subscription_id_t id;
|
2005-07-15 02:43:01 +04:00
|
|
|
orte_jobid_t jobid;
|
2005-09-01 05:07:30 +04:00
|
|
|
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
jobid = local->proc_name.jobid;
|
2005-09-01 05:07:30 +04:00
|
|
|
|
2005-07-15 02:43:01 +04:00
|
|
|
/* find the job segment on the registry */
|
2005-09-01 05:07:30 +04:00
|
|
|
if (ORTE_SUCCESS !=
|
2005-07-15 02:43:01 +04:00
|
|
|
(rc = orte_schema.get_job_segment_name(&segment, jobid))) {
|
2006-03-05 14:18:19 +03:00
|
|
|
ORTE_ERROR_LOG(rc);
|
2005-07-15 02:43:01 +04:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* indicate that this is a standard subscription. This indicates
|
|
|
|
that the subscription will be common to all processes. Thus,
|
|
|
|
the resulting data can be consolidated into a
|
|
|
|
process-independent message and broadcast to all processes */
|
2005-09-01 05:07:30 +04:00
|
|
|
if (ORTE_SUCCESS !=
|
|
|
|
(rc = orte_schema.get_std_subscription_name(&sub_name,
|
2005-07-18 22:49:00 +04:00
|
|
|
OMPI_PROC_SUBSCRIPTION, jobid))) {
|
2005-09-01 05:07:30 +04:00
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
free(segment);
|
2005-07-15 02:43:01 +04:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2005-09-01 05:07:30 +04:00
|
|
|
/* define the keys to be returned */
|
|
|
|
keys[0] = strdup(ORTE_PROC_NAME_KEY);
|
|
|
|
keys[1] = strdup(ORTE_NODE_NAME_KEY);
|
2005-09-01 19:05:03 +04:00
|
|
|
keys[2] = strdup(OMPI_PROC_ARCH);
|
2005-07-15 02:43:01 +04:00
|
|
|
|
2005-08-05 22:03:30 +04:00
|
|
|
/* Here we have to add another key to the registry to be able to get the information
|
|
|
|
* about the remote architectures.
|
|
|
|
* TODO: George.
|
|
|
|
*/
|
|
|
|
|
2005-09-01 05:07:30 +04:00
|
|
|
/* attach ourselves to the standard stage-1 trigger */
|
|
|
|
if (ORTE_SUCCESS !=
|
|
|
|
(rc = orte_schema.get_std_trigger_name(&trig_name,
|
2005-07-15 02:43:01 +04:00
|
|
|
ORTE_STG1_TRIGGER, jobid))) {
|
2005-09-01 05:07:30 +04:00
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
goto CLEANUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ORTE_SUCCESS != (rc = orte_gpr.subscribe_N(&id, trig_name, sub_name,
|
|
|
|
ORTE_GPR_NOTIFY_DELETE_AFTER_TRIG,
|
2006-12-10 02:10:25 +03:00
|
|
|
ORTE_GPR_TOKENS_OR | ORTE_GPR_KEYS_OR | ORTE_GPR_STRIPPED,
|
2005-09-01 05:07:30 +04:00
|
|
|
segment,
|
|
|
|
NULL, /* wildcard - look at all containers */
|
2005-09-01 19:05:03 +04:00
|
|
|
3, keys,
|
2005-09-01 05:07:30 +04:00
|
|
|
callback, NULL))) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
2005-07-15 02:43:01 +04:00
|
|
|
}
|
2005-09-01 05:07:30 +04:00
|
|
|
free(trig_name);
|
2005-07-15 02:43:01 +04:00
|
|
|
|
2005-09-01 05:07:30 +04:00
|
|
|
CLEANUP:
|
|
|
|
free(segment);
|
|
|
|
free(sub_name);
|
|
|
|
free(keys[0]);
|
|
|
|
free(keys[1]);
|
2006-03-05 14:18:19 +03:00
|
|
|
free(keys[2]);
|
2005-07-15 02:43:01 +04:00
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This callback is invoked by a subscription during MPI_INIT to let
|
|
|
|
* us know what procs are on what hosts. We look at the results and
|
|
|
|
* figure out which procs are on the same host as the local proc. For
|
|
|
|
* each proc that is on the same host as the local proc, we set that
|
|
|
|
* proc's OMPI_PROC_FLAG_LOCAL flag.
|
2005-09-01 05:07:30 +04:00
|
|
|
*/
|
2005-07-15 02:43:01 +04:00
|
|
|
static void callback(orte_gpr_notify_data_t *data, void *cbdata)
|
|
|
|
{
|
2006-08-15 23:54:10 +04:00
|
|
|
orte_std_cntr_t i, j, k;
|
2006-03-09 20:23:00 +03:00
|
|
|
char *str = NULL;
|
2006-02-07 06:32:36 +03:00
|
|
|
uint32_t arch = 0, *ui32;
|
2005-09-01 19:05:03 +04:00
|
|
|
bool found_name, found_arch;
|
2005-07-15 02:43:01 +04:00
|
|
|
orte_ns_cmp_bitmask_t mask;
|
2006-02-07 06:32:36 +03:00
|
|
|
orte_process_name_t name, *nptr;
|
2005-07-15 02:43:01 +04:00
|
|
|
orte_gpr_value_t **value;
|
|
|
|
orte_gpr_keyval_t **keyval;
|
|
|
|
ompi_proc_t *proc;
|
2006-02-07 06:32:36 +03:00
|
|
|
int rc;
|
2005-07-15 02:43:01 +04:00
|
|
|
|
|
|
|
/* check bozo case */
|
|
|
|
if (0 == data->cnt) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* locks are probably not necessary here, but just be safe anyway */
|
|
|
|
OPAL_THREAD_LOCK(&ompi_proc_lock);
|
|
|
|
|
|
|
|
/* loop over the data returned in the subscription */
|
|
|
|
mask = ORTE_NS_CMP_CELLID | ORTE_NS_CMP_JOBID | ORTE_NS_CMP_VPID;
|
2005-07-18 22:49:00 +04:00
|
|
|
value = (orte_gpr_value_t**)(data->values)->addr;
|
|
|
|
for (i = 0, k=0; k < data->cnt &&
|
|
|
|
i < (data->values)->size; ++i) {
|
|
|
|
if (NULL != value[i]) {
|
|
|
|
k++;
|
|
|
|
str = NULL;
|
|
|
|
found_name = false;
|
2005-09-01 19:05:03 +04:00
|
|
|
found_arch = false;
|
2005-07-18 22:49:00 +04:00
|
|
|
keyval = value[i]->keyvals;
|
2005-09-01 05:07:30 +04:00
|
|
|
|
2005-07-18 22:49:00 +04:00
|
|
|
/* find the 2 keys that we're looking for */
|
|
|
|
for (j = 0; j < value[i]->cnt; ++j) {
|
|
|
|
if (strcmp(keyval[j]->key, ORTE_PROC_NAME_KEY) == 0) {
|
2006-02-07 06:32:36 +03:00
|
|
|
if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&nptr, keyval[j]->value, ORTE_NAME))) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
orte_ns.get_proc_name_string(&str, nptr);
|
|
|
|
name = *nptr;
|
2005-07-18 22:49:00 +04:00
|
|
|
found_name = true;
|
|
|
|
} else if (strcmp(keyval[j]->key, ORTE_NODE_NAME_KEY) == 0) {
|
|
|
|
if (NULL != str) {
|
|
|
|
free(str);
|
|
|
|
}
|
2006-08-24 20:38:08 +04:00
|
|
|
str = strdup((const char*)keyval[j]->value->data);
|
2005-09-01 19:05:03 +04:00
|
|
|
} else if (strcmp(keyval[j]->key, OMPI_PROC_ARCH) == 0) {
|
2006-02-07 06:32:36 +03:00
|
|
|
if (ORTE_SUCCESS != (rc = orte_dss.get((void**)&ui32, keyval[j]->value, ORTE_UINT32))) {
|
|
|
|
ORTE_ERROR_LOG(rc);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
arch = *ui32;
|
2005-09-01 19:05:03 +04:00
|
|
|
found_arch = true;
|
2005-07-15 02:43:01 +04:00
|
|
|
}
|
|
|
|
}
|
2005-09-01 05:07:30 +04:00
|
|
|
|
2005-09-01 19:05:03 +04:00
|
|
|
/* if we found all keys and the proc is on my local host,
|
2005-07-18 22:49:00 +04:00
|
|
|
find it in the master proc list and set the "local" flag */
|
2005-09-01 19:05:03 +04:00
|
|
|
if (NULL != str && found_name && found_arch) {
|
2005-09-01 05:07:30 +04:00
|
|
|
for (proc = (ompi_proc_t*)opal_list_get_first(&ompi_proc_list);
|
2005-07-18 22:49:00 +04:00
|
|
|
proc != (ompi_proc_t*)opal_list_get_end(&ompi_proc_list);
|
|
|
|
proc = (ompi_proc_t*)opal_list_get_next(proc)) {
|
2005-09-01 19:05:03 +04:00
|
|
|
|
2006-02-26 03:05:25 +03:00
|
|
|
/* find the associated proc entry and update its
|
|
|
|
arch flag. If the nodename of this info is
|
|
|
|
my local host, also set the LOCAL flag. */
|
Bring over the update to terminate orteds that are generated by a dynamic spawn such as comm_spawn. This introduces the concept of a job "family" - i.e., jobs that have a parent/child relationship. Comm_spawn'ed jobs have a parent (the one that spawned them). We track that relationship throughout the lineage - i.e., if a comm_spawned job in turn calls comm_spawn, then it has a parent (the one that spawned it) and a "root" job (the original job that started things).
Accordingly, there are new APIs to the name service to support the ability to get a job's parent, root, immediate children, and all its descendants. In addition, the terminate_job, terminate_orted, and signal_job APIs for the PLS have been modified to accept attributes that define the extent of their actions. For example, doing a "terminate_job" with an attribute of ORTE_NS_INCLUDE_DESCENDANTS will terminate the given jobid AND all jobs that descended from it.
I have tested this capability on a MacBook under rsh, Odin under SLURM, and LANL's Flash (bproc). It worked successfully on non-MPI jobs (both simple and including a spawn), and MPI jobs (again, both simple and with a spawn).
This commit was SVN r12597.
2006-11-14 22:34:59 +03:00
|
|
|
if (ORTE_EQUAL == orte_ns.compare_fields(mask, &name, &proc->proc_name)) {
|
2006-02-26 03:05:25 +03:00
|
|
|
proc->proc_arch = arch;
|
|
|
|
if (0 == strcmp(str, orte_system_info.nodename)) {
|
2005-09-01 19:05:03 +04:00
|
|
|
proc->proc_flags |= OMPI_PROC_FLAG_LOCAL;
|
2006-02-26 03:05:25 +03:00
|
|
|
}
|
2006-12-30 20:13:18 +03:00
|
|
|
|
|
|
|
/* if arch is different than mine, create a
|
|
|
|
new convertor for this proc in
|
|
|
|
heterogeneous mode or abort in
|
|
|
|
non-heterogeneous mode. */
|
2006-03-20 04:13:41 +03:00
|
|
|
if (proc->proc_arch != ompi_mpi_local_arch) {
|
2006-12-30 20:13:18 +03:00
|
|
|
#if OMPI_ENABLE_HETEROGENEOUS_SUPPORT
|
2006-03-20 04:13:41 +03:00
|
|
|
OBJ_RELEASE(proc->proc_convertor);
|
|
|
|
proc->proc_convertor = ompi_convertor_create(proc->proc_arch, 0);
|
2006-12-30 20:13:18 +03:00
|
|
|
#else
|
|
|
|
opal_show_help("help-mpi-runtime",
|
|
|
|
"proc:heterogeneous-support-unavailable",
|
|
|
|
true, orte_system_info.nodename, str);
|
|
|
|
/* we can't return an error, so abort. */
|
|
|
|
ompi_mpi_abort(MPI_COMM_WORLD, OMPI_ERR_NOT_SUPPORTED, false);
|
|
|
|
#endif
|
2006-03-20 04:13:41 +03:00
|
|
|
}
|
|
|
|
|
2006-05-11 23:46:21 +04:00
|
|
|
/* Save the hostname */
|
2006-12-30 20:13:18 +03:00
|
|
|
if (ompi_mpi_keep_peer_hostnames && NULL == proc->proc_hostname) {
|
2006-05-11 23:46:21 +04:00
|
|
|
proc->proc_hostname = str;
|
|
|
|
str = NULL;
|
|
|
|
}
|
2005-07-18 22:49:00 +04:00
|
|
|
}
|
2005-07-15 02:43:01 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-03-09 20:23:00 +03:00
|
|
|
if (NULL != str) {
|
|
|
|
free(str);
|
|
|
|
}
|
|
|
|
|
2005-07-15 02:43:01 +04:00
|
|
|
/* unlock */
|
|
|
|
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
|
|
|
|
}
|
|
|
|
|