/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "orte_config.h"
#include "orte/types.h"
#include "orte/constants.h"

#include <stdio.h>
#include <string.h>

#include "opal/util/output.h"
#include "opal/threads/tsd.h"
#include "opal/dss/dss.h"

#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/odls/odls_types.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/rml/rml_types.h"
#include "orte/util/name_fns.h"
#include "orte/runtime/orte_globals.h"
#include "orte/runtime/orte_wait.h"

#include "orte/util/comm/comm.h"
/* quick timeout loop */
static bool timer_fired;
static opal_buffer_t answer;
static opal_event_t *quicktime = NULL;
static int error_exit;
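
/* NOTE: the shared state above (timer_fired, answer, quicktime, error_exit)
 * allows only one outstanding request at a time - the functions below are
 * written for serial use from a single-threaded tool, not for concurrent
 * callers.
 */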

static void quicktime_cb(int fd, short event, void *cbdata)
{
    if (NULL != quicktime) {
        free(quicktime);
        quicktime = NULL;
    }
    error_exit = ORTE_ERR_SILENT;
    /* declare it fired */
    timer_fired = true;
}
static void recv_info(int status, orte_process_name_t* sender,
                      opal_buffer_t* buffer, orte_rml_tag_t tag,
                      void* cbdata)
{
    int rc;

    /* cancel the timer */
    if (NULL != quicktime) {
        opal_evtimer_del(quicktime);
        free(quicktime);
        quicktime = NULL;
    }
    /* xfer the answer */
    if (ORTE_SUCCESS != (rc = opal_dss.copy_payload(&answer, buffer))) {
        ORTE_ERROR_LOG(rc);
    }
    /* declare the work done */
    timer_fired = true;
}
static void send_cbfunc(int status, orte_process_name_t* sender,
                        opal_buffer_t* buffer, orte_rml_tag_t tag,
                        void* cbdata)
{
    /* cancel the timer */
    if (NULL != quicktime) {
        opal_evtimer_del(quicktime);
        free(quicktime);
        quicktime = NULL;
    }
    OBJ_RELEASE(buffer);
    /* declare the work done */
    timer_fired = true;
}
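
/* Every query function below follows the same request/response pattern:
 *   1. pack a daemon command (plus its arguments) into a new buffer;
 *   2. arm a timeout, then send the buffer to the HNP non-blocking;
 *   3. progress the event loop until send_cbfunc or quicktime_cb fires;
 *   4. arm a second timeout and post a non-blocking receive on the tool
 *      tag - recv_info copies the reply payload into 'answer';
 *   5. unpack the object count, then the individual objects, from 'answer'.
 * If a timeout fires, quicktime_cb sets error_exit = ORTE_ERR_SILENT and
 * that value is returned to the caller.
 */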
int orte_util_comm_query_job_info(const orte_process_name_t *hnp, orte_jobid_t job,
                                  int *num_jobs, orte_job_t ***job_info_array)
{
    int ret;
    int32_t cnt, cnt_jobs, n;
    opal_buffer_t *cmd;
    orte_daemon_cmd_flag_t command = ORTE_DAEMON_REPORT_JOB_INFO_CMD;
    orte_job_t **job_info;

    /* set default response */
    *num_jobs = 0;
    *job_info_array = NULL;

    /* send query to HNP */
    cmd = OBJ_NEW(opal_buffer_t);
    if (ORTE_SUCCESS != (ret = opal_dss.pack(cmd, &command, 1, ORTE_DAEMON_CMD))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }
    if (ORTE_SUCCESS != (ret = opal_dss.pack(cmd, &job, 1, ORTE_JOBID))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }

    /* define a max time to wait for the send to complete */
    timer_fired = false;
    error_exit = ORTE_SUCCESS;
    ORTE_DETECT_TIMEOUT(&quicktime, 100, 1000, 100000, quicktime_cb);

    /* do the send */
    if (0 > (ret = orte_rml.send_buffer_nb((orte_process_name_t*)hnp, cmd, ORTE_RML_TAG_DAEMON, 0,
                                           send_cbfunc, NULL))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }

    /* wait for the send to complete */
    ORTE_PROGRESSED_WAIT(timer_fired, 0, 1);

    /* did it succeed? */
    if (ORTE_SUCCESS != error_exit) {
        return error_exit;
    }

    /* setup for the answer */
    OBJ_CONSTRUCT(&answer, opal_buffer_t);

    /* define a max time to wait for an answer */
    timer_fired = false;
    error_exit = ORTE_SUCCESS;
    ORTE_DETECT_TIMEOUT(&quicktime, 100, 1000, 100000, quicktime_cb);

    /* get the answer */
    if (ORTE_SUCCESS != (ret = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
                                                       ORTE_RML_TAG_TOOL,
                                                       ORTE_RML_NON_PERSISTENT,
                                                       recv_info,
                                                       NULL))) {
        /* cancel the timer */
        if (NULL != quicktime) {
            opal_evtimer_del(quicktime);
            free(quicktime);
            quicktime = NULL;
        }
        ORTE_ERROR_LOG(ret);
        OBJ_DESTRUCT(&answer);
        return ret;
    }

    ORTE_PROGRESSED_WAIT(timer_fired, 0, 1);

    if (ORTE_SUCCESS != error_exit) {
        OBJ_DESTRUCT(&answer);
        return error_exit;
    }

    cnt = 1;
    if (ORTE_SUCCESS != (ret = opal_dss.unpack(&answer, &cnt_jobs, &cnt, OPAL_INT32))) {
        ORTE_ERROR_LOG(ret);
        OBJ_DESTRUCT(&answer);
        return ret;
    }

    /* allocate the required memory */
    if (0 < cnt_jobs) {
        job_info = (orte_job_t**)malloc(cnt_jobs * sizeof(orte_job_t*));
        /* unpack the job data */
        for (n = 0; n < cnt_jobs; n++) {
            cnt = 1;
            if (ORTE_SUCCESS != (ret = opal_dss.unpack(&answer, &job_info[n], &cnt, ORTE_JOB))) {
                ORTE_ERROR_LOG(ret);
                OBJ_DESTRUCT(&answer);
                free(job_info);
                return ret;
            }
        }
        *job_info_array = job_info;
        *num_jobs = cnt_jobs;
    }
    OBJ_DESTRUCT(&answer);

    return ORTE_SUCCESS;
}
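
/* Illustrative only - a minimal sketch of how a tool might drive the query
 * above after orte_init(). ORTE_PROC_MY_HNP and ORTE_JOBID_WILDCARD are
 * assumed valid in the caller's context; the release pattern (OBJ_RELEASE
 * of each object, then free of the array) mirrors the unpack code above.
 *
 *     int i, num_jobs;
 *     orte_job_t **jobs;
 *
 *     if (ORTE_SUCCESS == orte_util_comm_query_job_info(ORTE_PROC_MY_HNP,
 *                                                       ORTE_JOBID_WILDCARD,
 *                                                       &num_jobs, &jobs)) {
 *         for (i = 0; i < num_jobs; i++) {
 *             opal_output(0, "job %s has %d procs",
 *                         ORTE_JOBID_PRINT(jobs[i]->jobid),
 *                         (int)jobs[i]->num_procs);
 *             OBJ_RELEASE(jobs[i]);
 *         }
 *         free(jobs);
 *     }
 */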
int orte_util_comm_query_node_info(const orte_process_name_t *hnp, char *node,
                                   int *num_nodes, orte_node_t ***node_info_array)
{
    int ret;
    int32_t cnt, cnt_nodes, n;
    opal_buffer_t *cmd;
    orte_daemon_cmd_flag_t command = ORTE_DAEMON_REPORT_NODE_INFO_CMD;
    orte_node_t **node_info;

    /* set default response */
    *num_nodes = 0;
    *node_info_array = NULL;

    /* query the HNP for node info */
    cmd = OBJ_NEW(opal_buffer_t);
    if (ORTE_SUCCESS != (ret = opal_dss.pack(cmd, &command, 1, ORTE_DAEMON_CMD))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }
    if (ORTE_SUCCESS != (ret = opal_dss.pack(cmd, &node, 1, OPAL_STRING))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }

    /* define a max time to wait for the send to complete */
    timer_fired = false;
    error_exit = ORTE_SUCCESS;
    ORTE_DETECT_TIMEOUT(&quicktime, 100, 1000, 100000, quicktime_cb);

    /* do the send */
    if (0 > (ret = orte_rml.send_buffer_nb((orte_process_name_t*)hnp, cmd, ORTE_RML_TAG_DAEMON, 0,
                                           send_cbfunc, NULL))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }

    /* wait for the send to complete */
    ORTE_PROGRESSED_WAIT(timer_fired, 0, 1);

    /* did it succeed? */
    if (ORTE_SUCCESS != error_exit) {
        return error_exit;
    }

    /* define a max time to wait for an answer */
    timer_fired = false;
    error_exit = ORTE_SUCCESS;
    ORTE_DETECT_TIMEOUT(&quicktime, 10, 1000, 10000, quicktime_cb);

    /* get the answer */
    OBJ_CONSTRUCT(&answer, opal_buffer_t);
    if (ORTE_SUCCESS != (ret = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
                                                       ORTE_RML_TAG_TOOL,
                                                       ORTE_RML_NON_PERSISTENT,
                                                       recv_info,
                                                       NULL))) {
        /* cancel the timer */
        if (NULL != quicktime) {
            opal_evtimer_del(quicktime);
            free(quicktime);
            quicktime = NULL;
        }
        ORTE_ERROR_LOG(ret);
        OBJ_DESTRUCT(&answer);
        return ret;
    }

    ORTE_PROGRESSED_WAIT(timer_fired, 0, 1);

    if (ORTE_SUCCESS != error_exit) {
        OBJ_DESTRUCT(&answer);
        return error_exit;
    }

    cnt = 1;
    if (ORTE_SUCCESS != (ret = opal_dss.unpack(&answer, &cnt_nodes, &cnt, OPAL_INT32))) {
        ORTE_ERROR_LOG(ret);
        OBJ_DESTRUCT(&answer);
        return ret;
    }

    /* allocate the required memory */
    if (0 < cnt_nodes) {
        node_info = (orte_node_t**)malloc(cnt_nodes * sizeof(orte_node_t*));
        /* unpack the node data */
        for (n = 0; n < cnt_nodes; n++) {
            cnt = 1;
            if (ORTE_SUCCESS != (ret = opal_dss.unpack(&answer, &node_info[n], &cnt, ORTE_NODE))) {
                ORTE_ERROR_LOG(ret);
                OBJ_DESTRUCT(&answer);
                free(node_info);
                return ret;
            }
        }
        *node_info_array = node_info;
        *num_nodes = cnt_nodes;
    }
    OBJ_DESTRUCT(&answer);

    return ORTE_SUCCESS;
}
int orte_util_comm_query_proc_info(const orte_process_name_t *hnp, orte_jobid_t job, orte_vpid_t vpid,
                                   int *num_procs, orte_proc_t ***proc_info_array)
{
    int ret;
    int32_t cnt, cnt_procs, n;
    opal_buffer_t *cmd;
    orte_daemon_cmd_flag_t command = ORTE_DAEMON_REPORT_PROC_INFO_CMD;
    orte_proc_t **proc_info;

    /* set default response */
    *num_procs = 0;
    *proc_info_array = NULL;

    /* query the HNP for info on the procs in this job */
    cmd = OBJ_NEW(opal_buffer_t);
    if (ORTE_SUCCESS != (ret = opal_dss.pack(cmd, &command, 1, ORTE_DAEMON_CMD))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }
    if (ORTE_SUCCESS != (ret = opal_dss.pack(cmd, &job, 1, ORTE_JOBID))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }
    if (ORTE_SUCCESS != (ret = opal_dss.pack(cmd, &vpid, 1, ORTE_VPID))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }

    /* define a max time to wait for the send to complete */
    timer_fired = false;
    error_exit = ORTE_SUCCESS;
    ORTE_DETECT_TIMEOUT(&quicktime, 100, 1000, 100000, quicktime_cb);

    /* do the send */
    if (0 > (ret = orte_rml.send_buffer_nb((orte_process_name_t*)hnp, cmd, ORTE_RML_TAG_DAEMON, 0,
                                           send_cbfunc, NULL))) {
        ORTE_ERROR_LOG(ret);
        OBJ_RELEASE(cmd);
        return ret;
    }

    /* wait for the send to complete */
    ORTE_PROGRESSED_WAIT(timer_fired, 0, 1);

    /* did it succeed? */
    if (ORTE_SUCCESS != error_exit) {
        return error_exit;
    }

    /* define a max time to wait for an answer */
    timer_fired = false;
    error_exit = ORTE_SUCCESS;
    ORTE_DETECT_TIMEOUT(&quicktime, 10, 1000, 10000, quicktime_cb);

    /* get the answer */
    OBJ_CONSTRUCT(&answer, opal_buffer_t);
    if (ORTE_SUCCESS != (ret = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
                                                       ORTE_RML_TAG_TOOL,
                                                       ORTE_RML_NON_PERSISTENT,
                                                       recv_info,
                                                       NULL))) {
        /* cancel the timer */
        if (NULL != quicktime) {
            opal_evtimer_del(quicktime);
            free(quicktime);
            quicktime = NULL;
        }
        ORTE_ERROR_LOG(ret);
        OBJ_DESTRUCT(&answer);
        return ret;
    }

    ORTE_PROGRESSED_WAIT(timer_fired, 0, 1);

    if (ORTE_SUCCESS != error_exit) {
        OBJ_DESTRUCT(&answer);
        return error_exit;
    }

    cnt = 1;
    if (ORTE_SUCCESS != (ret = opal_dss.unpack(&answer, &cnt_procs, &cnt, OPAL_INT32))) {
        ORTE_ERROR_LOG(ret);
        OBJ_DESTRUCT(&answer);
        return ret;
    }

    /* allocate the required memory */
    if (0 < cnt_procs) {
        proc_info = (orte_proc_t**)malloc(cnt_procs * sizeof(orte_proc_t*));
        /* unpack the procs */
        for (n = 0; n < cnt_procs; n++) {
            cnt = 1;
            if (ORTE_SUCCESS != (ret = opal_dss.unpack(&answer, &proc_info[n], &cnt, ORTE_PROC))) {
                ORTE_ERROR_LOG(ret);
                OBJ_DESTRUCT(&answer);
                free(proc_info);
                return ret;
            }
        }
        *proc_info_array = proc_info;
        *num_procs = (int)cnt_procs;
    }
    OBJ_DESTRUCT(&answer);

    return ORTE_SUCCESS;
}
/* The spawn function cannot just call the plm.proxy since that won't
 * necessarily be open. Likewise, we can't just send the launch request
 * to the HNP's plm_receive as that function would return the response
 * to the plm_proxy tag! So we have to go another route to get this
 * request processed.
 */
int orte_util_comm_spawn_job(const orte_process_name_t *hnp, orte_job_t *jdata)
{
    opal_buffer_t buf;
    orte_daemon_cmd_flag_t command;
    orte_std_cntr_t count;
    int rc;

    OPAL_OUTPUT_VERBOSE((5, orte_debug_output,
                         "%s util_comm_spawn_job: requesting HNP %s spawn new job",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(hnp)));

    /* setup the buffer */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);

    /* tell the HNP we are sending a launch request */
    command = ORTE_DAEMON_SPAWN_JOB_CMD;
    if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &command, 1, ORTE_DAEMON_CMD))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }

    /* pack the jdata object */
    if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &jdata, 1, ORTE_JOB))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }

    OPAL_OUTPUT_VERBOSE((5, orte_debug_output,
                         "%s util_comm_spawn_job: sending spawn cmd to HNP %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(hnp)));

    /* tell the target HNP to launch the job */
    if (0 > (rc = orte_rml.send_buffer((orte_process_name_t*)hnp, &buf, ORTE_RML_TAG_DAEMON, 0))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }
    OBJ_DESTRUCT(&buf);

    OPAL_OUTPUT_VERBOSE((5, orte_debug_output,
                         "%s util_comm_spawn_job: waiting for response",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* wait for the target's response */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);
    if (0 > (rc = orte_rml.recv_buffer(ORTE_NAME_WILDCARD, &buf, ORTE_RML_TAG_TOOL, 0))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }

    /* get the new jobid back in case the caller wants it */
    count = 1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &(jdata->jobid), &count, ORTE_JOBID))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }
    if (ORTE_JOBID_INVALID == jdata->jobid) {
        /* something went wrong on the far end - go no further */
        rc = ORTE_ERR_FAILED_TO_START;
        goto CLEANUP;
    }

    /* good to go! */

CLEANUP:
    OBJ_DESTRUCT(&buf);

    return rc;
}
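
/* Illustrative only - a sketch of how a caller might build a minimal job
 * description for orte_util_comm_spawn_job(). The orte_job_t and
 * orte_app_context_t field names used here are assumptions based on this
 * code base's era, not a stable API.
 *
 *     orte_job_t *jdata = OBJ_NEW(orte_job_t);
 *     orte_app_context_t *app = OBJ_NEW(orte_app_context_t);
 *
 *     app->app = strdup("/bin/hostname");
 *     opal_argv_append_nosize(&app->argv, "hostname");
 *     app->num_procs = 2;
 *     opal_pointer_array_add(jdata->apps, app);
 *     jdata->num_apps = 1;
 *
 *     if (ORTE_SUCCESS == orte_util_comm_spawn_job(ORTE_PROC_MY_HNP, jdata)) {
 *         opal_output(0, "spawned job %s", ORTE_JOBID_PRINT(jdata->jobid));
 *     }
 *     OBJ_RELEASE(jdata);
 */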
int orte_util_comm_terminate_job(const orte_process_name_t *hnp, orte_jobid_t job)
{
    opal_buffer_t buf;
    orte_daemon_cmd_flag_t command;
    orte_std_cntr_t count;
    int rc, ret = ORTE_ERROR;

    OPAL_OUTPUT_VERBOSE((5, orte_debug_output,
                         "%s util_comm_terminate_job: requesting HNP %s terminate job %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(hnp),
                         ORTE_JOBID_PRINT(job)));

    /* setup the buffer */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);

    /* tell the HNP we are sending a terminate request */
    command = ORTE_DAEMON_TERMINATE_JOB_CMD;
    if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &command, 1, ORTE_DAEMON_CMD))) {
        ORTE_ERROR_LOG(rc);
        ret = rc;
        goto CLEANUP;
    }

    /* pack the jobid */
    if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &job, 1, ORTE_JOBID))) {
        ORTE_ERROR_LOG(rc);
        ret = rc;
        goto CLEANUP;
    }

    OPAL_OUTPUT_VERBOSE((5, orte_debug_output,
                         "%s util_comm_terminate_job: sending terminate cmd to HNP %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(hnp)));

    /* tell the target HNP to terminate the job */
    if (0 > (rc = orte_rml.send_buffer((orte_process_name_t*)hnp, &buf, ORTE_RML_TAG_DAEMON, 0))) {
        ORTE_ERROR_LOG(rc);
        ret = rc;
        goto CLEANUP;
    }
    OBJ_DESTRUCT(&buf);

    OPAL_OUTPUT_VERBOSE((5, orte_debug_output,
                         "%s util_comm_terminate_job: waiting for response",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));

    /* wait for the target's response */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);
    if (0 > (rc = orte_rml.recv_buffer(ORTE_NAME_WILDCARD, &buf, ORTE_RML_TAG_TOOL, 0))) {
        ORTE_ERROR_LOG(rc);
        ret = rc;
        goto CLEANUP;
    }

    /* get the status code */
    count = 1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(&buf, &ret, &count, OPAL_INT))) {
        ORTE_ERROR_LOG(rc);
        ret = rc;
        goto CLEANUP;
    }

CLEANUP:
    OBJ_DESTRUCT(&buf);

    return ret;
}
int orte_util_comm_halt_vm(const orte_process_name_t *hnp)
{
    opal_buffer_t buf;
    orte_daemon_cmd_flag_t command;
    int rc;

    OPAL_OUTPUT_VERBOSE((5, orte_debug_output,
                         "%s util_comm_halt_vm: ordering HNP %s terminate",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(hnp)));

    /* setup the buffer */
    OBJ_CONSTRUCT(&buf, opal_buffer_t);

    /* tell the HNP to die */
    command = ORTE_DAEMON_HALT_VM_CMD;
    if (ORTE_SUCCESS != (rc = opal_dss.pack(&buf, &command, 1, ORTE_DAEMON_CMD))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }

    /* send the order - note that send_buffer returns the number of bytes
     * sent on success, so reset rc to a status code before returning */
    if (0 > (rc = orte_rml.send_buffer((orte_process_name_t*)hnp, &buf, ORTE_RML_TAG_DAEMON, 0))) {
        ORTE_ERROR_LOG(rc);
        goto CLEANUP;
    }
    rc = ORTE_SUCCESS;

    /* don't bother waiting around */
CLEANUP:
    OBJ_DESTRUCT(&buf);

    return rc;
}
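
/* Illustrative only - a sketch of a tool's shutdown sequence using the two
 * calls above: terminate a specific job, then order the entire VM down
 * without waiting for a reply. The jobid is assumed to come from a prior
 * spawn.
 *
 *     int rc;
 *
 *     if (ORTE_SUCCESS != (rc = orte_util_comm_terminate_job(ORTE_PROC_MY_HNP,
 *                                                            jdata->jobid))) {
 *         opal_output(0, "terminate failed: %s", opal_strerror(rc));
 *     }
 *     (void) orte_util_comm_halt_vm(ORTE_PROC_MY_HNP);
 */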