
Fix the unity routed component and direct xcast mode.

Ensure that direct xcast handles all its use-cases correctly.

The unity routed component needs to use the base recv function to operate properly.
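
(A sketch of what "use the base recv function" means for the unity component, based on the hunks below: the component's private persistent recv on ORTE_RML_TAG_UPDATE_ROUTES is deleted, and the HNP runs the routed framework's shared recv instead. orte_routed_base_comm_start() is assumed here as the counterpart of the orte_routed_base_comm_stop() call visible in the finalize hunks; only the stop side appears in this diff.)

/* sketch only -- not the literal committed code */
int orte_routed_unity_module_init(void)
{
    int rc;
    /* construct the global condition, lock, and peer list as before */
    OBJ_CONSTRUCT(&cond, opal_condition_t);
    OBJ_CONSTRUCT(&lock, opal_mutex_t);
    OBJ_CONSTRUCT(&peer_list, opal_hash_table_t);
    opal_hash_table_init(&peer_list, 128);

    /* the HNP posts the shared base recv - no component-local recv */
    if (orte_process_info.hnp) {
        if (ORTE_SUCCESS != (rc = orte_routed_base_comm_start())) { /* assumed name */
            ORTE_ERROR_LOG(rc);
            return rc;
        }
    }
    return ORTE_SUCCESS;
}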

This commit was SVN r17764.
This commit is contained in:
Ralph Castain 2008-03-06 18:13:05 +00:00
parent 1cb2c46716
commit 64d43cc44b
3 changed files with 278 additions and 185 deletions

View file

@@ -374,36 +374,20 @@ CLEANUP:
return rc;
}
-static int xcast_direct(orte_jobid_t job,
+static int relay_via_hnp(orte_jobid_t job,
opal_buffer_t *buffer,
-orte_rml_tag_t tag)
-{
-int rc;
-orte_process_name_t peer;
-orte_vpid_t i;
-opal_buffer_t *buf=NULL, *bfr=buffer;
+orte_rml_tag_t tag) {
+opal_buffer_t *buf;
orte_daemon_cmd_flag_t command;
orte_grpcomm_mode_t mode;
orte_rml_tag_t target=tag;
+int rc;
OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_output,
"%s xcast_direct",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
-/* if I am an application proc and this is going to some job other
-* than my own, then we have to send it via the daemons as the proc would have
-* no way of knowing how many procs are in the other job.
-*/
-if (ORTE_PROC_MY_NAME->jobid != job &&
-!orte_process_info.hnp && !orte_process_info.daemon) {
/* since we have to pack some additional info into the buffer
* for this case, we create a new buffer to contain all the
* info needed plus the payload
*/
buf = OBJ_NEW(opal_buffer_t);
-/* I have to send this to the HNP for handling as I have no idea
-* how many recipients there are! start by telling the HNP to relay
-*/
+/* start by telling the HNP to relay */
command = ORTE_DAEMON_PROCESS_AND_RELAY_CMD;
if (ORTE_SUCCESS != (rc = opal_dss.pack(buf, &command, 1, ORTE_DAEMON_CMD))) {
ORTE_ERROR_LOG(rc);
@@ -448,18 +432,118 @@ static int xcast_direct(orte_jobid_t job,
goto CLEANUP;
}
rc = ORTE_SUCCESS;
goto CLEANUP;
CLEANUP:
OBJ_RELEASE(buf);
return rc;
}
-/* if I am a daemon or the HNP and this is going to the daemon job to
static int xcast_direct(orte_jobid_t job,
opal_buffer_t *buffer,
orte_rml_tag_t tag)
{
int rc;
orte_process_name_t peer;
orte_vpid_t i, num_targets;
opal_buffer_t *buf=NULL, *bfr=buffer;
orte_daemon_cmd_flag_t command;
orte_rml_tag_t target=tag;
OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_output,
"%s xcast_direct",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
/* if I am an application proc */
if (!orte_process_info.hnp &&
!orte_process_info.daemon &&
!orte_process_info.tool) {
/* if this is going to some job other
* than my own, then we have to send it via the HNP as I have
* no way of knowing how many procs are in the other job.
*/
if (ORTE_PROC_MY_NAME->jobid != job) {
if (ORTE_SUCCESS != (rc = relay_via_hnp(job, buffer, tag))) {
ORTE_ERROR_LOG(rc);
goto CLEANUP;
}
}
/* if it is my jobid, then we can just send this ourselves -
* set the target tag
*/
target = tag;
/* set number of procs to the #procs in our job */
num_targets = orte_process_info.num_procs;
/* point to the right buffer */
bfr = buffer;
/* go to send it */
goto SEND;
}
/* if I am a daemon */
if (orte_process_info.daemon) {
/* if this is going to another job, then I have to relay
* it through the HNP as I have no idea how many procs
* are in that job
*/
if (ORTE_PROC_MY_NAME->jobid != job) {
if (ORTE_SUCCESS != (rc = relay_via_hnp(job, buffer, tag))) {
ORTE_ERROR_LOG(rc);
goto CLEANUP;
}
}
/* if this is going to the daemon job to
* someplace other than the daemon cmd processor, then I need to add
* a command to the buffer so the recipient daemons know what to do
*/
-if ((orte_process_info.hnp || orte_process_info.daemon) &&
-ORTE_PROC_MY_NAME->jobid == job &&
-ORTE_RML_TAG_DAEMON != tag) {
+if (ORTE_RML_TAG_DAEMON != tag) {
/* setup a buffer to handle the additional info */
buf = OBJ_NEW(opal_buffer_t);
/* add the proper command so the daemons know what to do */
command = ORTE_DAEMON_MESSAGE_LOCAL_PROCS;
if (ORTE_SUCCESS != (rc = opal_dss.pack(buf, &command, 1, ORTE_DAEMON_CMD))) {
ORTE_ERROR_LOG(rc);
goto CLEANUP;
}
if (ORTE_SUCCESS != (rc = opal_dss.pack(buf, &job, 1, ORTE_JOBID))) {
ORTE_ERROR_LOG(rc);
goto CLEANUP;
}
if (ORTE_SUCCESS != (rc = opal_dss.pack(buf, &tag, 1, ORTE_RML_TAG))) {
ORTE_ERROR_LOG(rc);
goto CLEANUP;
}
/* copy the payload into the new buffer - this is non-destructive, so our
* caller is still responsible for releasing any memory in the buffer they
* gave to us
*/
if (ORTE_SUCCESS != (rc = opal_dss.copy_payload(buf, buffer))) {
ORTE_ERROR_LOG(rc);
goto CLEANUP;
}
/* point to correct buffer to be sent */
bfr = buf;
/* send this to the daemon tag so it gets processed correctly */
target = ORTE_RML_TAG_DAEMON;
/* set the number of targets to be the number of daemons */
num_targets = orte_process_info.num_procs;
/* send it */
goto SEND;
}
}
/* if I am the HNP */
if (orte_process_info.hnp) {
orte_job_t *jdata;
/* if this is going to the daemon job */
if (ORTE_PROC_MY_NAME->jobid == job) {
/* if this is going someplace other than the daemon cmd
* processor, then I need to add a command to the buffer
* so the recipient daemons know what to do
*/
if (ORTE_RML_TAG_DAEMON != tag) {
/* since we have to pack some additional info into the buffer
-* for this case, we create a new buffer into to contain all the
+* for this case, we create a new buffer to contain all the
* info needed plus the payload
*/
buf = OBJ_NEW(opal_buffer_t);
@@ -491,15 +575,48 @@ static int xcast_direct(orte_jobid_t job,
bfr = buf;
/* send this to the daemon tag so it gets processed correctly */
target = ORTE_RML_TAG_DAEMON;
/* set the number of targets to be the number of daemons */
num_targets = orte_process_info.num_procs;
/* send it */
goto SEND;
} else {
/* if already going to the daemon tag, then just point to
* the right places and send it
*/
bfr = buffer;
target = tag;
num_targets = orte_process_info.num_procs;
goto SEND;
}
}
/* if this is going to any other job,
* then I need to know the number of procs in that job so I can
* send it
*/
if (NULL == (jdata = orte_get_job_data_object(job))) {
ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
rc = ORTE_ERR_NOT_FOUND;
goto CLEANUP;
}
/* set the number of targets */
num_targets = jdata->num_procs;
/* set the tag */
target = tag;
/* point to correct buffer to be sent */
bfr = buffer;
/* send it */
goto SEND;
}
SEND:
OPAL_OUTPUT_VERBOSE((2, orte_grpcomm_base_output,
"%s xcast_direct: buffer size %ld",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(long)buffer->bytes_used));
peer.jobid = job;
-for(i=0; i<orte_process_info.num_procs; i++) {
+for(i=0; i<num_targets; i++) {
peer.vpid = i;
OPAL_OUTPUT_VERBOSE((2, orte_grpcomm_base_output,
"%s xcast_direct: %s => %s",

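(The dispatch logic above reduces to a small decision table: cross-job xcasts from application procs or daemons are relayed through the HNP, everything else is sent directly with num_targets chosen from what the sender knows. The following standalone C model illustrates that table; the toy types and the must_relay() helper are illustrative, not ORTE code.)

#include <stdbool.h>
#include <stdio.h>

typedef enum { APP_PROC, DAEMON_PROC, HNP_PROC } role_t;

/* returns true when the xcast must be relayed via the HNP; otherwise
 * stores the direct-send fan-out in *num_targets */
static bool must_relay(role_t me, int my_job, int dest_job,
                       int procs_in_my_job, int procs_in_dest_job,
                       int *num_targets)
{
    if (me != HNP_PROC && my_job != dest_job) {
        /* app procs and daemons cannot size another job */
        return true;
    }
    /* the HNP can look up any job's size in its job data objects */
    *num_targets = (my_job == dest_job) ? procs_in_my_job : procs_in_dest_job;
    return false;
}

int main(void)
{
    int n = 0;
    bool relay;

    relay = must_relay(APP_PROC, 1, 2, 4, 8, &n);
    printf("app proc, other job  -> relay=%d\n", (int)relay);

    relay = must_relay(DAEMON_PROC, 0, 0, 3, 3, &n);
    printf("daemon, daemon job   -> relay=%d targets=%d\n", (int)relay, n);

    relay = must_relay(HNP_PROC, 0, 2, 3, 8, &n);
    printf("HNP, application job -> relay=%d targets=%d\n", (int)relay, n);
    return 0;
}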
View file

@@ -24,7 +24,6 @@
#include "routed_tree.h"
static orte_routed_module_t* routed_tree_init(int* priority);
-static bool selected=false;
/**
* component definition
@@ -89,7 +88,6 @@ orte_routed_tree_module_init(void)
OBJ_CONSTRUCT(&orte_routed_tree_module.cond, opal_condition_t);
OBJ_CONSTRUCT(&orte_routed_tree_module.lock, opal_mutex_t);
-selected = true;
return ORTE_SUCCESS;
}
@@ -100,7 +98,6 @@ orte_routed_tree_finalize(void)
uint64_t key;
void * value, *node, *next_node;
-if (selected) {
/* if I am an application process, indicate that I am
* truly finalizing prior to departure
*/
@@ -112,6 +109,12 @@ orte_routed_tree_finalize(void)
return rc;
}
}
+/* if I am the HNP, I need to stop the comm recv */
+if (orte_process_info.hnp) {
+orte_routed_base_comm_stop();
+}
/* don't destruct the routes until *after* we send the
* sync as the oob will be asking us how to route
* the message!
@@ -131,7 +134,6 @@ orte_routed_tree_finalize(void)
/* destruct the global condition and lock */
OBJ_DESTRUCT(&orte_routed_tree_module.cond);
OBJ_DESTRUCT(&orte_routed_tree_module.lock);
-}
return ORTE_SUCCESS;
}

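(Both routed components now share the teardown order shown above: send the finalizing sync first, stop the base comm recv on the HNP, and only then destruct the route storage, since the sync travels through the OOB, which has to ask the routed module for a route while the routes are still intact. A condensed sketch of that ordering, with the calls elided in the hunks above left as comments:)

/* sketch of the finalize ordering common to the tree and unity modules */
int routed_finalize_sketch(void)
{
    /* 1. application procs (but NOT tools) indicate that they are
     *    truly finalizing prior to departure; the exact sync call is
     *    elided in the hunks above, so it is not reproduced here */
    if (!orte_process_info.hnp && !orte_process_info.daemon &&
        !orte_process_info.tool) {
        /* ... send sync to the HNP, returning on error ... */
    }

    /* 2. the HNP stops the shared comm recv */
    if (orte_process_info.hnp) {
        orte_routed_base_comm_stop();
    }

    /* 3. only now destruct the routes: step 1 needed the OOB, and the
     *    OOB needed us to route the message */
    /* ... OBJ_DESTRUCT of cond/lock, hash table cleanup ... */

    return ORTE_SUCCESS;
}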
View file

@@ -33,7 +33,6 @@
#include "routed_unity.h"
static orte_routed_module_t* routed_unity_init(int* priority);
-static bool recv_issued=false;
static opal_condition_t cond;
static opal_mutex_t lock;
static opal_hash_table_t peer_list;
@@ -84,21 +83,8 @@ routed_unity_init(int* priority)
return &orte_routed_unity_module;
}
-static void orte_routed_unity_recv(int status, orte_process_name_t* sender,
-opal_buffer_t* buffer, orte_rml_tag_t tag,
-void* cbdata)
-{
-int rc;
-if (ORTE_SUCCESS != (rc = orte_rml_base_update_contact_info(buffer))) {
-ORTE_ERROR_LOG(rc);
-}
-}
int orte_routed_unity_module_init(void)
{
int rc;
/* setup the global condition and lock */
OBJ_CONSTRUCT(&cond, opal_condition_t);
OBJ_CONSTRUCT(&lock, opal_mutex_t);
@@ -106,15 +92,6 @@ int orte_routed_unity_module_init(void)
OBJ_CONSTRUCT(&peer_list, opal_hash_table_t);
opal_hash_table_init(&peer_list, 128);
-if (ORTE_SUCCESS != (rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
-ORTE_RML_TAG_UPDATE_ROUTES,
-ORTE_RML_PERSISTENT,
-orte_routed_unity_recv,
-NULL))) {
-ORTE_ERROR_LOG(rc);
-return rc;
-}
-recv_issued = true;
return ORTE_SUCCESS;
}
@@ -125,12 +102,10 @@ orte_routed_unity_finalize(void)
uint64_t key;
void * value, *node, *next_node;
-if (recv_issued) {
-if (ORTE_SUCCESS != (rc = orte_rml.recv_cancel(ORTE_NAME_WILDCARD, ORTE_RML_TAG_UPDATE_ROUTES))) {
-ORTE_ERROR_LOG(rc);
-return rc;
+/* if I am the HNP, I need to stop the comm recv */
+if (orte_process_info.hnp) {
+orte_routed_base_comm_stop();
}
-recv_issued = false;
/* if I am an application process (but NOT a tool), indicate that I am
* truly finalizing prior to departure
@@ -159,7 +134,6 @@ orte_routed_unity_finalize(void)
/* cleanup the global condition */
OBJ_DESTRUCT(&cond);
OBJ_DESTRUCT(&lock);
-}
return ORTE_SUCCESS;
}
@@ -192,7 +166,7 @@ orte_routed_unity_update_route(orte_process_name_t *target,
route_copy = malloc(sizeof(orte_process_name_t));
*route_copy = *route;
/* if we are routing everything for this target through one place,
-* then the target vpid is ORTE_NS_VPID_WILDCARD. So no need for
+* then the target vpid is ORTE_VPID_WILDCARD. So no need for
* special cases, just add it
*/
rc = opal_hash_table_set_value_uint64(&peer_list, orte_util_hash_name(target),
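
(The comment above notes that a route covering every process in a job is stored under the wildcard vpid. Below is a standalone model of the resulting keying scheme; the 64-bit key layout and the exact-then-wildcard lookup order are assumptions for illustration, not taken from orte_util_hash_name().)

#include <stdint.h>
#include <stdio.h>

#define VPID_WILDCARD UINT32_MAX   /* stand-in for ORTE_VPID_WILDCARD */

/* assumed layout: jobid in the high 32 bits, vpid in the low 32 bits */
static uint64_t hash_name(uint32_t jobid, uint32_t vpid)
{
    return ((uint64_t)jobid << 32) | vpid;
}

int main(void)
{
    uint32_t job = 2;
    /* one stored entry under the wildcard vpid routes the whole job */
    uint64_t stored = hash_name(job, VPID_WILDCARD);

    for (uint32_t vpid = 0; vpid < 3; vpid++) {
        /* look up the exact name first, then fall back to the wildcard */
        uint64_t key = hash_name(job, vpid);
        if (key != stored) {
            key = hash_name(job, VPID_WILDCARD);
        }
        printf("route for (%u,%u) resolved via key 0x%016llx\n",
               (unsigned)job, (unsigned)vpid, (unsigned long long)key);
    }
    return 0;
}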