- Second patch, as discussed in Louisville.
  Replace the short macros from orte/util/name_fns.h with the actual function call.
- Compiles on Linux/x86-64.

This commit was SVN r20740.
parent 781caee0b6
commit 2a70618a77
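
For context, ORTE_NAME_PRINT is only a thin convenience wrapper, so the substitution below is purely mechanical. A minimal sketch of the shortcut being expanded away (assumed form; see orte/util/name_fns.h for the real definition and guards):

    /* Assumed shape of the shortcut macro that this patch expands at
     * every call site; it simply forwards to the printing function. */
    #define ORTE_NAME_PRINT(n) orte_util_print_name_args((n))

    /* The wrapped function renders an orte_process_name_t (jobid/vpid)
     * into an internal buffer and returns the string, suitable for "%s". */
    char* orte_util_print_name_args(const orte_process_name_t *name);

With the macro gone, every "%s" argument changes from ORTE_NAME_PRINT(x) to orte_util_print_name_args(x); the format strings and all other arguments stay untouched.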
@@ -408,10 +408,10 @@ static int mca_bml_r2_add_procs( size_t nprocs,
             orte_show_help("help-mca-bml-r2.txt",
                            "unreachable proc",
                            true,
-                           ORTE_NAME_PRINT(&(ompi_proc_local_proc->proc_name)),
+                           orte_util_print_name_args(&(ompi_proc_local_proc->proc_name)),
                            (ompi_proc_local_proc->proc_hostname ?
                             ompi_proc_local_proc->proc_hostname : "unknown!"),
-                           ORTE_NAME_PRINT(&(unreach_proc->proc_name)),
+                           orte_util_print_name_args(&(unreach_proc->proc_name)),
                            (unreach_proc->proc_hostname ?
                             unreach_proc->proc_hostname : "unknown!"),
                            btl_names);
@@ -63,7 +63,7 @@ void mca_btl_base_error_no_nics(const char* transport,
     char *procid;
     if (mca_btl_base_warn_component_unused) {
         /* print out no-nic warning if user told us to */
-        asprintf(&procid, "%s", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
+        asprintf(&procid, "%s", orte_util_print_name_args(ORTE_PROC_MY_NAME));

         orte_show_help("help-mpi-btl-base.txt", "btl:no-nics",
                        true, procid, transport, orte_proc_info.nodename,
@@ -39,7 +39,7 @@ OMPI_DECLSPEC extern int mca_btl_base_out(const char*, ...);
 do { \
     mca_btl_base_out("[%s]%s[%s:%d:%s] ", \
                      orte_proc_info.nodename, \
-                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
+                     orte_util_print_name_args(ORTE_PROC_MY_NAME), \
                      __FILE__, __LINE__, __func__); \
     mca_btl_base_out args; \
     mca_btl_base_out("\n"); \

@@ -50,7 +50,7 @@ do { \
 do { \
     mca_btl_base_err("[%s]%s[%s:%d:%s] ", \
                      orte_proc_info.nodename, \
-                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
+                     orte_util_print_name_args(ORTE_PROC_MY_NAME), \
                      __FILE__, __LINE__, __func__); \
     mca_btl_base_err args; \
     mca_btl_base_err("\n"); \

@@ -59,7 +59,7 @@ do { \
 #define BTL_PEER_ERROR(proc, args) \
 do { \
     mca_btl_base_err("%s[%s:%d:%s] from %s ", \
-                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
+                     orte_util_print_name_args(ORTE_PROC_MY_NAME), \
                      __FILE__, __LINE__, __func__, \
                      orte_proc_info.nodename); \
     if(proc && proc->proc_hostname) { \

@@ -76,7 +76,7 @@ do { \
     if(mca_btl_base_verbose > 0) { \
         mca_btl_base_err("[%s]%s[%s:%d:%s] ", \
                          orte_proc_info.nodename, \
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME), \
                          __FILE__, __LINE__, __func__); \
         mca_btl_base_err args; \
         mca_btl_base_err("\n"); \
@@ -434,7 +434,7 @@ static int mca_btl_gm_discover( void )
                         "%s gm_port %08lX, "
                         "board %" PRIu32 ", global %" PRIu32 " "
                         "node %" PRIu32 "port %" PRIu32 "\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
                         (unsigned long) port, board_no, global_id, node_id, port_no);
         }

@@ -130,14 +130,14 @@ mca_btl_gm_proc_t* mca_btl_gm_proc_create(ompi_proc_t* ompi_proc)
                               &size);
     if(OMPI_SUCCESS != rc) {
         opal_output(0, "[%s:%d] ompi_modex_recv failed for peer %s",
-                    __FILE__,__LINE__,ORTE_NAME_PRINT(&ompi_proc->proc_name));
+                    __FILE__,__LINE__,orte_util_print_name_args(&ompi_proc->proc_name));
         OBJ_RELEASE(gm_proc);
         return NULL;
     }

     if((size % sizeof(mca_btl_gm_addr_t)) != 0) {
         opal_output(0, "[%s:%d] invalid gm address for peer %s",
-                    __FILE__,__LINE__,ORTE_NAME_PRINT(&ompi_proc->proc_name));
+                    __FILE__,__LINE__,orte_util_print_name_args(&ompi_proc->proc_name));
         OBJ_RELEASE(gm_proc);
         return NULL;
     }

@@ -192,7 +192,7 @@ int mca_btl_gm_proc_insert(
     if(mca_btl_gm_component.gm_debug > 0) {
         opal_output(0, "%s mapped global id %" PRIu32
                     " to node id %" PRIu32 "\n",
-                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                    orte_util_print_name_args(ORTE_PROC_MY_NAME),
                     gm_endpoint->endpoint_addr.global_id,
                     gm_endpoint->endpoint_addr.node_id);
     }
@@ -122,7 +122,7 @@ mca_btl_mx_proc_t* mca_btl_mx_proc_create(ompi_proc_t* ompi_proc)
                               ompi_proc, (void*)&mx_peers, &size );
     if( OMPI_SUCCESS != rc ) {
         opal_output( 0, "mca_pml_base_modex_recv failed for peer %s",
-                     ORTE_NAME_PRINT(&ompi_proc->proc_name) );
+                     orte_util_print_name_args(&ompi_proc->proc_name) );
         return NULL;
     }

@@ -131,7 +131,7 @@ mca_btl_mx_proc_t* mca_btl_mx_proc_create(ompi_proc_t* ompi_proc)
     }
     if( (size % sizeof(mca_btl_mx_addr_t)) != 0 ) {
         opal_output( 0, "invalid mx address for peer %s",
-                     ORTE_NAME_PRINT(&ompi_proc->proc_name) );
+                     orte_util_print_name_args(&ompi_proc->proc_name) );
         return NULL;
     }
     /* Let's see if we have a way to connect to the remote proc using MX.
@@ -129,14 +129,14 @@ mca_btl_ud_proc_t* mca_btl_ud_proc_create(ompi_proc_t* ompi_proc)
     if(OMPI_SUCCESS != rc) {
         opal_output(0,
                     "[%s:%d] ompi_modex_recv failed for peer %s",
-                    __FILE__,__LINE__,ORTE_NAME_PRINT(&ompi_proc->proc_name));
+                    __FILE__,__LINE__,orte_util_print_name_args(&ompi_proc->proc_name));
         OBJ_RELEASE(module_proc);
         return NULL;
     }

     if((size % sizeof(mca_btl_ud_addr_t)) != 0) {
         opal_output(0, "[%s:%d] invalid module address for peer %s",
-                    __FILE__,__LINE__,ORTE_NAME_PRINT(&ompi_proc->proc_name));
+                    __FILE__,__LINE__,orte_util_print_name_args(&ompi_proc->proc_name));
         OBJ_RELEASE(module_proc);
         return NULL;
     }
@@ -159,7 +159,7 @@ mca_btl_openib_proc_t* mca_btl_openib_proc_create(ompi_proc_t* ompi_proc)
     if (OMPI_SUCCESS != rc) {
         BTL_ERROR(("[%s:%d] ompi_modex_recv failed for peer %s",
                    __FILE__, __LINE__,
-                   ORTE_NAME_PRINT(&ompi_proc->proc_name)));
+                   orte_util_print_name_args(&ompi_proc->proc_name)));
         OBJ_RELEASE(module_proc);
         return NULL;
     }

@@ -116,7 +116,7 @@ int mca_btl_pcie_proc_create(ompi_proc_t* ompi_proc,
                              &size);
     if (OMPI_SUCCESS != rc) {
         opal_output(mca_btl_base_output, "[%s:%d] ompi_modex_recv failed for peer %s",
-                    __FILE__, __LINE__, ORTE_NAME_PRINT(&ompi_proc->proc_name));
+                    __FILE__, __LINE__, orte_util_print_name_args(&ompi_proc->proc_name));
         OBJ_RELEASE(pcie_proc);
         *ret_proc = NULL;
         return OMPI_ERROR;
@@ -483,7 +483,7 @@ static int mca_btl_tcp_endpoint_recv_connect_ack(mca_btl_base_endpoint_t* btl_en
     /* compare this to the expected values */
     if (OPAL_EQUAL != orte_util_compare_name_fields(ORTE_NS_CMP_ALL, &btl_proc->proc_name, &guid)) {
         BTL_ERROR(("received unexpected process identifier %s",
-                   ORTE_NAME_PRINT(&guid)));
+                   orte_util_print_name_args(&guid)));
         mca_btl_tcp_endpoint_close(btl_endpoint);
         return OMPI_ERR_UNREACH;
     }
@@ -133,7 +133,7 @@ mca_btl_udapl_proc_t* mca_btl_udapl_proc_create(ompi_proc_t* ompi_proc)
     if(OMPI_SUCCESS != rc) {
         BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL,
             ("ompi_modex_recv failed for peer %s",
-             ORTE_NAME_PRINT(&ompi_proc->proc_name)));
+             orte_util_print_name_args(&ompi_proc->proc_name)));
         OBJ_RELEASE(udapl_proc);
         return NULL;
     }

@@ -141,7 +141,7 @@ mca_btl_udapl_proc_t* mca_btl_udapl_proc_create(ompi_proc_t* ompi_proc)
     if((size % sizeof(mca_btl_udapl_addr_t)) != 0) {
         BTL_UDAPL_VERBOSE_OUTPUT(VERBOSE_CRITICAL,
             ("invalid udapl address for peer %s",
-             ORTE_NAME_PRINT(&ompi_proc->proc_name)));
+             orte_util_print_name_args(&ompi_proc->proc_name)));
         OBJ_RELEASE(udapl_proc);
         return NULL;
     }
@@ -1477,7 +1477,7 @@ ompi_crcp_base_pml_state_t* ompi_crcp_bkmrk_pml_del_procs(
         if(NULL == item) {
             opal_output(mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: del_procs: Unable to find peer %s\n",
-                        ORTE_NAME_PRINT(&(procs[i]->proc_name)));
+                        orte_util_print_name_args(&(procs[i]->proc_name)));
             exit_status = OMPI_ERROR;
             goto DONE;
         }

@@ -3021,7 +3021,7 @@ ompi_crcp_base_pml_state_t* ompi_crcp_bkmrk_pml_ft_event(
         if( OMPI_SUCCESS != (ret = ft_event_coordinate_peers()) ) {
             opal_output(mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: %s ft_event: Checkpoint Coordination Failed %d",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
                         ret);
             exit_status = ret;
             goto DONE;

@@ -3366,7 +3366,7 @@ static int traffic_message_find_mark_persistent(ompi_crcp_bkmrk_pml_traffic_mess
         else if( loc_breq->req_sequence == breq->req_sequence ) {
             OPAL_OUTPUT_VERBOSE((25, mca_crcp_bkmrk_component.super.output_handle,
                                  "%s %8s Request [%d] (%s) %d : %d",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
                                  (set_is_active ? "Start" : (NULL != c_ref ? "Drain" : "Complete")),
                                  (int)msg_ref->msg_id,
                                  (content_ref->active ? "T" : "F"),

@@ -3467,8 +3467,8 @@ static int traffic_message_create_drain_message(bool post_drain,
         OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                              "crcp:bkmrk: %s <-- %s "
                              " --> Create Drain Msg: %s %4d = min(%4d / %4d)",
-                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                             ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                             orte_util_print_name_args(&(peer_ref->proc_name)),
                              (post_drain ? "Posting" : "Not Posting"),
                              m_total, (*posted_msg_ref)->active, max_post ));

@@ -3495,8 +3495,8 @@ static int traffic_message_create_drain_message(bool post_drain,
         OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                              "crcp:bkmrk: %s <-- %s "
                              " \t--> Find Content: %s (%4d of %4d)",
-                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                             ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                             orte_util_print_name_args(&(peer_ref->proc_name)),
                              (post_drain ? "Posting" : "Not Posting"),
                              m_iter, m_total));

@@ -3573,8 +3573,8 @@ static int traffic_message_create_drain_message(bool post_drain,
         OPAL_OUTPUT_VERBOSE((15, mca_crcp_bkmrk_component.super.output_handle,
                              "crcp:bkmrk: %s <-- %s "
                              "Added %d messages to the drained list (size = %d)",
-                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                             ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                             orte_util_print_name_args(&(peer_ref->proc_name)),
                              (*num_posted),
                              (int)opal_list_get_size(&(peer_ref->drained_list)) ));
@@ -4212,7 +4212,7 @@ static int ft_event_coordinate_peers(void)
     if( stall_for_completion ) {
         OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                              "crcp:bkmrk: %s **** STALLING %s in PID %d ***",
-                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
                              (current_msg_type == COORD_MSG_TYPE_B_SEND ? "Send" : "Recv"),
                              getpid() ));
         step_to_return_to = 1;

@@ -4239,7 +4239,7 @@ static int ft_event_coordinate_peers(void)

     OPAL_OUTPUT_VERBOSE((5, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s Coordination Finished...\n",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME)));

     /*
      * Now that all our peer channels are marked as drained

@@ -4428,10 +4428,10 @@ static int ft_event_check_bookmarks(void)
                          "---------------------------------------------"));
     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "Process %s Match Table",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME)));
     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "%s %5s | %7s | %7s | %7s | %7s |",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          "Vpid", "T_Send", "M_Recv", "M_Send", "T_Recv"));

     for(item = opal_list_get_first(&ompi_crcp_bkmrk_pml_peer_refs);

@@ -4449,7 +4449,7 @@ static int ft_event_check_bookmarks(void)

         OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                              "%s %5d | %7d | %7d | %7d | %7d |",
-                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
                              peer_ref->proc_name.vpid,
                              t_send, m_recv, m_send, t_recv));
     }
@@ -4491,8 +4491,8 @@ static int ft_event_check_bookmarks(void)
                         "crcp:bkmrk: %s --> %s "
                         "Total Sent (%4d) = Matched Recv. (%4d) => Diff (%4d). "
                         " WARNING: Peer received more than was sent. :(\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         p_n_to_p_m,
                         p_n_from_p_m,
                         (p_n_to_p_m - p_n_from_p_m)

@@ -4505,8 +4505,8 @@ static int ft_event_check_bookmarks(void)
             OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: %s --> %s "
                         "Total Sent (%4d) = Matched Recv. (%4d). Peer needs %4d.\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         p_n_to_p_m,
                         p_n_from_p_m,
                         (p_n_to_p_m - p_n_from_p_m)

@@ -4519,7 +4519,7 @@ static int ft_event_check_bookmarks(void)
             if( OMPI_SUCCESS != (ret = send_msg_details(peer_ref, p_n_to_p_m, p_n_from_p_m) ) ) {
                 opal_output(mca_crcp_bkmrk_component.super.output_handle,
                             "crcp:bkmrk: check_bookmarks: Unable to send message details to peer %s: Return %d\n",
-                            ORTE_NAME_PRINT(&peer_ref->proc_name),
+                            orte_util_print_name_args(&peer_ref->proc_name),
                             ret);
                 return ret;
             }

@@ -4538,8 +4538,8 @@ static int ft_event_check_bookmarks(void)
                         "crcp:bkmrk: %s --> %s "
                         "Matched Sent (%4d) = Total Recv. (%4d) => Diff (%4d). "
                         " WARNING: I received more than the peer sent. :(\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         p_n_to_p_m,
                         p_n_from_p_m,
                         (p_n_to_p_m - p_n_from_p_m)

@@ -4552,8 +4552,8 @@ static int ft_event_check_bookmarks(void)
             OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: %s <-- %s "
                         "Matched Sent (%4d) = Total Recv. (%4d). I need %4d.\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         p_n_to_p_m,
                         p_n_from_p_m,
                         (p_n_to_p_m - p_n_from_p_m)

@@ -4565,7 +4565,7 @@ static int ft_event_check_bookmarks(void)
             if( OMPI_SUCCESS != (ret = recv_msg_details(peer_ref, p_n_to_p_m, p_n_from_p_m) ) ) {
                 opal_output(mca_crcp_bkmrk_component.super.output_handle,
                             "crcp:bkmrk: check_bookmarks: Unable to recv message details from peer %s: Return %d\n",
-                            ORTE_NAME_PRINT(&peer_ref->proc_name),
+                            orte_util_print_name_args(&peer_ref->proc_name),
                             ret);
                 return ret;
             }

@@ -4586,8 +4586,8 @@ static int ft_event_check_bookmarks(void)
                         "crcp:bkmrk: %s --> %s "
                         "Matched Sent (%4d) = Total Recv. (%4d) => Diff (%4d). "
                         " WARNING: I received more than the peer sent. :(\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         p_n_to_p_m,
                         p_n_from_p_m,
                         (p_n_to_p_m - p_n_from_p_m)

@@ -4600,8 +4600,8 @@ static int ft_event_check_bookmarks(void)
             OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: %s <-- %s "
                         "Matched Sent (%4d) = Total Recv. (%4d). I need %4d.\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         p_n_to_p_m,
                         p_n_from_p_m,
                         (p_n_to_p_m - p_n_from_p_m)

@@ -4613,7 +4613,7 @@ static int ft_event_check_bookmarks(void)
             if( OMPI_SUCCESS != (ret = recv_msg_details(peer_ref, p_n_to_p_m, p_n_from_p_m) ) ) {
                 opal_output(mca_crcp_bkmrk_component.super.output_handle,
                             "crcp:bkmrk: check_bookmarks: Unable to recv message details from peer %s: Return %d\n",
-                            ORTE_NAME_PRINT(&peer_ref->proc_name),
+                            orte_util_print_name_args(&peer_ref->proc_name),
                             ret);
                 return ret;
             }

@@ -4632,8 +4632,8 @@ static int ft_event_check_bookmarks(void)
                         "crcp:bkmrk: %s --> %s "
                         "Total Sent (%4d) = Matched Recv. (%4d) => Diff (%4d). "
                         " WARNING: Peer received more than was sent. :(\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         p_n_to_p_m,
                         p_n_from_p_m,
                         (p_n_to_p_m - p_n_from_p_m)

@@ -4646,8 +4646,8 @@ static int ft_event_check_bookmarks(void)
             OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: %s --> %s "
                         "Total Sent (%4d) = Matched Recv. (%4d). Peer needs %4d.\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         p_n_to_p_m,
                         p_n_from_p_m,
                         (p_n_to_p_m - p_n_from_p_m)

@@ -4660,7 +4660,7 @@ static int ft_event_check_bookmarks(void)
             if( OMPI_SUCCESS != (ret = send_msg_details(peer_ref, p_n_to_p_m, p_n_from_p_m) ) ) {
                 opal_output(mca_crcp_bkmrk_component.super.output_handle,
                             "crcp:bkmrk: check_bookmarks: Unable to send message details to peer %s: Return %d\n",
-                            ORTE_NAME_PRINT(&peer_ref->proc_name),
+                            orte_util_print_name_args(&peer_ref->proc_name),
                             ret);
                 return ret;
             }
@@ -4685,7 +4685,7 @@ static int ft_event_post_drain_acks(void)

     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s Wait on %d Drain ACK Messages.\n",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          (int)req_size));

     /*

@@ -4705,8 +4705,8 @@
                                                          NULL) ) ) {
             opal_output(mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: %s <-- %s: Failed to post a RML receive to the peer\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(drain_msg_ack->peer)));
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(drain_msg_ack->peer)));
             return ret;
         }
     }

@@ -4748,8 +4748,8 @@ static void drain_message_ack_cbfunc(int status,
             drain_msg_ack->complete = true;
             OPAL_OUTPUT_VERBOSE((5, mca_crcp_bkmrk_component.super.output_handle,
                                  "crcp:bkmrk: %s --> %s Received ACK of FLUSH from peer\n",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(sender) ));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(sender) ));
             return;
         }
     }

@@ -4757,8 +4757,8 @@

     opal_output(mca_crcp_bkmrk_component.super.output_handle,
                 "crcp:bkmrk: %s --> %s ERROR: Unable to match ACK to peer\n",
-                ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                ORTE_NAME_PRINT(sender) );
+                orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                orte_util_print_name_args(sender) );

 cleanup:
     return;
@@ -4891,8 +4891,8 @@ static int ft_event_post_drained(void)
         if( peer_total > 0 || stall_for_completion ) {
             OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                                  "crcp:bkmrk: %s <-- %s Will be draining %4d messages from this peer. Total %4d %s\n",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&(cur_peer_ref->proc_name)),
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&(cur_peer_ref->proc_name)),
                                  peer_total,
                                  quiesce_request_count,
                                  (stall_for_completion ? "(And Stalling)" : "") ));

@@ -4922,8 +4922,8 @@ static int ft_event_post_drain_message(ompi_crcp_bkmrk_pml_drain_message_ref_t
     if( content_ref->already_posted ) {
         OPAL_OUTPUT_VERBOSE((15, mca_crcp_bkmrk_component.super.output_handle,
                              "crcp:bkmrk: %s <-- %s Found a message that we do not need to post.\n",
-                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                             ORTE_NAME_PRINT(&(drain_msg_ref->proc_name)) ));
+                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                             orte_util_print_name_args(&(drain_msg_ref->proc_name)) ));
         return OMPI_SUCCESS;
     }

@@ -4936,8 +4936,8 @@ static int ft_event_post_drain_message(ompi_crcp_bkmrk_pml_drain_message_ref_t
      */
     OPAL_OUTPUT_VERBOSE((20, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s <-- %s Posting a message to be drained from rank %d.\n",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(&(drain_msg_ref->proc_name)),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(&(drain_msg_ref->proc_name)),
                          drain_msg_ref->rank));
     if( OMPI_SUCCESS != (ret = wrapped_pml_module->pml_irecv(content_ref->buffer,
                                                              (drain_msg_ref->count * drain_msg_ref->ddt_size),

@@ -4948,8 +4948,8 @@
                                                              &(content_ref->request) ) ) ) {
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: %s <-- %s Failed to post the Draining PML iRecv\n",
-                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                    ORTE_NAME_PRINT(&(drain_msg_ref->proc_name)) );
+                    orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                    orte_util_print_name_args(&(drain_msg_ref->proc_name)) );
         return ret;
     }
@@ -4967,7 +4967,7 @@ static int ft_event_wait_quiesce(void)
     if( OMPI_SUCCESS != (ret = wait_quiesce_drained() ) ) {
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: wait_quiesce: %s Failed to quiesce drained messages\n",
-                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME) );
+                    orte_util_print_name_args(ORTE_PROC_MY_NAME) );
         exit_status = ret;
         goto cleanup;
     }

@@ -4978,7 +4978,7 @@
     if( OMPI_SUCCESS != (ret = wait_quiesce_drain_ack() ) ) {
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: wait_quiesce: %s Failed to recv all drain ACKs\n",
-                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME) );
+                    orte_util_print_name_args(ORTE_PROC_MY_NAME) );
         exit_status = ret;
         goto cleanup;
     }

@@ -5000,7 +5000,7 @@ static int wait_quiesce_drained(void)

     OPAL_OUTPUT_VERBOSE((5, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s Waiting on %d messages to drain\n",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          (int)quiesce_request_count));

     /*

@@ -5044,8 +5044,8 @@ static int wait_quiesce_drained(void)

         OPAL_OUTPUT_VERBOSE((5, mca_crcp_bkmrk_component.super.output_handle,
                              "crcp:bkmrk: %s --> %s Send ACKs to Peer\n",
-                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                             ORTE_NAME_PRINT(&(cur_peer_ref->proc_name)) ));
+                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                             orte_util_print_name_args(&(cur_peer_ref->proc_name)) ));

         /* Send All Clear to Peer */
         if (NULL == (buffer = OBJ_NEW(opal_buffer_t))) {

@@ -5145,7 +5145,7 @@ static int coord_request_wait_all( size_t count,

         OPAL_OUTPUT_VERBOSE((15, mca_crcp_bkmrk_component.super.output_handle,
                              "crcp:bkmrk: %s Request Wait: Done with idx %d of %d\n",
-                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
                              (int)i, (int)count));
     }

@@ -5184,7 +5184,7 @@ static int wait_quiesce_drain_ack(void)

     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s Waiting on %d Drain ACK messages\n",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          num_outstanding));

     while(0 < num_outstanding) {
@@ -5240,8 +5240,8 @@ static int send_bookmarks(int peer_idx)

     OPAL_OUTPUT_VERBOSE((15, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s --> %s Sending bookmark (S[%6d] R[%6d])\n",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(&peer_name),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(&peer_name),
                          peer_ref->total_msgs_sent,
                          peer_ref->total_msgs_recvd));

@@ -5261,7 +5261,7 @@ static int send_bookmarks(int peer_idx)
     if ( 0 > ( ret = orte_rml.send_buffer(&peer_name, buffer, OMPI_CRCP_COORD_BOOKMARK_TAG, 0)) ) {
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: send_bookmarks: Failed to send bookmark to peer %s: Return %d\n",
-                    ORTE_NAME_PRINT(&peer_name),
+                    orte_util_print_name_args(&peer_name),
                     ret);
         exit_status = ret;
         goto cleanup;

@@ -5297,7 +5297,7 @@ static int recv_bookmarks(int peer_idx)
                                               NULL) ) ) {
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: recv_bookmarks: Failed to post receive bookmark from peer %s: Return %d\n",
-                    ORTE_NAME_PRINT(&peer_name),
+                    orte_util_print_name_args(&peer_name),
                     ret);
         exit_status = ret;
         goto cleanup;

@@ -5348,8 +5348,8 @@ static void recv_bookmarks_cbfunc(int status,

     OPAL_OUTPUT_VERBOSE((15, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s <-- %s Received bookmark (S[%6d] R[%6d]) vs. (S[%6d] R[%6d])\n",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(sender),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(sender),
                          peer_ref->matched_msgs_sent,
                          peer_ref->matched_msgs_recvd,
                          peer_ref->total_msgs_sent,
@@ -5413,8 +5413,8 @@ static int send_msg_details(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
         if(OMPI_SUCCESS != (ret = do_send_msg_detail(peer_ref, msg_ref, &num_matches, &p_total_found, &finished)) ) {
             opal_output(mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: send_msg_details: %s --> %s Failed to send message details to peer. Return %d\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         ret);
         }

@@ -5475,8 +5475,8 @@ static int send_msg_details(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
     opal_list_append(&drained_msg_ack_list, &(d_msg_ack->super));
     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s <-> %s Message Inflight! Will wait on ACK from this peer.\n",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(&(peer_ref->proc_name))));
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(&(peer_ref->proc_name))));

     END_TIMER(CRCP_TIMER_CKPT_CHECK_PEER_S);
     DISPLAY_INDV_TIMER(CRCP_TIMER_CKPT_CHECK_PEER_S, peer_ref->proc_name.vpid, total_details_sent);

@@ -5553,7 +5553,7 @@ static int do_send_msg_detail(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
                                           OMPI_CRCP_COORD_BOOKMARK_TAG, 0)) ) {
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: do_send_msg_detail: Unable to send message details to peer %s: Return %d\n",
-                    ORTE_NAME_PRINT(&peer_ref->proc_name),
+                    orte_util_print_name_args(&peer_ref->proc_name),
                     ret);

         exit_status = OMPI_ERROR;

@@ -5580,8 +5580,8 @@ static int do_send_msg_detail(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
                                           OMPI_CRCP_COORD_BOOKMARK_TAG, 0) ) ) {
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: do_send_msg_detail: %s --> %s Failed to receive ACK buffer from peer. Return %d\n",
-                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                    ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                    orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                    orte_util_print_name_args(&(peer_ref->proc_name)),
                     ret);
         exit_status = ret;
         goto cleanup;

@@ -5667,8 +5667,8 @@ static int recv_msg_details(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
             opal_output(mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: recv_msg_details: %s <-- %s "
                         "Failed to receive message detail from peer. Return %d\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         ret);
             exit_status = ret;
             goto cleanup;

@@ -5688,8 +5688,8 @@ static int recv_msg_details(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
             opal_output(mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: recv_msg_details: %s <-- %s "
                         "Failed to check message detail from peer. Return %d\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         ret);
             exit_status = ret;
             goto cleanup;

@@ -5700,8 +5700,8 @@ static int recv_msg_details(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,

         OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                              "crcp:bkmrk: %s <-- %s Recv Detail: Stage --: [%3d / %3d] [%3d, %3d, %s]",
-                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                             ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                             orte_util_print_name_args(&(peer_ref->proc_name)),
                              need, found,
                              num_resolved, total_details_recv,
                              ( need <= found ? "T" : "F") ));
@@ -5718,8 +5718,8 @@ static int recv_msg_details(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
         if(OMPI_SUCCESS != (ret = do_recv_msg_detail_resp(peer_ref, response, num_resolved, found))) {
             opal_output(mca_crcp_bkmrk_component.super.output_handle,
                         "crcp:bkmrk: recv_msg_details: %s <-- %s Failed to respond to peer. Return %d\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         ret);
             exit_status = ret;
             goto cleanup;

@@ -5754,8 +5754,8 @@ static int do_recv_msg_detail(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
     if ( 0 > (ret = orte_rml.recv_buffer(&peer_ref->proc_name, buffer, OMPI_CRCP_COORD_BOOKMARK_TAG, 0) ) ) {
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: do_recv_msg_detail: %s <-- %s Failed to receive buffer from peer. Return %d\n",
-                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                    ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                    orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                    orte_util_print_name_args(&(peer_ref->proc_name)),
                     ret);
         exit_status = ret;
         goto cleanup;

@@ -5814,8 +5814,8 @@ static int do_recv_msg_detail_check_drain(ompi_crcp_bkmrk_pml_peer_r
     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s <-- %s "
                          "Stage 0: Ck.Drain: [TR %3d/MS %3d] sent %4d, unres %4d, res %4d",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(&(peer_ref->proc_name)),
                          peer_ref->total_msgs_recvd,
                          peer_ref->matched_msgs_sent,
                          p_num_sent,

@@ -5837,8 +5837,8 @@
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: recv_msg_detail_check: %s -- %s "
                     "Failed to determine if we have received this message. Return %d\n",
-                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                    ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                    orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                    orte_util_print_name_args(&(peer_ref->proc_name)),
                     ret);
         exit_status = ret;
         goto cleanup;

@@ -5887,8 +5887,8 @@
     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s <-- %s "
                          "Stage 1: Ck.Drain: [TR %3d/MS %3d] sent %4d, unres %4d, res %4d",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(&(peer_ref->proc_name)),
                          peer_ref->total_msgs_recvd,
                          peer_ref->matched_msgs_sent,
                          p_num_sent,

@@ -5972,8 +5972,8 @@
     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s <-- %s "
                          "Stage 2: Ck.Drain: [TR %3d/MS %3d] sent %4d, unres %4d, res %4d, active %4d",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(&(peer_ref->proc_name)),
                          peer_ref->total_msgs_recvd,
                          peer_ref->matched_msgs_sent,
                          p_num_sent,

@@ -6021,8 +6021,8 @@
             OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                                  "crcp:bkmrk: %s <-- %s "
                                  "Recv Check: Found a message that is 'active'! Prepare to STALL.\n",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&(peer_ref->proc_name)) ));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&(peer_ref->proc_name)) ));
             stall_for_completion = true;
         }
         else {
@@ -6030,8 +6030,8 @@
                         "crcp:bkmrk: %s <-- %s "
                         "Recv Check: Found a message that is 'active', but is not the current recv! "
                         "No stall required [%3d, %3d, %3d, %3d].\n",
-                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                        ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                        orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                        orte_util_print_name_args(&(peer_ref->proc_name)),
                         (int)current_msg_id,
                         (int)current_msg_type,
                         (int)posted_recv_msg_ref->msg_id,

@@ -6089,8 +6089,8 @@
     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s <-- %s "
                          "Stage 3: Ck.Drain: [TR %3d/MS %3d] sent %4d, unres %4d, res %4d, active %4d",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(&(peer_ref->proc_name)),
                          peer_ref->total_msgs_recvd,
                          peer_ref->matched_msgs_sent,
                          p_num_sent,

@@ -6126,8 +6126,8 @@
     OPAL_OUTPUT_VERBOSE((10, mca_crcp_bkmrk_component.super.output_handle,
                          "crcp:bkmrk: %s <-- %s "
                          "Stage 4: Ck.Drain: [TR %3d/MS %3d] sent %4d, unres %4d, res %4d, active %4d",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(&(peer_ref->proc_name)),
                          peer_ref->total_msgs_recvd,
                          peer_ref->matched_msgs_sent,
                          p_num_sent,

@@ -6163,7 +6163,7 @@ static int do_recv_msg_detail_resp(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
     if ( 0 > ( ret = orte_rml.send_buffer(&peer_ref->proc_name, buffer, OMPI_CRCP_COORD_BOOKMARK_TAG, 0)) ) {
         opal_output(mca_crcp_bkmrk_component.super.output_handle,
                     "crcp:bkmrk: recv_msg_detail_resp: Unable to send message detail response to peer %s: Return %d\n",
-                    ORTE_NAME_PRINT(&peer_ref->proc_name),
+                    orte_util_print_name_args(&peer_ref->proc_name),
                     ret);
         exit_status = OMPI_ERROR;
         goto cleanup;

@@ -6478,8 +6478,8 @@ static void traffic_message_dump_peer(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,

     opal_output(0, "------------- %s ---------------------------------", msg);
     opal_output(0, "%s <-> %s Totals Sent [ %3d / %3d ] Recv [ %3d / %3d ]",
-                ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                ORTE_NAME_PRINT(&(peer_ref->proc_name)),
+                orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                orte_util_print_name_args(&(peer_ref->proc_name)),
                 peer_ref->total_msgs_sent,
                 peer_ref->matched_msgs_sent,
                 peer_ref->total_msgs_recvd,
@@ -137,7 +137,7 @@ static int connect_accept ( ompi_communicator_t *comm, int root,

     OPAL_OUTPUT_VERBOSE((1, ompi_dpm_base_output,
                          "%s dpm:orte:connect_accept with port %s %s",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          port_string, send_first ? "sending first" : "recv first"));

     /* set default error return */

@@ -199,8 +199,8 @@ static int connect_accept ( ompi_communicator_t *comm, int root,

             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept adding %s to proc list",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&proc_list[i]->proc_name)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&proc_list[i]->proc_name)));
             ompi_proc_pack(proc_list, size, nbuf);
         }

@@ -218,13 +218,13 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
         if ( send_first ) {
             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept sending first to %s",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&port)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&port)));
             rc = orte_rml.send_buffer(&port, nbuf, tag, 0);
             /* setup to recv */
             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept waiting for response",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME)));
             recv_completed = false;
             rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, tag,
                                          ORTE_RML_NON_PERSISTENT, recv_cb, NULL);

@@ -232,13 +232,13 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
             ORTE_PROGRESSED_WAIT(recv_completed, 0, 1);
             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept got data from %s",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&carport)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&carport)));

         } else {
             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept recving first",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME)));
             /* setup to recv */
             recv_completed = false;
             rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, tag,

@@ -248,8 +248,8 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
             /* now send our info */
             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept sending info to %s",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&carport)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&carport)));
             rc = orte_rml.send_buffer(&carport, nbuf, tag, 0);
         }

@@ -269,7 +269,7 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
     /* bcast the buffer-length to all processes in the local comm */
     OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                          "%s dpm:orte:connect_accept bcast buffer length",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME)));
     rc = comm->c_coll.coll_bcast (&rnamebuflen_int, 1, MPI_INT, root, comm,
                                   comm->c_coll.coll_bcast_module);
     if ( OMPI_SUCCESS != rc ) {

@@ -293,7 +293,7 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
      */
     OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                          "%s dpm:orte:connect_accept bcast proc list",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME)));
     rc = comm->c_coll.coll_bcast (rnamebuf, rnamebuflen_int, MPI_BYTE, root, comm,
                                   comm->c_coll.coll_bcast_module);
     if ( OMPI_SUCCESS != rc ) {

@@ -322,7 +322,7 @@ static int connect_accept ( ompi_communicator_t *comm, int root,

     OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                          "%s dpm:orte:connect_accept unpacked %d new procs",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), new_proc_len));
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME), new_proc_len));

     /* If we added new procs, we need to do the modex and then call
        PML add_procs */

@@ -339,8 +339,8 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
             opal_list_append(&all_procs, &name->item);
             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept send first adding %s to allgather list",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&name->name)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&name->name)));
         }
         for (i = 0 ; i < group->grp_proc_count ; ++i) {
             name = OBJ_NEW(orte_namelist_t);

@@ -348,8 +348,8 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
             opal_list_append(&all_procs, &name->item);
             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept send first adding %s to allgather list",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&name->name)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&name->name)));
         }

     } else {

@@ -359,8 +359,8 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
             opal_list_append(&all_procs, &name->item);
             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept recv first adding %s to allgather list",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&name->name)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&name->name)));
         }
         for (i = 0 ; i < rsize ; ++i) {
             name = OBJ_NEW(orte_namelist_t);

@@ -368,8 +368,8 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
             opal_list_append(&all_procs, &name->item);
             OPAL_OUTPUT_VERBOSE((3, ompi_dpm_base_output,
                                  "%s dpm:orte:connect_accept recv first adding %s to allgather list",
-                                 ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                                 ORTE_NAME_PRINT(&name->name)));
+                                 orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                                 orte_util_print_name_args(&name->name)));
         }

     }

@@ -917,7 +917,7 @@ static int dyn_init(void)

     OPAL_OUTPUT_VERBOSE((1, ompi_dpm_base_output,
                          "%s dpm:orte:dyn_init with port %s",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          port_name));

     rc = connect_accept (MPI_COMM_WORLD, root, port_name, send_first, &newcomm);
@@ -136,7 +136,7 @@ mca_mpool_base_module_t* mca_mpool_base_module_create(
 #endif
     } else {
         orte_show_help("help-mpool-base.txt", "leave pinned failed",
-                       true, ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                       true, orte_util_print_name_args(ORTE_PROC_MY_NAME),
                        orte_proc_info.nodename);
         return NULL;
     }

@@ -172,13 +172,13 @@ void mca_mpool_base_tree_print(void)
         if (num_leaks <= ompi_debug_show_mpi_alloc_mem_leaks ||
             ompi_debug_show_mpi_alloc_mem_leaks < 0) {
             orte_show_help("help-mpool-base.txt", "all mem leaks",
-                           true, ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                           true, orte_util_print_name_args(ORTE_PROC_MY_NAME),
                            orte_proc_info.nodename,
                            orte_proc_info.pid, leak_msg);
         } else {
             int i = num_leaks - ompi_debug_show_mpi_alloc_mem_leaks;
             orte_show_help("help-mpool-base.txt", "some mem leaks",
-                           true, ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                           true, orte_util_print_name_args(ORTE_PROC_MY_NAME),
                            orte_proc_info.nodename,
                            orte_proc_info.pid, leak_msg, i,
                            (i > 1) ? "s were" : " was",

@@ -439,7 +439,7 @@ void mca_mpool_rdma_finalize(struct mca_mpool_base_module_t *mpool)
     if(true == mca_mpool_rdma_component.print_stats) {
         opal_output(0, "%s rdma: stats "
                     "(hit/miss/found/not found/evicted): %d/%d/%d/%d/%d\n",
-                    ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                    orte_util_print_name_args(ORTE_PROC_MY_NAME),
                     mpool_rdma->stat_cache_hit, mpool_rdma->stat_cache_miss,
                     mpool_rdma->stat_cache_found, mpool_rdma->stat_cache_notfound,
                     mpool_rdma->stat_evicted);
@@ -378,14 +378,14 @@ mca_pml_base_pml_check_selected(const char *my_pml,
         (0 != strcmp(my_pml, remote_pml))) {
         if (procs[0]->proc_hostname) {
             opal_output(0, "%s selected pml %s, but peer %s on %s selected pml %s",
-                        ORTE_NAME_PRINT(&ompi_proc_local()->proc_name),
-                        my_pml, ORTE_NAME_PRINT(&procs[0]->proc_name),
+                        orte_util_print_name_args(&ompi_proc_local()->proc_name),
+                        my_pml, orte_util_print_name_args(&procs[0]->proc_name),
                         procs[0]->proc_hostname,
                         remote_pml);
         } else {
             opal_output(0, "%s selected pml %s, but peer %s selected pml %s",
-                        ORTE_NAME_PRINT(&ompi_proc_local()->proc_name),
-                        my_pml, ORTE_NAME_PRINT(&procs[0]->proc_name),
+                        orte_util_print_name_args(&ompi_proc_local()->proc_name),
+                        my_pml, orte_util_print_name_args(&procs[0]->proc_name),
                         remote_pml);
         }
         free(remote_pml); /* cleanup before returning */
@@ -70,7 +70,7 @@ static void setup_server(void)

     OPAL_OUTPUT_VERBOSE((1, ompi_pubsub_base_output,
                          "%s pubsub:orte: setting up server at URI %s",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          (NULL == mca_pubsub_orte_component.server_uri) ? "NULL" : mca_pubsub_orte_component.server_uri));

     /* flag setup as completed so we only pass through here once */

@@ -113,8 +113,8 @@ static void setup_server(void)

     OPAL_OUTPUT_VERBOSE((1, ompi_pubsub_base_output,
                          "%s pubsub:orte: server %s setup",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
-                         ORTE_NAME_PRINT(&mca_pubsub_orte_component.server)));
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(&mca_pubsub_orte_component.server)));
     }

     /*

@@ -175,7 +175,7 @@ static int publish ( char *service_name, ompi_info_t *info, char *port_name )

     OPAL_OUTPUT_VERBOSE((1, ompi_pubsub_base_output,
                          "%s pubsub:orte: publishing service %s scope %s",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          service_name, global_scope ? "Global" : "Local"));

     /* construct the buffer */

@@ -323,7 +323,7 @@ static char* lookup ( char *service_name, ompi_info_t *info )

     OPAL_OUTPUT_VERBOSE((1, ompi_pubsub_base_output,
                          "%s pubsub:orte: lookup service %s scope %d",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          service_name, lookup[0]));

     /* go find the value */

@@ -475,7 +475,7 @@ static int unpublish ( char *service_name, ompi_info_t *info )

     OPAL_OUTPUT_VERBOSE((1, ompi_pubsub_base_output,
                          "%s pubsub:orte: unpublish service %s scope %s",
-                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
+                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                          service_name, global_scope ? "Global" : "Local"));

     /* construct the buffer */
@@ -262,7 +262,7 @@ int main(int argc, char *argv[])
     opal_progress_set_event_flag(OPAL_EVLOOP_ONCE);

     if (debug) {
-        opal_output(0, "%s ompi-server: up and running!", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
+        opal_output(0, "%s ompi-server: up and running!", orte_util_print_name_args(ORTE_PROC_MY_NAME));