openmpi/ompi/mca/btl/mx/btl_mx_proc.c
George Bosilca 7dfdf3e907 A lot of MX fixes.
1. Allow MX bonding via the btl_mx_bonding MCA parameter. With this on,
  Open MPI assumes that the MX library will do the bonding, and we will
  only return one BTL. Otherwise, we return as many BTLs as there are
  devices.
2. Decrease the memory footprint by cleaning up what we store about the
  peers and how we store it.
3. Allow multiple MX routes that share the same mapper. In this particular
  case we match them by their nic_id.
4. Allow multiple MX routes with multiple mappers. In this case we match
  the NICs based on the last 6 digits of the mapper MAC (see the sketch
  after this list).
5. Increase the eager and rendezvous eager limits in the case where we are
  unable to register an unexpected callback with MX.
6. Increase the default max number of MX fragments.
7. Increase the max number of MX BTLs.
8. Only allow mx_if_include and mx_if_exclude if we have access to the
  mapper.
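
Illustration only, not part of this commit: a minimal sketch of the idea in
item 4, i.e. reducing a mapper MAC to its last 6 hex digits (the low 24 bits)
and comparing those values between hosts. The helper names and the 0xffffff
mask are assumptions made for the example, not code from this change.

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical helper: keep only the last 6 hex digits of a mapper MAC. */
    static inline uint32_t mapper_mac_low24( uint64_t mapper_mac )
    {
        return (uint32_t)(mapper_mac & 0xffffffULL);
    }

    /* Two NICs are treated as being on the same fabric when the low 24 bits
     * of their mappers' MACs match. */
    static inline bool same_mapper( uint64_t local_mac, uint64_t remote_mac )
    {
        return mapper_mac_low24(local_mac) == mapper_mac_low24(remote_mac);
    }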

This commit was SVN r19788.
2008-10-22 20:12:30 +00:00


/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2008 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"
#include "opal/class/opal_hash_table.h"
#include "orte/util/name_fns.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "btl_mx.h"
#include "btl_mx_proc.h"
static void mca_btl_mx_proc_construct(mca_btl_mx_proc_t* proc);
static void mca_btl_mx_proc_destruct(mca_btl_mx_proc_t* proc);
OBJ_CLASS_INSTANCE(mca_btl_mx_proc_t,
opal_list_item_t, mca_btl_mx_proc_construct,
mca_btl_mx_proc_destruct);
void mca_btl_mx_proc_construct(mca_btl_mx_proc_t* proc)
{
    proc->proc_ompi = 0;
    proc->mx_peers_count = 0;
    proc->mx_peers = NULL;
    proc->mx_routing = NULL;
    OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);

    /* add to list of all proc instances */
    OPAL_THREAD_LOCK(&mca_btl_mx_component.mx_lock);
    opal_list_append(&mca_btl_mx_component.mx_procs, &proc->super);
    OPAL_THREAD_UNLOCK(&mca_btl_mx_component.mx_lock);
}

/*
 * Cleanup MX proc instance
 */
void mca_btl_mx_proc_destruct(mca_btl_mx_proc_t* proc)
{
    /* remove from list of all proc instances */
    OPAL_THREAD_LOCK(&mca_btl_mx_component.mx_lock);
    opal_list_remove_item(&mca_btl_mx_component.mx_procs, &proc->super);
    OPAL_THREAD_UNLOCK(&mca_btl_mx_component.mx_lock);

    /* release resources */
    if( NULL != proc->mx_peers ) {
        free(proc->mx_peers);
        proc->mx_peers = NULL;
    }
    if( NULL != proc->mx_routing ) {
        free(proc->mx_routing);
        proc->mx_routing = NULL;
    }
}

/*
 * Look for an existing MX process instance based on the associated
 * ompi_proc_t instance.
 */
static mca_btl_mx_proc_t* mca_btl_mx_proc_lookup_ompi(ompi_proc_t* ompi_proc)
{
    mca_btl_mx_proc_t* mx_proc;

    OPAL_THREAD_LOCK(&mca_btl_mx_component.mx_lock);
    for( mx_proc = (mca_btl_mx_proc_t*)opal_list_get_first(&mca_btl_mx_component.mx_procs);
         mx_proc != (mca_btl_mx_proc_t*)opal_list_get_end(&mca_btl_mx_component.mx_procs);
         mx_proc = (mca_btl_mx_proc_t*)opal_list_get_next(mx_proc) ) {
        if( mx_proc->proc_ompi == ompi_proc ) {
            OPAL_THREAD_UNLOCK(&mca_btl_mx_component.mx_lock);
            return mx_proc;
        }
    }
    OPAL_THREAD_UNLOCK(&mca_btl_mx_component.mx_lock);

    return NULL;
}

/**
 * Create an MX process structure. There is a one-to-one correspondence
 * between an ompi_proc_t and a mca_btl_mx_proc_t instance. We cache
 * additional data (specifically the list of mca_btl_mx_endpoint_t instances,
 * and published addresses) associated w/ a given destination on this
 * datastructure.
 */
mca_btl_mx_proc_t* mca_btl_mx_proc_create(ompi_proc_t* ompi_proc)
{
    mca_btl_mx_proc_t* module_proc = NULL;
    mca_btl_mx_addr_t *mx_peers;
    int i, j, rc, mx_peers_count, *mx_routing;
    bool at_least_one_route = false;
    size_t size;

    /* Check if we have already created a MX proc
     * structure for this ompi process */
    module_proc = mca_btl_mx_proc_lookup_ompi(ompi_proc);
    if( module_proc != NULL ) {
        return module_proc;  /* Gotcha! */
    }

    /* query for the peer address info */
    rc = ompi_modex_recv( &mca_btl_mx_component.super.btl_version,
                          ompi_proc, (void*)&mx_peers, &size );
    if( OMPI_SUCCESS != rc ) {
        opal_output( 0, "ompi_modex_recv failed for peer %s",
                     ORTE_NAME_PRINT(&ompi_proc->proc_name) );
        return NULL;
    }
    if( size < sizeof(mca_btl_mx_addr_t) ) {  /* no available connection */
        return NULL;
    }
    if( (size % sizeof(mca_btl_mx_addr_t)) != 0 ) {
        opal_output( 0, "invalid mx address for peer %s",
                     ORTE_NAME_PRINT(&ompi_proc->proc_name) );
        return NULL;
    }

    /* Let's see if we have a way to connect to the remote proc using MX.
     * Without the routing information from the mapper, it is pretty hard
     * to do this. Right now, we base this connection detection on the last
     * 6 digits of the mapper MAC.
     */
    mx_peers_count = size / sizeof(mca_btl_mx_addr_t);
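    /* mx_routing[j] will hold, for local BTL j, the index of the remote peer
     * endpoint it is routed to, or -1 when that BTL has no route to this
     * peer; mca_btl_mx_proc_insert() later uses it to pick the peer address
     * for each endpoint. */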
    mx_routing = (int*)malloc( mx_peers_count * sizeof(int) );
    for( i = 0; i < mx_peers_count; mx_routing[i++] = -1 );

    for( i = 0; i < mx_peers_count; i++ ) {
        mca_btl_mx_module_t* mx_btl;
#if OMPI_ENABLE_HETEROGENEOUS_SUPPORT
        BTL_MX_ADDR_NTOH(mx_peers[i]);
#endif
        for( j = 0; j < mca_btl_mx_component.mx_num_btls; j++ ) {
            mx_btl = mca_btl_mx_component.mx_btls[j];
            if( mx_btl->mx_unique_network_id == mx_peers[i].unique_network_id ) {
                /* There is at least one connection between these two nodes */
                if( -1 == mx_routing[j] ) {
                    /* First connection */
                    mx_routing[j] = i;
                    at_least_one_route = true;
                    break;
                }
                /* If multiple remote endpoints match mine, we keep going. As a
                 * result we will match them in order, i.e. remote endpoint 0
                 * will be connected to local endpoint 0.
                 */
            }
        }
    }
    if( false == at_least_one_route ) {
        free(mx_routing);
        return NULL;
    }

    module_proc = OBJ_NEW(mca_btl_mx_proc_t);
    module_proc->proc_ompi = ompi_proc;
    module_proc->mx_peers_count = mx_peers_count;
    module_proc->mx_peers = mx_peers;
    module_proc->mx_routing = mx_routing;
    return module_proc;
}

/**
 * Note that this routine must be called with the lock on the process
 * already held. Insert a btl instance into the proc array and assign
 * it an address.
 */
int mca_btl_mx_proc_insert( mca_btl_mx_proc_t* module_proc,
                            mca_btl_mx_endpoint_t* module_endpoint )
{
    mca_btl_mx_module_t* mx_btl;
    int btl_index, peer_endpoint_index;

    for( btl_index = 0; btl_index < mca_btl_mx_component.mx_num_btls; btl_index++ ) {
        mx_btl = mca_btl_mx_component.mx_btls[btl_index];
        peer_endpoint_index = module_proc->mx_routing[btl_index];
        if( (-1 != peer_endpoint_index) && (mx_btl == module_endpoint->endpoint_btl) ) {
            module_endpoint->mx_peer = module_proc->mx_peers + peer_endpoint_index;
            module_endpoint->endpoint_proc = module_proc;
            return OMPI_SUCCESS;
        }
    }
    module_proc->mx_peers_count = 0;
    /**
     * No Myrinet connectivity. Let the PML layer figure out another
     * way to communicate with the peer.
     */
    return OMPI_ERROR;
}

int mca_btl_mx_proc_connect( mca_btl_mx_endpoint_t* module_endpoint )
{
    int num_retry = 0;
    mx_return_t mx_status;
    mx_endpoint_addr_t mx_remote_addr;
    mca_btl_mx_proc_t* module_proc = module_endpoint->endpoint_proc;

    module_endpoint->status = MCA_BTL_MX_CONNECTION_PENDING;

 retry_connect:
#if 0
    {
        uint64_t nic_id;
        uint32_t endpoint_id;

        mx_decompose_endpoint_addr( module_endpoint->endpoint_btl->mx_endpoint_addr,
                                    &nic_id, &endpoint_id );
        opal_output( 0, "Connect local (nic_id %llx, endpoint_id %x) to \n"
                        " remote (nic_id %llx, endpoint_id %x)\n",
                     nic_id, endpoint_id,
                     module_endpoint->mx_peer->nic_id, module_endpoint->mx_peer->endpoint_id );
    }
#endif
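    /* Attempt the MX-level connection. mx_connect() can return MX_TIMEOUT,
     * in which case we retry up to mx_connection_retries times before
     * declaring the peer unreachable. */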
    mx_status = mx_connect( module_endpoint->endpoint_btl->mx_endpoint,
                            module_endpoint->mx_peer->nic_id,
                            module_endpoint->mx_peer->endpoint_id,
                            mca_btl_mx_component.mx_filter,
                            mca_btl_mx_component.mx_timeout,
                            &mx_remote_addr );
    if( MX_SUCCESS != mx_status ) {
        if( MX_TIMEOUT == mx_status ) {
            if( num_retry++ < mca_btl_mx_component.mx_connection_retries ) {
                goto retry_connect;
            }
        }
        {
            char peer_name[MX_MAX_HOSTNAME_LEN];

            if( MX_SUCCESS != mx_nic_id_to_hostname( module_endpoint->mx_peer->nic_id, peer_name ) )
                sprintf( peer_name, "unknown %lx nic_id", (long)module_endpoint->mx_peer->nic_id );
            opal_output( 0, "mx_connect fail for %s with key %x (error %s)\n\tUnique ID (local %x remote %x)\n",
                         peer_name, mca_btl_mx_component.mx_filter, mx_strerror(mx_status),
                         module_endpoint->endpoint_btl->mx_unique_network_id,
                         module_endpoint->mx_peer->unique_network_id );
        }
        module_endpoint->status = MCA_BTL_MX_NOT_REACHEABLE;
        return OMPI_ERROR;
    }

    module_endpoint->mx_peer_addr = mx_remote_addr;
    module_endpoint->status = MCA_BTL_MX_CONNECTED;
    return OMPI_SUCCESS;
}