openmpi/ompi/mca/btl/openib/btl_openib_proc.c

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007      Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2006-2007 Voltaire All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "opal/class/opal_hash_table.h"
#include "ompi/runtime/ompi_module_exchange.h"
#include "ompi/datatype/dt_arch.h"
#include "btl_openib.h"
#include "btl_openib_proc.h"
static void mca_btl_openib_proc_construct(mca_btl_openib_proc_t* proc);
static void mca_btl_openib_proc_destruct(mca_btl_openib_proc_t* proc);
OBJ_CLASS_INSTANCE(mca_btl_openib_proc_t,
opal_list_item_t, mca_btl_openib_proc_construct,
mca_btl_openib_proc_destruct);
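
/*
 * Lifecycle note (a sketch of the usual OPAL object semantics, not
 * anything openib-specific): OBJ_NEW() runs the construct hook, which
 * also appends the new proc to mca_btl_openib_component.ib_procs, and
 * OBJ_RELEASE() runs the destruct hook once the reference count drops
 * to zero, which removes the proc from that list again.
 *
 *   mca_btl_openib_proc_t *p = OBJ_NEW(mca_btl_openib_proc_t);
 *   ...
 *   OBJ_RELEASE(p);
 */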

void mca_btl_openib_proc_construct(mca_btl_openib_proc_t* proc)
{
    proc->proc_ompi = NULL;
    proc->proc_port_count = 0;
    proc->proc_endpoints = NULL;
    proc->proc_endpoint_count = 0;
    OBJ_CONSTRUCT(&proc->proc_lock, opal_mutex_t);

    /* add to list of all proc instances */
    OPAL_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
    opal_list_append(&mca_btl_openib_component.ib_procs, &proc->super);
    OPAL_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
}

/*
 * Clean up an IB proc instance
 */
void mca_btl_openib_proc_destruct(mca_btl_openib_proc_t* proc)
{
    /* remove from list of all proc instances */
    OPAL_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
    opal_list_remove_item(&mca_btl_openib_component.ib_procs, &proc->super);
    OPAL_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);

    /* release resources */
    if (NULL != proc->proc_endpoints) {
        free(proc->proc_endpoints);
    }
    /* pair the OBJ_CONSTRUCT of the lock in the constructor */
    OBJ_DESTRUCT(&proc->proc_lock);
}

/*
 * Look for an existing IB process instance based on the associated
 * ompi_proc_t instance.
 */
static mca_btl_openib_proc_t* mca_btl_openib_proc_lookup_ompi(ompi_proc_t* ompi_proc)
{
    mca_btl_openib_proc_t* ib_proc;

    OPAL_THREAD_LOCK(&mca_btl_openib_component.ib_lock);
    for (ib_proc = (mca_btl_openib_proc_t*)
             opal_list_get_first(&mca_btl_openib_component.ib_procs);
         ib_proc != (mca_btl_openib_proc_t*)
             opal_list_get_end(&mca_btl_openib_component.ib_procs);
         ib_proc = (mca_btl_openib_proc_t*)opal_list_get_next(ib_proc)) {
        if (ib_proc->proc_ompi == ompi_proc) {
            OPAL_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
            return ib_proc;
        }
    }
    OPAL_THREAD_UNLOCK(&mca_btl_openib_component.ib_lock);
    return NULL;
}

/*
 * Create an IB process structure. There is a one-to-one correspondence
 * between an ompi_proc_t and a mca_btl_openib_proc_t instance. We cache
 * additional data (specifically the list of mca_btl_openib_endpoint_t
 * instances, and published addresses) associated with a given destination
 * in this data structure.
 */
mca_btl_openib_proc_t* mca_btl_openib_proc_create(ompi_proc_t* ompi_proc)
{
    mca_btl_openib_proc_t* module_proc = NULL;
    size_t msg_size;
    uint32_t size;
    size_t i;
    int rc;
    void *message;
    char *offset;

    /* Check if we have already created an IB proc
     * structure for this ompi process */
    module_proc = mca_btl_openib_proc_lookup_ompi(ompi_proc);
    if (NULL != module_proc) {
        /* Gotcha! */
        return module_proc;
    }

    /* Oops! First time, gotta create a new IB proc
     * out of the ompi_proc ... */
    module_proc = OBJ_NEW(mca_btl_openib_proc_t);

    /* Initialize number of peers */
    module_proc->proc_endpoint_count = 0;
    module_proc->proc_ompi = ompi_proc;

    /* build a unique identifier (of arbitrary
     * size) to represent the proc */
    module_proc->proc_guid = ompi_proc->proc_name;

    /* query for the peer address info */
    rc = ompi_modex_recv(&mca_btl_openib_component.super.btl_version,
                         ompi_proc,
                         &message,
                         &msg_size);
    if (OMPI_SUCCESS != rc) {
        BTL_ERROR(("[%s:%d] ompi_modex_recv failed for peer %s",
                   __FILE__, __LINE__,
                   ORTE_NAME_PRINT(&ompi_proc->proc_name)));
        OBJ_RELEASE(module_proc);
        return NULL;
    }
    if (0 == msg_size) {
        /* Peer published no openib info; release the proc and bail out */
        OBJ_RELEASE(module_proc);
        return NULL;
    }

    /* Message was packed in btl_openib_component.c; the format is
       listed in a comment in that file */
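    /* A sketch of the expected layout, inferred from the unpacking code
       below (the authoritative description is the packing comment in
       btl_openib_component.c):

           uint32_t  number of ports
           repeated once per port:
               mca_btl_openib_port_info_t  port info
               uint32_t                    length of the CPC list string
               char[length]                CPC list string (no trailing NUL)

       In heterogeneous builds the integer fields arrive in network byte
       order and are converted below. */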
    /* Unpack the number of ports in the message */
    offset = message;
    memcpy(&(module_proc->proc_port_count), offset, sizeof(uint32_t));
#if !defined(WORDS_BIGENDIAN) && OMPI_ENABLE_HETEROGENEOUS_SUPPORT
    module_proc->proc_port_count = ntohl(module_proc->proc_port_count);
#endif
    module_proc->proc_ports = (mca_btl_openib_port_info_t *)
        malloc(sizeof(mca_btl_openib_port_info_t) * module_proc->proc_port_count);
    offset += sizeof(uint32_t);

    /* Loop over unpacking all the ports */
    for (i = 0; i < module_proc->proc_port_count; i++) {
        /* Unpack the port */
        memcpy(&module_proc->proc_ports[i], offset,
               sizeof(mca_btl_openib_port_info_t));
#if !defined(WORDS_BIGENDIAN) && OMPI_ENABLE_HETEROGENEOUS_SUPPORT
        MCA_BTL_OPENIB_PORT_INFO_NTOH(module_proc->proc_ports[i]);
#endif
        offset += sizeof(mca_btl_openib_port_info_t);

        /* Unpack the string length */
        memcpy(&size, offset, sizeof(size));
#if !defined(WORDS_BIGENDIAN) && OMPI_ENABLE_HETEROGENEOUS_SUPPORT
        size = ntohl(size);
#endif
        offset += sizeof(size);

        /* Unpack the string */
        module_proc->proc_ports[i].cpclist = malloc(size + 1);
        if (NULL == module_proc->proc_ports[i].cpclist) {
            /* Out of memory: give up on this proc */
            OBJ_RELEASE(module_proc);
            return NULL;
        }
        memcpy(module_proc->proc_ports[i].cpclist, offset, size);
        module_proc->proc_ports[i].cpclist[size] = '\0';
        offset += size;
    }

    if (0 == module_proc->proc_port_count) {
        module_proc->proc_endpoints = NULL;
    } else {
        module_proc->proc_endpoints = (mca_btl_base_endpoint_t**)
            malloc(module_proc->proc_port_count * sizeof(mca_btl_base_endpoint_t*));
    }
    if (NULL == module_proc->proc_endpoints) {
        OBJ_RELEASE(module_proc);
        return NULL;
    }

    return module_proc;
}

/*
 * Note that this routine must be called with the lock on the process
 * already held. Insert a btl instance into the proc array and assign
 * it an address. (See the usage sketch at the end of this file.)
 */
int mca_btl_openib_proc_insert(mca_btl_openib_proc_t* module_proc,
                               mca_btl_base_endpoint_t* module_endpoint)
{
    /* insert into endpoint array */

#ifndef WORDS_BIGENDIAN
    /* if we are little endian and our peer is not so lucky, then we
       need to put all information sent to him in big endian (aka
       Network Byte Order) and expect all information received to
       be in NBO. Since big endian machines always send and receive
       in NBO, we don't care so much about that case. */
    if (module_proc->proc_ompi->proc_arch & OMPI_ARCH_ISBIGENDIAN) {
        module_endpoint->nbo = true;
    }
#endif

    /* only allow eager rdma if the peers agree on the size of a long */
    if ((module_proc->proc_ompi->proc_arch & OMPI_ARCH_LONGISxx) !=
        (ompi_proc_local()->proc_arch & OMPI_ARCH_LONGISxx)) {
        module_endpoint->use_eager_rdma = false;
    }

    module_endpoint->endpoint_proc = module_proc;
    module_proc->proc_endpoints[module_proc->proc_endpoint_count++] = module_endpoint;
    return OMPI_SUCCESS;
}
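
/*
 * Usage sketch (illustrative only; in the real code path the caller is
 * the openib BTL's add_procs function in btl_openib.c, and endpoint
 * creation is more involved than shown here):
 *
 *   mca_btl_openib_proc_t *ib_proc = mca_btl_openib_proc_create(ompi_proc);
 *   if (NULL != ib_proc) {
 *       mca_btl_base_endpoint_t *endpoint = OBJ_NEW(mca_btl_openib_endpoint_t);
 *       OPAL_THREAD_LOCK(&ib_proc->proc_lock);
 *       mca_btl_openib_proc_insert(ib_proc, endpoint);
 *       OPAL_THREAD_UNLOCK(&ib_proc->proc_lock);
 *   }
 */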