openmpi/ompi/mca/btl/openib/btl_openib_async.c
Jeff Squyres 8ace07efed This commit brings in two major things:
1. Galen's fine-grain control of queue pair resources in the openib
   BTL.
2. Pasha's new implementation of asynchronous HCA event handling.

Pasha's new implementation doesn't take much explanation, but the new
"multifrag" stuff does.  

Note that "svn merge" was not used to bring this new code from the
/tmp/ib_multifrag branch -- something Bad happened in the periodic
trunk pulls on that branch making an actual merge back to the trunk
effectively impossible (i.e., lots and lots of arbitrary conflicts and
artificial changes).  :-(

== Fine-grain control of queue pair resources ==

This commit adds Galen's fine-grained control of queue pair resources
to the OpenIB BTL (thanks to Gleb for fixing broken code and providing
additional functionality, Pasha for finding broken code, and Jeff for
doing all the svn work and regression testing).

Prior to this commit, the OpenIB BTL created two queue pairs: one for
eager size fragments and one for max send size fragments.  When the
use of the shared receive queue (SRQ) was specified (via "-mca
btl_openib_use_srq 1"), these QPs would use a shared receive queue for
receive buffers instead of the default per-peer (PP) receive queues
and buffers.  One consequence of this design is that receive buffer
utilization (the size of the data received as a percentage of the
receive buffer used for the data) was quite poor for a number of
applications.

The new design allows multiple QPs to be specified at runtime.  Each
QP can be set up to use PP or SRQ receive buffers, and gives
fine-grained control over the receive buffer size, the number of
receive buffers to post, and when to replenish the receive queue (low
water mark); for SRQ QPs, the number of outstanding sends can also be
specified.  The following is an example of the syntax used to describe
QPs to the OpenIB BTL via the new MCA parameter
btl_openib_receive_queues:

{{{
-mca btl_openib_receive_queues \
     "P,128,16,4;S,1024,256,128,32;S,4096,256,128,32;S,65536,256,128,32"
}}}

Each QP description is delimited by ";" (semicolon) with individual
fields of the QP description delimited by "," (comma).  The above
example therefore describes 4 QPs.

The first QP is:

    P,128,16,4

Per-peer receive buffer QPs are indicated by a starting field of "P";
the first QP (shown above) is therefore a per-peer based QP.  The
second field indicates the size of the receive buffer in bytes (128
bytes).  The third field indicates the number of receive buffers to
allocate to the QP (16).  The fourth field indicates the low watermark
for receive buffers, at which the BTL will repost receive buffers to
the QP (4).

The second QP is:

    S,1024,256,128,32

Shared receive queue based QPs are indicated by a starting field of
"S"; the second QP (shown above) is therefore a shared receive queue
based QP.  The second, third and fourth fields are the same as in the
per-peer based QP.  The fifth field is the number of outstanding sends
that are allowed at a given time on the QP (32).  This provides a
"good enough" mechanism of flow control for some regular communication
patterns.

QPs MUST be specified in ascending receive buffer size order.  This
requirement may be removed prior to the 1.3 release.
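
To make the field layout concrete, here is a minimal sketch of how
such a string could be split into per-QP settings.  This is
illustrative only: the struct, field, and function names are
hypothetical, it is not the openib BTL's actual parsing code, and it
does not enforce the ascending-size requirement above.

{{{
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical container for one QP description */
typedef struct {
    char type;          /* 'P' = per-peer, 'S' = shared receive queue */
    int rd_buf_size;    /* receive buffer size in bytes */
    int rd_num;         /* number of receive buffers to post */
    int rd_low;         /* low watermark for reposting receive buffers */
    int sd_max;         /* SRQ only: max outstanding sends */
} qp_desc_t;

/* Parse a btl_openib_receive_queues-style string ("desc;desc;...");
 * returns the number of QPs parsed, or -1 on a malformed description. */
static int parse_receive_queues(const char *spec, qp_desc_t *qps, int max_qps)
{
    char *copy = strdup(spec), *saveptr = NULL;
    int n = 0;

    for (char *tok = strtok_r(copy, ";", &saveptr);
         NULL != tok && n < max_qps;
         tok = strtok_r(NULL, ";", &saveptr), ++n) {
        qp_desc_t *qp = &qps[n];
        qp->sd_max = 0;   /* per-peer descriptions have no 5th field */
        int fields = sscanf(tok, "%c,%d,%d,%d,%d", &qp->type,
                            &qp->rd_buf_size, &qp->rd_num,
                            &qp->rd_low, &qp->sd_max);
        if (fields < 4 || ('P' != qp->type && 'S' != qp->type)) {
            free(copy);
            return -1;
        }
    }
    free(copy);
    return n;
}

int main(void)
{
    qp_desc_t qps[8];
    int n = parse_receive_queues(
        "P,128,16,4;S,1024,256,128,32;S,4096,256,128,32;S,65536,256,128,32",
        qps, 8);
    for (int i = 0; i < n; ++i) {
        printf("QP %d: %c size=%d num=%d low=%d sd_max=%d\n",
               i, qps[i].type, qps[i].rd_buf_size, qps[i].rd_num,
               qps[i].rd_low, qps[i].sd_max);
    }
    return 0;
}
}}}

Running the sketch on the example string above prints one line per QP
with the parsed values (the first with sd_max left at 0, since per-peer
descriptions have no outstanding-send field).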

This commit was SVN r15474.
2007-07-18 01:15:59 +00:00


/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#if OMPI_HAVE_THREADS
#include <infiniband/verbs.h>
#include <fcntl.h>
#include <sys/poll.h>
#include <unistd.h>
#include <errno.h>
#include "opal/util/output.h"
#include "opal/util/show_help.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/base.h"
#include "btl_openib.h"
#include "btl_openib_mca.h"
#include "btl_openib_async.h"
struct mca_btl_openib_async_poll {
    int active_poll_size;           /* number of fds currently being polled */
    int poll_size;                  /* allocated length of async_pollfd */
    struct pollfd *async_pollfd;    /* [0] is the command pipe; the rest are HCA async fds */
};
typedef struct mca_btl_openib_async_poll mca_btl_openib_async_poll;
static int btl_openib_async_poll_init(struct mca_btl_openib_async_poll *hcas_poll);
static int btl_openib_async_commandh(struct mca_btl_openib_async_poll *hcas_poll);
static int btl_openib_async_hcah(struct mca_btl_openib_async_poll *hcas_poll, int index);
static const char *openib_event_to_str (enum ibv_event_type event);
/* Function converts event to string (name).
 * OpenFabrics does not provide a function that does this job :(
 */
static const char *openib_event_to_str (enum ibv_event_type event)
{
    switch (event) {
    case IBV_EVENT_CQ_ERR:
        return "IBV_EVENT_CQ_ERR";
    case IBV_EVENT_QP_FATAL:
        return "IBV_EVENT_QP_FATAL";
    case IBV_EVENT_QP_REQ_ERR:
        return "IBV_EVENT_QP_REQ_ERR";
    case IBV_EVENT_QP_ACCESS_ERR:
        return "IBV_EVENT_QP_ACCESS_ERR";
    case IBV_EVENT_PATH_MIG:
        return "IBV_EVENT_PATH_MIG";
    case IBV_EVENT_PATH_MIG_ERR:
        return "IBV_EVENT_PATH_MIG_ERR";
    case IBV_EVENT_DEVICE_FATAL:
        return "IBV_EVENT_DEVICE_FATAL";
    case IBV_EVENT_SRQ_ERR:
        return "IBV_EVENT_SRQ_ERR";
    case IBV_EVENT_PORT_ERR:
        return "IBV_EVENT_PORT_ERR";
    case IBV_EVENT_COMM_EST:
        return "IBV_EVENT_COMM_EST";
    case IBV_EVENT_PORT_ACTIVE:
        return "IBV_EVENT_PORT_ACTIVE";
    case IBV_EVENT_SQ_DRAINED:
        return "IBV_EVENT_SQ_DRAINED";
    case IBV_EVENT_LID_CHANGE:
        return "IBV_EVENT_LID_CHANGE";
    case IBV_EVENT_PKEY_CHANGE:
        return "IBV_EVENT_PKEY_CHANGE";
    case IBV_EVENT_SM_CHANGE:
        return "IBV_EVENT_SM_CHANGE";
    case IBV_EVENT_QP_LAST_WQE_REACHED:
        return "IBV_EVENT_QP_LAST_WQE_REACHED";
#if HAVE_DECL_IBV_EVENT_CLIENT_REREGISTER
    case IBV_EVENT_CLIENT_REREGISTER:
        return "IBV_EVENT_CLIENT_REREGISTER";
#endif
    case IBV_EVENT_SRQ_LIMIT_REACHED:
        return "IBV_EVENT_SRQ_LIMIT_REACHED";
    default:
        return "UNKNOWN";
    }
}
/* Function inits mca_btl_openib_async_poll */
static int btl_openib_async_poll_init(struct mca_btl_openib_async_poll *hcas_poll)
{
    hcas_poll->active_poll_size = 1;
    hcas_poll->poll_size = 4;
    hcas_poll->async_pollfd = malloc(sizeof(struct pollfd) * hcas_poll->poll_size);
    if (NULL == hcas_poll->async_pollfd) {
        BTL_ERROR(("Failed malloc: %s:%d", __FILE__, __LINE__));
        return OMPI_ERROR;
    }
    /* Creating communication channel with the main thread */
    hcas_poll->async_pollfd[0].fd = mca_btl_openib_component.async_pipe[0];
    hcas_poll->async_pollfd[0].events = POLLIN;
    hcas_poll->async_pollfd[0].revents = 0;
    return OMPI_SUCCESS;
}
/* Function handles async thread commands.
 * The command is an int written to the pipe by the main thread:
 * a positive value is an HCA async fd to add to the poll array,
 * a negative value means remove the fd (-fd), and 0 asks the
 * thread to exit. */
static int btl_openib_async_commandh(struct mca_btl_openib_async_poll *hcas_poll)
{
    struct pollfd *async_pollfd_tmp;
    int fd, flags, j;

    /* Got command from main thread */
    if (read(hcas_poll->async_pollfd[0].fd, &fd, sizeof(int)) < 0) {
        BTL_ERROR(("Read failed [%d]", errno));
        return OMPI_ERROR;
    }
    BTL_VERBOSE(("GOT event from -> %d", fd));
    if (fd > 0) {
        /* Adding an HCA to the poll array */
        BTL_VERBOSE(("Adding HCA [%d] to async event poll[%d]",
                     fd, hcas_poll->active_poll_size));
        flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
            BTL_ERROR(("Failed to change file descriptor of async event"));
            return OMPI_ERROR;
        }
        if ((hcas_poll->active_poll_size + 1) > hcas_poll->poll_size) {
            /* Grow the poll array (double it) */
            hcas_poll->poll_size += hcas_poll->poll_size;
            async_pollfd_tmp = malloc(sizeof(struct pollfd) * hcas_poll->poll_size);
            if (NULL == async_pollfd_tmp) {
                BTL_ERROR(("Failed malloc: %s:%d. "
                           "Fatal error, stopping async event thread",
                           __FILE__, __LINE__));
                return OMPI_ERROR;
            }
            memcpy(async_pollfd_tmp, hcas_poll->async_pollfd,
                   sizeof(struct pollfd) * (hcas_poll->active_poll_size));
            free(hcas_poll->async_pollfd);
            hcas_poll->async_pollfd = async_pollfd_tmp;
        }
        hcas_poll->async_pollfd[hcas_poll->active_poll_size].fd = fd;
        hcas_poll->async_pollfd[hcas_poll->active_poll_size].events = POLLIN;
        hcas_poll->async_pollfd[hcas_poll->active_poll_size].revents = 0;
        hcas_poll->active_poll_size++;
    } else if (fd < 0) {
        bool fd_found = false;
        /* Removing HCA from poll */
        fd = -(fd);
        BTL_VERBOSE(("Removing HCA [%d] from async event poll [%d]",
                     fd, hcas_poll->active_poll_size));
        if (hcas_poll->active_poll_size > 1) {
            for (j = 0; (j < hcas_poll->active_poll_size && !fd_found); j++) {
                if (hcas_poll->async_pollfd[j].fd == fd) {
                    /* Replace the removed entry with the last active one */
                    hcas_poll->async_pollfd[j].fd =
                        hcas_poll->async_pollfd[hcas_poll->active_poll_size-1].fd;
                    hcas_poll->async_pollfd[j].events =
                        hcas_poll->async_pollfd[hcas_poll->active_poll_size-1].events;
                    hcas_poll->async_pollfd[j].revents =
                        hcas_poll->async_pollfd[hcas_poll->active_poll_size-1].revents;
                    fd_found = true;
                }
            }
            if (!fd_found) {
                BTL_ERROR(("Requested FD[%d] was not found in poll array\n", fd));
                return OMPI_ERROR;
            }
        }
        hcas_poll->active_poll_size--;
    } else {
        /* Got 0 - command to close the thread */
        BTL_VERBOSE(("Async event thread exit"));
        free(hcas_poll->async_pollfd);
        pthread_exit(NULL);
    }
    return OMPI_SUCCESS;
}
/* Function handles async HCA events */
static int btl_openib_async_hcah(struct mca_btl_openib_async_poll *hcas_poll, int index)
{
    int j;
    mca_btl_openib_hca_t *hca = NULL;
    struct ibv_async_event event;

    /* We need to find the correct HCA and process this event */
    for (j = 0; j < mca_btl_openib_component.ib_num_btls; j++) {
        if (mca_btl_openib_component.openib_btls[j]->hca->ib_dev_context->async_fd ==
            hcas_poll->async_pollfd[index].fd) {
            hca = mca_btl_openib_component.openib_btls[j]->hca;
        }
    }
    if (NULL != hca) {
        if (ibv_get_async_event((struct ibv_context *)hca->ib_dev_context, &event) < 0) {
            if (EWOULDBLOCK == errno) {
                /* No event found?
                 * It was handled by somebody else */
                return OMPI_SUCCESS;
            } else {
                BTL_ERROR(("Failed to get async event"));
                return OMPI_ERROR;
            }
        }
        switch(event.event_type) {
        case IBV_EVENT_DEVICE_FATAL:
            /* Set the flag to fatal */
            hca->got_fatal_event = true;
            /* It is not critical to protect the counter */
            OPAL_THREAD_ADD32(&mca_btl_openib_component.fatal_counter, 1);
            /* fall through: report the fatal event like the other errors */
        case IBV_EVENT_CQ_ERR:
        case IBV_EVENT_QP_FATAL:
        case IBV_EVENT_QP_REQ_ERR:
        case IBV_EVENT_QP_ACCESS_ERR:
        case IBV_EVENT_PATH_MIG:
        case IBV_EVENT_PATH_MIG_ERR:
        case IBV_EVENT_SRQ_ERR:
        case IBV_EVENT_PORT_ERR:
            opal_show_help("help-mpi-btl-openib.txt", "of error event",
                           true, orte_system_info.nodename, orte_process_info.pid,
                           event.event_type, openib_event_to_str(event.event_type));
            break;
        case IBV_EVENT_COMM_EST:
        case IBV_EVENT_PORT_ACTIVE:
        case IBV_EVENT_SQ_DRAINED:
        case IBV_EVENT_LID_CHANGE:
        case IBV_EVENT_PKEY_CHANGE:
        case IBV_EVENT_SM_CHANGE:
        case IBV_EVENT_QP_LAST_WQE_REACHED:
#if HAVE_DECL_IBV_EVENT_CLIENT_REREGISTER
        case IBV_EVENT_CLIENT_REREGISTER:
#endif
        case IBV_EVENT_SRQ_LIMIT_REACHED:
            break;
        default:
            opal_show_help("help-mpi-btl-openib.txt", "of unknown event",
                           true, orte_system_info.nodename, orte_process_info.pid,
                           event.event_type);
        }
        ibv_ack_async_event(&event);
    } else {
        /* hca == NULL: we failed to locate the HCA.
         * This failure should never happen. */
        BTL_ERROR(("Failed to find HCA with FD %d. "
                   "Fatal error, stopping async event thread",
                   hcas_poll->async_pollfd[index].fd));
        return OMPI_ERROR;
    }
    return OMPI_SUCCESS;
}
/* This async event thread handles all async events of
 * all BTLs/HCAs in the openib component.
 */
void* btl_openib_async_thread(void * async)
{
    int rc;
    int i;
    struct mca_btl_openib_async_poll hcas_poll;

    if (OMPI_SUCCESS != btl_openib_async_poll_init(&hcas_poll)) {
        BTL_ERROR(("Fatal error, stopping async event thread"));
        pthread_exit(NULL);
    }
    while(1) {
        rc = poll(hcas_poll.async_pollfd, hcas_poll.active_poll_size, -1);
        if (rc < 0) {
            if (errno != EINTR) {
                BTL_ERROR(("Poll failed. Fatal error, stopping async event thread"));
                pthread_exit(NULL);
            } else {
                /* EINTR - we got interrupted */
                continue;
            }
        }
        for (i = 0; i < hcas_poll.active_poll_size; i++) {
            switch (hcas_poll.async_pollfd[i].revents) {
            case 0:
                /* no events */
                break;
            case POLLIN:
                /* Processing our event */
                if (0 == i) {
                    /* Poll entry 0 is used for communication with the main thread */
                    if (OMPI_SUCCESS != btl_openib_async_commandh(&hcas_poll)) {
                        free(hcas_poll.async_pollfd);
                        BTL_ERROR(("Failed to process async thread command. "
                                   "Fatal error, stopping async event thread"));
                        pthread_exit(NULL);
                    }
                } else {
                    /* We got an HCA event */
                    if (btl_openib_async_hcah(&hcas_poll, i)) {
                        free(hcas_poll.async_pollfd);
                        BTL_ERROR(("Failed to process HCA async event. "
                                   "Fatal error, stopping async event thread"));
                        pthread_exit(NULL);
                    }
                }
                break;
            default:
                /* Got an event other than POLLIN;
                 * this case should never happen. */
                BTL_ERROR(("Got unexpected event %d. "
                           "Fatal error, stopping async event thread",
                           hcas_poll.async_pollfd[i].revents));
                free(hcas_poll.async_pollfd);
                pthread_exit(NULL);
            }
        }
    }
    return PTHREAD_CANCELED;
}
#endif