openmpi/ompi/mca/osc/rdma/osc_rdma_comm.c
Brian Barrett  0ba0a60ada

* Merge in new version of the pt2pt one-sided communication component,
  implemented entirely on top of the PML.  This allows us to have a
  one-sided interface even when we are using the CM PML and MTLs for
  point-to-point transport (and therefore not using the BML/BTLs)
* Old pt2pt component was renamed "rdma", as it will soon be having
  real RDMA support added to it.

Work was done in a temporary branch.  Commit is the result of the
merge command:

  svn merge -r10862:11099 https://svn.open-mpi.org/svn/ompi/tmp/bwb-osc-pt2pt

This commit was SVN r11100.

The following SVN revisions from the original message are invalid or
inconsistent and therefore were not cross-referenced:
  r10862
  r11099
2006-08-03 00:10:19 +00:00

211 lines
7.4 KiB
C

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "mpi.h"
#include <stdio.h>
#include "osc_rdma.h"
#include "osc_rdma_sendreq.h"
#include "osc_rdma_header.h"
#include "osc_rdma_data_move.h"

static int
enqueue_sendreq(ompi_osc_rdma_module_t *module,
                ompi_osc_rdma_sendreq_t *sendreq)
{
    OPAL_THREAD_LOCK(&(module->p2p_lock));
    opal_list_append(&(module->p2p_pending_sendreqs),
                     (opal_list_item_t*) sendreq);
    module->p2p_num_pending_sendreqs[sendreq->req_target_rank]++;
    OPAL_THREAD_UNLOCK(&(module->p2p_lock));

    return OMPI_SUCCESS;
}

int
ompi_osc_rdma_module_accumulate(void *origin_addr, int origin_count,
                                struct ompi_datatype_t *origin_dt,
                                int target, int target_disp, int target_count,
                                struct ompi_datatype_t *target_dt,
                                struct ompi_op_t *op, ompi_win_t *win)
{
    int ret;
    ompi_osc_rdma_sendreq_t *sendreq;

    if (OMPI_WIN_FENCE & ompi_win_get_mode(win)) {
        /* well, we're definitely in an access epoch now */
        ompi_win_set_mode(win, OMPI_WIN_FENCE | OMPI_WIN_ACCESS_EPOCH |
                          OMPI_WIN_EXPOSE_EPOCH);
    }

    if (op != &ompi_mpi_op_replace &&
        !ompi_ddt_is_predefined(target_dt)) {
        fprintf(stderr, "MPI_Accumulate currently does not support reductions\n");
        fprintf(stderr, "with any user-defined types. This will be rectified\n");
        fprintf(stderr, "in a future release.\n");
        return MPI_ERR_UNSUPPORTED_OPERATION;
    }

    /* shortcut 0 count case */
    if (0 == origin_count || 0 == target_count) {
        return OMPI_SUCCESS;
    }

    /* create sendreq */
    ret = ompi_osc_rdma_sendreq_alloc_init(OMPI_OSC_RDMA_ACC,
                                           origin_addr,
                                           origin_count,
                                           origin_dt,
                                           target,
                                           target_disp,
                                           target_count,
                                           target_dt,
                                           P2P_MODULE(win),
                                           &sendreq);
    if (OMPI_SUCCESS != ret) return ret;

    sendreq->req_op_id = op->o_f_to_c_index;

    /* enqueue sendreq */
    ret = enqueue_sendreq(P2P_MODULE(win), sendreq);

    return ret;
}
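
/*
 * Illustration (not part of the original file): with a window "win" and a
 * committed user-defined datatype "usertype" -- both hypothetical names --
 * the user-defined-datatype check above sorts MPI_Accumulate calls like so:
 *
 *   MPI_Accumulate(&v, 1, MPI_INT, 1, 0, 1, MPI_INT, MPI_SUM, win);
 *       predefined target datatype: a sendreq is created and queued
 *
 *   MPI_Accumulate(buf, 1, usertype, 1, 0, 1, usertype, MPI_REPLACE, win);
 *       user-defined datatype, but the op is MPI_REPLACE: accepted
 *
 *   MPI_Accumulate(buf, 1, usertype, 1, 0, 1, usertype, MPI_SUM, win);
 *       user-defined datatype with a reduction op: rejected with
 *       MPI_ERR_UNSUPPORTED_OPERATION, as the fprintf warning above says
 */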

int
ompi_osc_rdma_module_get(void *origin_addr,
                         int origin_count,
                         struct ompi_datatype_t *origin_dt,
                         int target,
                         int target_disp,
                         int target_count,
                         struct ompi_datatype_t *target_dt,
                         ompi_win_t *win)
{
    int ret;
    ompi_osc_rdma_sendreq_t *sendreq;

    if (OMPI_WIN_FENCE & ompi_win_get_mode(win)) {
        /* well, we're definitely in an access epoch now */
        ompi_win_set_mode(win, OMPI_WIN_FENCE | OMPI_WIN_ACCESS_EPOCH |
                          OMPI_WIN_EXPOSE_EPOCH);
    }

    /* shortcut 0 count case */
    if (0 == origin_count || 0 == target_count) {
        return OMPI_SUCCESS;
    }

    /* create sendreq */
    ret = ompi_osc_rdma_sendreq_alloc_init(OMPI_OSC_RDMA_GET,
                                           origin_addr,
                                           origin_count,
                                           origin_dt,
                                           target,
                                           target_disp,
                                           target_count,
                                           target_dt,
                                           P2P_MODULE(win),
                                           &sendreq);
    if (OMPI_SUCCESS != ret) return ret;

    /* if we're doing fence synchronization, try to actively send
       right now */
    if (P2P_MODULE(win)->p2p_eager_send &&
        (OMPI_WIN_FENCE & ompi_win_get_mode(win))) {
        OPAL_THREAD_ADD32(&(sendreq->req_module->p2p_num_pending_out), 1);
        ret = ompi_osc_rdma_sendreq_send(P2P_MODULE(win), sendreq);

        if (OMPI_SUCCESS == ret) {
            OPAL_THREAD_LOCK(&(P2P_MODULE(win)->p2p_lock));
            P2P_MODULE(win)->p2p_num_pending_sendreqs[sendreq->req_target_rank]++;
            OPAL_THREAD_UNLOCK(&(P2P_MODULE(win)->p2p_lock));
        } else {
            OPAL_THREAD_ADD32(&(sendreq->req_module->p2p_num_pending_out), -1);
            ret = enqueue_sendreq(P2P_MODULE(win), sendreq);
        }
    } else {
        /* enqueue sendreq */
        ret = enqueue_sendreq(P2P_MODULE(win), sendreq);
    }

    return ret;
}

int
ompi_osc_rdma_module_put(void *origin_addr, int origin_count,
                         struct ompi_datatype_t *origin_dt,
                         int target, int target_disp, int target_count,
                         struct ompi_datatype_t *target_dt, ompi_win_t *win)
{
    int ret;
    ompi_osc_rdma_sendreq_t *sendreq;

    if (OMPI_WIN_FENCE & ompi_win_get_mode(win)) {
        /* well, we're definitely in an access epoch now */
        ompi_win_set_mode(win, OMPI_WIN_FENCE | OMPI_WIN_ACCESS_EPOCH |
                          OMPI_WIN_EXPOSE_EPOCH);
    }

    /* shortcut 0 count case */
    if (0 == origin_count || 0 == target_count) {
        return OMPI_SUCCESS;
    }

    /* create sendreq */
    ret = ompi_osc_rdma_sendreq_alloc_init(OMPI_OSC_RDMA_PUT,
                                           origin_addr,
                                           origin_count,
                                           origin_dt,
                                           target,
                                           target_disp,
                                           target_count,
                                           target_dt,
                                           P2P_MODULE(win),
                                           &sendreq);
    if (OMPI_SUCCESS != ret) return ret;

    /* if we're doing fence synchronization, try to actively send
       right now */
    if (P2P_MODULE(win)->p2p_eager_send &&
        (OMPI_WIN_FENCE & ompi_win_get_mode(win))) {
        OPAL_THREAD_ADD32(&(sendreq->req_module->p2p_num_pending_out), 1);
        ret = ompi_osc_rdma_sendreq_send(P2P_MODULE(win), sendreq);

        if (OMPI_SUCCESS == ret) {
            OPAL_THREAD_LOCK(&(P2P_MODULE(win)->p2p_lock));
            P2P_MODULE(win)->p2p_num_pending_sendreqs[sendreq->req_target_rank]++;
            OPAL_THREAD_UNLOCK(&(P2P_MODULE(win)->p2p_lock));
        } else {
            OPAL_THREAD_ADD32(&(sendreq->req_module->p2p_num_pending_out), -1);
            ret = enqueue_sendreq(P2P_MODULE(win), sendreq);
        }
    } else {
        /* enqueue sendreq */
        ret = enqueue_sendreq(P2P_MODULE(win), sendreq);
    }

    return ret;
}
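
These three functions are the put/get/accumulate entry points invoked for a window served by this component; each call becomes a sendreq that is either sent eagerly (under fence synchronization, when p2p_eager_send is set) or queued until synchronization. The stand-alone program below is a usage sketch, not part of the Open MPI tree: the file name, buffer layout, and expected output are illustrative assumptions. It drives the paths above through the standard MPI-2 RMA interface with fence synchronization.

/* fence_sketch.c -- hypothetical example, not part of the Open MPI tree.
 * Exercises the put, accumulate, and get paths above through the MPI-2
 * RMA interface, using fence synchronization.  Run with at least two
 * processes, e.g.:  mpirun -np 2 ./fence_sketch
 */
#include <stdio.h>
#include "mpi.h"

int main(int argc, char *argv[])
{
    int rank, size;
    int win_buf = 0;              /* one int exposed by every process */
    int put_val = 42, get_val = 0;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Win_create(&win_buf, sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    /* the opening fence starts the access/exposure epoch that the
       OMPI_WIN_FENCE checks in the functions above look for */
    MPI_Win_fence(0, win);
    if (0 == rank && size > 1) {
        /* becomes an OMPI_OSC_RDMA_PUT sendreq */
        MPI_Put(&put_val, 1, MPI_INT, 1, 0, 1, MPI_INT, win);
    }

    MPI_Win_fence(0, win);       /* completes the put */
    if (0 == rank && size > 1) {
        /* predefined datatype and op: passes the accumulate check */
        MPI_Accumulate(&put_val, 1, MPI_INT, 1, 0, 1, MPI_INT, MPI_SUM, win);
    }

    MPI_Win_fence(0, win);       /* completes the accumulate */
    if (0 == rank && size > 1) {
        /* becomes an OMPI_OSC_RDMA_GET sendreq */
        MPI_Get(&get_val, 1, MPI_INT, 1, 0, 1, MPI_INT, win);
    }

    MPI_Win_fence(0, win);       /* completes the get */
    if (0 == rank && size > 1) {
        printf("value at rank 1: %d (expected 84)\n", get_val);
    }

    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}

As the code above suggests, with eager sending enabled each request is started as soon as it is posted and the closing fence mostly waits for completions; otherwise everything sits in p2p_pending_sendreqs until synchronization time.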