/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "osc_rdma.h"
#include "osc_rdma_sendreq.h"
#include "osc_rdma_longreq.h"
#include "osc_rdma_header.h"
#include "osc_rdma_data_move.h"

#include "mpi.h"
#include "opal/runtime/opal_progress.h"
#include "opal/threads/mutex.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/osc/base/base.h"
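
/*
 * This file implements the synchronization side of the rdma one-sided
 * component: fence epochs, generalized active target synchronization
 * (start / complete / post / wait / test), and passive target locking
 * (lock / unlock plus the target-side handlers at the end of the file).
 */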

/* Must hold module's lock before calling... */
static inline void
ompi_osc_rdma_flip_sendreqs(ompi_osc_rdma_module_t *module)
{
    unsigned int *tmp;

    tmp = module->m_copy_num_pending_sendreqs;
    module->m_copy_num_pending_sendreqs =
        module->m_num_pending_sendreqs;
    module->m_num_pending_sendreqs = tmp;
    memset(module->m_num_pending_sendreqs, 0,
           sizeof(unsigned int) * ompi_comm_size(module->m_comm));

    /* Copy in all the pending requests */
    opal_list_join(&module->m_copy_pending_sendreqs,
                   opal_list_get_end(&module->m_copy_pending_sendreqs),
                   &module->m_pending_sendreqs);
}
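
/*
 * Note: the epoch-ending paths below (fence, complete, unlock) all follow
 * the same pattern: take m_lock, call ompi_osc_rdma_flip_sendreqs() to swap
 * the pending lists into their "copy" counterparts, drop the lock, drain the
 * copy list with ompi_osc_rdma_sendreq_send(), and push anything that could
 * not be started onto m_queued_sendreqs for a later retry.
 */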

int
ompi_osc_rdma_module_fence(int assert, ompi_win_t *win)
{
    unsigned int incoming_reqs;
    int ret = OMPI_SUCCESS, i;
    ompi_osc_rdma_module_t *module = GET_MODULE(win);
    int num_outgoing = 0;

    if (0 != (assert & MPI_MODE_NOPRECEDE)) {
        /* check that the user didn't lie to us - since NOPRECEDE
           must be specified by all processes if it is specified by
           any process, if we see this it is safe to assume that there
           are no pending operations anywhere needed to close out this
           epoch. */
        if (0 != opal_list_get_size(&(module->m_pending_sendreqs))) {
            return MPI_ERR_RMA_SYNC;
        }
    } else {
        opal_list_item_t *item;

        /* "atomically" copy all the data we're going to be modifying
           into the copy... */
        OPAL_THREAD_LOCK(&module->m_lock);
        ompi_osc_rdma_flip_sendreqs(module);
        OPAL_THREAD_UNLOCK(&module->m_lock);

        num_outgoing = opal_list_get_size(&(module->m_copy_pending_sendreqs));

        /* find out how much data everyone is going to send us.  Need
           to have the lock during this period so that we have a sane
           view of the number of sendreqs */
        ret = module->m_comm->
            c_coll.coll_reduce_scatter(module->m_copy_num_pending_sendreqs,
                                       &incoming_reqs,
                                       module->m_fence_coll_counts,
                                       MPI_UNSIGNED,
                                       MPI_SUM,
                                       module->m_comm);

        if (OMPI_SUCCESS != ret) {
            /* put the data back for the user.  This is not cheap,
               but the user loses the data otherwise. */
            OPAL_THREAD_LOCK(&(module->m_lock));
            opal_list_join(&module->m_pending_sendreqs,
                           opal_list_get_end(&module->m_pending_sendreqs),
                           &module->m_copy_pending_sendreqs);

            for (i = 0 ; i < ompi_comm_size(module->m_comm) ; ++i) {
                module->m_num_pending_sendreqs[i] +=
                    module->m_copy_num_pending_sendreqs[i];
            }

            OPAL_THREAD_UNLOCK(&(module->m_lock));
            return ret;
        }

        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                             "fence: waiting on %d in and %d out",
                             module->m_num_pending_in,
                             module->m_num_pending_out));

        /* try to start all the requests.  We've copied everything we
           need out of pending_sendreqs, so don't need the lock
           here */
        while (NULL !=
               (item = opal_list_remove_first(&(module->m_copy_pending_sendreqs)))) {
            ompi_osc_rdma_sendreq_t *req =
                (ompi_osc_rdma_sendreq_t*) item;

            ret = ompi_osc_rdma_sendreq_send(module, req);
            if (OMPI_SUCCESS != ret) {
                opal_list_append(&(module->m_copy_pending_sendreqs), item);
                break;
            }
        }

        OPAL_THREAD_LOCK(&module->m_lock);
        /* if some requests couldn't be started, push them onto the
           "queued" list, where we will try to restart them later. */
        if (opal_list_get_size(&module->m_copy_pending_sendreqs)) {
            opal_list_join(&module->m_queued_sendreqs,
                           opal_list_get_end(&module->m_queued_sendreqs),
                           &module->m_copy_pending_sendreqs);
        }

        /* possible we've already received a couple of incoming messages,
           so atomically add however many we're going to wait for */
        module->m_num_pending_in += incoming_reqs;
        module->m_num_pending_out += num_outgoing;

        /* now we know how many things we're waiting for - wait for them... */
        while (module->m_num_pending_in > 0 ||
               0 != module->m_num_pending_out) {
            opal_condition_wait(&module->m_cond, &module->m_lock);
        }
        OPAL_THREAD_UNLOCK(&module->m_lock);
    }

    /* all transfers are done - back to the real world we go */
    if (0 == (assert & MPI_MODE_NOSUCCEED)) {
        ompi_win_set_mode(win, OMPI_WIN_FENCE);
    } else {
        ompi_win_set_mode(win, 0);
    }

    return OMPI_SUCCESS;
}
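
/*
 * Illustrative sketch (not code from this file): the fence path above is
 * what backs a typical MPI fence epoch, and the assert argument carries the
 * MPI_MODE_* flags checked in ompi_osc_rdma_module_fence().
 *
 *     MPI_Win_fence(MPI_MODE_NOPRECEDE, win);
 *     MPI_Put(buf, 1, MPI_INT, target, 0, 1, MPI_INT, win);
 *     MPI_Win_fence(MPI_MODE_NOSUCCEED, win);
 */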

int
ompi_osc_rdma_module_start(ompi_group_t *group,
                           int assert,
                           ompi_win_t *win)
{
    int i, ret = OMPI_SUCCESS;
    ompi_osc_rdma_module_t *module = GET_MODULE(win);
    int32_t count;

    OBJ_RETAIN(group);
    ompi_group_increment_proc_count(group);

    module->m_eager_send_active = false;

    OPAL_THREAD_LOCK(&module->m_lock);

    if (NULL != module->m_sc_group) {
        OPAL_THREAD_UNLOCK(&module->m_lock);
        ret = MPI_ERR_RMA_SYNC;
        goto clean;
    }
    module->m_sc_group = group;

    /* possible we've already received a couple of post messages, so
       add however many we're going to wait for */
    count = (module->m_num_post_msgs += ompi_group_size(module->m_sc_group));
    OPAL_THREAD_UNLOCK(&(module->m_lock));

    memset(module->m_sc_remote_active_ranks, 0,
           sizeof(bool) * ompi_comm_size(module->m_comm));

    /* for each process in the specified group, find its rank in our
       communicator, store those indexes, and set the true / false in
       the active ranks table */
    for (i = 0 ; i < ompi_group_size(group) ; i++) {
        int comm_rank = -1, j;

        /* no need to increment ref count - the communicator isn't
           going anywhere while we're here */
        ompi_group_t *comm_group = module->m_comm->c_local_group;

        /* find the rank in the communicator associated with this window */
        for (j = 0 ;
             j < ompi_group_size(comm_group) ;
             ++j) {
            if (module->m_sc_group->grp_proc_pointers[i] ==
                comm_group->grp_proc_pointers[j]) {
                comm_rank = j;
                break;
            }
        }
        if (comm_rank == -1) {
            ret = MPI_ERR_RMA_SYNC;
            goto clean;
        }

        module->m_sc_remote_active_ranks[comm_rank] = true;
        module->m_sc_remote_ranks[i] = comm_rank;
    }

    /* Set our mode to access w/ start */
    ompi_win_remove_mode(win, OMPI_WIN_FENCE);
    ompi_win_append_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_STARTED);

    if (count == 0) {
        module->m_eager_send_active = module->m_eager_send_ok;
    }

    return OMPI_SUCCESS;

 clean:
    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);
    return ret;
}
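
/*
 * Note on the count check above: incoming post messages can arrive (and be
 * counted against m_num_post_msgs) before the start call is made, so a
 * count of zero after adding the group size means every peer has already
 * posted and eager sends can be enabled right away.
 */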

int
ompi_osc_rdma_module_complete(ompi_win_t *win)
{
    int i;
    int ret = OMPI_SUCCESS;
    ompi_group_t *group;
    opal_list_item_t *item;
    ompi_osc_rdma_module_t *module = GET_MODULE(win);

    /* wait for all the post messages */
    OPAL_THREAD_LOCK(&module->m_lock);
    while (0 != module->m_num_post_msgs) {
        opal_condition_wait(&module->m_cond, &module->m_lock);
    }

    ompi_osc_rdma_flip_sendreqs(module);

    /* for each process in group, send a control message with number
       of updates coming, then start all the requests */
    module->m_num_pending_out +=
        (int32_t) opal_list_get_size(&module->m_copy_pending_sendreqs);
    OPAL_THREAD_UNLOCK(&module->m_lock);

    for (i = 0 ; i < ompi_group_size(module->m_sc_group) ; ++i) {
        int comm_rank = module->m_sc_remote_ranks[i];
        ret = ompi_osc_rdma_control_send(module,
                                         module->m_sc_group->grp_proc_pointers[i],
                                         OMPI_OSC_RDMA_HDR_COMPLETE,
                                         module->m_copy_num_pending_sendreqs[comm_rank],
                                         0);
        assert(ret == OMPI_SUCCESS);
    }

    /* try to start all the requests.  We've copied everything we
       need out of pending_sendreqs, so don't need the lock
       here */
    while (NULL !=
           (item = opal_list_remove_first(&(module->m_copy_pending_sendreqs)))) {
        ompi_osc_rdma_sendreq_t *req =
            (ompi_osc_rdma_sendreq_t*) item;

        ret = ompi_osc_rdma_sendreq_send(module, req);
        if (OMPI_SUCCESS != ret) {
            opal_list_append(&(module->m_copy_pending_sendreqs), item);
            break;
        }
    }

    OPAL_THREAD_LOCK(&module->m_lock);
    /* if some requests couldn't be started, push them onto the
       "queued" list, where we will try to restart them later. */
    if (opal_list_get_size(&module->m_copy_pending_sendreqs)) {
        opal_list_join(&module->m_queued_sendreqs,
                       opal_list_get_end(&module->m_queued_sendreqs),
                       &module->m_copy_pending_sendreqs);
    }

    /* wait for all the requests */
    while (0 != module->m_num_pending_out) {
        opal_condition_wait(&module->m_cond, &module->m_lock);
    }

    group = module->m_sc_group;
    module->m_sc_group = NULL;

    OPAL_THREAD_UNLOCK(&(module->m_lock));

    /* remove the ACCESS_EPOCH and STARTED modes from the window */
    ompi_win_remove_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_STARTED);

    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);

    return ret;
}
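
/*
 * Illustrative sketch (not code from this file): origin side of a
 * generalized active target epoch driven through the start / complete
 * path above.
 *
 *     MPI_Win_start(target_group, 0, win);
 *     MPI_Put(buf, 1, MPI_INT, target, 0, 1, MPI_INT, win);
 *     MPI_Win_complete(win);
 */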

int
ompi_osc_rdma_module_post(ompi_group_t *group,
                          int assert,
                          ompi_win_t *win)
{
    int i;
    ompi_osc_rdma_module_t *module = GET_MODULE(win);

    OBJ_RETAIN(group);
    ompi_group_increment_proc_count(group);

    OPAL_THREAD_LOCK(&(module->m_lock));
    assert(NULL == module->m_pw_group);
    module->m_pw_group = group;

    /* Set our mode to expose w/ post */
    ompi_win_remove_mode(win, OMPI_WIN_FENCE);
    ompi_win_append_mode(win, OMPI_WIN_EXPOSE_EPOCH | OMPI_WIN_POSTED);

    /* track how many complete messages we're still waiting on */
    module->m_num_complete_msgs +=
        ompi_group_size(module->m_pw_group);
    OPAL_THREAD_UNLOCK(&(module->m_lock));

    /* send a hello counter to everyone in group */
    for (i = 0 ; i < ompi_group_size(module->m_pw_group) ; ++i) {
        ompi_osc_rdma_control_send(module,
                                   group->grp_proc_pointers[i],
                                   OMPI_OSC_RDMA_HDR_POST, 1, 0);
    }

    return OMPI_SUCCESS;
}

int
ompi_osc_rdma_module_wait(ompi_win_t *win)
{
    ompi_group_t *group;
    ompi_osc_rdma_module_t *module = GET_MODULE(win);

    OPAL_THREAD_LOCK(&module->m_lock);
    while (0 != (module->m_num_pending_in) ||
           0 != (module->m_num_complete_msgs)) {
        opal_condition_wait(&module->m_cond, &module->m_lock);
    }

    group = module->m_pw_group;
    module->m_pw_group = NULL;
    OPAL_THREAD_UNLOCK(&module->m_lock);

    ompi_win_remove_mode(win, OMPI_WIN_EXPOSE_EPOCH | OMPI_WIN_POSTED);

    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);

    return OMPI_SUCCESS;
}
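
/*
 * Illustrative sketch (not code from this file): target side of the same
 * epoch, driven through the post / wait path above.
 *
 *     MPI_Win_post(origin_group, 0, win);
 *         ... origin processes perform their puts and gets ...
 *     MPI_Win_wait(win);
 */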

int
ompi_osc_rdma_module_test(ompi_win_t *win,
                          int *flag)
{
    ompi_group_t *group;
    ompi_osc_rdma_module_t *module = GET_MODULE(win);

#if !OMPI_ENABLE_PROGRESS_THREADS
    opal_progress();
#endif

    if (0 != (module->m_num_pending_in) ||
        0 != (module->m_num_complete_msgs)) {
        *flag = 0;
        return OMPI_SUCCESS;
    }

    *flag = 1;

    OPAL_THREAD_LOCK(&(module->m_lock));
    group = module->m_pw_group;
    module->m_pw_group = NULL;
    OPAL_THREAD_UNLOCK(&(module->m_lock));

    ompi_win_remove_mode(win, OMPI_WIN_EXPOSE_EPOCH | OMPI_WIN_POSTED);

    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);

    return OMPI_SUCCESS;
}


struct ompi_osc_rdma_pending_lock_t {
    opal_list_item_t super;
    ompi_proc_t *proc;
    int32_t lock_type;
};
typedef struct ompi_osc_rdma_pending_lock_t ompi_osc_rdma_pending_lock_t;
OBJ_CLASS_INSTANCE(ompi_osc_rdma_pending_lock_t, opal_list_item_t,
                   NULL, NULL);
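
/*
 * A pending_lock records a lock request that could not be granted
 * immediately (queued on m_locks_pending); unlock requests whose reply
 * cannot be sent until all of the origin's operations have arrived are
 * tracked the same way on m_unlocks_pending.  Both lists are drained in
 * ompi_osc_rdma_passive_unlock_complete() below.
 */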

int
ompi_osc_rdma_module_lock(int lock_type,
                          int target,
                          int assert,
                          ompi_win_t *win)
{
    ompi_osc_rdma_module_t *module = GET_MODULE(win);
    ompi_proc_t *proc = ompi_comm_peer_lookup( module->m_comm, target );

    assert(lock_type != 0);

    /* set our mode on the window */
    ompi_win_remove_mode(win, OMPI_WIN_FENCE);
    ompi_win_append_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_LOCK_ACCESS);

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                         "%d sending lock request to %d",
                         module->m_comm->c_my_rank, target));
    /* generate a lock request */
    ompi_osc_rdma_control_send(module,
                               proc,
                               OMPI_OSC_RDMA_HDR_LOCK_REQ,
                               module->m_comm->c_my_rank,
                               lock_type);

    module->m_eager_send_active = false;

    /* return */
    return OMPI_SUCCESS;
}
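
/*
 * Illustrative sketch (not code from this file): passive target access
 * driven through the lock / unlock pair implemented in this file.
 *
 *     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, win);
 *     MPI_Put(buf, 1, MPI_INT, target, 0, 1, MPI_INT, win);
 *     MPI_Win_unlock(target, win);
 */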

int
ompi_osc_rdma_module_unlock(int target,
                            ompi_win_t *win)
{
    int32_t out_count;
    opal_list_item_t *item;
    int ret;
    ompi_osc_rdma_module_t *module = GET_MODULE(win);
    ompi_proc_t *proc = ompi_comm_peer_lookup( module->m_comm, target );

    OPAL_THREAD_LOCK(&module->m_lock);
    while (0 == module->m_lock_received_ack) {
        opal_condition_wait(&module->m_cond, &module->m_lock);
    }

    module->m_lock_received_ack -= 1;

    /* start all the requests */
    ompi_osc_rdma_flip_sendreqs(module);

    /* try to start all the requests.  We've copied everything we need
       out of pending_sendreqs, so don't need the lock here */
    out_count = opal_list_get_size(&module->m_copy_pending_sendreqs);

    /* we want to send all the requests, plus we wait for one more
       completion event for the control message ack from the unlocker
       saying we're done */
    module->m_num_pending_out += (out_count + 1);
    OPAL_THREAD_UNLOCK(&module->m_lock);

    /* send the unlock request */
    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                         "%d sending unlock request to %d",
                         module->m_comm->c_my_rank, target));
    ompi_osc_rdma_control_send(module,
                               proc,
                               OMPI_OSC_RDMA_HDR_UNLOCK_REQ,
                               module->m_comm->c_my_rank,
                               out_count);

    /* try to start all the requests.  We've copied everything we
       need out of pending_sendreqs, so don't need the lock
       here */
    while (NULL !=
           (item = opal_list_remove_first(&(module->m_copy_pending_sendreqs)))) {
        ompi_osc_rdma_sendreq_t *req =
            (ompi_osc_rdma_sendreq_t*) item;

        ret = ompi_osc_rdma_sendreq_send(module, req);
        if (OMPI_SUCCESS != ret) {
            opal_list_append(&(module->m_copy_pending_sendreqs), item);
            break;
        }
    }

    OPAL_THREAD_LOCK(&module->m_lock);
    /* if some requests couldn't be started, push them onto the
       "queued" list, where we will try to restart them later. */
    if (opal_list_get_size(&module->m_copy_pending_sendreqs)) {
        opal_list_join(&module->m_queued_sendreqs,
                       opal_list_get_end(&module->m_queued_sendreqs),
                       &module->m_copy_pending_sendreqs);
    }

    /* wait for all the requests */
    while (0 != module->m_num_pending_out) {
        opal_condition_wait(&module->m_cond, &module->m_lock);
    }
    /* the lock is already held here; release it rather than locking again */
    OPAL_THREAD_UNLOCK(&module->m_lock);

    /* set our mode on the window */
    ompi_win_remove_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_LOCK_ACCESS);

    module->m_eager_send_active = module->m_eager_send_ok;

    return OMPI_SUCCESS;
}
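
/*
 * Protocol summary for the passive target path, as implemented above and
 * below: the origin sends OMPI_OSC_RDMA_HDR_LOCK_REQ and waits for the ack
 * (m_lock_received_ack).  Unlock sends OMPI_OSC_RDMA_HDR_UNLOCK_REQ carrying
 * the number of queued operations so the target knows how many incoming
 * messages to count before replying with OMPI_OSC_RDMA_HDR_UNLOCK_REPLY,
 * which accounts for the extra "+1" completion the origin waits for in
 * m_num_pending_out.
 */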

int
ompi_osc_rdma_passive_lock(ompi_osc_rdma_module_t *module,
                           int32_t origin,
                           int32_t lock_type)
{
    bool send_ack = false;
    int ret = OMPI_SUCCESS;
    ompi_proc_t *proc = ompi_comm_peer_lookup( module->m_comm, origin );
    ompi_osc_rdma_pending_lock_t *new_pending;

    OPAL_THREAD_LOCK(&(module->m_lock));
    if (lock_type == MPI_LOCK_EXCLUSIVE) {
        if (module->m_lock_status == 0) {
            module->m_lock_status = MPI_LOCK_EXCLUSIVE;
            ompi_win_append_mode(module->m_win, OMPI_WIN_EXPOSE_EPOCH);
            send_ack = true;
        } else {
            OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                                 "%d queuing lock request from %d (%d)",
                                 module->m_comm->c_my_rank, origin, lock_type));
            new_pending = OBJ_NEW(ompi_osc_rdma_pending_lock_t);
            new_pending->proc = proc;
            new_pending->lock_type = lock_type;
            opal_list_append(&(module->m_locks_pending), &(new_pending->super));
        }
    } else if (lock_type == MPI_LOCK_SHARED) {
        if (module->m_lock_status != MPI_LOCK_EXCLUSIVE) {
            module->m_lock_status = MPI_LOCK_SHARED;
            module->m_shared_count++;
            ompi_win_append_mode(module->m_win, OMPI_WIN_EXPOSE_EPOCH);
            send_ack = true;
        } else {
            OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                                 "queuing lock request from %d (%d) lock_type:%d",
                                 module->m_comm->c_my_rank, origin, lock_type));
            new_pending = OBJ_NEW(ompi_osc_rdma_pending_lock_t);
            new_pending->proc = proc;
            new_pending->lock_type = lock_type;
            opal_list_append(&(module->m_locks_pending), &(new_pending->super));
        }
    } else {
        ret = OMPI_ERROR;
    }
    OPAL_THREAD_UNLOCK(&(module->m_lock));

    if (send_ack) {
        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                             "%d sending lock ack to %d",
                             module->m_comm->c_my_rank, origin));
        ompi_osc_rdma_control_send(module, proc,
                                   OMPI_OSC_RDMA_HDR_LOCK_REQ,
                                   module->m_comm->c_my_rank,
                                   OMPI_SUCCESS);
    }

    return OMPI_SUCCESS;
}
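
/*
 * Note: ompi_osc_rdma_passive_lock() above runs on the target when a lock
 * request arrives.  The lock is granted immediately when compatible (no
 * current holder, or shared-on-shared) and the ack is sent right away;
 * otherwise the request is queued on m_locks_pending and granted later
 * from ompi_osc_rdma_passive_unlock_complete().
 */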

int
ompi_osc_rdma_passive_unlock(ompi_osc_rdma_module_t *module,
                             int32_t origin,
                             int32_t count)
{
    ompi_proc_t *proc = ompi_comm_peer_lookup( module->m_comm, origin );
    ompi_osc_rdma_pending_lock_t *new_pending = NULL;

    assert(module->m_lock_status != 0);

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                         "received unlock request from %d with %d requests\n",
                         origin, count));

    new_pending = OBJ_NEW(ompi_osc_rdma_pending_lock_t);
    new_pending->proc = proc;
    new_pending->lock_type = 0;
    OPAL_THREAD_LOCK(&(module->m_lock));
    module->m_num_pending_in += count;
    opal_list_append(&module->m_unlocks_pending, &(new_pending->super));
    OPAL_THREAD_UNLOCK(&(module->m_lock));

    return ompi_osc_rdma_passive_unlock_complete(module);
}

int
ompi_osc_rdma_passive_unlock_complete(ompi_osc_rdma_module_t *module)
{
    ompi_osc_rdma_pending_lock_t *new_pending = NULL;

    if (module->m_num_pending_in != 0) return OMPI_SUCCESS;

    OPAL_THREAD_LOCK(&module->m_lock);
    if (module->m_lock_status == MPI_LOCK_EXCLUSIVE) {
        ompi_win_remove_mode(module->m_win, OMPI_WIN_EXPOSE_EPOCH);
        module->m_lock_status = 0;
    } else {
        module->m_shared_count -= opal_list_get_size(&module->m_unlocks_pending);
        if (module->m_shared_count == 0) {
            ompi_win_remove_mode(module->m_win, OMPI_WIN_EXPOSE_EPOCH);
            module->m_lock_status = 0;
        }
    }

    /* send any unlock acks that are now due */
    while (NULL != (new_pending = (ompi_osc_rdma_pending_lock_t*)
                    opal_list_remove_first(&module->m_unlocks_pending))) {
        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                             "sending unlock reply to proc"));
        ompi_osc_rdma_control_send(module,
                                   new_pending->proc,
                                   OMPI_OSC_RDMA_HDR_UNLOCK_REPLY,
                                   OMPI_SUCCESS, OMPI_SUCCESS);
        OBJ_DESTRUCT(new_pending);
    }

    /* if we were really unlocked, see if we have more to process */
    new_pending = (ompi_osc_rdma_pending_lock_t*)
        opal_list_remove_first(&(module->m_locks_pending));
    OPAL_THREAD_UNLOCK(&(module->m_lock));

    if (NULL != new_pending) {
        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                             "sending lock request to proc"));
        ompi_win_append_mode(module->m_win, OMPI_WIN_EXPOSE_EPOCH);
        /* set lock state and generate a lock request */
        module->m_lock_status = new_pending->lock_type;
        ompi_osc_rdma_control_send(module,
                                   new_pending->proc,
                                   OMPI_OSC_RDMA_HDR_LOCK_REQ,
                                   module->m_comm->c_my_rank,
                                   OMPI_SUCCESS);
        OBJ_DESTRUCT(new_pending);
    }

    return OMPI_SUCCESS;
}