/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2013 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2012      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "osc_pt2pt.h"
#include "osc_pt2pt_sendreq.h"
#include "osc_pt2pt_header.h"
#include "osc_pt2pt_data_move.h"

#include "mpi.h"
#include "opal/runtime/opal_progress.h"
#include "opal/threads/mutex.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/osc/base/base.h"


/* Must hold module's lock before calling... */
static inline void
ompi_osc_pt2pt_flip_sendreqs(ompi_osc_pt2pt_module_t *module)
{
    unsigned int *tmp;

    tmp = module->p2p_copy_num_pending_sendreqs;
    module->p2p_copy_num_pending_sendreqs =
        module->p2p_num_pending_sendreqs;
    module->p2p_num_pending_sendreqs = tmp;
    memset(module->p2p_num_pending_sendreqs, 0,
           sizeof(unsigned int) * ompi_comm_size(module->p2p_comm));

    /* Copy in all the pending requests */
    opal_list_join(&module->p2p_copy_pending_sendreqs,
                   opal_list_get_end(&module->p2p_copy_pending_sendreqs),
                   &module->p2p_pending_sendreqs);
}
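
/*
 * The swap in ompi_osc_pt2pt_flip_sendreqs() double-buffers the pending
 * send queue: the "copy" list and per-peer counters become a snapshot
 * that the synchronization routines below can drain without holding the
 * module lock, while new requests keep accumulating in the (now zeroed)
 * primary structures.
 */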


int
ompi_osc_pt2pt_module_fence(int assert, ompi_win_t *win)
{
    unsigned int incoming_reqs;
    int ret = OMPI_SUCCESS, i;
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);
    int num_outgoing = 0;

    if (0 != (assert & MPI_MODE_NOPRECEDE)) {
        /* check that the user didn't lie to us - since NOPRECEDE
           must be specified by all processes if it is specified by
           any process, if we see this it is safe to assume that there
           are no pending operations anywhere needed to close out this
           epoch.  No need to lock, since it's a lookup and any
           pending modification of the pending_sendreqs during this
           time is an erroneous program. */
        if (0 != opal_list_get_size(&(module->p2p_pending_sendreqs))) {
            return MPI_ERR_RMA_SYNC;
        }

    } else {
        opal_list_item_t *item;

        /* "atomically" copy all the data we're going to be modifying
           into the copy... */
        OPAL_THREAD_LOCK(&(module->p2p_lock));
        ompi_osc_pt2pt_flip_sendreqs(module);
        OPAL_THREAD_UNLOCK(&(module->p2p_lock));

        num_outgoing = opal_list_get_size(&(module->p2p_copy_pending_sendreqs));

        /* find out how much data everyone is going to send us. */
        ret = module->p2p_comm->
            c_coll.coll_reduce_scatter(module->p2p_copy_num_pending_sendreqs,
                                       &incoming_reqs,
                                       module->p2p_fence_coll_counts,
                                       MPI_UNSIGNED,
                                       MPI_SUM,
                                       module->p2p_comm,
                                       module->p2p_comm->c_coll.coll_reduce_scatter_module);

        if (OMPI_SUCCESS != ret) {
            /* put the data back for the user.  This is not cheap, but
               the user loses the data if we don't. */
            OPAL_THREAD_LOCK(&(module->p2p_lock));
            opal_list_join(&module->p2p_pending_sendreqs,
                           opal_list_get_end(&module->p2p_pending_sendreqs),
                           &module->p2p_copy_pending_sendreqs);

            for (i = 0 ; i < ompi_comm_size(module->p2p_comm) ; ++i) {
                module->p2p_num_pending_sendreqs[i] +=
                    module->p2p_copy_num_pending_sendreqs[i];
            }

            OPAL_THREAD_UNLOCK(&(module->p2p_lock));
            return ret;
        }

        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                             "fence: waiting on %d in and %d out",
                             module->p2p_num_pending_in,
                             module->p2p_num_pending_out));

        /* try to start all the requests.  We've copied everything we
           need out of pending_sendreqs, so we don't need the lock
           here */
        while (NULL !=
               (item = opal_list_remove_first(&(module->p2p_copy_pending_sendreqs)))) {
            ompi_osc_pt2pt_sendreq_t *req =
                (ompi_osc_pt2pt_sendreq_t*) item;

            ret = ompi_osc_pt2pt_sendreq_send(module, req);

            if (OMPI_ERR_TEMP_OUT_OF_RESOURCE == ret) {
                opal_output_verbose(5, ompi_osc_base_framework.framework_output,
                                    "fence: failure in starting sendreq (%d).  Will try later.",
                                    ret);
                opal_list_append(&(module->p2p_copy_pending_sendreqs), item);
            } else if (OMPI_SUCCESS != ret) {
                return ret;
            }
        }

        OPAL_THREAD_LOCK(&module->p2p_lock);
        /* possible we've already received a couple in messages, so
           add however many we're going to wait for */
        module->p2p_num_pending_in += incoming_reqs;
        module->p2p_num_pending_out += num_outgoing;

        /* now we know how many things we're waiting for - wait for them... */
        while (module->p2p_num_pending_in > 0 ||
               0 != module->p2p_num_pending_out) {
            opal_condition_wait(&module->p2p_cond, &module->p2p_lock);
        }
        OPAL_THREAD_UNLOCK(&module->p2p_lock);
    }

    /* all transfers are done - back to the real world we go */
    if (0 == (assert & MPI_MODE_NOSUCCEED)) {
        ompi_win_set_mode(win, OMPI_WIN_FENCE);
    } else {
        ompi_win_set_mode(win, 0);
    }

    return OMPI_SUCCESS;
}
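
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * the fence path above backs a plain MPI_Win_fence() epoch on a window
 * served by this component.
 *
 *   MPI_Win_fence(MPI_MODE_NOPRECEDE, win);   // open epoch; no prior RMA ops
 *   MPI_Put(buf, 1, MPI_INT, peer, 0, 1, MPI_INT, win);
 *   MPI_Win_fence(MPI_MODE_NOSUCCEED, win);   // drain sends, wait for peers
 */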


int
ompi_osc_pt2pt_module_start(ompi_group_t *group,
                            int assert,
                            ompi_win_t *win)
{
    int i, ret = OMPI_SUCCESS;
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);

    OBJ_RETAIN(group);
    ompi_group_increment_proc_count(group);

    OPAL_THREAD_LOCK(&(module->p2p_lock));
    if (NULL != module->p2p_sc_group) {
        OPAL_THREAD_UNLOCK(&module->p2p_lock);
        ret = MPI_ERR_RMA_SYNC;
        goto cleanup;
    }
    module->p2p_sc_group = group;

    /* possible we've already received a couple in messages, so
       add however many we're going to wait for */
    module->p2p_num_post_msgs += ompi_group_size(module->p2p_sc_group);
    OPAL_THREAD_UNLOCK(&(module->p2p_lock));

    memset(module->p2p_sc_remote_active_ranks, 0,
           sizeof(bool) * ompi_comm_size(module->p2p_comm));

    /* for each process in the specified group, find its rank in our
       communicator, store those indexes, and set the true / false in
       the active ranks table */
    for (i = 0 ; i < ompi_group_size(group) ; i++) {
        int comm_rank = -1, j;

        /* find the rank in the communicator associated with this window */
        for (j = 0 ; j < ompi_comm_size(module->p2p_comm) ; ++j) {
            if (ompi_group_peer_lookup(module->p2p_sc_group, i) ==
                ompi_comm_peer_lookup(module->p2p_comm, j)) {
                comm_rank = j;
                break;
            }
        }
        if (comm_rank == -1) {
            ret = MPI_ERR_RMA_SYNC;
            goto cleanup;
        }

        module->p2p_sc_remote_active_ranks[comm_rank] = true;
        module->p2p_sc_remote_ranks[i] = comm_rank;
    }

    /* Set our mode to access w/ start */
    ompi_win_remove_mode(win, OMPI_WIN_FENCE);
    ompi_win_append_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_STARTED);

    return OMPI_SUCCESS;

 cleanup:
    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);
    return ret;
}


int
ompi_osc_pt2pt_module_complete(ompi_win_t *win)
{
    int i;
    int ret = OMPI_SUCCESS;
    ompi_group_t *group;
    opal_list_item_t *item;
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);

    /* wait for all the post messages */
    OPAL_THREAD_LOCK(&module->p2p_lock);
    while (0 != module->p2p_num_post_msgs) {
        opal_condition_wait(&module->p2p_cond, &module->p2p_lock);
    }

    ompi_osc_pt2pt_flip_sendreqs(module);

    /* for each process in group, send a control message with number
       of updates coming, then start all the requests */
    for (i = 0 ; i < ompi_group_size(module->p2p_sc_group) ; ++i) {
        int comm_rank = module->p2p_sc_remote_ranks[i];

        module->p2p_num_pending_out +=
            module->p2p_copy_num_pending_sendreqs[comm_rank];
    }
    OPAL_THREAD_UNLOCK(&module->p2p_lock);

    for (i = 0 ; i < ompi_group_size(module->p2p_sc_group) ; ++i) {
        int comm_rank = module->p2p_sc_remote_ranks[i];

        ret = ompi_osc_pt2pt_control_send(module,
                                          ompi_group_peer_lookup(module->p2p_sc_group, i),
                                          OMPI_OSC_PT2PT_HDR_COMPLETE,
                                          module->p2p_copy_num_pending_sendreqs[comm_rank],
                                          0);
        assert(ret == OMPI_SUCCESS);
    }

    /* try to start all the requests.  We've copied everything we
       need out of pending_sendreqs, so we don't need the lock
       here */
    while (NULL !=
           (item = opal_list_remove_first(&(module->p2p_copy_pending_sendreqs)))) {
        ompi_osc_pt2pt_sendreq_t *req =
            (ompi_osc_pt2pt_sendreq_t*) item;

        ret = ompi_osc_pt2pt_sendreq_send(module, req);

        if (OMPI_ERR_TEMP_OUT_OF_RESOURCE == ret) {
            opal_output_verbose(5, ompi_osc_base_framework.framework_output,
                                "complete: failure in starting sendreq (%d).  Will try later.",
                                ret);
            opal_list_append(&(module->p2p_copy_pending_sendreqs), item);
        } else if (OMPI_SUCCESS != ret) {
            return ret;
        }
    }

    /* wait for all the requests */
    OPAL_THREAD_LOCK(&module->p2p_lock);
    while (0 != module->p2p_num_pending_out) {
        opal_condition_wait(&module->p2p_cond, &module->p2p_lock);
    }

    group = module->p2p_sc_group;
    module->p2p_sc_group = NULL;

    OPAL_THREAD_UNLOCK(&module->p2p_lock);

    /* remove the access epoch flags from our mode */
    ompi_win_remove_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_STARTED);

    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);

    return ret;
}
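
/*
 * The COMPLETE control message above carries the per-peer count of
 * pending send requests.  This lets the exposure side (waiting in
 * ompi_osc_pt2pt_module_wait(), below) know how many incoming updates
 * it must absorb before its epoch can end, even if the COMPLETE
 * message overtakes the data it describes.
 */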


int
ompi_osc_pt2pt_module_post(ompi_group_t *group,
                           int assert,
                           ompi_win_t *win)
{
    int i;
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);

    OBJ_RETAIN(group);
    ompi_group_increment_proc_count(group);

    OPAL_THREAD_LOCK(&(module->p2p_lock));
    assert(NULL == module->p2p_pw_group);
    module->p2p_pw_group = group;

    /* Set our mode to expose w/ post */
    ompi_win_remove_mode(win, OMPI_WIN_FENCE);
    ompi_win_append_mode(win, OMPI_WIN_EXPOSE_EPOCH | OMPI_WIN_POSTED);

    /* count how many complete messages we're still waiting on */
    module->p2p_num_complete_msgs +=
        ompi_group_size(module->p2p_pw_group);
    OPAL_THREAD_UNLOCK(&(module->p2p_lock));

    /* send a hello counter to everyone in group */
    for (i = 0 ; i < ompi_group_size(module->p2p_pw_group) ; ++i) {
        ompi_osc_pt2pt_control_send(module,
                                    ompi_group_peer_lookup(group, i),
                                    OMPI_OSC_PT2PT_HDR_POST, 1, 0);
    }

    return OMPI_SUCCESS;
}
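
/*
 * Each POST message carries a count of 1.  On the access side those
 * receipts (handled by the data-move code, not in this file) decrement
 * p2p_num_post_msgs, which ompi_osc_pt2pt_module_start() increments by
 * the start-group size; using += on both sides keeps the count correct
 * even when a POST arrives before the matching MPI_Win_start.
 */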


int
ompi_osc_pt2pt_module_wait(ompi_win_t *win)
{
    ompi_group_t *group;
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);

    OPAL_THREAD_LOCK(&module->p2p_lock);
    while (0 != (module->p2p_num_pending_in) ||
           0 != (module->p2p_num_complete_msgs)) {
        opal_condition_wait(&module->p2p_cond, &module->p2p_lock);
    }

    group = module->p2p_pw_group;
    module->p2p_pw_group = NULL;
    OPAL_THREAD_UNLOCK(&module->p2p_lock);

    ompi_win_remove_mode(win, OMPI_WIN_EXPOSE_EPOCH | OMPI_WIN_POSTED);

    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);

    return OMPI_SUCCESS;
}
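
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * Start/Complete above form the access side and Post/Wait the exposure
 * side of MPI's generalized active-target synchronization.
 *
 *   // origin process                    // target process
 *   MPI_Win_start(grp, 0, win);          MPI_Win_post(grp, 0, win);
 *   MPI_Put(..., win);                   // ... target computes ...
 *   MPI_Win_complete(win);               MPI_Win_wait(win);
 */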


int
ompi_osc_pt2pt_module_test(ompi_win_t *win,
                           int *flag)
{
    ompi_group_t *group;
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);

#if !OMPI_ENABLE_PROGRESS_THREADS
    opal_progress();
#endif

    if (0 != (module->p2p_num_pending_in) ||
        0 != (module->p2p_num_complete_msgs)) {
        *flag = 0;
        return OMPI_SUCCESS;
    }

    *flag = 1;

    ompi_win_remove_mode(win, OMPI_WIN_EXPOSE_EPOCH | OMPI_WIN_POSTED);

    OPAL_THREAD_LOCK(&(module->p2p_lock));
    group = module->p2p_pw_group;
    module->p2p_pw_group = NULL;
    OPAL_THREAD_UNLOCK(&(module->p2p_lock));

    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);

    return OMPI_SUCCESS;
}


struct ompi_osc_pt2pt_pending_lock_t {
    opal_list_item_t super;
    ompi_proc_t *proc;
    int32_t lock_type;
};
typedef struct ompi_osc_pt2pt_pending_lock_t ompi_osc_pt2pt_pending_lock_t;
OBJ_CLASS_INSTANCE(ompi_osc_pt2pt_pending_lock_t, opal_list_item_t,
                   NULL, NULL);
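
/*
 * A pending lock records a lock (or unlock, with lock_type 0) request
 * that could not be satisfied immediately.  It is queued on
 * p2p_locks_pending (or p2p_unlocks_pending) and replayed by
 * ompi_osc_pt2pt_passive_unlock_complete() once the current holder
 * releases the window.
 */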


int
ompi_osc_pt2pt_module_lock(int lock_type,
                           int target,
                           int assert,
                           ompi_win_t *win)
{
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);
    ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, target );

    assert(lock_type != 0);

    /* set our mode on the window */
    ompi_win_remove_mode(win, OMPI_WIN_FENCE);
    ompi_win_append_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_LOCK_ACCESS);

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "%d: sending lock request to %d",
                         ompi_comm_rank(module->p2p_comm),
                         target));
    /* generate a lock request */
    ompi_osc_pt2pt_control_send(module,
                                proc,
                                OMPI_OSC_PT2PT_HDR_LOCK_REQ,
                                ompi_comm_rank(module->p2p_comm),
                                lock_type);

    if (ompi_comm_rank(module->p2p_comm) == target) {
        /* If we're trying to lock locally, have to wait to actually
           acquire the lock */
        OPAL_THREAD_LOCK(&module->p2p_lock);
        while (module->p2p_lock_received_ack == 0) {
            opal_condition_wait(&module->p2p_cond, &module->p2p_lock);
        }
        OPAL_THREAD_UNLOCK(&module->p2p_lock);
    }

    return OMPI_SUCCESS;
}
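
/*
 * Note that lock is non-blocking here for remote targets: the request
 * is sent and the ack is only awaited in ompi_osc_pt2pt_module_unlock().
 * Locking "self" must block, since a local access epoch allows the
 * caller to touch the window memory directly as soon as this call
 * returns.
 */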


int
ompi_osc_pt2pt_module_unlock(int target,
                             ompi_win_t *win)
{
    int32_t out_count;
    opal_list_item_t *item;
    int ret;
    ompi_osc_pt2pt_module_t *module = P2P_MODULE(win);
    ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, target );

    OPAL_THREAD_LOCK(&module->p2p_lock);
    while (0 == module->p2p_lock_received_ack) {
        opal_condition_wait(&module->p2p_cond, &module->p2p_lock);
    }

    module->p2p_lock_received_ack -= 1;

    /* start all the requests */
    ompi_osc_pt2pt_flip_sendreqs(module);

    /* try to start all the requests.  We've copied everything we need
       out of pending_sendreqs, so we don't need the lock here */
    out_count = opal_list_get_size(&(module->p2p_copy_pending_sendreqs));

    /* we want to send all the requests, plus we wait for one more
       completion event for the control message ack from the unlocker
       saying we're done */
    module->p2p_num_pending_out += (out_count + 1);
    OPAL_THREAD_UNLOCK(&module->p2p_lock);

    /* send the unlock request */
    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "%d: sending unlock request to %d with %d requests",
                         ompi_comm_rank(module->p2p_comm), target,
                         out_count));
    ompi_osc_pt2pt_control_send(module,
                                proc,
                                OMPI_OSC_PT2PT_HDR_UNLOCK_REQ,
                                ompi_comm_rank(module->p2p_comm),
                                out_count);

    while (NULL !=
           (item = opal_list_remove_first(&(module->p2p_copy_pending_sendreqs)))) {
        ompi_osc_pt2pt_sendreq_t *req =
            (ompi_osc_pt2pt_sendreq_t*) item;

        ret = ompi_osc_pt2pt_sendreq_send(module, req);

        if (OMPI_ERR_TEMP_OUT_OF_RESOURCE == ret) {
            opal_output_verbose(5, ompi_osc_base_framework.framework_output,
                                "unlock: failure in starting sendreq (%d).  Will try later.",
                                ret);
            opal_list_append(&(module->p2p_copy_pending_sendreqs), item);
        } else if (OMPI_SUCCESS != ret) {
            return ret;
        }
    }

    /* wait for all the requests */
    OPAL_THREAD_LOCK(&module->p2p_lock);
    while (0 != module->p2p_num_pending_out) {
        opal_condition_wait(&module->p2p_cond, &module->p2p_lock);
    }
    OPAL_THREAD_UNLOCK(&module->p2p_lock);

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "%d: finished unlock to %d",
                         ompi_comm_rank(module->p2p_comm), target));

    /* set our mode on the window */
    ompi_win_remove_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_LOCK_ACCESS);

    return OMPI_SUCCESS;
}
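
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * lock/unlock above implement passive-target synchronization; the
 * target process makes no synchronization calls of its own.
 *
 *   MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, win);
 *   MPI_Get(buf, 1, MPI_INT, target, 0, 1, MPI_INT, win);
 *   MPI_Win_unlock(target, win);   // blocks until the target acks
 */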


int
ompi_osc_pt2pt_passive_lock(ompi_osc_pt2pt_module_t *module,
                            int32_t origin,
                            int32_t lock_type)
{
    bool send_ack = false;
    ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, origin );
    ompi_osc_pt2pt_pending_lock_t *new_pending;

    OPAL_THREAD_LOCK(&(module->p2p_lock));
    if (lock_type == MPI_LOCK_EXCLUSIVE) {
        if (module->p2p_lock_status == 0) {
            module->p2p_lock_status = MPI_LOCK_EXCLUSIVE;
            OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                                 "%d: setting lock status to EXCLUSIVE (from %d)",
                                 ompi_comm_rank(module->p2p_comm), origin));
            ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
            send_ack = true;
        } else {
            OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                                 "%d: queuing lock request from %d (type=%d)",
                                 ompi_comm_rank(module->p2p_comm), origin, lock_type));
            new_pending = OBJ_NEW(ompi_osc_pt2pt_pending_lock_t);
            new_pending->proc = proc;
            new_pending->lock_type = lock_type;
            opal_list_append(&(module->p2p_locks_pending), &(new_pending->super));
        }
    } else if (lock_type == MPI_LOCK_SHARED) {
        if (module->p2p_lock_status != MPI_LOCK_EXCLUSIVE) {
            module->p2p_lock_status = MPI_LOCK_SHARED;
            module->p2p_shared_count++;
            OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                                 "%d: setting lock status to SHARED (from %d), count %d",
                                 ompi_comm_rank(module->p2p_comm), origin, module->p2p_shared_count));
            ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
            send_ack = true;
        } else {
            OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                                 "%d: queuing lock request from %d (type=%d)",
                                 ompi_comm_rank(module->p2p_comm), origin, lock_type));
            new_pending = OBJ_NEW(ompi_osc_pt2pt_pending_lock_t);
            new_pending->proc = proc;
            new_pending->lock_type = lock_type;
            opal_list_append(&(module->p2p_locks_pending), &(new_pending->super));
        }
    }
    OPAL_THREAD_UNLOCK(&(module->p2p_lock));

    if (send_ack) {
        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                             "%d: sending lock ack to %d",
                             ompi_comm_rank(module->p2p_comm), origin));
        ompi_osc_pt2pt_control_send(module, proc,
                                    OMPI_OSC_PT2PT_HDR_LOCK_REQ,
                                    ompi_comm_rank(module->p2p_comm),
                                    OMPI_SUCCESS);
    }

    return OMPI_SUCCESS;
}
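
/*
 * Admission rule implemented above: an EXCLUSIVE request is granted
 * only when the window is currently unlocked, while a SHARED request
 * is granted whenever no EXCLUSIVE lock is held (stacking via
 * p2p_shared_count).  Anything else is queued and replayed at unlock
 * time.
 */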


int
ompi_osc_pt2pt_passive_unlock(ompi_osc_pt2pt_module_t *module,
                              int32_t origin,
                              int32_t count)
{
    ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, origin );
    ompi_osc_pt2pt_pending_lock_t *new_pending = NULL;

    assert(module->p2p_lock_status != 0);

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "%d: received unlock request from %d with %d requests\n",
                         ompi_comm_rank(module->p2p_comm),
                         origin, count));

    new_pending = OBJ_NEW(ompi_osc_pt2pt_pending_lock_t);
    new_pending->proc = proc;
    new_pending->lock_type = 0;
    OPAL_THREAD_LOCK(&(module->p2p_lock));
    module->p2p_num_pending_in += count;
    opal_list_append(&module->p2p_unlocks_pending, &(new_pending->super));
    OPAL_THREAD_UNLOCK(&(module->p2p_lock));

    return ompi_osc_pt2pt_passive_unlock_complete(module);
}


int
ompi_osc_pt2pt_passive_unlock_complete(ompi_osc_pt2pt_module_t *module)
{
    ompi_osc_pt2pt_pending_lock_t *new_pending = NULL;
    opal_list_t copy_unlock_acks;

    if (module->p2p_num_pending_in != 0) return OMPI_SUCCESS;

    OPAL_THREAD_LOCK(&(module->p2p_lock));
    if (module->p2p_num_pending_in != 0) {
        OPAL_THREAD_UNLOCK(&module->p2p_lock);
        return OMPI_SUCCESS;
    }

    if (module->p2p_lock_status == MPI_LOCK_EXCLUSIVE) {
        ompi_win_remove_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
        module->p2p_lock_status = 0;
    } else {
        module->p2p_shared_count -= opal_list_get_size(&module->p2p_unlocks_pending);
        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                             "%d: decrementing shared count to %d",
                             ompi_comm_rank(module->p2p_comm),
                             module->p2p_shared_count));
        if (module->p2p_shared_count == 0) {
            ompi_win_remove_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
            module->p2p_lock_status = 0;
        }
    }

    OBJ_CONSTRUCT(&copy_unlock_acks, opal_list_t);
    /* copy over any unlocks that have been satisfied (possibly
       multiple if SHARED) */
    opal_list_join(&copy_unlock_acks,
                   opal_list_get_end(&copy_unlock_acks),
                   &module->p2p_unlocks_pending);
    OPAL_THREAD_UNLOCK(&module->p2p_lock);

    /* issue whichever unlock acks we should issue */
    while (NULL != (new_pending = (ompi_osc_pt2pt_pending_lock_t*)
                    opal_list_remove_first(&copy_unlock_acks))) {
        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                             "%d: sending unlock ack to proc %d",
                             ompi_comm_rank(module->p2p_comm),
                             new_pending->proc->proc_name.vpid));
        ompi_osc_pt2pt_control_send(module,
                                    new_pending->proc,
                                    OMPI_OSC_PT2PT_HDR_UNLOCK_REPLY,
                                    OMPI_SUCCESS, OMPI_SUCCESS);
        OBJ_RELEASE(new_pending);
    }

    OBJ_DESTRUCT(&copy_unlock_acks);

    /* if we were really unlocked, see if we have another lock request
       we can satisfy */
    OPAL_THREAD_LOCK(&module->p2p_lock);
    if (0 == module->p2p_lock_status) {
        new_pending = (ompi_osc_pt2pt_pending_lock_t*)
            opal_list_remove_first(&(module->p2p_locks_pending));
        if (NULL != new_pending) {
            OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                                 "%d: sending lock ack to proc %d",
                                 ompi_comm_rank(module->p2p_comm),
                                 new_pending->proc->proc_name.vpid));
            ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
            /* set lock state and generate a lock request */
            module->p2p_lock_status = new_pending->lock_type;
            if (MPI_LOCK_SHARED == new_pending->lock_type) {
                module->p2p_shared_count++;
            }
        }
    } else {
        new_pending = NULL;
    }
    OPAL_THREAD_UNLOCK(&(module->p2p_lock));

    if (NULL != new_pending) {
        ompi_osc_pt2pt_control_send(module,
                                    new_pending->proc,
                                    OMPI_OSC_PT2PT_HDR_LOCK_REQ,
                                    ompi_comm_rank(module->p2p_comm),
                                    OMPI_SUCCESS);
        OBJ_RELEASE(new_pending);
    }

    return OMPI_SUCCESS;
}