84d1512fba
single-threaded builds. In its default configuration, all this does is ensure that there's at least a good chance of threaded code building based on non-threaded development (since the variable names will be checked). There is also code to make sure that a "mutex" is never "double locked" when using the conditional macro mutex operations. This is off by default because there are a number of places in both ORTE and OMPI where this alarm spews megabytes of errors on a simple test, so we have some work to do on our path towards thread support. Also removed the macro versions of the non-conditional thread locks, since in the only places they were used, the author of the code intended to use the conditional thread locks. So now you have upper-case macros for conditional thread locks and lower-case functions for non-conditional locks. Simple, right? :). This commit was SVN r15011.
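For readers who haven't met the two flavors of lock, here is a minimal sketch of the split the message describes, assuming the opal mutex API of this era (opal_mutex_t, the lower-case opal_mutex_lock()/opal_mutex_unlock() functions, and the upper-case OPAL_THREAD_LOCK()/OPAL_THREAD_UNLOCK() macros); the lock name and function below are hypothetical, for illustration only:

#include "opal/threads/mutex.h"

static opal_mutex_t example_lock;   /* hypothetical lock, constructed before use */

static void locking_example(void)
{
    OBJ_CONSTRUCT(&example_lock, opal_mutex_t);

    /* Conditional, upper-case macro: acquires the lock only when
     * opal_using_threads() is true.  Per this commit, the lock code is
     * now also compiled in single-threaded builds, so a misnamed lock
     * variable no longer hides until someone does a threaded build. */
    OPAL_THREAD_LOCK(&example_lock);
    /* ... critical section ... */
    OPAL_THREAD_UNLOCK(&example_lock);

    /* Non-conditional, lower-case function: always takes the lock,
     * whether or not the process is using threads. */
    opal_mutex_lock(&example_lock);
    /* ... critical section ... */
    opal_mutex_unlock(&example_lock);

    OBJ_DESTRUCT(&example_lock);
}

The blocking-send path in the file below uses exactly this split: opal_mutex_lock()/opal_mutex_unlock() when opal_using_threads() is true, and OPAL_THREAD_LOCK()/OPAL_THREAD_UNLOCK() (under OMPI_ENABLE_DEBUG && !OMPI_HAVE_THREAD_SUPPORT) so the new lock checking also runs in debug single-threaded builds.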
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "pml_ob1.h"
#include "pml_ob1_sendreq.h"
#include "pml_ob1_recvreq.h"
#include "ompi/peruse/peruse-internal.h"

int mca_pml_ob1_isend_init(void *buf,
                           size_t count,
                           ompi_datatype_t * datatype,
                           int dst,
                           int tag,
                           mca_pml_base_send_mode_t sendmode,
                           ompi_communicator_t * comm,
                           ompi_request_t ** request)
{
    int rc;

    mca_pml_ob1_send_request_t *sendreq = NULL;
    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
    if (rc != OMPI_SUCCESS)
        return rc;

    /* Persistent send: initialize the request (the trailing true marks
     * it persistent) but do not start it; starting is deferred until
     * the request is explicitly started (e.g. via MPI_Start). */
    MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
                                  buf,
                                  count,
                                  datatype,
                                  dst, tag,
                                  comm, sendmode, true);

    *request = (ompi_request_t *) sendreq;
    return OMPI_SUCCESS;
}

int mca_pml_ob1_isend(void *buf,
                      size_t count,
                      ompi_datatype_t * datatype,
                      int dst,
                      int tag,
                      mca_pml_base_send_mode_t sendmode,
                      ompi_communicator_t * comm,
                      ompi_request_t ** request)
{
    int rc;
    mca_pml_ob1_send_request_t *sendreq = NULL;
    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
    if (rc != OMPI_SUCCESS)
        return rc;

    MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
                                  buf,
                                  count,
                                  datatype,
                                  dst, tag,
                                  comm, sendmode, false);

    /* Nonblocking send: start the request immediately and hand it back
     * to the caller, who completes it through the request interface. */
    MCA_PML_OB1_SEND_REQUEST_START(sendreq, rc);
    *request = (ompi_request_t *) sendreq;
    return rc;
}

int mca_pml_ob1_send(void *buf,
                     size_t count,
                     ompi_datatype_t * datatype,
                     int dst,
                     int tag,
                     mca_pml_base_send_mode_t sendmode,
                     ompi_communicator_t * comm)
{
    int rc;
    mca_pml_ob1_send_request_t *sendreq;
    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
    if (rc != OMPI_SUCCESS)
        return rc;

    MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
                                  buf,
                                  count,
                                  datatype,
                                  dst, tag,
                                  comm, sendmode, false);

    /* Blocking send: start the request and wait here for completion. */
    MCA_PML_OB1_SEND_REQUEST_START(sendreq, rc);
    if (rc != OMPI_SUCCESS) {
        MCA_PML_OB1_SEND_REQUEST_RETURN( sendreq );
        return rc;
    }

    if (sendreq->req_send.req_base.req_ompi.req_complete == false) {
#if OMPI_ENABLE_PROGRESS_THREADS
        /* With progress threads, spin briefly on the completion flag
         * before falling back to the condition variable. */
        if(opal_progress_spin(&sendreq->req_send.req_base.req_ompi.req_complete)) {
            ompi_request_free( (ompi_request_t**)&sendreq );
            return OMPI_SUCCESS;
        }
#endif

        /* give up and sleep until completion */
        if (opal_using_threads()) {
            /* Threads are known to be in use, so take the request lock
             * with the non-conditional (lower-case) function. */
            opal_mutex_lock(&ompi_request_lock);
            ompi_request_waiting++;
            while (sendreq->req_send.req_base.req_ompi.req_complete == false)
                opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
            ompi_request_waiting--;
            opal_mutex_unlock(&ompi_request_lock);
        } else {
            /* Single-threaded path: in debug builds without thread
             * support, take the lock via the conditional (upper-case)
             * macro so the lock checking described in the commit
             * message is exercised here too. */
#if OMPI_ENABLE_DEBUG && !OMPI_HAVE_THREAD_SUPPORT
            OPAL_THREAD_LOCK(&ompi_request_lock);
#endif
            ompi_request_waiting++;
            while (sendreq->req_send.req_base.req_ompi.req_complete == false)
                opal_condition_wait(&ompi_request_cond, &ompi_request_lock);
            ompi_request_waiting--;
#if OMPI_ENABLE_DEBUG && !OMPI_HAVE_THREAD_SUPPORT
            OPAL_THREAD_UNLOCK(&ompi_request_lock);
#endif
        }
    }

    /* Propagate the error status recorded on the request, then free it. */
    rc = sendreq->req_send.req_base.req_ompi.req_status.MPI_ERROR;
    ompi_request_free( (ompi_request_t**)&sendreq );
    return rc;
}
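For orientation, a hedged sketch of the MPI-level calls that typically reach these three entry points through the mca_pml dispatch table; the peer, tag, and buffer here are illustrative only:

#include <mpi.h>

void send_example(int peer, double *buf, int n)
{
    MPI_Request req;
    MPI_Status  status;

    /* MPI_Send_init -> pml isend_init: builds a persistent request;
     * MPI_Start actually starts it. */
    MPI_Send_init(buf, n, MPI_DOUBLE, peer, 0, MPI_COMM_WORLD, &req);
    MPI_Start(&req);
    MPI_Wait(&req, &status);
    MPI_Request_free(&req);

    /* MPI_Isend -> pml isend: init + start, completion via MPI_Wait. */
    MPI_Isend(buf, n, MPI_DOUBLE, peer, 0, MPI_COMM_WORLD, &req);
    MPI_Wait(&req, &status);

    /* MPI_Send -> pml send: init, start, and wait inside the PML. */
    MPI_Send(buf, n, MPI_DOUBLE, peer, 0, MPI_COMM_WORLD);
}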