openmpi/ompi/mca/pml/cm/pml_cm_recv.c
Rolf vandeVaart 0324fdb407 Created two new macros that are used when filling in either the full
status structure or just the _ucount field in the status structure.
On 64-bit SPARC, the macros resolve into integer array assignments;
for all others, they are simple assignments.  This fixes possible
BUS errors seen when running on SPARC processors.  This bug was
introduced when the _count field changed from an int to a size_t.
See the changes to request.h for additional details.

This commit fixes trac:2514.

This commit was SVN r23554.

The following Trac tickets were found above:
  Ticket 2514 --> https://svn.open-mpi.org/trac/ompi/ticket/2514
2010-08-04 19:36:40 +00:00
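
The commit message describes the two macros only in prose. A minimal
sketch of the pattern it describes (with a hypothetical macro name and
platform guard, since the real definitions live in request.h) might
look like this:

/* Hypothetical illustration, not the macro actually added in request.h.
 * On 64-bit SPARC the size_t _ucount member of the status structure may
 * sit on a 4-byte boundary, so a plain 8-byte store can raise a BUS
 * error.  Copying the value as two int-sized halves only requires
 * 4-byte alignment. */
#if defined(__sparc) && SIZEOF_SIZE_T == 8   /* hypothetical guard */
#define STATUS_SET_UCOUNT(status, count)                \
    do {                                                \
        size_t _cnt = (count);                          \
        int *_dst = (int *)&((status)->_ucount);        \
        int *_src = (int *)&_cnt;                       \
        _dst[0] = _src[0];                              \
        _dst[1] = _src[1];                              \
    } while (0)
#else
#define STATUS_SET_UCOUNT(status, count)                \
    do { (status)->_ucount = (size_t)(count); } while (0)
#endif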

126 lines
3.6 KiB
C

/*
 * Copyright (c) 2004-2006 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2010      Oracle and/or its affiliates.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "opal/prefetch.h"
#include "ompi/request/request.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h"
#include "pml_cm.h"
#include "pml_cm_recvreq.h"
int
mca_pml_cm_irecv_init(void *addr,
                      size_t count,
                      ompi_datatype_t * datatype,
                      int src,
                      int tag,
                      struct ompi_communicator_t *comm,
                      struct ompi_request_t **request)
{
    int ret;
    mca_pml_cm_hvy_recv_request_t *recvreq;
    ompi_proc_t* ompi_proc;

    MCA_PML_CM_HVY_RECV_REQUEST_ALLOC(recvreq, ret);
    if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) return ret;

    MCA_PML_CM_HVY_RECV_REQUEST_INIT(recvreq, ompi_proc, comm, tag, src,
                                     datatype, addr, count, true);

    *request = (ompi_request_t*) recvreq;

    return OMPI_SUCCESS;
}
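
/* Post a non-blocking receive: allocate a thin request, initialize it,
 * and start it immediately. */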
int
mca_pml_cm_irecv(void *addr,
                 size_t count,
                 ompi_datatype_t * datatype,
                 int src,
                 int tag,
                 struct ompi_communicator_t *comm,
                 struct ompi_request_t **request)
{
    int ret;
    mca_pml_cm_thin_recv_request_t *recvreq;
    ompi_proc_t* ompi_proc;

    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq, ret);
    if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) return ret;

    MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
                                      ompi_proc,
                                      comm,
                                      tag,
                                      src,
                                      datatype,
                                      addr,
                                      count);

    MCA_PML_CM_THIN_RECV_REQUEST_START(recvreq, comm, tag, src, ret);

    if( OPAL_LIKELY(OMPI_SUCCESS == ret) ) *request = (ompi_request_t*) recvreq;

    return ret;
}
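
/* Blocking receive: post the request, wait for completion, fill in the
 * caller's status (if any), and release the request. */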
int
mca_pml_cm_recv(void *addr,
                size_t count,
                ompi_datatype_t * datatype,
                int src,
                int tag,
                struct ompi_communicator_t *comm,
                ompi_status_public_t * status)
{
    int ret;
    mca_pml_cm_thin_recv_request_t *recvreq;
    ompi_proc_t* ompi_proc;

    MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq, ret);
    if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) return ret;

    MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
                                      ompi_proc,
                                      comm,
                                      tag,
                                      src,
                                      datatype,
                                      addr,
                                      count);

    MCA_PML_CM_THIN_RECV_REQUEST_START(recvreq, comm, tag, src, ret);

    if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) {
        /* BWB - XXX - need cleanup of request here */
        MCA_PML_CM_THIN_RECV_REQUEST_RETURN(recvreq);
        return ret;
    }

    ompi_request_wait_completion(&recvreq->req_base.req_ompi);

    if (NULL != status) {
        /* Return the status using OMPI_STATUS_SET, one of the
         * alignment-safe copy macros described in the commit message
         * above. */
        OMPI_STATUS_SET(status, &recvreq->req_base.req_ompi.req_status);
    }
    ret = recvreq->req_base.req_ompi.req_status.MPI_ERROR;
    ompi_request_free( (ompi_request_t**)&recvreq );
    return ret;
}
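
For context, applications do not call these functions directly; the MPI
layer reaches them through the PML interface. A simplified sketch of the
dispatch (not the verbatim OMPI source, which also performs argument
checking and error handling):

/* The MPI_Recv binding forwards to the selected PML component; when
 * the "cm" PML is active, the component's recv function pointer is
 * mca_pml_cm_recv, defined above. */
int MPI_Recv(void *buf, int count, MPI_Datatype type, int source,
             int tag, MPI_Comm comm, MPI_Status *status)
{
    return MCA_PML_CALL(recv(buf, count, type, source, tag,
                             comm, status));
}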