openmpi/ompi/mca/pml/cm/pml_cm.c
Jeff Squyres c8bb7537e7 Remove include/opal/sys/cache.h -- its only purpose in life was to
#define CACHE_LINE_SIZE to 128.  This name has a conflict on NetBSD,
and it seems kinda odd to have a header file that ''only'' defines a
single value.  Also, we'll soon be raising hwloc to be a first-class
item, so having this file around seemed kinda weird.

Therefore, I replaced CACHE_LINE_SIZE with opal_cache_line_size, an
int (in opal/runtime/opal_init.c and opal/runtime/opal.h) on the
rationale that we can fill this in at runtime with hwloc info (trunk
and v1.5/beyond, only).  The only place we ''needed'' a compile-time
CACHE_LINE_SIZE was in the BTL SM (for struct padding), so I made a
new BTL_SM_ preprocessor macro with the old CACHE_LINE_SIZE value
(128).  That use isn't suitable for run-time hwloc information,
anyway.

This commit was SVN r23349.
2010-07-06 14:33:36 +00:00
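
Below is a minimal, illustrative sketch (not code from this commit) of the two mechanisms the message describes: a compile-time padding constant for struct layout in the BTL SM, and a run-time opal_cache_line_size that starts at the old default and can later be refined from hwloc. The macro and struct names here are hypothetical; only the 128 default and the opal_cache_line_size identifier come from the message.

/* Hypothetical compile-time constant (the message only says the real
 * macro is prefixed BTL_SM_ and keeps the old value of 128): */
#define BTL_SM_CACHE_LINE_SIZE 128

/* Struct padding needs a constant expression, which is why the BTL SM
 * cannot use a run-time value here.  The fields are illustrative. */
struct sm_fifo_sketch {
    volatile int head;
    char pad[BTL_SM_CACHE_LINE_SIZE - sizeof(int)]; /* isolate head on its own line */
    volatile int tail;
};

/* Run-time value declared in opal/runtime/opal.h per the message;
 * opal_init() can overwrite this default with hwloc topology info. */
int opal_cache_line_size = 128;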

/*
 * Copyright (c) 2006-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2007 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2006 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/pml/base/pml_base_request.h"
#include "ompi/mca/pml/base/pml_base_bsend.h"
#include "ompi/mca/pml/base/base.h"
#include "pml_cm.h"
#include "pml_cm_sendreq.h"
#include "pml_cm_recvreq.h"
ompi_pml_cm_t ompi_pml_cm = {
    {
        mca_pml_cm_add_procs,
        mca_pml_cm_del_procs,
        mca_pml_cm_enable,
        NULL, /* No progress function.  The MTLs register their own. */
        mca_pml_cm_add_comm,
        mca_pml_cm_del_comm,
        mca_pml_cm_irecv_init,
        mca_pml_cm_irecv,
        mca_pml_cm_recv,
        mca_pml_cm_isend_init,
        mca_pml_cm_isend,
        mca_pml_cm_send,
        mca_pml_cm_iprobe,
        mca_pml_cm_probe,
        mca_pml_cm_start,
        mca_pml_cm_dump,
        NULL,
        0,
        0
    }
};

int
mca_pml_cm_enable(bool enable)
{
    /* BWB - FIX ME - need to have this actually do something,
       maybe? */

    /* Each free-list element holds the PML request plus the
       MTL-private request space that follows it. */
    ompi_free_list_init_new(&mca_pml_base_send_requests,
                            sizeof(mca_pml_cm_hvy_send_request_t) +
                            ompi_mtl->mtl_request_size,
                            opal_cache_line_size,
                            OBJ_CLASS(mca_pml_cm_hvy_send_request_t),
                            0, opal_cache_line_size,
                            ompi_pml_cm.free_list_num,
                            ompi_pml_cm.free_list_max,
                            ompi_pml_cm.free_list_inc,
                            NULL);

    ompi_free_list_init_new(&mca_pml_base_recv_requests,
                            sizeof(mca_pml_cm_hvy_recv_request_t) +
                            ompi_mtl->mtl_request_size,
                            opal_cache_line_size,
                            OBJ_CLASS(mca_pml_cm_hvy_recv_request_t),
                            0, opal_cache_line_size,
                            ompi_pml_cm.free_list_num,
                            ompi_pml_cm.free_list_max,
                            ompi_pml_cm.free_list_inc,
                            NULL);

    return OMPI_SUCCESS;
}

int
mca_pml_cm_add_comm(ompi_communicator_t* comm)
{
    /* should never happen, but it was, so check */
    if (comm->c_contextid > ompi_pml_cm.super.pml_max_contextid) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* setup our per-communicator data */
    comm->c_pml_comm = NULL;

    return OMPI_SUCCESS;
}

int
mca_pml_cm_del_comm(ompi_communicator_t* comm)
{
    /* clean up our per-communicator data */
    comm->c_pml_comm = NULL;

    return OMPI_SUCCESS;
}

int
mca_pml_cm_add_procs(struct ompi_proc_t** procs, size_t nprocs)
{
    int ret;
    size_t i;
    struct mca_mtl_base_endpoint_t **endpoints;

#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
    /* reject any peer whose architecture differs from ours */
    for (i = 0 ; i < nprocs ; ++i) {
        if (procs[i]->proc_arch != ompi_proc_local()->proc_arch) {
            return OMPI_ERR_NOT_SUPPORTED;
        }
    }
#endif

    /* make sure remote procs are using the same PML as us */
    if (OMPI_SUCCESS != (ret = mca_pml_base_pml_check_selected("cm",
                                                               procs,
                                                               nprocs))) {
        return ret;
    }

    endpoints = (struct mca_mtl_base_endpoint_t**)
        malloc(nprocs * sizeof(struct mca_mtl_base_endpoint_t*));
    if (NULL == endpoints) return OMPI_ERROR;

#if OPAL_ENABLE_DEBUG
    for (i = 0 ; i < nprocs ; ++i) {
        endpoints[i] = NULL;
    }
#endif

    ret = OMPI_MTL_CALL(add_procs(ompi_mtl, nprocs, procs, endpoints));
    if (OMPI_SUCCESS != ret) {
        free(endpoints);
        return ret;
    }

    /* stash the MTL endpoint for each peer on its proc structure */
    for (i = 0 ; i < nprocs ; ++i) {
        procs[i]->proc_pml = (struct mca_pml_endpoint_t*) endpoints[i];
    }
    free(endpoints);

    return OMPI_SUCCESS;
}

int
mca_pml_cm_del_procs(struct ompi_proc_t** procs, size_t nprocs)
{
    int ret;
    size_t i;
    struct mca_mtl_base_endpoint_t **endpoints;

    endpoints = (struct mca_mtl_base_endpoint_t**)
        malloc(nprocs * sizeof(struct mca_mtl_base_endpoint_t*));
    if (NULL == endpoints) return OMPI_ERROR;

    /* recover the MTL endpoints stashed on each proc by add_procs */
    for (i = 0 ; i < nprocs ; ++i) {
        endpoints[i] = (struct mca_mtl_base_endpoint_t*) procs[i]->proc_pml;
    }

    ret = OMPI_MTL_CALL(del_procs(ompi_mtl, nprocs, procs, endpoints));
    if (OMPI_SUCCESS != ret) {
        free(endpoints);
        return ret;
    }

    free(endpoints);
    return OMPI_SUCCESS;
}

/* print any available useful information from this communicator */
int
mca_pml_cm_dump(struct ompi_communicator_t* comm, int verbose)
{
    return OMPI_ERR_NOT_IMPLEMENTED;
}