
A long-awaited patch: get rid of comm->c_pml_procs. It was (long ago) intended as a
cache for accessing the PML procs, but in all of the PMLs the PML proc contains only
one field, a pointer to the ompi_proc, and that pointer can easily be reached through
the c_remote_group. There is therefore no point in keeping the PML procs around.
Slim-fast commit ...

This commit was SVN r11730.
This commit is contained in:
George Bosilca 2006-09-20 22:14:46 +00:00
parent 20459bd982
commit 688a16ea78
45 changed files with 45 additions and 393 deletions
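
For context, a minimal sketch of the access pattern this change standardizes on. It assumes ompi_comm_peer_lookup() is essentially an index into the communicator's remote group; only fields that actually appear in this diff (c_remote_group, grp_proc_pointers, proc_pml) are used, and the helper name peer_lookup_sketch is illustrative, not part of the tree.

    /* Sketch only: the lookup that replaces comm->c_pml_procs[rank]->proc_ompi.
       The real ompi_comm_peer_lookup() may carry extra sanity checks. */
    static inline ompi_proc_t* peer_lookup_sketch(ompi_communicator_t* comm, int rank)
    {
        /* the remote group already holds one ompi_proc_t* per peer */
        return comm->c_remote_group->grp_proc_pointers[rank];
    }

    /* Typical converted call site, as seen in the MTL/OSC hunks below:
       ompi_proc_t* ompi_proc = ompi_comm_peer_lookup( comm, dest );
       mca_mtl_mx_endpoint_t* ep = (mca_mtl_mx_endpoint_t*) ompi_proc->proc_pml; */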

View file

@@ -264,7 +264,6 @@ static void ompi_comm_construct(ompi_communicator_t* comm)
comm->c_remote_group = NULL;
comm->error_handler = NULL;
comm->c_pml_comm = NULL;
comm->c_pml_procs = NULL;
comm->c_topo = NULL;
comm->c_topo_component = NULL;
comm->c_topo_comm = NULL;

View file

@@ -136,7 +136,6 @@ struct ompi_communicator_t {
/* Hooks for PML to hang things */
struct mca_pml_comm_t *c_pml_comm;
struct mca_pml_proc_t **c_pml_procs;
mca_coll_base_module_1_0_0_t c_coll;
/**< Selected collective module, saved by value for speed (instead

View file

@@ -41,9 +41,8 @@ ompi_mtl_mx_send(struct mca_mtl_base_module_t* mtl,
size_t length;
mx_status_t mx_status;
uint32_t result;
mca_mtl_mx_endpoint_t* mx_endpoint =
(mca_mtl_mx_endpoint_t*) comm->c_pml_procs[dest]->proc_ompi->proc_pml;
ompi_proc_t* ompi_proc = ompi_comm_peer_lookup( comm, dest );
mca_mtl_mx_endpoint_t* mx_endpoint = (mca_mtl_mx_endpoint_t*) ompi_proc->proc_pml;
assert(mtl == &ompi_mtl_mx.super);
@@ -139,10 +138,8 @@ ompi_mtl_mx_isend(struct mca_mtl_base_module_t* mtl,
int ret;
mca_mtl_mx_request_t * mtl_mx_request = (mca_mtl_mx_request_t*) mtl_request;
size_t length;
mca_mtl_mx_endpoint_t* mx_endpoint =
(mca_mtl_mx_endpoint_t*) comm->c_pml_procs[dest]->proc_ompi->proc_pml;
ompi_proc_t* ompi_proc = ompi_comm_peer_lookup( comm, dest );
mca_mtl_mx_endpoint_t* mx_endpoint = (mca_mtl_mx_endpoint_t*) ompi_proc->proc_pml;
assert(mtl == &ompi_mtl_mx.super);

View file

@@ -240,7 +240,8 @@ ompi_mtl_portals_irecv(struct mca_mtl_base_module_t* mtl,
remote_proc.nid = PTL_NID_ANY;
remote_proc.pid = PTL_PID_ANY;
} else {
endpoint = (mca_mtl_base_endpoint_t*) comm->c_pml_procs[src]->proc_ompi->proc_pml;
ompi_proc_t* ompi_proc = ompi_comm_peer_lookup( comm, src );
endpoint = (mca_mtl_base_endpoint_t*) ompi_proc->proc_pml;
remote_proc = endpoint->ptl_proc;
}

View file

@@ -118,8 +118,8 @@ ompi_mtl_portals_isend(struct mca_mtl_base_module_t* mtl,
ptl_md_t md;
ptl_handle_md_t md_h;
ptl_handle_me_t me_h;
mca_mtl_base_endpoint_t *endpoint =
(mca_mtl_base_endpoint_t*) comm->c_pml_procs[dest]->proc_ompi->proc_pml;
ompi_proc_t* ompi_proc = ompi_comm_peer_lookup( comm, dest );
mca_mtl_base_endpoint_t *endpoint = (mca_mtl_base_endpoint_t*) ompi_proc->proc_pml;
ompi_mtl_portals_request_t *ptl_request =
(ompi_mtl_portals_request_t*) mtl_request;
size_t buflen;

View file

@@ -41,9 +41,8 @@ ompi_mtl_psm_send(struct mca_mtl_base_module_t* mtl,
uint32_t flags = 0;
int ret;
size_t length;
mca_mtl_psm_endpoint_t* psm_endpoint =
(mca_mtl_psm_endpoint_t*) comm->c_pml_procs[dest]->proc_ompi->proc_pml;
ompi_proc_t* ompi_proc = ompi_comm_peer_lookup( comm, dest );
mca_mtl_psm_endpoint_t* psm_endpoint = (mca_mtl_psm_endpoint_t*) ompi_proc->proc_pml;
assert(mtl == &ompi_mtl_psm.super);
@@ -93,9 +92,8 @@ ompi_mtl_psm_isend(struct mca_mtl_base_module_t* mtl,
int ret;
mca_mtl_psm_request_t * mtl_psm_request = (mca_mtl_psm_request_t*) mtl_request;
size_t length;
mca_mtl_psm_endpoint_t* psm_endpoint =
(mca_mtl_psm_endpoint_t*) comm->c_pml_procs[dest]->proc_ompi->proc_pml;
ompi_proc_t* ompi_proc = ompi_comm_peer_lookup( comm, dest );
mca_mtl_psm_endpoint_t* psm_endpoint = (mca_mtl_psm_endpoint_t*)ompi_proc->proc_pml;
assert(mtl == &ompi_mtl_psm.super);

View file

@@ -475,7 +475,7 @@ ompi_osc_pt2pt_component_fragment_cb(ompi_osc_pt2pt_module_t *module,
}
/* create or get a pointer to our datatype */
proc = module->p2p_comm->c_pml_procs[header->hdr_origin]->proc_ompi;
proc = ompi_comm_peer_lookup( module->p2p_comm, header->hdr_origin );
datatype = ompi_osc_pt2pt_datatype_create(proc, &payload);
/* create replyreq sendreq */

View file

@@ -488,7 +488,7 @@ ompi_osc_pt2pt_sendreq_recv_put(ompi_osc_pt2pt_module_t *module,
int ret = OMPI_SUCCESS;
void *target = (unsigned char*) module->p2p_win->w_baseptr +
(header->hdr_target_disp * module->p2p_win->w_disp_unit);
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[header->hdr_origin]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, header->hdr_origin );
struct ompi_datatype_t *datatype =
ompi_osc_pt2pt_datatype_create(proc, &inbuf);
@@ -608,7 +608,7 @@ ompi_osc_pt2pt_sendreq_recv_accum(ompi_osc_pt2pt_module_t *module,
{
int ret = OMPI_SUCCESS;
struct ompi_op_t *op = ompi_osc_pt2pt_op_create(header->hdr_target_op);
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[header->hdr_origin]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, header->hdr_origin );
struct ompi_datatype_t *datatype =
ompi_osc_pt2pt_datatype_create(proc, &payload);

View file

@@ -73,7 +73,7 @@ ompi_osc_pt2pt_replyreq_alloc(ompi_osc_pt2pt_module_t *module,
{
int ret;
opal_free_list_item_t *item;
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[origin_rank]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, origin_rank );
/* BWB - FIX ME - is this really the right return code? */
if (NULL == proc) return OMPI_ERR_OUT_OF_RESOURCE;

View file

@@ -89,7 +89,7 @@ ompi_osc_pt2pt_sendreq_alloc(ompi_osc_pt2pt_module_t *module,
{
int ret;
opal_free_list_item_t *item;
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[target_rank]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, target_rank );
/* BWB - FIX ME - is this really the right return code? */
if (NULL == proc) return OMPI_ERR_OUT_OF_RESOURCE;

View file

@@ -406,7 +406,7 @@ ompi_osc_pt2pt_module_lock(int lock_type,
int assert,
ompi_win_t *win)
{
ompi_proc_t *proc = P2P_MODULE(win)->p2p_comm->c_pml_procs[target]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( P2P_MODULE(win)->p2p_comm, target );
assert(lock_type != 0);
@@ -435,7 +435,7 @@ ompi_osc_pt2pt_module_unlock(int target,
int32_t out_count;
opal_list_item_t *item;
int ret;
ompi_proc_t *proc = P2P_MODULE(win)->p2p_comm->c_pml_procs[target]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( P2P_MODULE(win)->p2p_comm, target );
while (0 == P2P_MODULE(win)->p2p_lock_received_ack) {
ompi_osc_pt2pt_progress_long(P2P_MODULE(win));
@@ -495,7 +495,7 @@ ompi_osc_pt2pt_passive_lock(ompi_osc_pt2pt_module_t *module,
{
bool send_ack = false;
int ret = OMPI_SUCCESS;
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[origin]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, origin );
ompi_osc_pt2pt_pending_lock_t *new_pending;
OPAL_THREAD_LOCK(&(module->p2p_lock));

View file

@@ -498,7 +498,7 @@ ompi_osc_rdma_component_fragment_cb(struct mca_btl_base_module_t *btl,
}
/* create or get a pointer to our datatype */
proc = module->p2p_comm->c_pml_procs[header->hdr_origin]->proc_ompi;
proc = ompi_comm_peer_lookup( module->p2p_comm, header->hdr_origin );
datatype = ompi_osc_rdma_datatype_create(proc, &payload);
/* create replyreq sendreq */

View file

@@ -496,7 +496,7 @@ ompi_osc_rdma_sendreq_recv_put(ompi_osc_rdma_module_t *module,
int ret = OMPI_SUCCESS;
void *target = (unsigned char*) module->p2p_win->w_baseptr +
(header->hdr_target_disp * module->p2p_win->w_disp_unit);
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[header->hdr_origin]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, header->hdr_origin );
struct ompi_datatype_t *datatype =
ompi_osc_rdma_datatype_create(proc, &inbuf);
@@ -616,7 +616,7 @@ ompi_osc_rdma_sendreq_recv_accum(ompi_osc_rdma_module_t *module,
{
int ret = OMPI_SUCCESS;
struct ompi_op_t *op = ompi_osc_rdma_op_create(header->hdr_target_op);
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[header->hdr_origin]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, header->hdr_origin );
struct ompi_datatype_t *datatype =
ompi_osc_rdma_datatype_create(proc, &payload);

View file

@@ -73,7 +73,7 @@ ompi_osc_rdma_replyreq_alloc(ompi_osc_rdma_module_t *module,
{
int ret;
opal_free_list_item_t *item;
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[origin_rank]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, origin_rank );
/* BWB - FIX ME - is this really the right return code? */
if (NULL == proc) return OMPI_ERR_OUT_OF_RESOURCE;

View file

@@ -89,7 +89,7 @@ ompi_osc_rdma_sendreq_alloc(ompi_osc_rdma_module_t *module,
{
int ret;
opal_free_list_item_t *item;
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[target_rank]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, target_rank );
/* BWB - FIX ME - is this really the right return code? */
if (NULL == proc) return OMPI_ERR_OUT_OF_RESOURCE;

View file

@@ -441,7 +441,7 @@ ompi_osc_rdma_module_lock(int lock_type,
int assert,
ompi_win_t *win)
{
ompi_proc_t *proc = P2P_MODULE(win)->p2p_comm->c_pml_procs[target]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( P2P_MODULE(win)->p2p_comm, target );
assert(lock_type != 0);
@@ -470,7 +470,7 @@ ompi_osc_rdma_module_unlock(int target,
int32_t out_count;
opal_list_item_t *item;
int ret;
ompi_proc_t *proc = P2P_MODULE(win)->p2p_comm->c_pml_procs[target]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( P2P_MODULE(win)->p2p_comm, target );
while (0 == P2P_MODULE(win)->p2p_lock_received_ack) {
ompi_osc_rdma_progress(P2P_MODULE(win));
@@ -530,7 +530,7 @@ ompi_osc_rdma_passive_lock(ompi_osc_rdma_module_t *module,
{
bool send_ack = false;
int ret = OMPI_SUCCESS;
ompi_proc_t *proc = module->p2p_comm->c_pml_procs[origin]->proc_ompi;
ompi_proc_t *proc = ompi_comm_peer_lookup( module->p2p_comm, origin );
ompi_osc_rdma_pending_lock_t *new_pending;
OPAL_THREAD_LOCK(&(module->p2p_lock));

View file

@@ -29,8 +29,6 @@ local_sources = \
pml_cm_component.c \
pml_cm_component.h \
pml_cm_probe.c \
pml_cm_proc.h \
pml_cm_proc.c \
pml_cm_recv.c \
pml_cm_recvreq.h \
pml_cm_recvreq.c \

View file

@@ -16,7 +16,6 @@
#include "ompi/mca/pml/base/pml_base_bsend.h"
#include "pml_cm.h"
#include "pml_cm_proc.h"
#include "pml_cm_sendreq.h"
#include "pml_cm_recvreq.h"
@@ -56,26 +55,9 @@ mca_pml_cm_enable(bool enable)
int
mca_pml_cm_add_comm(ompi_communicator_t* comm)
{
mca_pml_cm_proc_t *pml_proc;
int i;
/* setup our per-communicator data */
comm->c_pml_comm = NULL;
/* setup our proc cache on the communicator. This should be
something that can be safely cast to a mca_pml_proc_t* */
comm->c_pml_procs = (mca_pml_proc_t**) malloc(
comm->c_remote_group->grp_proc_count * sizeof(mca_pml_proc_t*));
if(NULL == comm->c_pml_procs) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
for(i = 0 ; i < comm->c_remote_group->grp_proc_count ; i++){
pml_proc = OBJ_NEW(mca_pml_cm_proc_t);
pml_proc->base.proc_ompi = comm->c_remote_group->grp_proc_pointers[i];
comm->c_pml_procs[i] = (mca_pml_proc_t*) pml_proc;
}
return OMPI_SUCCESS;
}
@@ -83,22 +65,9 @@ mca_pml_cm_add_comm(ompi_communicator_t* comm)
int
mca_pml_cm_del_comm(ompi_communicator_t* comm)
{
int i;
/* clean up our per-communicator data */
comm->c_pml_comm = NULL;
/* clean up our proc cache on the communicator */
if (comm->c_pml_procs != NULL) {
for(i = 0 ; i < comm->c_remote_group->grp_proc_count ; i++){
mca_pml_cm_proc_t *pml_proc =
(mca_pml_cm_proc_t*) comm->c_pml_procs[i];
OBJ_RELEASE(pml_proc);
}
free(comm->c_pml_procs);
comm->c_pml_procs = NULL;
}
return OMPI_SUCCESS;
}

View file

@@ -1,37 +0,0 @@
/*
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "opal/sys/atomic.h"
#include "pml_cm.h"
#include "pml_cm_proc.h"
static void mca_pml_cm_proc_construct(mca_pml_cm_proc_t* proc)
{
proc->base.proc_ompi = NULL;
OBJ_CONSTRUCT(&proc->base.proc_lock, opal_mutex_t);
}
static void mca_pml_cm_proc_destruct(mca_pml_cm_proc_t* proc)
{
OBJ_DESTRUCT(&proc->base.proc_lock);
}
OBJ_CLASS_INSTANCE(
mca_pml_cm_proc_t,
opal_list_item_t,
mca_pml_cm_proc_construct,
mca_pml_cm_proc_destruct
);

View file

@@ -1,39 +0,0 @@
/*
* Copyright (c) 2004-2006 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_PML_CM_PROC_H
#define MCA_PML_CM_PROC_H
#include "opal/threads/mutex.h"
#include "ompi/communicator/communicator.h"
#include "ompi/group/group.h"
#include "ompi/proc/proc.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/mtl/mtl.h"
struct mca_mtl_base_procinfo_t;
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
struct mca_pml_cm_proc_t {
mca_pml_proc_t base;
};
typedef struct mca_pml_cm_proc_t mca_pml_cm_proc_t;
OMPI_DECLSPEC extern opal_class_t mca_pml_cm_proc_t_class;
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif

View file

@@ -102,8 +102,7 @@ do { \
if( MPI_ANY_SOURCE == src ) { \
ompi_proc = ompi_proc_local_proc; \
} else { \
ompi_proc = \
comm->c_pml_procs[src]->proc_ompi; \
ompi_proc = ompi_comm_peer_lookup( comm, src ); \
} \
ompi_convertor_copy_and_prepare_for_recv( \
ompi_proc->proc_convertor, \
@@ -139,8 +138,7 @@ do { \
if( MPI_ANY_SOURCE == src ) { \
ompi_proc = ompi_proc_local_proc; \
} else { \
ompi_proc = \
comm->c_pml_procs[src]->proc_ompi; \
ompi_proc = ompi_comm_peer_lookup( comm, src ); \
} \
ompi_convertor_copy_and_prepare_for_recv( \
ompi_proc->proc_convertor, \

View file

@@ -63,8 +63,7 @@ OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_hvy_send_request_t);
{ \
do{ \
ompi_free_list_item_t* item; \
ompi_proc = \
comm->c_pml_procs[dst]->proc_ompi; \
ompi_proc = ompi_comm_peer_lookup( comm, dst ); \
\
if(NULL == ompi_proc) { \
rc = OMPI_ERR_OUT_OF_RESOURCE; \
@@ -83,8 +82,7 @@ OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_cm_hvy_send_request_t);
ompi_proc, rc) \
{ \
ompi_free_list_item_t* item; \
ompi_proc = \
comm->c_pml_procs[dst]->proc_ompi; \
ompi_proc = ompi_comm_peer_lookup( comm, dst ); \
if(NULL == ompi_proc) { \
rc = OMPI_ERR_OUT_OF_RESOURCE; \
sendreq = NULL; \

View file

@@ -30,8 +30,6 @@ dr_sources = \
pml_dr_iprobe.c \
pml_dr_irecv.c \
pml_dr_isend.c \
pml_dr_proc.c \
pml_dr_proc.h \
pml_dr_progress.c \
pml_dr_recvfrag.c \
pml_dr_recvfrag.h \

View file

@@ -29,7 +29,6 @@
#include "pml_dr.h"
#include "pml_dr_component.h"
#include "pml_dr_comm.h"
#include "pml_dr_proc.h"
#include "pml_dr_hdr.h"
#include "pml_dr_recvfrag.h"
#include "pml_dr_sendreq.h"
@@ -76,7 +75,6 @@ int mca_pml_dr_add_comm(ompi_communicator_t* comm)
{
/* allocate pml specific comm data */
mca_pml_dr_comm_t* pml_comm = OBJ_NEW(mca_pml_dr_comm_t);
mca_pml_dr_proc_t* pml_proc = NULL;
int i;
if (NULL == pml_comm) {
@@ -84,16 +82,9 @@ int mca_pml_dr_add_comm(ompi_communicator_t* comm)
}
mca_pml_dr_comm_init(pml_comm, comm);
comm->c_pml_comm = pml_comm;
comm->c_pml_procs = (mca_pml_proc_t**)malloc(
comm->c_remote_group->grp_proc_count * sizeof(mca_pml_proc_t));
if(NULL == comm->c_pml_procs) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
for(i=0; i<comm->c_remote_group->grp_proc_count; i++){
pml_proc = OBJ_NEW(mca_pml_dr_proc_t);
pml_proc->base.proc_ompi = comm->c_remote_group->grp_proc_pointers[i];
comm->c_pml_procs[i] = (mca_pml_proc_t*) pml_proc; /* comm->c_remote_group->grp_proc_pointers[i]->proc_pml; */
for( i = 0; i < comm->c_remote_group->grp_proc_count; i++ ) {
pml_comm->procs[i].ompi_proc = comm->c_remote_group->grp_proc_pointers[i];
}
return OMPI_SUCCESS;
}
@@ -102,15 +93,9 @@ int mca_pml_dr_del_comm(ompi_communicator_t* comm)
{
OBJ_RELEASE(comm->c_pml_comm);
comm->c_pml_comm = NULL;
if(comm->c_pml_procs != NULL)
free(comm->c_pml_procs);
comm->c_pml_procs = NULL;
return OMPI_SUCCESS;
}
/*
* For each proc setup a datastructure that indicates the PTLs
* that can be used to reach the destination.

View file

@@ -26,7 +26,6 @@
#include "opal/mca/base/mca_base_param.h"
#include "ompi/mca/pml/base/pml_base_bsend.h"
#include "pml_dr.h"
#include "pml_dr_proc.h"
#include "pml_dr_hdr.h"
#include "pml_dr_sendreq.h"
#include "pml_dr_recvreq.h"

View file

@@ -19,7 +19,6 @@
#include "ompi_config.h"
#include "pml_dr.h"
#include "pml_dr_proc.h"
#include "pml_dr_sendreq.h"
#include "pml_dr_recvreq.h"

View file

@@ -1,45 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2006 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "opal/sys/atomic.h"
#include "pml_dr.h"
#include "pml_dr_proc.h"
static void mca_pml_dr_proc_construct(mca_pml_dr_proc_t* proc)
{
proc->base.proc_ompi = NULL;
OBJ_CONSTRUCT(&proc->base.proc_lock, opal_mutex_t);
}
static void mca_pml_dr_proc_destruct(mca_pml_dr_proc_t* proc)
{
OBJ_DESTRUCT(&proc->base.proc_lock);
}
OBJ_CLASS_INSTANCE(
mca_pml_dr_proc_t,
opal_list_item_t,
mca_pml_dr_proc_construct,
mca_pml_dr_proc_destruct
);

View file

@@ -1,47 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2006 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_PML_PROC_H
#define MCA_PML_PROC_H
#include "opal/threads/mutex.h"
#include "ompi/communicator/communicator.h"
#include "ompi/group/group.h"
#include "ompi/proc/proc.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
/**
* Structure associated w/ ompi_proc_t that contains data specific
* to the PML. Note that this name is not PML specific.
*/
struct mca_pml_dr_proc_t {
mca_pml_proc_t base;
};
typedef struct mca_pml_dr_proc_t mca_pml_dr_proc_t;
OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_pml_dr_proc_t);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif

View file

@@ -27,7 +27,6 @@
#include "pml_dr.h"
#include "pml_dr_hdr.h"
#include "pml_dr_proc.h"
#include "pml_dr_vfrag.h"
#include "pml_dr_comm.h"

View file

@@ -29,7 +29,6 @@
#include "ompi/mca/mpool/mpool.h"
#include "pml_dr.h"
#include "pml_dr_hdr.h"
#include "pml_dr_proc.h"
#include "pml_dr_sendreq.h"
#include "pml_dr_recvreq.h"
#include "ompi/mca/bml/base/base.h"

View file

@@ -30,7 +30,6 @@
#include "ompi/mca/bml/bml.h"
#include "ompi/mca/btl/btl.h"
#include "pml_dr_proc.h"
#include "pml_dr_comm.h"
#include "pml_dr_hdr.h"
#include "pml_dr_vfrag.h"
@@ -73,8 +72,7 @@ OBJ_CLASS_DECLARATION(mca_pml_dr_send_request_t);
sendreq, \
rc) \
{ \
ompi_proc_t *proc = \
comm->c_pml_procs[dst]->proc_ompi; \
ompi_proc_t *proc = ompi_comm_peer_lookup( comm, dst ); \
ompi_free_list_item_t* item; \
\
if(NULL == proc) { \

View file

@@ -30,8 +30,6 @@ ob1_sources = \
pml_ob1_iprobe.c \
pml_ob1_irecv.c \
pml_ob1_isend.c \
pml_ob1_proc.c \
pml_ob1_proc.h \
pml_ob1_progress.c \
pml_ob1_rdma.c \
pml_ob1_rdma.h \

View file

@@ -28,7 +28,6 @@
#include "pml_ob1.h"
#include "pml_ob1_component.h"
#include "pml_ob1_comm.h"
#include "pml_ob1_proc.h"
#include "pml_ob1_hdr.h"
#include "pml_ob1_recvfrag.h"
#include "pml_ob1_sendreq.h"
@@ -76,7 +75,6 @@ int mca_pml_ob1_add_comm(ompi_communicator_t* comm)
{
/* allocate pml specific comm data */
mca_pml_ob1_comm_t* pml_comm = OBJ_NEW(mca_pml_ob1_comm_t);
mca_pml_ob1_proc_t* pml_proc = NULL;
int i;
if (NULL == pml_comm) {
@@ -84,17 +82,9 @@ int mca_pml_ob1_add_comm(ompi_communicator_t* comm)
}
mca_pml_ob1_comm_init_size(pml_comm, comm->c_remote_group->grp_proc_count);
comm->c_pml_comm = pml_comm;
comm->c_pml_procs = (mca_pml_proc_t**)malloc(
comm->c_remote_group->grp_proc_count * sizeof(mca_pml_proc_t));
if(NULL == comm->c_pml_procs) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
for(i=0; i<comm->c_remote_group->grp_proc_count; i++){
pml_proc = OBJ_NEW(mca_pml_ob1_proc_t);
pml_proc->base.proc_ompi = comm->c_remote_group->grp_proc_pointers[i];
comm->c_pml_procs[i] = (mca_pml_proc_t*) pml_proc; /* comm->c_remote_group->grp_proc_pointers[i]->proc_pml; */
pml_comm->procs[i].proc_ompi = comm->c_remote_group->grp_proc_pointers[i];
for( i = 0; i < comm->c_remote_group->grp_proc_count; i++ ) {
pml_comm->procs[i].ompi_proc = comm->c_remote_group->grp_proc_pointers[i];
}
return OMPI_SUCCESS;
}
@@ -103,9 +93,6 @@ int mca_pml_ob1_del_comm(ompi_communicator_t* comm)
{
OBJ_RELEASE(comm->c_pml_comm);
comm->c_pml_comm = NULL;
if(comm->c_pml_procs != NULL)
free(comm->c_pml_procs);
comm->c_pml_procs = NULL;
return OMPI_SUCCESS;
}
@@ -205,7 +192,7 @@ int mca_pml_ob1_dump(struct ompi_communicator_t* comm, int verbose)
/* iterate through all procs on communicator */
for(i=0; i<pml_comm->num_procs; i++) {
mca_pml_ob1_comm_proc_t* proc = &pml_comm->procs[i];
mca_bml_base_endpoint_t* ep = (mca_bml_base_endpoint_t*)proc->proc_ompi->proc_bml;
mca_bml_base_endpoint_t* ep = (mca_bml_base_endpoint_t*)proc->ompi_proc->proc_bml;
size_t n;
opal_output(0, "[Rank %d]\n", i);

View file

@@ -28,7 +28,7 @@ static void mca_pml_ob1_comm_proc_construct(mca_pml_ob1_comm_proc_t* proc)
{
proc->expected_sequence = 1;
proc->send_sequence = 0;
proc->proc_ompi = NULL;
proc->ompi_proc = NULL;
OBJ_CONSTRUCT(&proc->frags_cant_match, opal_list_t);
OBJ_CONSTRUCT(&proc->specific_receives, opal_list_t);
OBJ_CONSTRUCT(&proc->unexpected_frags, opal_list_t);

View file

@@ -32,7 +32,7 @@ extern "C" {
struct mca_pml_ob1_comm_proc_t {
opal_object_t super;
uint16_t expected_sequence; /**< send message sequence number - receiver side */
struct ompi_proc_t* proc_ompi;
struct ompi_proc_t* ompi_proc;
#if OMPI_HAVE_THREAD_SUPPORT
volatile int32_t send_sequence; /**< send side sequence number */
#else

View file

@@ -26,7 +26,6 @@
#include "opal/mca/base/mca_base_param.h"
#include "ompi/mca/pml/base/pml_base_bsend.h"
#include "pml_ob1.h"
#include "pml_ob1_proc.h"
#include "pml_ob1_hdr.h"
#include "pml_ob1_sendreq.h"
#include "pml_ob1_recvreq.h"

View file

@@ -19,7 +19,6 @@
#include "ompi_config.h"
#include "pml_ob1.h"
#include "pml_ob1_proc.h"
#include "pml_ob1_sendreq.h"
#include "pml_ob1_recvreq.h"
#include "ompi/peruse/peruse-internal.h"

View file

@@ -1,45 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "opal/sys/atomic.h"
#include "pml_ob1.h"
#include "pml_ob1_proc.h"
static void mca_pml_ob1_proc_construct(mca_pml_ob1_proc_t* proc)
{
proc->base.proc_ompi = NULL;
OBJ_CONSTRUCT(&proc->base.proc_lock, opal_mutex_t);
}
static void mca_pml_ob1_proc_destruct(mca_pml_ob1_proc_t* proc)
{
OBJ_DESTRUCT(&proc->base.proc_lock);
}
OBJ_CLASS_INSTANCE(
mca_pml_ob1_proc_t,
opal_list_item_t,
mca_pml_ob1_proc_construct,
mca_pml_ob1_proc_destruct
);

View file

@@ -1,46 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_PML_PROC_H
#define MCA_PML_PROC_H
#include "opal/threads/mutex.h"
#include "ompi/communicator/communicator.h"
#include "ompi/group/group.h"
#include "ompi/proc/proc.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
/**
* Structure associated w/ ompi_proc_t that contains data specific
* to the PML. Note that this name is not PML specific.
*/
struct mca_pml_ob1_proc_t {
mca_pml_proc_t base;
};
typedef struct mca_pml_ob1_proc_t mca_pml_ob1_proc_t;
OMPI_DECLSPEC extern opal_class_t mca_pml_ob1_proc_t_class;
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif

View file

@@ -210,7 +210,7 @@ do { \
if ( (frag_tag == recv_tag) || \
( (recv_tag == OMPI_ANY_TAG) && (0 <= frag_tag) ) ) { \
\
generic_recv->req_recv.req_base.req_proc = proc->proc_ompi; \
generic_recv->req_recv.req_base.req_proc = proc->ompi_proc; \
/* Match made */ \
return_match = generic_recv; \
\

View file

@@ -977,7 +977,7 @@ static mca_pml_ob1_recv_frag_t* mca_pml_ob1_recv_request_match_specific_proc(
}
return NULL;
find_fragment:
request->req_recv.req_base.req_proc = proc->proc_ompi;
request->req_recv.req_base.req_proc = proc->ompi_proc;
if( !((MCA_PML_REQUEST_IPROBE == request->req_recv.req_base.req_type) ||
(MCA_PML_REQUEST_PROBE == request->req_recv.req_base.req_type)) ) {
PERUSE_TRACE_MSG_EVENT( PERUSE_COMM_MSG_REMOVE_FROM_UNEX_Q,

View file

@@ -22,7 +22,6 @@
#define OMPI_PML_OB1_RECV_REQUEST_H
#include "pml_ob1.h"
#include "pml_ob1_proc.h"
#include "pml_ob1_rdma.h"
#include "pml_ob1_rdmafrag.h"
#include "ompi/proc/proc.h"
@@ -105,7 +104,7 @@ do { \
persistent); \
if( MPI_ANY_SOURCE != src ) { \
(request)->req_recv.req_base.req_proc = \
comm->c_pml_comm->procs[src].proc_ompi; \
comm->c_pml_comm->procs[src].ompi_proc; \
if( (0 != (datatype)->size) && (0 != count) ) { \
ompi_convertor_copy_and_prepare_for_recv( \
(request)->req_recv.req_base.req_proc->proc_convertor, \

View file

@@ -25,7 +25,6 @@
#include "ompi/mca/mpool/mpool.h"
#include "pml_ob1.h"
#include "pml_ob1_hdr.h"
#include "pml_ob1_proc.h"
#include "pml_ob1_sendreq.h"
#include "pml_ob1_rdmafrag.h"
#include "pml_ob1_recvreq.h"

View file

@@ -22,7 +22,6 @@
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/pml/base/pml_base_sendreq.h"
#include "ompi/mca/mpool/base/base.h"
#include "pml_ob1_proc.h"
#include "pml_ob1_comm.h"
#include "pml_ob1_hdr.h"
#include "pml_ob1_rdma.h"
@@ -72,8 +71,7 @@ OBJ_CLASS_DECLARATION(mca_pml_ob1_send_request_t);
sendreq, \
rc) \
{ \
ompi_proc_t *proc = \
comm->c_pml_procs[dst]->proc_ompi; \
ompi_proc_t *proc = ompi_comm_peer_lookup( comm, dst ); \
ompi_free_list_item_t* item; \
\
if(NULL == proc) { \

View file

@@ -77,9 +77,7 @@ typedef uint64_t mca_pml_sequence_t;
/**
* Base PML proc structure
*
* Base PML structure for caching proc information on a communicator.
* A PML should maintain an array of pointers to mca_pml_proc_t
* structures in the c_pml_procs structure of every communicator.
* Base PML structure for storing proc information.
* Note that the mca_pml_proc_t structure can not be instantiated
* directly, so each PML *must* provide a class that inherits from
* this class and provides the necessary integration logic.
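
The deleted pml_dr_proc.h/.c and pml_ob1_proc.h/.c files above are instances of exactly this inheritance pattern; condensed into a sketch here for reference (the PML name "xyz" is a placeholder, not a real component):

    /* Sketch of the pattern the comment describes, condensed from the deleted
       pml_ob1_proc.[ch]; "xyz" stands in for a concrete PML name. */
    struct mca_pml_xyz_proc_t {
        mca_pml_proc_t base;            /* base class embedded as the first member */
    };
    typedef struct mca_pml_xyz_proc_t mca_pml_xyz_proc_t;

    static void mca_pml_xyz_proc_construct(mca_pml_xyz_proc_t* proc)
    {
        proc->base.proc_ompi = NULL;
        OBJ_CONSTRUCT(&proc->base.proc_lock, opal_mutex_t);
    }

    static void mca_pml_xyz_proc_destruct(mca_pml_xyz_proc_t* proc)
    {
        OBJ_DESTRUCT(&proc->base.proc_lock);
    }

    OBJ_CLASS_INSTANCE(mca_pml_xyz_proc_t, opal_list_item_t,
                       mca_pml_xyz_proc_construct, mca_pml_xyz_proc_destruct);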