
Clean up a bunch of warnings. Most were innocent enough (uninitialized variables that probably wouldn't cause problems), but a couple could be problematic.
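As a rough illustration only (hypothetical names, not a file from this commit), this is the kind of pattern those warnings point at: a local that the compiler cannot prove is assigned on every path, silenced by initializing it at the point of declaration, as the hunks below do.

/* illustrative sketch; not part of this commit */
#include <stdio.h>

static int lookup(int key, int *value)
{
    if (key < 0) {
        return -1;          /* *value is left untouched on this path */
    }
    *value = key * 2;
    return 0;
}

int main(void)
{
    int value = 0;          /* initializing at declaration, as in the diffs below,
                               silences the "may be used uninitialized" warning */
    if (lookup(3, &value) == 0) {
        printf("%d\n", value);
    }
    return 0;
}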

One warning on my list was left unfixed: the "mca_ptl_tcp_peer_dump" function is defined but not used. It is marked in the file as a diagnostic, so I left it alone since someone may want to use it for debugging. For now, it is not called anywhere in the code.
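For context, a minimal sketch (hypothetical names, not the actual TCP PTL code) of how a "defined but not used" warning arises and why a debugging aid like this can reasonably be left in place:

#include <stdio.h>

/* stands in for mca_ptl_tcp_peer_dump: a static diagnostic that nothing calls,
   so the compiler reports "'dump_peer' defined but not used" */
static void dump_peer(int fd)
{
    fprintf(stderr, "peer fd=%d\n", fd);
}

int main(void)
{
    /* intentionally never called; invoke it by hand while debugging,
       e.g. dump_peer(42); */
    return 0;
}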

This commit was SVN r3703.
This commit is contained in:
Ralph Castain 2004-12-06 02:15:34 +00:00
parent 1adf5cb3a0
commit b9a1132450
13 changed files with 42 additions and 38 deletions

View file

@@ -61,7 +61,7 @@ const mca_llm_base_component_1_0_0_t mca_llm_hostfile_component = {
  * component variables handles
  */
 static int param_filename;
-static int param_filename_deprecated;
+/* static int param_filename_deprecated; */
 static int param_priority;
 int

View file

@@ -323,9 +323,9 @@ mca_pcm_base_job_list_get_job_info(mca_pcm_base_job_list_t *me,
                                    bool remove_started_pids)
 {
     int ret = OMPI_ERR_NOT_FOUND;
-    ompi_list_item_t *job_item, *pid_item;
-    mca_pcm_base_pids_t *pids;
-    mca_pcm_base_job_item_t *jobs;
+    ompi_list_item_t *job_item=NULL, *pid_item=NULL;
+    mca_pcm_base_pids_t *pids=NULL;
+    mca_pcm_base_job_item_t *jobs=NULL;
     OMPI_LOCK(me->jobs_mutex);

View file

@@ -237,7 +237,7 @@ mca_pcm_base_kill_cb(int status, ompi_process_name_t *peer,
 static int
 kill_start_recv(void)
 {
-    int rc;
+    int rc=0;
     if (! have_initialized_recv) {
         OMPI_LOCK(&mutex);

View file

@@ -122,7 +122,7 @@ mca_pcm_rsh_init(int *priority,
     mca_base_param_lookup_string(mca_pcm_rsh_param_agent,
                                  &(me->rsh_agent));
     mca_base_param_lookup_int(mca_pcm_rsh_param_delay_time,
-                              &(me->delay_time));
+                              (int*)&(me->delay_time));
     ret = mca_llm_base_select("rsh", &(me->llm), have_threads);

View file

@@ -201,14 +201,14 @@ internal_need_profile(mca_pcm_rsh_module_t *me,
                       mca_llm_base_hostfile_node_t *start_node,
                       int stderr_is_error, bool *needs_profile)
 {
-    struct passwd *p;
+    struct passwd *p=NULL;
     char shellpath[PRS_BUFSIZE];
     char** cmdv = NULL;
     char *cmd0 = NULL;
     int cmdc = 0;
     char *printable = NULL;
     char *username = NULL;
-    int ret;
+    int ret=0;
     /*
      * Figure out if we need to source the .profile on the other side.
@@ -463,7 +463,8 @@ proc_cleanup:
     mca_pcm_base_job_list_add_job_info(me->jobs,
                                        jobid, pid, my_start_vpid,
                                        my_start_vpid + num_procs - 1);
-    ret = mca_pcm_base_kill_register(me, jobid, my_start_vpid,
+    ret = mca_pcm_base_kill_register((mca_pcm_base_module_t*)me,
+                                     jobid, my_start_vpid,
                                      my_start_vpid + num_procs - 1);
     if (ret != OMPI_SUCCESS) goto cleanup;
     ret = ompi_rte_wait_cb(pid, internal_wait_cb, me);
@@ -504,7 +505,7 @@ internal_wait_cb(pid_t pid, int status, void *data)
     mca_ns_base_vpid_t lower = 0;
     mca_ns_base_vpid_t i = 0;
     int ret;
-    char *proc_name;
+    ompi_process_name_t *proc_name;
     mca_pcm_rsh_module_t *me = (mca_pcm_rsh_module_t*) data;
     ompi_rte_process_status_t proc_status;
@@ -528,5 +529,5 @@ internal_wait_cb(pid_t pid, int status, void *data)
         free(proc_name);
     }
-    mca_pcm_base_kill_unregister(me, jobid, lower, upper);
+    mca_pcm_base_kill_unregister((mca_pcm_base_module_t*)me, jobid, lower, upper);
 }

View file

@@ -42,15 +42,15 @@ int mca_pml_base_select(mca_pml_base_module_t *selected,
                         bool *allow_multi_user_threads,
                         bool *have_hidden_threads)
 {
-    int priority, best_priority;
-    bool user_threads, hidden_threads;
-    bool best_user_threads, best_hidden_threads;
-    ompi_list_item_t *item;
-    mca_base_component_list_item_t *cli;
-    mca_pml_base_component_t *component, *best_component;
-    mca_pml_base_module_t *modules;
+    int priority=0, best_priority=0;
+    bool user_threads=false, hidden_threads=false;
+    bool best_user_threads=false, best_hidden_threads=false;
+    ompi_list_item_t *item=NULL;
+    mca_base_component_list_item_t *cli=NULL;
+    mca_pml_base_component_t *component=NULL, *best_component=NULL;
+    mca_pml_base_module_t *modules=NULL;
     ompi_list_t opened;
-    opened_component_t *om;
+    opened_component_t *om=NULL;
     /* Traverse the list of available components; call their init
        functions. */

View file

@@ -90,8 +90,8 @@ typedef struct mca_pml_base_send_request_t mca_pml_base_send_request_t;
         request->req_base.req_addr = addr; \
         request->req_base.req_count = count; \
         request->req_base.req_datatype = datatype; \
-        request->req_base.req_peer = peer; \
-        request->req_base.req_tag = tag; \
+        request->req_base.req_peer = (int32_t)peer; \
+        request->req_base.req_tag = (int32_t)tag; \
         request->req_base.req_comm = comm; \
         request->req_base.req_proc = ompi_comm_peer_lookup(comm,peer); \
         request->req_base.req_persistent = persistent; \
@@ -114,7 +114,7 @@ typedef struct mca_pml_base_send_request_t mca_pml_base_send_request_t;
                                   request->req_base.req_addr, \
                                   0, NULL ); \
         ompi_convertor_get_packed_size( &request->req_convertor, \
-                                        &(request->req_bytes_packed) ); \
+                                        (uint32_t*)&(request->req_bytes_packed) ); \
     } else { \
         request->req_bytes_packed = 0; \
     } \

View file

@@ -40,7 +40,8 @@ int mca_pml_teg_isend_init(void *buf,
                            buf,
                            count,
                            datatype,
-                           dst, tag, comm, sendmode, true);
+                           dst, tag,
+                           comm, sendmode, true);
     *request = (ompi_request_t *) sendreq;
     return OMPI_SUCCESS;
@@ -65,7 +66,8 @@ int mca_pml_teg_isend(void *buf,
                       buf,
                       count,
                       datatype,
-                      dst, tag, comm, sendmode, false);
+                      dst, tag,
+                      comm, sendmode, false);
     MCA_PML_TEG_SEND_REQUEST_START(sendreq, rc);
     if (rc != OMPI_SUCCESS)
@@ -93,7 +95,8 @@ int mca_pml_teg_send(void *buf,
                      buf,
                      count,
                      datatype,
-                     dst, tag, comm, sendmode, false);
+                     dst, tag,
+                     comm, sendmode, false);
     MCA_PML_TEG_SEND_REQUEST_START(sendreq, rc);
     if (rc != OMPI_SUCCESS) {

View file

@@ -450,8 +450,8 @@ static int mca_ptl_tcp_component_create_listen(void)
 static int mca_ptl_tcp_component_exchange(void)
 {
-    int rc;
-    size_t i;
+    int rc=0;
+    size_t i=0;
     size_t size = mca_ptl_tcp_component.tcp_num_ptl_modules * sizeof(mca_ptl_tcp_addr_t);
     if(mca_ptl_tcp_component.tcp_num_ptl_modules != 0) {
         mca_ptl_tcp_addr_t *addrs = (mca_ptl_tcp_addr_t *)malloc(size);

View file

@@ -37,9 +37,9 @@ int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_o
                             int root, MPI_Comm comm, MPI_Comm *intercomm,
                             int *array_of_errcodes)
 {
-    int i, rc, rank, tag;
+    int i=0, rc=0, rank=0, tag=0;
     int totalnumprocs=0;
-    ompi_communicator_t *newcomp;
+    ompi_communicator_t *newcomp=NULL;
     int send_first=0; /* they are contacting us first */
     if ( MPI_PARAM_CHECK ) {

View file

@@ -37,11 +37,11 @@ int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
                          MPI_Comm bridge_comm, int remote_leader,
                          int tag, MPI_Comm *newintercomm)
 {
-    int local_size, local_rank;
-    int lleader, rleader;
-    ompi_communicator_t *newcomp;
+    int local_size=0, local_rank=0;
+    int lleader=0, rleader=0;
+    ompi_communicator_t *newcomp=NULL;
     ompi_proc_t **rprocs=NULL;
-    int rc, rsize;
+    int rc=0, rsize=0;
     if ( MPI_PARAM_CHECK ) {
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

View file

@@ -92,12 +92,12 @@ int MPI_Sendrecv_replace(void * buf, int count, MPI_Datatype datatype,
         /* setup a buffer for recv */
         ompi_convertor_get_packed_size(&convertor, &packed_size);
         if(packed_size > sizeof(recv_data)) {
-            iov.iov_base = malloc(packed_size);
+            iov.iov_base = (caddr_t)malloc(packed_size);
             if(iov.iov_base == NULL) {
                 OMPI_ERRHANDLER_RETURN(OMPI_ERR_OUT_OF_RESOURCE, comm, MPI_ERR_BUFFER, FUNC_NAME);
             }
         } else {
-            iov.iov_base = recv_data;
+            iov.iov_base = (caddr_t)recv_data;
         }
         /* recv into temporary buffer */

View file

@@ -26,11 +26,11 @@ int ompi_request_wait_any(
 #if OMPI_HAVE_THREADS
     int c;
 #endif
-    size_t i, num_requests_null_inactive;
+    size_t i=0, num_requests_null_inactive=0;
     int rc = OMPI_SUCCESS;
     int completed = -1;
-    ompi_request_t **rptr;
-    ompi_request_t *request;
+    ompi_request_t **rptr=NULL;
+    ompi_request_t *request=NULL;
 #if OMPI_HAVE_THREADS
     /* poll for completion */