
- Mainly type casts. The Microsoft VC++ compiler is too strict about implicit conversions.

This commit was SVN r19517.
This commit is contained in:
Shiqing Fan 2008-09-08 15:39:30 +00:00
parent 2f50fc8b92
commit 04ee20a880
12 changed files with 38 additions and 38 deletions
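Most of the changes below follow one pattern: C silently converts a void * (for example the return value of malloc or calloc) to any object pointer type, while a C++ compiler such as Microsoft VC++ rejects that conversion, so each assignment gets an explicit cast. A minimal illustrative sketch of the pattern, not taken from this commit:

#include <stdlib.h>

typedef struct { int id; } item_t;   /* hypothetical type, for illustration only */

int main(void)
{
    /* "item_t *a = malloc(sizeof(item_t));" compiles as C but is an error
     * when the file is built with a C++ compiler such as MSVC; the explicit
     * cast below is accepted by both. */
    item_t *a = (item_t *) malloc(sizeof(item_t));
    if (NULL == a) {
        return 1;
    }
    a->id = 0;
    free(a);
    return 0;
}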

View file

@@ -116,7 +116,7 @@ static int init_fifos(ompi_fifo_t *f, int n)
f[j].head = (ompi_cb_fifo_wrapper_t*)OMPI_CB_FREE;
f[j].tail = (ompi_cb_fifo_wrapper_t*)OMPI_CB_FREE;
if(opal_using_threads()) {
- char *buf = mpool_calloc(2, CACHE_LINE_SIZE);
+ char *buf = (char *) mpool_calloc(2, CACHE_LINE_SIZE);
/* allocate head and tail locks on different cache lines */
if(NULL == buf)
return OMPI_ERROR;
@@ -181,7 +181,7 @@ static void init_maffinity(int *my_mem_node, int *max_mem_node)
if((*max_mem_node = opal_value_array_get_size(&dists)) < 2)
goto out;
- dist = opal_value_array_get_item(&dists, 0);
+ dist = (opal_carto_node_distance_t *) opal_value_array_get_item(&dists, 0);
opal_maffinity_base_node_name_to_id(dist->node->node_name, my_mem_node);
out:
if(myslot) free(myslot);
@@ -201,7 +201,7 @@ static int sm_btl_first_time_init(mca_btl_sm_t *sm_btl, int n)
mca_btl_sm_component.num_mem_nodes = num_mem_nodes;
/* lookup shared memory pool */
- mca_btl_sm_component.sm_mpools = calloc(num_mem_nodes,
+ mca_btl_sm_component.sm_mpools = (mca_mpool_base_module_t **) calloc(num_mem_nodes,
sizeof(mca_mpool_base_module_t*));
/* create mpool for each memory node */
@@ -324,7 +324,7 @@ static int sm_btl_first_time_init(mca_btl_sm_t *sm_btl, int n)
mca_btl_sm_component.fifo[mca_btl_sm_component.my_smp_rank] = my_fifos;
- mca_btl_sm_component.mem_nodes = malloc(sizeof(uint16_t) * n);
+ mca_btl_sm_component.mem_nodes = (uint16_t *) malloc(sizeof(uint16_t) * n);
if(NULL == mca_btl_sm_component.mem_nodes)
return OMPI_ERR_OUT_OF_RESOURCE;

View file

@@ -557,7 +557,7 @@ static int mca_btl_tcp_component_create_listen(uint16_t af_family)
{ /* Don't reuse ports */
int flg = 0;
- if (setsockopt (sd, SOL_SOCKET, SO_REUSEADDR, (void*)&flg, sizeof (flg)) < 0) {
+ if (setsockopt (sd, SOL_SOCKET, SO_REUSEADDR, (const char *)&flg, sizeof (flg)) < 0) {
BTL_ERROR((0, "mca_btl_tcp_create_listen: unable to unset the "
"SO_REUSEADDR option (%s:%d)\n",
strerror(opal_socket_errno), opal_socket_errno));
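The setsockopt change above differs slightly from the plain allocator casts: the Winsock prototype declares the option value parameter as const char * rather than void *, so casting the flag's address to (const char *) keeps the call valid under both the Windows and the POSIX headers. A minimal sketch, assuming sd is a valid socket descriptor:

int flg = 0;
/* (const char *) matches the Winsock prototype; POSIX declares the
 * parameter as const void *, so the cast is harmless there as well. */
if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, (const char *) &flg, sizeof(flg)) < 0) {
    /* report the error, e.g. via strerror(errno) */
}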

View file

@@ -291,7 +291,7 @@ int mca_btl_tcp_proc_insert( mca_btl_tcp_proc_t* btl_proc,
if(-1 == index) {
index = num_local_interfaces++;
local_kindex_to_index[kindex] = index;
- local_interfaces[index] = malloc(sizeof(mca_btl_tcp_interface_t));
+ local_interfaces[index] = (mca_btl_tcp_interface_t *) malloc(sizeof(mca_btl_tcp_interface_t));
assert(NULL != local_interfaces[index]);
mca_btl_tcp_initialise_interface(local_interfaces[index], kindex, index);
}
@@ -304,7 +304,7 @@ int mca_btl_tcp_proc_insert( mca_btl_tcp_proc_t* btl_proc,
}
local_interfaces[local_kindex_to_index[kindex]]->ipv4_address =
- malloc(sizeof(local_addr));
+ (struct sockaddr_storage*) malloc(sizeof(local_addr));
memcpy(local_interfaces[local_kindex_to_index[kindex]]->ipv4_address,
&local_addr, sizeof(local_addr));
opal_ifindextomask(idx,
@@ -318,7 +318,7 @@ int mca_btl_tcp_proc_insert( mca_btl_tcp_proc_t* btl_proc,
}
local_interfaces[local_kindex_to_index[kindex]]->ipv6_address
- = malloc(sizeof(local_addr));
+ = (struct sockaddr_storage*) malloc(sizeof(local_addr));
memcpy(local_interfaces[local_kindex_to_index[kindex]]->ipv6_address,
&local_addr, sizeof(local_addr));
opal_ifindextomask(idx,
@@ -349,7 +349,7 @@ int mca_btl_tcp_proc_insert( mca_btl_tcp_proc_t* btl_proc,
if(-1 == index) {
index = num_peer_interfaces++;
peer_kindex_to_index[endpoint_addr->addr_ifkindex] = index;
- peer_interfaces[index] = malloc(sizeof(mca_btl_tcp_interface_t));
+ peer_interfaces[index] = (mca_btl_tcp_interface_t *) malloc(sizeof(mca_btl_tcp_interface_t));
mca_btl_tcp_initialise_interface(peer_interfaces[index],
endpoint_addr->addr_ifkindex, index);
}
@@ -364,13 +364,13 @@ int mca_btl_tcp_proc_insert( mca_btl_tcp_proc_t* btl_proc,
switch(endpoint_addr_ss.ss_family) {
case AF_INET:
- peer_interfaces[index]->ipv4_address = malloc(sizeof(endpoint_addr_ss));
+ peer_interfaces[index]->ipv4_address = (struct sockaddr_storage*) malloc(sizeof(endpoint_addr_ss));
peer_interfaces[index]->ipv4_endpoint_addr = endpoint_addr;
memcpy(peer_interfaces[index]->ipv4_address,
&endpoint_addr_ss, sizeof(endpoint_addr_ss));
break;
case AF_INET6:
- peer_interfaces[index]->ipv6_address = malloc(sizeof(endpoint_addr_ss));
+ peer_interfaces[index]->ipv6_address = (struct sockaddr_storage*) malloc(sizeof(endpoint_addr_ss));
peer_interfaces[index]->ipv6_endpoint_addr = endpoint_addr;
memcpy(peer_interfaces[index]->ipv6_address,
&endpoint_addr_ss, sizeof(endpoint_addr_ss));
@@ -468,7 +468,7 @@ int mca_btl_tcp_proc_insert( mca_btl_tcp_proc_t* btl_proc,
* interfaces
*/
- best_assignment = malloc (perm_size * sizeof(int));
+ best_assignment = (unsigned int *) malloc (perm_size * sizeof(int));
a = (int *) malloc(perm_size * sizeof(int));
if (NULL == a) {

View file

@@ -201,7 +201,7 @@ static uint64_t message_seq_num = 1;
/* The current message being worked on */
static uint64_t current_msg_id = 0;
- static ompi_crcp_coord_pml_message_type_t current_msg_type = 0;
+ static ompi_crcp_coord_pml_message_type_t current_msg_type = COORD_MSG_TYPE_UNKNOWN;
/* If we need to stall the C/R coordination until the current
* operation is complete */
@@ -1609,7 +1609,7 @@ ompi_crcp_base_pml_state_t* ompi_crcp_coord_pml_irecv(
tag = drain_msg_ref->tag;
if( 0 != ompi_ddt_copy_content_same_ddt(datatype, count,
- buf, drain_msg_ref->buffer) ) {
+ (char *) buf, (char *) drain_msg_ref->buffer) ) {
opal_output( mca_crcp_coord_component.super.output_handle,
"crcp:coord: pml_irecv(): Datatype copy failed (%d)",
ret);
@@ -1794,7 +1794,7 @@ ompi_crcp_base_pml_state_t* ompi_crcp_coord_pml_recv(
src = drain_msg_ref->rank;
tag = drain_msg_ref->tag;
if( 0 != ompi_ddt_copy_content_same_ddt(datatype, count,
- buf, drain_msg_ref->buffer) ) {
+ (char *) buf, (char *) drain_msg_ref->buffer) ) {
opal_output( mca_crcp_coord_component.super.output_handle,
"crcp:coord: pml_recv(): Datatype copy failed (%d)",
ret);
@@ -2116,7 +2116,7 @@ ompi_crcp_base_pml_state_t* ompi_crcp_coord_pml_start(
/* Copy the drained message */
if( 0 != ompi_ddt_copy_content_same_ddt(msg_ref->datatype, msg_ref->count,
- msg_ref->buffer, drain_msg_ref->buffer) ) {
+ (char *) msg_ref->buffer, (char *) drain_msg_ref->buffer) ) {
opal_output( mca_crcp_coord_component.super.output_handle,
"crcp:coord: pml_start(): Datatype copy failed (%d)",
ret);
@@ -5072,11 +5072,11 @@ static void display_all_timers(int state) {
return;
}
- opal_output(0, "crcp:coord: timing(%20s): ******************** Begin: [State = %12s]\n", "Summary", opal_crs_base_state_str(state));
+ opal_output(0, "crcp:coord: timing(%20s): ******************** Begin: [State = %12s]\n", "Summary", opal_crs_base_state_str((opal_crs_state_type_t)state));
for( i = 0; i < CRCP_TIMER_MAX; ++i) {
display_indv_timer_core(i, 0, 0, false);
}
- opal_output(0, "crcp:coord: timing(%20s): ******************** End: [State = %12s]\n", "Summary", opal_crs_base_state_str(state));
+ opal_output(0, "crcp:coord: timing(%20s): ******************** End: [State = %12s]\n", "Summary", opal_crs_base_state_str((opal_crs_state_type_t)state));
}
static void display_indv_timer(int idx, int proc, int msgs) {
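The casts in this file reflect a second C++ rule: an object of enum type cannot be initialized from a plain integer, and an int argument is not implicitly converted to an enum parameter type (here opal_crs_state_type_t), hence the COORD_MSG_TYPE_UNKNOWN initializer and the explicit (opal_crs_state_type_t) casts. A small sketch with hypothetical names illustrating the same rule:

/* Hypothetical enum and helper, loosely mirroring opal_crs_state_type_t
 * and opal_crs_base_state_str(). */
typedef enum { STATE_UNKNOWN = 0, STATE_RUNNING } state_t;

static const char *state_str(state_t s)
{
    return (STATE_RUNNING == s) ? "running" : "unknown";
}

static const char *describe(int state)
{
    /* "state_t s = 0;" and "state_str(state);" are rejected by a C++
     * compiler; an enumerator initializer and an explicit cast compile
     * in both C and C++. */
    state_t s = STATE_UNKNOWN;
    (void) s;
    return state_str((state_t) state);
}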

View file

@@ -114,7 +114,7 @@ void* mca_mpool_rdma_alloc(mca_mpool_base_module_t *mpool, size_t size,
free(base_addr);
return NULL;
}
- (*reg)->alloc_base = base_addr;
+ (*reg)->alloc_base = (unsigned char *) base_addr;
return addr;
}
@@ -144,8 +144,8 @@ static int register_cache_bypass(mca_mpool_base_module_t *mpool,
unsigned char *base, *bound;
int rc;
- base = down_align_addr(addr, mca_mpool_base_page_size_log);
- bound = up_align_addr( (void*) ((char*) addr + size - 1),
+ base = (unsigned char *) down_align_addr(addr, mca_mpool_base_page_size_log);
+ bound = (unsigned char *) up_align_addr( (void*) ((char*) addr + size - 1),
mca_mpool_base_page_size_log);
OMPI_FREE_LIST_GET(&mpool_rdma->reg_list, item, rc);
if(OMPI_SUCCESS != rc) {
@@ -190,8 +190,8 @@ int mca_mpool_rdma_register(mca_mpool_base_module_t *mpool, void *addr,
return register_cache_bypass(mpool, addr, size, flags, reg);
}
- base = down_align_addr(addr, mca_mpool_base_page_size_log);
- bound = up_align_addr((void*)((char*) addr + size - 1),
+ base = (unsigned char *) down_align_addr(addr, mca_mpool_base_page_size_log);
+ bound = (unsigned char *) up_align_addr((void*)((char*) addr + size - 1),
mca_mpool_base_page_size_log);
OPAL_THREAD_LOCK(&mpool->rcache->lock);
if(!opal_list_is_empty(&mpool_rdma->gc_list))
@@ -324,8 +324,8 @@ int mca_mpool_rdma_find(struct mca_mpool_base_module_t *mpool, void *addr,
int rc;
unsigned char *base, *bound;
- base = down_align_addr(addr, mca_mpool_base_page_size_log);
- bound = up_align_addr((void*)((char*) addr + size - 1),
+ base = (unsigned char *) down_align_addr(addr, mca_mpool_base_page_size_log);
+ bound = (unsigned char *) up_align_addr((void*)((char*) addr + size - 1),
mca_mpool_base_page_size_log);
OPAL_THREAD_LOCK(&mpool->rcache->lock);

View file

@@ -461,7 +461,7 @@ int orte_odls_base_default_construct_child_list(opal_buffer_t *data,
for (j=0; j < jobdat->num_procs; j++) {
proc.vpid = j;
/* ident this proc's node */
- pmap = opal_value_array_get_item(&jobdat->procmap, j);
+ pmap = (orte_pmap_t *) opal_value_array_get_item(&jobdat->procmap, j);
if (pmap->node < 0 || pmap->node >= orte_daemonmap.size) {
ORTE_ERROR_LOG(ORTE_ERR_VALUE_OUT_OF_BOUNDS);
rc = ORTE_ERR_VALUE_OUT_OF_BOUNDS;

View file

@@ -569,7 +569,7 @@ mca_oob_tcp_create_listen(int *target_sd, unsigned short *target_port, uint16_t
/* Disable reusing ports */
flags = 0;
- if (setsockopt (*target_sd, SOL_SOCKET, SO_REUSEADDR, (void*)&flags, sizeof(flags)) < 0) {
+ if (setsockopt (*target_sd, SOL_SOCKET, SO_REUSEADDR, (const char *)&flags, sizeof(flags)) < 0) {
opal_output(0, "mca_oob_tcp_create_listen: unable to unset the "
"SO_REUSEADDR option (%s:%d)\n",
strerror(opal_socket_errno), opal_socket_errno);

View file

@@ -140,7 +140,7 @@ rml_oob_init(int* priority)
/* Set default timeout for queued messages to be 1/2 second */
orte_rml_oob_module.timeout.tv_sec = 0;
orte_rml_oob_module.timeout.tv_usec = 500000;
- orte_rml_oob_module.timer_event = malloc(sizeof(opal_event_t));
+ orte_rml_oob_module.timer_event = (opal_event_t *) malloc(sizeof(opal_event_t));
if (NULL == orte_rml_oob_module.timer_event) {
return NULL;
}
@@ -500,7 +500,7 @@ rml_oob_recv_route_callback(int status,
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_NAME_PRINT(&next)));
ORTE_RML_OOB_MSG_HEADER_NTOH(*hdr);
- qmsg->payload[0].iov_base = malloc(iov[0].iov_len);
+ qmsg->payload[0].iov_base = (char *) malloc(iov[0].iov_len);
if (NULL == qmsg->payload[0].iov_base) abort();
qmsg->payload[0].iov_len = iov[0].iov_len;
memcpy(qmsg->payload[0].iov_base, iov[0].iov_base, iov[0].iov_len);

View file

@@ -339,7 +339,7 @@ static int update_route(orte_process_name_t *target,
}
/* not there, so add the route FOR THE JOB FAMILY*/
- route_copy = malloc(sizeof(orte_process_name_t));
+ route_copy = (orte_process_name_t *) malloc(sizeof(orte_process_name_t));
*route_copy = *route;
rc = opal_hash_table_set_value_uint32(&vpid_wildcard_list,
ORTE_JOB_FAMILY(target->jobid), route_copy);
@@ -374,7 +374,7 @@ static int update_route(orte_process_name_t *target,
}
/* not already present, so let's add it */
- route_copy = malloc(sizeof(orte_process_name_t));
+ route_copy = (orte_process_name_t *) malloc(sizeof(orte_process_name_t));
*route_copy = *route;
rc = opal_hash_table_set_value_uint32(&vpid_wildcard_list,
target->jobid, route_copy);
@@ -406,7 +406,7 @@ static int update_route(orte_process_name_t *target,
}
/* not present - add it to the table */
- route_copy = malloc(sizeof(orte_process_name_t));
+ route_copy = (orte_process_name_t *) malloc(sizeof(orte_process_name_t));
*route_copy = *route;
rc = opal_hash_table_set_value_uint64(&peer_list,
orte_util_hash_name(target), route_copy);

View file

@@ -236,7 +236,7 @@ static int update_route(orte_process_name_t *target,
}
/* not there, so add the route FOR THE JOB FAMILY*/
- route_copy = malloc(sizeof(orte_process_name_t));
+ route_copy = (orte_process_name_t *) malloc(sizeof(orte_process_name_t));
*route_copy = *route;
rc = opal_hash_table_set_value_uint32(&peer_list,
ORTE_JOB_FAMILY(target->jobid), route_copy);

View file

@@ -323,7 +323,7 @@ static int update_route(orte_process_name_t *target,
}
/* not there, so add the route FOR THE JOB FAMILY*/
- route_copy = malloc(sizeof(orte_process_name_t));
+ route_copy = (orte_process_name_t *) malloc(sizeof(orte_process_name_t));
*route_copy = *route;
rc = opal_hash_table_set_value_uint32(&vpid_wildcard_list,
ORTE_JOB_FAMILY(target->jobid), route_copy);
@@ -358,7 +358,7 @@ static int update_route(orte_process_name_t *target,
}
/* not already present, so let's add it */
- route_copy = malloc(sizeof(orte_process_name_t));
+ route_copy = (orte_process_name_t *) malloc(sizeof(orte_process_name_t));
*route_copy = *route;
rc = opal_hash_table_set_value_uint32(&vpid_wildcard_list,
target->jobid, route_copy);
@@ -390,7 +390,7 @@ static int update_route(orte_process_name_t *target,
}
/* not present - add it to the table */
- route_copy = malloc(sizeof(orte_process_name_t));
+ route_copy = (orte_process_name_t *) malloc(sizeof(orte_process_name_t));
*route_copy = *route;
rc = opal_hash_table_set_value_uint64(&peer_list,
orte_util_hash_name(target), route_copy);

View file

@@ -116,7 +116,7 @@ int orte_dt_size_job(size_t *size, orte_job_t *src, opal_data_type_t type)
for (i=0; i < src->procs->size; i++) {
if (NULL != src->procs->addr[i]) {
- orte_dt_size_proc(&sz, src->procs->addr[i], ORTE_PROC);
+ orte_dt_size_proc(&sz, (orte_proc_t *) src->procs->addr[i], ORTE_PROC);
*size += sz;
}
}
@@ -157,7 +157,7 @@ int orte_dt_size_node(size_t *size, orte_node_t *src, opal_data_type_t type)
for (i=0; i < src->procs->size; i++) {
if (NULL != src->procs->addr[i]) {
- orte_dt_size_proc(&sz, src->procs->addr[i], ORTE_PROC);
+ orte_dt_size_proc(&sz, (orte_proc_t *) src->procs->addr[i], ORTE_PROC);
*size += sz;
}
}