
Merge pull request #3738 from bosilca/topic/tcp_event_count

Fix the TCP performance impact when the BTL is not used
This commit is contained in:
bosilca 2017-09-19 23:08:58 -04:00, committed by GitHub
parents 2c59fb7a58 bd5650d680
commit ab68aced23
2 changed files: 16 additions and 7 deletions
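
What changed, in brief: opal_progress() drives the shared libevent base only while the count of registered event users is positive. Before this commit the TCP BTL incremented that count once per peer in add_procs(), so the event library was polled on every progress call for the whole run, even when no TCP connection was ever opened. The commit moves the accounting to connection setup and teardown, guarded by a check that the BTL actually shares the default event base. Below is a minimal, self-contained sketch of the counting mechanism; the names are illustrative stand-ins, not the Open MPI source (the real calls are opal_progress_event_users_increment()/_decrement()):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-in for OPAL's event-user counter: progress() polls
     * the event library only while the count is positive, so every spurious
     * increment keeps the latency-critical path expensive. */
    static atomic_int event_users;

    static void event_users_increment(void) { atomic_fetch_add(&event_users, 1); }
    static void event_users_decrement(void) { atomic_fetch_sub(&event_users, 1); }

    static void progress(void)
    {
        if (atomic_load(&event_users) > 0)
            puts("poll event loop (costly on the latency path)");
        else
            puts("skip event loop (cheap path)");
    }

    int main(void)
    {
        progress();              /* no TCP connection yet: cheap path    */
        event_users_increment(); /* a live connection armed a recv event */
        progress();              /* now the event loop must be polled    */
        event_users_decrement(); /* connection closed                    */
        progress();              /* back to the cheap path               */
        return 0;
    }

With per-connection accounting, a job that builds the TCP BTL but exchanges all of its traffic over shared memory or self never pays the polling cost.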

opal/mca/btl/tcp/btl_tcp.c

@@ -137,11 +137,6 @@ int mca_btl_tcp_add_procs( struct mca_btl_base_module_t* btl,
         }
         peers[i] = tcp_endpoint;
-        /* we increase the count of MPI users of the event library
-           once per peer, so that we are used until we aren't
-           connected to a peer */
-        opal_progress_event_users_increment();
     }
     return OPAL_SUCCESS;
@@ -160,7 +155,6 @@ int mca_btl_tcp_del_procs(struct mca_btl_base_module_t* btl,
         mca_btl_tcp_endpoint_t* tcp_endpoint = endpoints[i];
         opal_list_remove_item(&tcp_btl->tcp_endpoints, (opal_list_item_t*)tcp_endpoint);
         OBJ_RELEASE(tcp_endpoint);
-        opal_progress_event_users_decrement();
     }
     OPAL_THREAD_UNLOCK(&tcp_btl->tcp_endpoints_mutex);
     return OPAL_SUCCESS;
@@ -495,7 +489,6 @@ int mca_btl_tcp_finalize(struct mca_btl_base_module_t* btl)
          item = opal_list_remove_first(&tcp_btl->tcp_endpoints)) {
         mca_btl_tcp_endpoint_t *endpoint = (mca_btl_tcp_endpoint_t*)item;
         OBJ_RELEASE(endpoint);
-        opal_progress_event_users_decrement();
     }
     free(tcp_btl);
     return OPAL_SUCCESS;
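
The three removals above take the counter out of the peer lifecycle entirely. A rough before/after sketch, using hypothetical helper names rather than the Open MPI API:

    #include <stdio.h>

    static int event_users;                  /* models OPAL's event-user count */

    /* Before: one event user per peer from add_procs() on, so the count was
     * positive for the entire run even if no TCP socket was ever opened. */
    static void add_procs_before(int nprocs)
    {
        for (int i = 0; i < nprocs; i++)
            event_users++;
    }

    /* After: peer setup leaves the count alone; it is touched only when a
     * connection actually arms its recv event (second file, below). */
    static void add_procs_after(int nprocs)
    {
        (void)nprocs;                        /* no counter traffic here */
    }

    int main(void)
    {
        add_procs_before(4);                 /* old scheme: count jumps to 4 */
        printf("before-scheme count: %d\n", event_users);
        event_users = 0;
        add_procs_after(4);                  /* new scheme: count stays 0 */
        printf("after-scheme count:  %d\n", event_users);
        return 0;
    }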

opal/mca/btl/tcp/btl_tcp_endpoint.c

@@ -463,6 +463,10 @@ static void *mca_btl_tcp_endpoint_complete_accept(int fd, int flags, void *context)
     mca_btl_tcp_endpoint_event_init(btl_endpoint);
     MCA_BTL_TCP_ENDPOINT_DUMP(10, btl_endpoint, true, "event_add(recv) [endpoint_accept]");
     opal_event_add(&btl_endpoint->endpoint_recv_event, 0);
+    if( mca_btl_tcp_event_base == opal_sync_event_base ) {
+        /* If no progress thread then raise the awareness of the default progress engine */
+        opal_progress_event_users_increment();
+    }
     mca_btl_tcp_endpoint_connected(btl_endpoint);
     MCA_BTL_TCP_ENDPOINT_DUMP(10, btl_endpoint, true, "accepted");
@@ -512,6 +516,10 @@ void mca_btl_tcp_endpoint_close(mca_btl_base_endpoint_t* btl_endpoint)
     btl_endpoint->endpoint_retries++;
     MCA_BTL_TCP_ENDPOINT_DUMP(1, btl_endpoint, false, "event_del(recv) [close]");
     opal_event_del(&btl_endpoint->endpoint_recv_event);
+    if( mca_btl_tcp_event_base == opal_sync_event_base ) {
+        /* If no progress thread then lower the awareness of the default progress engine */
+        opal_progress_event_users_decrement();
+    }
     MCA_BTL_TCP_ENDPOINT_DUMP(1, btl_endpoint, false, "event_del(send) [close]");
     opal_event_del(&btl_endpoint->endpoint_send_event);
@@ -739,6 +747,10 @@ static int mca_btl_tcp_endpoint_start_connect(mca_btl_base_endpoint_t* btl_endpoint)
         btl_endpoint->endpoint_state = MCA_BTL_TCP_CONNECT_ACK;
         MCA_BTL_TCP_ENDPOINT_DUMP(10, btl_endpoint, true, "event_add(recv) [start_connect]");
         opal_event_add(&btl_endpoint->endpoint_recv_event, 0);
+        if( mca_btl_tcp_event_base == opal_sync_event_base ) {
+            /* If no progress thread then raise the awareness of the default progress engine */
+            opal_progress_event_users_increment();
+        }
         return OPAL_SUCCESS;
     }
     /* We connected to the peer, but it closed the socket before we got a chance to send our guid */
@@ -826,6 +838,10 @@ static int mca_btl_tcp_endpoint_complete_connect(mca_btl_base_endpoint_t* btl_endpoint)
     if(mca_btl_tcp_endpoint_send_connect_ack(btl_endpoint) == OPAL_SUCCESS) {
         btl_endpoint->endpoint_state = MCA_BTL_TCP_CONNECT_ACK;
         opal_event_add(&btl_endpoint->endpoint_recv_event, 0);
+        if( mca_btl_tcp_event_base == opal_sync_event_base ) {
+            /* If no progress thread then raise the awareness of the default progress engine */
+            opal_progress_event_users_increment();
+        }
        MCA_BTL_TCP_ENDPOINT_DUMP(10, btl_endpoint, false, "event_add(recv) [complete_connect]");
        return OPAL_SUCCESS;
     }
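
All four additions share the same guard: mca_btl_tcp_event_base == opal_sync_event_base, i.e. the BTL has no dedicated progress thread and relies on the default engine to fire its events; with a private event base the count is left alone. Reading the hunks together, each connection is expected to take one of the three arming paths (complete_accept, start_connect when the connect succeeds immediately, or complete_connect otherwise), with mca_btl_tcp_endpoint_close() holding the single matching decrement. A toy model of that pairing, with illustrative names only; the assert checks the balance invariant:

    #include <assert.h>

    static int event_users;     /* models the default engine's user count */
    static int shared_base = 1; /* models mca_btl_tcp_event_base == opal_sync_event_base */

    /* One of accept / start_connect / complete_connect arms the recv event. */
    static void connection_armed(void)
    {
        if (shared_base)
            event_users++;
    }

    /* endpoint_close() holds the single matching decrement. */
    static void connection_closed(void)
    {
        if (shared_base)
            event_users--;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            connection_armed();
            connection_closed();
        }
        assert(event_users == 0); /* balanced: the progress engine can idle again */
        return 0;
    }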