
corrections to the scheduling logic

This commit was SVN r3376.
This commit is contained in:
Tim Woodall 2004-10-27 20:28:09 +00:00
parent 0210b6e8dd
commit 79548a9ade
4 changed files with 30 additions and 11 deletions

View file

@@ -123,6 +123,23 @@ static inline bool mca_pml_base_send_request_matched(
     return (NULL != request->req_peer_match.pval);
 }
 
+/**
+ * Atomically increase the request offset.
+ *
+ * @param request (IN) Send request.
+ * @param offset  (IN) Increment.
+ */
+static inline void mca_pml_base_send_request_offset(
+    mca_pml_base_send_request_t* request,
+    size_t offset)
+{
+    OMPI_THREAD_LOCK(&ompi_request_lock);
+    request->req_offset += offset;
+    OMPI_THREAD_UNLOCK(&ompi_request_lock);
+}
+
 #if defined(c_plusplus) || defined(__cplusplus)
 }
 #endif
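
Note on the new helper: req_offset is written from the PTL send path (see the ptl_tcp hunk below) while the scheduler reads it to decide what remains, so a bare `request->req_offset += offset` is a lost-update race once progress threads are enabled; the helper serializes the update under ompi_request_lock. A standalone sketch of that race and the mutex fix (illustration only, not OMPI code; plain pthreads stand in for the OMPI_THREAD_LOCK macros):

/* Two "senders" bump a shared offset concurrently, as two PTL send
 * paths would.  Routing the read-modify-write through a mutex -- the
 * analogue of mca_pml_base_send_request_offset() -- keeps every
 * increment; the bare `offset += inc` form can lose updates. */
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

static size_t offset = 0;
static pthread_mutex_t request_lock = PTHREAD_MUTEX_INITIALIZER;

static void request_offset(size_t inc)    /* helper analogue */
{
    pthread_mutex_lock(&request_lock);
    offset += inc;
    pthread_mutex_unlock(&request_lock);
}

static void* sender(void* arg)
{
    (void)arg;
    for (int i = 0; i < 1000000; i++)
        request_offset(1);                /* was: offset += 1 (racy) */
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    pthread_create(&t1, NULL, sender, NULL);
    pthread_create(&t2, NULL, sender, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("offset = %zu (expected 2000000)\n", offset);
    return 0;
}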

View file

@@ -181,7 +181,7 @@ int mca_pml_teg_add_procs(ompi_proc_t** procs, size_t nprocs)
         uint32_t* proc_arch;
         size_t size = sizeof(uint32_t);
         rc = mca_base_modex_recv(&mca_pml_teg_component.pmlm_version, procs[p],
-            &proc_arch, &size);
+            (void**)&proc_arch, &size);
         if(rc != OMPI_SUCCESS)
             return rc;
         if(size != sizeof(uint32_t))
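
The only change here is the (void**) cast: mca_base_modex_recv returns the buffer through a void** out-parameter, and in C a uint32_t** does not implicitly convert to void**, so the uncast call draws an incompatible-pointer-type diagnostic. A minimal standalone illustration of the pattern (alloc_u32 is hypothetical, not the OMPI API):

/* Minimal illustration (not OMPI code): an out-parameter of type void**
 * must be passed exactly a void**; &proc_arch is uint32_t**, so the
 * call site needs the explicit cast. */
#include <stdint.h>
#include <stdlib.h>

static void alloc_u32(void** buf, size_t* size)   /* hypothetical modex-recv shape */
{
    *buf = malloc(sizeof(uint32_t));
    *size = sizeof(uint32_t);
}

int main(void)
{
    uint32_t* proc_arch;
    size_t size;
    /* alloc_u32(&proc_arch, &size);        incompatible pointer types */
    alloc_u32((void**)&proc_arch, &size);   /* cast makes the types match */
    free(proc_arch);
    return 0;
}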

View file

@@ -67,6 +67,7 @@ int mca_pml_teg_send_request_schedule(mca_pml_base_send_request_t* req)
 {
     ompi_proc_t *proc = ompi_comm_peer_lookup(req->req_base.req_comm, req->req_base.req_peer);
     mca_pml_proc_t* proc_pml = proc->proc_pml;
+    int send_count = 0;
 
     /* allocate remaining bytes to PTLs */
     size_t bytes_remaining = req->req_bytes_packed - req->req_offset;
@@ -100,12 +101,13 @@ int mca_pml_teg_send_request_schedule(mca_pml_base_send_request_t* req)
         rc = ptl->ptl_put(ptl, ptl_proc->ptl_peer, req, req->req_offset, bytes_to_frag, 0);
         if(rc == OMPI_SUCCESS) {
+            send_count++;
             bytes_remaining = req->req_bytes_packed - req->req_offset;
         }
     }
 
-    /* unable to complete send - signal request failed */
-    if(bytes_remaining > 0) {
+    /* unable to complete send - queue for later */
+    if(send_count == 0) {
         OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
         ompi_list_append(&mca_pml_teg.teg_send_pending, (ompi_list_item_t*)req);
         OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
@@ -130,9 +132,8 @@ void mca_pml_teg_send_request_progress(
     mca_pml_base_send_request_t* req,
     size_t bytes_sent)
 {
-    bool first_frag;
+    bool schedule = false;
     OMPI_THREAD_LOCK(&ompi_request_lock);
-    first_frag = (req->req_bytes_sent == 0 && req->req_bytes_packed > 0);
     req->req_bytes_sent += bytes_sent;
     if (req->req_bytes_sent >= req->req_bytes_packed) {
         req->req_base.req_pml_complete = true;
@@ -148,19 +149,20 @@ void mca_pml_teg_send_request_progress(
         } else if (req->req_base.req_free_called) {
             MCA_PML_TEG_FREE((ompi_request_t**)&req);
         }
-        /* dont try and schedule if done */
-        first_frag = false;
     }
+    /* test to see if we have scheduled the entire request */
+    if (req->req_offset < req->req_bytes_packed)
+        schedule = true;
     OMPI_THREAD_UNLOCK(&ompi_request_lock);
-    /* if first fragment - schedule remaining fragments */
-    if(first_frag == true) {
+    /* schedule remaining fragments of this request */
+    if(schedule) {
         mca_pml_teg_send_request_schedule(req);
     }
 
     /* check for pending requests that need to be progressed */
     while(ompi_list_get_size(&mca_pml_teg.teg_send_pending) != 0) {
         OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
         req = (mca_pml_base_send_request_t*)ompi_list_remove_first(&mca_pml_teg.teg_send_pending);
         OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
         if(req == NULL)
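
Two corrections are visible in this file: the request is queued for retry only when no PTL accepted a fragment (send_count == 0) rather than whenever bytes remain, and the re-schedule decision is derived from the request state (req_offset < req_bytes_packed) while ompi_request_lock is still held, instead of a first-fragment flag sampled earlier, which stopped scheduling after the first completion. A condensed sketch of the corrected progress decision (simplified names, not the full TEG code):

/* Condensed sketch (not the full TEG code) of the corrected progress
 * logic: whether to re-schedule is computed from the request itself,
 * under the lock, so it cannot race the PTL completion callbacks. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    size_t bytes_packed;  /* total bytes in the message       */
    size_t bytes_sent;    /* bytes confirmed sent by the PTLs */
    size_t offset;        /* bytes handed to PTLs so far      */
} send_request_t;

/* Returns true when unscheduled bytes remain (the new `schedule` flag);
 * the real code brackets this with OMPI_THREAD_LOCK/UNLOCK. */
static bool progress(send_request_t* req, size_t bytes_sent)
{
    req->bytes_sent += bytes_sent;
    return req->offset < req->bytes_packed;  /* was: first-fragment test */
}

int main(void)
{
    send_request_t req = { .bytes_packed = 100, .bytes_sent = 0, .offset = 40 };
    if (progress(&req, 40))
        printf("schedule: %zu bytes not yet handed to a PTL\n",
               req.bytes_packed - req.offset);
    return 0;
}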

View file

@@ -239,7 +239,7 @@ int mca_ptl_tcp_send(
     /* must update the offset after actual fragment size is determined -- and very important --
      * before attempting to send the fragment
      */
-    sendreq->req_offset += size;
+    mca_pml_base_send_request_offset(sendreq, size);
     return mca_ptl_tcp_peer_send(ptl_peer, sendfrag, offset);
 }
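
The existing comment explains the ordering constraint this change preserves: the fragment send can presumably complete and re-enter the scheduler before control returns here, and the scheduler reads req_offset to decide what is left, so the offset must be published (now atomically, via the new helper) before the send is attempted. A standalone sketch of the hazard (illustration only, hypothetical names):

/* Illustration (not OMPI code) of why the offset is published before the
 * wire send: if the send completes inline and re-enters the scheduler,
 * the scheduler reads the offset to decide what remains to be sent. */
#include <stdio.h>
#include <stddef.h>

static size_t offset = 0;          /* bytes already handed to the wire */
static const size_t total = 100;   /* req_bytes_packed analogue        */

static void schedule_remaining(void)
{
    /* the scheduler decides from the *current* offset */
    printf("schedule %zu remaining bytes\n", total - offset);
}

static void send_fragment(size_t size, int publish_first)
{
    if (publish_first)
        offset += size;            /* correct: publish, then send        */
    schedule_remaining();          /* models an inline send-completion   */
    if (!publish_first)
        offset += size;            /* wrong: completion saw stale offset */
}

int main(void)
{
    send_fragment(40, 1);          /* prints "schedule 60 remaining bytes" */
    offset = 0;
    send_fragment(40, 0);          /* prints "schedule 100" -- a double-send */
    return 0;
}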