Commit c1d383b5 authored by Guy Shapiro, committed by Doug Ledford

IB/core: dma map/unmap locking optimizations

Currently, while mapping or unmapping pages for ODP, the umem mutex is locked
and unlocked once for each page. Each such lock/unlock pair takes a few tens to
hundreds of nanoseconds, which adds up to a significant cost when mapping or
unmapping a few MBs of memory.

To avoid this, the mutex should be locked only once per operation, not once
per page. An illustrative sketch of the pattern follows below, before the diff.
Signed-off-by: Guy Shapiro <guysh@mellanox.com>
Acked-by: Shachar Raindel <raindel@mellanox.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5b6b8fe6
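
The change is the classic hoisting of a lock out of a per-iteration path. As a
rough illustration of why this pays off, here is a minimal userspace sketch
using pthreads; "ctx" and "map_one_page" are hypothetical stand-ins for the
umem ODP context and the per-page mapping work, not the kernel code:

#include <pthread.h>

/* Hypothetical stand-ins; this sketches the locking pattern only. */
struct ctx {
	pthread_mutex_t mutex;
	/* ... page bookkeeping ... */
};

static void map_one_page(struct ctx *c, int i)
{
	/* placeholder for the real per-page DMA mapping work */
	(void)c; (void)i;
}

/* Before: one lock/unlock pair per page; at tens to hundreds of
 * nanoseconds per pair, this dominates for multi-MB regions. */
static void map_pages_per_page_locking(struct ctx *c, int npages)
{
	for (int i = 0; i < npages; i++) {
		pthread_mutex_lock(&c->mutex);
		map_one_page(c, i);
		pthread_mutex_unlock(&c->mutex);
	}
}

/* After: lock once per operation; the per-page locking cost is gone,
 * at the price of holding the mutex across the whole loop. */
static void map_pages_per_op_locking(struct ctx *c, int npages)
{
	pthread_mutex_lock(&c->mutex);
	for (int i = 0; i < npages; i++)
		map_one_page(c, i);
	pthread_mutex_unlock(&c->mutex);
}

The trade-off is a longer lock hold time; the patch evidently considers this
acceptable since the loop body is short and the mutex only serializes against
the racing invalidation path noted in the code comments below.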
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
 	int remove_existing_mapping = 0;
 	int ret = 0;
 
-	mutex_lock(&umem->odp_data->umem_mutex);
 	/*
 	 * Note: we avoid writing if seq is different from the initial seq, to
 	 * handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
 	}
 
 out:
-	mutex_unlock(&umem->odp_data->umem_mutex);
-
 	/* On Demand Paging - avoid pinning the page */
 	if (umem->context->invalidate_range || !stored_page)
 		put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 
 		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
 		user_virt += npages << PAGE_SHIFT;
+		mutex_lock(&umem->odp_data->umem_mutex);
 		for (j = 0; j < npages; ++j) {
 			ret = ib_umem_odp_map_dma_single_page(
 				umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 				break;
 			k++;
 		}
+		mutex_unlock(&umem->odp_data->umem_mutex);
 
 		if (ret < 0) {
 			/* Release left over pages when handling errors. */
@@ -633,9 +632,9 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 	 * faults from completion. We might be racing with other
 	 * invalidations, so we must make sure we free each page only
 	 * once. */
+	mutex_lock(&umem->odp_data->umem_mutex);
 	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
-		mutex_lock(&umem->odp_data->umem_mutex);
 		if (umem->odp_data->page_list[idx]) {
 			struct page *page = umem->odp_data->page_list[idx];
 			struct page *head_page = compound_head(page);
@@ -663,7 +662,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 			umem->odp_data->page_list[idx] = NULL;
 			umem->odp_data->dma_list[idx] = 0;
 		}
-		mutex_unlock(&umem->odp_data->umem_mutex);
 	}
+	mutex_unlock(&umem->odp_data->umem_mutex);
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);