Commit def82284 authored by Mitko Haralanov's avatar Mitko Haralanov Committed by Greg Kroah-Hartman

staging/rdma/hfi1: Convert to use get_user_pages_fast

Convert hfi1_get_user_pages() to use get_user_pages_fast(),
which is much faster. The mm semaphore is still taken to
update the pinned page count but is held for a much shorter
amount of time.
Reviewed-by: default avatarIra Weiny <ira.weiny@intel.com>
Signed-off-by: default avatarMitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 483119a7
...@@ -1663,8 +1663,8 @@ static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo) ...@@ -1663,8 +1663,8 @@ static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
* Now that we know how many free RcvArray entries we have, * Now that we know how many free RcvArray entries we have,
* we can pin that many user pages. * we can pin that many user pages.
*/ */
ret = hfi1_get_user_pages(vaddr + (mapped * PAGE_SIZE), ret = hfi1_acquire_user_pages(vaddr + (mapped * PAGE_SIZE),
pinned, pages); pinned, true, pages);
if (ret) { if (ret) {
/* /*
* We can't continue because the pages array won't be * We can't continue because the pages array won't be
...@@ -1833,7 +1833,7 @@ static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo) ...@@ -1833,7 +1833,7 @@ static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
} }
} }
flush_wc(); flush_wc();
hfi1_release_user_pages(pshadow, pcount); hfi1_release_user_pages(pshadow, pcount, true);
clear_bit(bitidx, &uctxt->tidusemap[idx]); clear_bit(bitidx, &uctxt->tidusemap[idx]);
map &= ~(1ULL<<bitidx); map &= ~(1ULL<<bitidx);
} }
...@@ -1862,7 +1862,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt) ...@@ -1862,7 +1862,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
uctxt->physshadow[tid] = 0; uctxt->physshadow[tid] = 0;
uctxt->tid_pg_list[tid] = NULL; uctxt->tid_pg_list[tid] = NULL;
pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE); pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
hfi1_release_user_pages(&p, 1); hfi1_release_user_pages(&p, 1, true);
} }
} }
......
...@@ -1587,8 +1587,8 @@ void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val); ...@@ -1587,8 +1587,8 @@ void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val);
*/ */
#define DEFAULT_RCVHDR_ENTSIZE 32 #define DEFAULT_RCVHDR_ENTSIZE 32
int hfi1_get_user_pages(unsigned long, size_t, struct page **); int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
void hfi1_release_user_pages(struct page **, size_t); void hfi1_release_user_pages(struct page **, size_t, bool);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd) static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{ {
......
...@@ -49,59 +49,11 @@ ...@@ -49,59 +49,11 @@
*/ */
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/sched.h>
#include <linux/device.h> #include <linux/device.h>
#include "hfi.h" #include "hfi.h"
static void __hfi1_release_user_pages(struct page **p, size_t num_pages,
int dirty)
{
size_t i;
for (i = 0; i < num_pages; i++) {
if (dirty)
set_page_dirty_lock(p[i]);
put_page(p[i]);
}
}
/*
* Call with current->mm->mmap_sem held.
*/
static int __hfi1_get_user_pages(unsigned long start_page, size_t num_pages,
struct page **p)
{
unsigned long lock_limit;
size_t got;
int ret;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
ret = -ENOMEM;
goto bail;
}
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
num_pages - got, 1, 1,
p + got, NULL);
if (ret < 0)
goto bail_release;
}
current->mm->pinned_vm += num_pages;
ret = 0;
goto bail;
bail_release:
__hfi1_release_user_pages(p, got, 0);
bail:
return ret;
}
/** /**
* hfi1_map_page - a safety wrapper around pci_map_page() * hfi1_map_page - a safety wrapper around pci_map_page()
* *
...@@ -116,41 +68,44 @@ dma_addr_t hfi1_map_page(struct pci_dev *hwdev, struct page *page, ...@@ -116,41 +68,44 @@ dma_addr_t hfi1_map_page(struct pci_dev *hwdev, struct page *page,
return phys; return phys;
} }
/** int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
* hfi1_get_user_pages - lock user pages into memory struct page **pages)
* @start_page: the start page
* @num_pages: the number of pages
* @p: the output page structures
*
* This function takes a given start page (page aligned user virtual
* address) and pins it and the following specified number of pages. For
* now, num_pages is always 1, but that will probably change at some point
* (because caller is doing expected sends on a single virtually contiguous
* buffer, so we can do all pages at once).
*/
int hfi1_get_user_pages(unsigned long start_page, size_t num_pages,
struct page **p)
{ {
unsigned long pinned, lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
bool can_lock = capable(CAP_IPC_LOCK);
int ret; int ret;
down_write(&current->mm->mmap_sem); down_read(&current->mm->mmap_sem);
pinned = current->mm->pinned_vm;
up_read(&current->mm->mmap_sem);
ret = __hfi1_get_user_pages(start_page, num_pages, p); if (pinned + npages > lock_limit && !can_lock)
return -ENOMEM;
ret = get_user_pages_fast(vaddr, npages, writable, pages);
if (ret < 0)
return ret;
down_write(&current->mm->mmap_sem);
current->mm->pinned_vm += ret;
up_write(&current->mm->mmap_sem); up_write(&current->mm->mmap_sem);
return ret; return ret;
} }
void hfi1_release_user_pages(struct page **p, size_t num_pages) void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty)
{ {
if (current->mm) /* during close after signal, mm can be NULL */ size_t i;
down_write(&current->mm->mmap_sem);
__hfi1_release_user_pages(p, num_pages, 1); for (i = 0; i < npages; i++) {
if (dirty)
set_page_dirty_lock(p[i]);
put_page(p[i]);
}
if (current->mm) { if (current->mm) { /* during close after signal, mm can be NULL */
current->mm->pinned_vm -= num_pages; down_write(&current->mm->mmap_sem);
current->mm->pinned_vm -= npages;
up_write(&current->mm->mmap_sem); up_write(&current->mm->mmap_sem);
} }
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment