Commit 6c38c055 authored by Alex Williamson

vfio/type1: Restore mapping performance with mdev support

As part of the mdev support, type1 now gets a task reference per
vfio_dma and uses that to get an mm reference for the task while
working on accounting.  That's correct, but it's not fast.  For some
paths, like vfio_pin_pages_remote(), we know we're only called from
user context, so we can restore the lighter weight calls.  In other
cases, we're effectively already testing whether we're in the stored
task context elsewhere; extend this to vfio_lock_acct() as well.
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com>
parent 08c1a4ef
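
The core of the patch is a borrow-versus-reference decision: when the accounting task is the caller itself, vfio_lock_acct() can use task->mm directly and skip the get_task_mm()/mmput() reference cycle the commit message identifies as slow. A minimal userspace sketch of that pattern (all struct and helper names here are simplified stand-ins, not the kernel's):

/* Sketch: borrow the mm when the caller owns it, otherwise take a
 * reference.  Compile with: cc -o acct acct.c */
#include <stdio.h>
#include <stddef.h>

struct mm { int refcount; long locked_vm; };
struct task { struct mm *mm; };

/* Stand-ins for get_task_mm()/mmput(): reference-counted access. */
static struct mm *get_task_mm(struct task *t)
{
	if (!t->mm)
		return NULL;	/* process exited */
	t->mm->refcount++;
	return t->mm;
}

static void mmput(struct mm *mm)
{
	mm->refcount--;
}

static void lock_acct(struct task *task, struct task *current, long npage)
{
	/* Fast path: the caller shares the mm, so borrow it directly. */
	int is_current = (task->mm == current->mm);
	struct mm *mm = is_current ? task->mm : get_task_mm(task);

	if (!mm)
		return;

	mm->locked_vm += npage;

	if (!is_current)
		mmput(mm);	/* only drop a reference if we took one */
}

int main(void)
{
	struct mm m = { 1, 0 };
	struct task t = { &m };

	lock_acct(&t, &t, 4);	/* fast path: refcount untouched */
	printf("locked_vm=%ld refcount=%d\n", m.locked_vm, m.refcount);
	return 0;
}
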
@@ -268,28 +268,38 @@ static void vfio_lock_acct(struct task_struct *task, long npage)
 {
 	struct vwork *vwork;
 	struct mm_struct *mm;
+	bool is_current;
 
 	if (!npage)
 		return;
 
-	mm = get_task_mm(task);
+	is_current = (task->mm == current->mm);
+
+	mm = is_current ? task->mm : get_task_mm(task);
 	if (!mm)
-		return; /* process exited or nothing to do */
+		return; /* process exited */
 
 	if (down_write_trylock(&mm->mmap_sem)) {
 		mm->locked_vm += npage;
 		up_write(&mm->mmap_sem);
-		mmput(mm);
+		if (!is_current)
+			mmput(mm);
 		return;
 	}
 
+	if (is_current) {
+		mm = get_task_mm(task);
+		if (!mm)
+			return;
+	}
+
 	/*
 	 * Couldn't get mmap_sem lock, so must setup to update
 	 * mm->locked_vm later. If locked_vm were atomic, we
 	 * wouldn't need this silliness
 	 */
 	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
-	if (!vwork) {
+	if (WARN_ON(!vwork)) {
 		mmput(mm);
 		return;
 	}
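
The hunk above also keeps the existing trylock-or-defer shape: the uncontended case updates mm->locked_vm under mmap_sem immediately, while contention falls back to a queued vwork item. A rough userspace analogue of that shape, with pthreads standing in for mmap_sem and an atomic counter standing in for the queued vwork (illustrative names only; compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static long locked_vm;			/* protected by map_lock */
static atomic_long deferred;		/* pending deltas, applied later */

static void lock_acct(long npage)
{
	if (pthread_mutex_trylock(&map_lock) == 0) {
		locked_vm += npage;	/* uncontended: update in place */
		pthread_mutex_unlock(&map_lock);
		return;
	}
	/* Contended: stash the delta, as the kernel's vwork item does. */
	atomic_fetch_add(&deferred, npage);
}

static void apply_deferred(void)	/* plays the deferred-work role */
{
	long n = atomic_exchange(&deferred, 0);

	pthread_mutex_lock(&map_lock);
	locked_vm += n;
	pthread_mutex_unlock(&map_lock);
}

int main(void)
{
	lock_acct(4);
	apply_deferred();
	printf("locked_vm=%ld\n", locked_vm);
	return 0;
}
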
@@ -393,52 +403,50 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 				  long npage, unsigned long *pfn_base)
 {
-	unsigned long limit;
-	bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
-				   CAP_IPC_LOCK);
-	struct mm_struct *mm;
-	long ret, i = 0, lock_acct = 0;
+	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	bool lock_cap = capable(CAP_IPC_LOCK);
+	long ret, pinned = 0, lock_acct = 0;
 	bool rsvd;
 	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
 
-	mm = get_task_mm(dma->task);
-	if (!mm)
+	/* This code path is only user initiated */
+	if (!current->mm)
 		return -ENODEV;
 
-	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
+	ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
 	if (ret)
-		goto pin_pg_remote_exit;
+		return ret;
 
+	pinned++;
 	rsvd = is_invalid_reserved_pfn(*pfn_base);
-	limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
 	/*
 	 * Reserved pages aren't counted against the user, externally pinned
 	 * pages are already counted against the user.
 	 */
 	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
-		if (!lock_cap && mm->locked_vm + 1 > limit) {
+		if (!lock_cap && current->mm->locked_vm + 1 > limit) {
 			put_pfn(*pfn_base, dma->prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
 				limit << PAGE_SHIFT);
-			ret = -ENOMEM;
-			goto pin_pg_remote_exit;
+			return -ENOMEM;
 		}
 		lock_acct++;
 	}
 
-	i++;
-	if (likely(!disable_hugepages)) {
-		/* Lock all the consecutive pages from pfn_base */
-		for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage;
-		     i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
-			unsigned long pfn = 0;
-
-			ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn);
-			if (ret)
-				break;
-
-			if (pfn != *pfn_base + i ||
-			    rsvd != is_invalid_reserved_pfn(pfn)) {
-				put_pfn(pfn, dma->prot);
-				break;
+	if (unlikely(disable_hugepages))
+		goto out;
+
+	/* Lock all the consecutive pages from pfn_base */
+	for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
+	     pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
+		unsigned long pfn = 0;
+
+		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
+		if (ret)
+			break;
+
+		if (pfn != *pfn_base + pinned ||
+		    rsvd != is_invalid_reserved_pfn(pfn)) {
+			put_pfn(pfn, dma->prot);
+			break;
@@ -446,24 +454,20 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
-			if (!rsvd && !vfio_find_vpfn(dma, iova)) {
-				if (!lock_cap &&
-				    mm->locked_vm + lock_acct + 1 > limit) {
-					put_pfn(pfn, dma->prot);
-					pr_warn("%s: RLIMIT_MEMLOCK (%ld) "
-						"exceeded\n", __func__,
-						limit << PAGE_SHIFT);
-					break;
-				}
-				lock_acct++;
-			}
-		}
-	}
+		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+			if (!lock_cap &&
+			    current->mm->locked_vm + lock_acct + 1 > limit) {
+				put_pfn(pfn, dma->prot);
+				pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+					__func__, limit << PAGE_SHIFT);
+				break;
+			}
+			lock_acct++;
+		}
+	}
 
-	vfio_lock_acct(dma->task, lock_acct);
-	ret = i;
+out:
+	vfio_lock_acct(current, lock_acct);
 
-pin_pg_remote_exit:
-	mmput(mm);
-	return ret;
+	return pinned;
 }
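
Note the limit computation moving to the declaration: because this path is always user-initiated, the patch replaces the per-task task_rlimit()/ns_capable() lookups with rlimit(RLIMIT_MEMLOCK) and capable(CAP_IPC_LOCK) against current, and the limit is converted from bytes to pages once up front. A standalone userspace equivalent of that calculation (getrlimit() standing in for the in-kernel rlimit() helper):

#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	long page_size = sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}

	/* Mirrors: unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	 * (RLIM_INFINITY would print as a huge page count here; the kernel
	 * handles the unlimited case via the CAP_IPC_LOCK check instead.) */
	unsigned long limit = rl.rlim_cur / (unsigned long)page_size;

	printf("RLIMIT_MEMLOCK: %llu bytes -> %lu pages (page size %ld)\n",
	       (unsigned long long)rl.rlim_cur, limit, page_size);
	return 0;
}
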
@@ -473,10 +477,10 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
 	long unlocked = 0, locked = 0;
 	long i;
 
-	for (i = 0; i < npage; i++) {
+	for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
 		if (put_pfn(pfn++, dma->prot)) {
 			unlocked++;
-			if (vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT)))
+			if (vfio_find_vpfn(dma, iova))
 				locked++;
 		}
 	}
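
The vfio_unpin_pages_remote() change is a small strength reduction: iova now advances by PAGE_SIZE in the loop header rather than being recomputed as iova + (i << PAGE_SHIFT) on every vfio_find_vpfn() lookup. A quick standalone check that the two forms visit identical addresses (a PAGE_SHIFT of 12 is assumed here purely for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t base = 0x100000, iova = base;
	long i, npage = 8;

	/* New form carries iova forward; old form recomputed it from i. */
	for (i = 0; i < npage; i++, iova += PAGE_SIZE)
		assert(iova == base + ((uint64_t)i << PAGE_SHIFT));

	printf("forms agree for %ld pages\n", npage);
	return 0;
}
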