Commit 1e0b0a9e authored by Alex Williamson's avatar Alex Williamson Committed by Greg Kroah-Hartman

vfio/type1: Remove locked page accounting workqueue

commit 0cfef2b7 upstream.

If the mmap_sem is contended then the vfio type1 IOMMU backend will
defer locked page accounting updates to a workqueue task.  This has a
few problems and depending on which side the user tries to play, they
might be over-penalized for unmaps that haven't yet been accounted or
race the workqueue to enter more mappings than they're allowed.  The
original intent of this workqueue mechanism seems to be focused on
reducing latency through the ioctl, but we cannot do so at the cost
of correctness.  Remove this workqueue mechanism and update the
callers to allow for failure.  We can also now recheck the limit under
write lock to make sure we don't exceed it.

vfio_pin_pages_remote() also now necessarily includes an unwind path
which we can jump to directly if the consecutive page pinning finds
that we're exceeding the user's memory limits.  This avoids the
current lazy approach which does accounting and mapping up to the
fault, only to return an error on the next iteration to unwind the
entire vfio_dma.
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 34224e0e
...@@ -246,69 +246,46 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) ...@@ -246,69 +246,46 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
return ret; return ret;
} }
struct vwork { static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap)
struct mm_struct *mm;
long npage;
struct work_struct work;
};
/* delayed decrement/increment for locked_vm */
static void vfio_lock_acct_bg(struct work_struct *work)
{
struct vwork *vwork = container_of(work, struct vwork, work);
struct mm_struct *mm;
mm = vwork->mm;
down_write(&mm->mmap_sem);
mm->locked_vm += vwork->npage;
up_write(&mm->mmap_sem);
mmput(mm);
kfree(vwork);
}
static void vfio_lock_acct(struct task_struct *task, long npage)
{ {
struct vwork *vwork;
struct mm_struct *mm; struct mm_struct *mm;
bool is_current; bool is_current;
int ret;
if (!npage) if (!npage)
return; return 0;
is_current = (task->mm == current->mm); is_current = (task->mm == current->mm);
mm = is_current ? task->mm : get_task_mm(task); mm = is_current ? task->mm : get_task_mm(task);
if (!mm) if (!mm)
return; /* process exited */ return -ESRCH; /* process exited */
if (down_write_trylock(&mm->mmap_sem)) { ret = down_write_killable(&mm->mmap_sem);
mm->locked_vm += npage; if (!ret) {
up_write(&mm->mmap_sem); if (npage > 0) {
if (!is_current) if (lock_cap ? !*lock_cap :
mmput(mm); !has_capability(task, CAP_IPC_LOCK)) {
return; unsigned long limit;
limit = task_rlimit(task,
RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (mm->locked_vm + npage > limit)
ret = -ENOMEM;
}
} }
if (is_current) { if (!ret)
mm = get_task_mm(task); mm->locked_vm += npage;
if (!mm)
return; up_write(&mm->mmap_sem);
} }
/* if (!is_current)
* Couldn't get mmap_sem lock, so must setup to update
* mm->locked_vm later. If locked_vm were atomic, we
* wouldn't need this silliness
*/
vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
if (WARN_ON(!vwork)) {
mmput(mm); mmput(mm);
return;
} return ret;
INIT_WORK(&vwork->work, vfio_lock_acct_bg);
vwork->mm = mm;
vwork->npage = npage;
schedule_work(&vwork->work);
} }
/* /*
...@@ -405,7 +382,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, ...@@ -405,7 +382,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
long npage, unsigned long *pfn_base) long npage, unsigned long *pfn_base)
{ {
unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
bool lock_cap = capable(CAP_IPC_LOCK); bool lock_cap = capable(CAP_IPC_LOCK);
long ret, pinned = 0, lock_acct = 0; long ret, pinned = 0, lock_acct = 0;
bool rsvd; bool rsvd;
...@@ -442,8 +419,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, ...@@ -442,8 +419,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
/* Lock all the consecutive pages from pfn_base */ /* Lock all the consecutive pages from pfn_base */
for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
unsigned long pfn = 0;
ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
if (ret) if (ret)
break; break;
...@@ -460,14 +435,25 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, ...@@ -460,14 +435,25 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
put_pfn(pfn, dma->prot); put_pfn(pfn, dma->prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
__func__, limit << PAGE_SHIFT); __func__, limit << PAGE_SHIFT);
break; ret = -ENOMEM;
goto unpin_out;
} }
lock_acct++; lock_acct++;
} }
} }
out: out:
vfio_lock_acct(current, lock_acct); ret = vfio_lock_acct(current, lock_acct, &lock_cap);
unpin_out:
if (ret) {
if (!rsvd) {
for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
put_pfn(pfn, dma->prot);
}
return ret;
}
return pinned; return pinned;
} }
...@@ -488,7 +474,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, ...@@ -488,7 +474,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
} }
if (do_accounting) if (do_accounting)
vfio_lock_acct(dma->task, locked - unlocked); vfio_lock_acct(dma->task, locked - unlocked, NULL);
return unlocked; return unlocked;
} }
...@@ -522,8 +508,14 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, ...@@ -522,8 +508,14 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
goto pin_page_exit; goto pin_page_exit;
} }
if (!rsvd && do_accounting) if (!rsvd && do_accounting) {
vfio_lock_acct(dma->task, 1); ret = vfio_lock_acct(dma->task, 1, &lock_cap);
if (ret) {
put_pfn(*pfn_base, dma->prot);
goto pin_page_exit;
}
}
ret = 1; ret = 1;
pin_page_exit: pin_page_exit:
...@@ -543,7 +535,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, ...@@ -543,7 +535,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
if (do_accounting) if (do_accounting)
vfio_lock_acct(dma->task, -unlocked); vfio_lock_acct(dma->task, -unlocked, NULL);
return unlocked; return unlocked;
} }
...@@ -740,7 +732,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, ...@@ -740,7 +732,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
dma->iommu_mapped = false; dma->iommu_mapped = false;
if (do_accounting) { if (do_accounting) {
vfio_lock_acct(dma->task, -unlocked); vfio_lock_acct(dma->task, -unlocked, NULL);
return 0; return 0;
} }
return unlocked; return unlocked;
...@@ -1382,7 +1374,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) ...@@ -1382,7 +1374,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
if (!is_invalid_reserved_pfn(vpfn->pfn)) if (!is_invalid_reserved_pfn(vpfn->pfn))
locked++; locked++;
} }
vfio_lock_acct(dma->task, locked - unlocked); vfio_lock_acct(dma->task, locked - unlocked, NULL);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment