Commit 9a4903e4 authored by Christoph Hellwig, committed by Jason Gunthorpe

mm/hmm: replace the block argument to hmm_range_fault with a flags value

This allows easier expansion to other flags, and also makes the callers a
little easier to read.
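Concretely, callers that used to pass "block = true" now pass 0, and a caller that wants
the old non-blocking behaviour passes HMM_FAULT_ALLOW_RETRY. A minimal before/after
sketch of a call site (illustrative only; error handling and the surrounding mmap_sem
locking are elided):

	/* Before: a bool selected blocking vs. non-blocking behaviour. */
	ret = hmm_range_fault(range, true);                   /* block */
	ret = hmm_range_fault(range, false);                  /* may return -EAGAIN */

	/* After: a flags word, extensible to further HMM_FAULT_* bits. */
	ret = hmm_range_fault(range, 0);                      /* block (default) */
	ret = hmm_range_fault(range, HMM_FAULT_ALLOW_RETRY);  /* may drop mmap_sem
	                                                         and return -EAGAIN */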

Link: https://lore.kernel.org/r/20190726005650.2566-4-rcampbell@nvidia.com
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent d2e8d551
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -832,7 +832,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	down_read(&mm->mmap_sem);
-	r = hmm_range_fault(range, true);
+	r = hmm_range_fault(range, 0);
 	if (unlikely(r < 0)) {
 		if (likely(r == -EAGAIN)) {
 			/*
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -505,7 +505,7 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
 		return -EBUSY;
 	}
 
-	ret = hmm_range_fault(range, true);
+	ret = hmm_range_fault(range, 0);
 	if (ret <= 0) {
 		if (ret == 0)
 			ret = -EBUSY;
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -407,12 +407,19 @@ int hmm_range_register(struct hmm_range *range,
 			unsigned long end,
 			unsigned page_shift);
 void hmm_range_unregister(struct hmm_range *range);
+
+/*
+ * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case.
+ */
+#define HMM_FAULT_ALLOW_RETRY	(1 << 0)
+
 long hmm_range_snapshot(struct hmm_range *range);
-long hmm_range_fault(struct hmm_range *range, bool block);
+long hmm_range_fault(struct hmm_range *range, unsigned int flags);
+
 long hmm_range_dma_map(struct hmm_range *range,
 		       struct device *device,
 		       dma_addr_t *daddrs,
-		       bool block);
+		       unsigned int flags);
 long hmm_range_dma_unmap(struct hmm_range *range,
 			 struct vm_area_struct *vma,
 			 struct device *device,
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -281,7 +281,7 @@ struct hmm_vma_walk {
 	struct dev_pagemap	*pgmap;
 	unsigned long		last;
 	bool			fault;
-	bool			block;
+	unsigned int		flags;
 };
 
 static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
@@ -293,8 +293,11 @@ static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	vm_fault_t ret;
 
-	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
-	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
+	if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
+		flags |= FAULT_FLAG_ALLOW_RETRY;
+	if (write_fault)
+		flags |= FAULT_FLAG_WRITE;
+
 	ret = handle_mm_fault(vma, addr, flags);
 	if (ret & VM_FAULT_RETRY) {
 		/* Note, handle_mm_fault did up_read(&mm->mmap_sem)) */
@@ -1012,26 +1015,26 @@ long hmm_range_snapshot(struct hmm_range *range)
 }
 EXPORT_SYMBOL(hmm_range_snapshot);
 
-/*
- * hmm_range_fault() - try to fault some address in a virtual address range
+/**
+ * hmm_range_fault - try to fault some address in a virtual address range
  * @range: range being faulted
- * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
- * Return: number of valid pages in range->pfns[] (from range start
- *          address). This may be zero. If the return value is negative,
- *          then one of the following values may be returned:
+ * @flags: HMM_FAULT_* flags
  *
- *           -EINVAL invalid arguments or mm or virtual address are in an
- *                   invalid vma (for instance device file vma).
- *           -ENOMEM: Out of memory.
- *           -EPERM: Invalid permission (for instance asking for write and
- *                   range is read only).
- *           -EAGAIN: If you need to retry and mmap_sem was drop. This can only
- *                    happens if block argument is false.
- *           -EBUSY: If the the range is being invalidated and you should wait
- *                   for invalidation to finish.
- *           -EFAULT: Invalid (ie either no valid vma or it is illegal to access
- *                    that range), number of valid pages in range->pfns[] (from
- *                    range start address).
+ * Return: the number of valid pages in range->pfns[] (from range start
+ * address), which may be zero.  On error one of the following status codes
+ * can be returned:
+ *
+ * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
+ *		(e.g., device file vma).
+ * -ENOMEM:	Out of memory.
+ * -EPERM:	Invalid permission (e.g., asking for write and range is read
+ *		only).
+ * -EAGAIN:	A page fault needs to be retried and mmap_sem was dropped.
+ * -EBUSY:	The range has been invalidated and the caller needs to wait for
+ *		the invalidation to finish.
+ * -EFAULT:	Invalid (i.e., either no valid vma or it is illegal to access
+ *		that range) number of valid pages in range->pfns[] (from
+ *		range start address).
  *
  * This is similar to a regular CPU page fault except that it will not trigger
  * any memory migration if the memory being faulted is not accessible by CPUs
@@ -1040,7 +1043,7 @@ EXPORT_SYMBOL(hmm_range_snapshot);
  * On error, for one virtual address in the range, the function will mark the
  * corresponding HMM pfn entry with an error flag.
  */
-long hmm_range_fault(struct hmm_range *range, bool block)
+long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 {
 	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
 	unsigned long start = range->start, end;
@@ -1086,7 +1089,7 @@ long hmm_range_fault(struct hmm_range *range, bool block)
 	hmm_vma_walk.pgmap = NULL;
 	hmm_vma_walk.last = start;
 	hmm_vma_walk.fault = true;
-	hmm_vma_walk.block = block;
+	hmm_vma_walk.flags = flags;
 	hmm_vma_walk.range = range;
 	mm_walk.private = &hmm_vma_walk;
 	end = min(range->end, vma->vm_end);
@@ -1125,25 +1128,22 @@ long hmm_range_fault(struct hmm_range *range, bool block)
 EXPORT_SYMBOL(hmm_range_fault);
 
 /**
- * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
+ * hmm_range_dma_map - hmm_range_fault() and dma map page all in one.
  * @range: range being faulted
- * @device: device against to dma map page to
- * @daddrs: dma address of mapped pages
- * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
- * Return: number of pages mapped on success, -EAGAIN if mmap_sem have been
- *         drop and you need to try again, some other error value otherwise
+ * @device: device to map page to
+ * @daddrs: array of dma addresses for the mapped pages
+ * @flags: HMM_FAULT_*
  *
- * Note same usage pattern as hmm_range_fault().
+ * Return: the number of pages mapped on success (including zero), or any
+ * status return from hmm_range_fault() otherwise.
  */
-long hmm_range_dma_map(struct hmm_range *range,
-		       struct device *device,
-		       dma_addr_t *daddrs,
-		       bool block)
+long hmm_range_dma_map(struct hmm_range *range, struct device *device,
+		dma_addr_t *daddrs, unsigned int flags)
 {
 	unsigned long i, npages, mapped;
 	long ret;
 
-	ret = hmm_range_fault(range, block);
+	ret = hmm_range_fault(range, flags);
 	if (ret <= 0)
 		return ret ? ret : -EBUSY;
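For context on the two driver hunks above, a typical caller follows the pattern sketched
below (illustrative only, not part of this commit; example_fault_range is a hypothetical
name, error handling is simplified from the nouveau caller, and the range is assumed to
be already registered with the mirror and valid):

	/*
	 * Illustrative caller sketch: fault a registered range with the new
	 * flags API.  Passing 0 keeps the old "block = true" behaviour.
	 */
	static long example_fault_range(struct hmm_range *range,
					struct mm_struct *mm)
	{
		long ret;

		down_read(&mm->mmap_sem);
		ret = hmm_range_fault(range, 0);
		if (ret <= 0) {
			up_read(&mm->mmap_sem);
			/* zero valid pages: an invalidation raced, retry later */
			return ret ? ret : -EBUSY;
		}
		/* consume range->pfns[] while still holding mmap_sem */
		up_read(&mm->mmap_sem);
		return ret;	/* number of valid pages in range->pfns[] */
	}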