Commit 3e9a9e25 authored by Christoph Hellwig, committed by Linus Torvalds

mm: add a vmap_pfn function

Add a proper helper to remap PFNs into kernel virtual space so that
drivers don't have to abuse alloc_vm_area and open coded PTE manipulation
for it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Link: https://lkml.kernel.org/r/20201002122204.1534411-4-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b944afc9
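
A minimal usage sketch, not from this patch: a driver that owns a range of device memory with no struct page backing builds a PFN array and hands it to the new helper, then tears the mapping down with vunmap(). example_map_device_mem and dev_phys are hypothetical names.

	#include <linux/pfn.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* Map nr_pages pages of device memory starting at physical address
	 * dev_phys into kernel virtual space; unmap with vunmap() when done. */
	static void *example_map_device_mem(phys_addr_t dev_phys,
					    unsigned int nr_pages)
	{
		unsigned long *pfns;
		unsigned int i;
		void *vaddr;

		pfns = kmalloc_array(nr_pages, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return NULL;
		for (i = 0; i < nr_pages; i++)
			pfns[i] = PHYS_PFN(dev_phys) + i;

		vaddr = vmap_pfn(pfns, nr_pages, pgprot_writecombine(PAGE_KERNEL));
		kfree(pfns);	/* the array is only read while vmap_pfn() runs */
		return vaddr;
	}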
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -122,6 +122,7 @@ extern void vfree_atomic(const void *addr);
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
+void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
 extern void vunmap(const void *addr);
 
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -816,6 +816,9 @@ config DEVICE_PRIVATE
 	  memory; i.e., memory that is only accessible from the device (or
 	  group of devices). You likely also want to select HMM_MIRROR.
 
+config VMAP_PFN
+	bool
+
 config FRAME_VECTOR
 	bool
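
VMAP_PFN is a hidden bool with no prompt, so it cannot be enabled from menuconfig; any code that calls vmap_pfn() has to select it from its own Kconfig entry. A hypothetical example (DRM_EXAMPLE is an illustrative name):

	config DRM_EXAMPLE
		tristate "Example driver that maps raw PFNs"
		select VMAP_PFN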
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2413,6 +2413,51 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
+#ifdef CONFIG_VMAP_PFN
+struct vmap_pfn_data {
+	unsigned long	*pfns;
+	pgprot_t	prot;
+	unsigned int	idx;
+};
+
+static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
+{
+	struct vmap_pfn_data *data = private;
+
+	if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
+		return -EINVAL;
+	*pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
+	return 0;
+}
+
+/**
+ * vmap_pfn - map an array of PFNs into virtually contiguous space
+ * @pfns: array of PFNs
+ * @count: number of pages to map
+ * @prot: page protection for the mapping
+ *
+ * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
+ * the start address of the mapping.
+ */
+void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
+{
+	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
+	struct vm_struct *area;
+
+	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
+			__builtin_return_address(0));
+	if (!area)
+		return NULL;
+	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
+		free_vm_area(area);
+		return NULL;
+	}
+
+	return area->addr;
+}
+EXPORT_SYMBOL_GPL(vmap_pfn);
+#endif /* CONFIG_VMAP_PFN */
+
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 			pgprot_t prot, int node)
 {
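
Internally, apply_to_page_range() allocates any missing page-table levels for the range and calls vmap_pfn_apply() once per PTE; pgprot_nx() strips execute permission from the caller's protection bits, and pte_mkspecial() marks each entry as special so the mm never goes looking for a struct page behind it (the WARN_ON_ONCE rejects PFNs that do have one). For contrast, a rough sketch of the open-coded pattern this helper replaces, reconstructed from callers such as i915 and not part of this diff; pfns, nr_pages, and prot are assumed to exist and error handling is omitted:

	pte_t **ptes = kvmalloc_array(nr_pages, sizeof(*ptes), GFP_KERNEL);
	struct vm_struct *area = alloc_vm_area(nr_pages * PAGE_SIZE, ptes);
	unsigned int i;

	/* alloc_vm_area() filled ptes[] with a pointer to each PTE;
	 * the caller then writes the entries by hand. */
	for (i = 0; i < nr_pages; i++)
		*ptes[i] = pte_mkspecial(pfn_pte(pfns[i], prot));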