Commit bcfa4b72 authored by Matthew Wilcox

memremap: Convert to XArray

Use the new xa_store_range function instead of the radix tree.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 0e9446c3
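For readers new to the XArray, here is a minimal sketch of the two calls this commit leans on. The indices and names are hypothetical, not part of the commit, and xa_store_range() is only available when CONFIG_XARRAY_MULTI is enabled:

#include <linux/bug.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(example_array);

/* Store one entry across a whole index range, then look it up by any
 * index inside that range.  This is what lets a single dev_pagemap
 * cover every PFN of its resource without the old power-of-two
 * insertion loop. */
static int example_range_store(void *entry)
{
	int error;

	/* One call covers every index in [0x100, 0x1ff]. */
	error = xa_err(xa_store_range(&example_array, 0x100, 0x1ff,
				entry, GFP_KERNEL));
	if (error)
		return error;	/* -ENOMEM or -EINVAL */

	/* Any index inside the range resolves to the same entry. */
	WARN_ON(xa_load(&example_array, 0x180) != entry);
	return 0;
}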
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -1,47 +1,21 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright(c) 2015 Intel Corporation. All rights reserved. */
-#include <linux/radix-tree.h>
 #include <linux/device.h>
-#include <linux/types.h>
-#include <linux/pfn_t.h>
 #include <linux/io.h>
 #include <linux/kasan.h>
-#include <linux/mm.h>
 #include <linux/memory_hotplug.h>
+#include <linux/mm.h>
+#include <linux/pfn_t.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/types.h>
 #include <linux/wait_bit.h>
+#include <linux/xarray.h>
 
-static DEFINE_MUTEX(pgmap_lock);
-static RADIX_TREE(pgmap_radix, GFP_KERNEL);
+static DEFINE_XARRAY(pgmap_array);
 #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
 #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
 
-static unsigned long order_at(struct resource *res, unsigned long pgoff)
-{
-	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
-	unsigned long nr_pages, mask;
-
-	nr_pages = PHYS_PFN(resource_size(res));
-	if (nr_pages == pgoff)
-		return ULONG_MAX;
-
-	/*
-	 * What is the largest aligned power-of-2 range available from
-	 * this resource pgoff to the end of the resource range,
-	 * considering the alignment of the current pgoff?
-	 */
-	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
-	if (!mask)
-		return ULONG_MAX;
-
-	return find_first_bit(&mask, BITS_PER_LONG);
-}
-
-#define foreach_order_pgoff(res, order, pgoff) \
-	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
-			pgoff += 1UL << order, order = order_at((res), pgoff))
-
 #if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
 vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
 		       unsigned long addr,
@@ -70,18 +44,10 @@ vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
 EXPORT_SYMBOL(device_private_entry_fault);
 #endif /* CONFIG_DEVICE_PRIVATE */
 
-static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
+static void pgmap_array_delete(struct resource *res)
 {
-	unsigned long pgoff, order;
-
-	mutex_lock(&pgmap_lock);
-	foreach_order_pgoff(res, order, pgoff) {
-		if (pgoff >= end_pgoff)
-			break;
-		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
-	}
-	mutex_unlock(&pgmap_lock);
-
+	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
+			NULL, GFP_KERNEL);
 	synchronize_rcu();
 }
 
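A note on pgmap_array_delete() above: the XArray needs no dedicated range-delete here, because storing NULL over [res->start, res->end] erases the multi-index entry. The synchronize_rcu() is kept so that lockless readers (the xa_load() in get_dev_pagemap(), last hunk below) have drained before the pagemap is torn down. The pattern, sketched with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Erasing a range entry is storing NULL over the same range. */
static void example_range_delete(struct xarray *xa,
		unsigned long first, unsigned long last)
{
	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
	synchronize_rcu();	/* wait out rcu_read_lock() readers */
}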
@@ -142,7 +108,7 @@ static void devm_memremap_pages_release(void *data)
 	mem_hotplug_done();
 
 	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
-	pgmap_radix_release(res, -1);
+	pgmap_array_delete(res);
 	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
 		      "%s: failed to free all reserved pages\n", __func__);
 }
@@ -175,7 +141,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	struct vmem_altmap *altmap = pgmap->altmap_valid ?
 			&pgmap->altmap : NULL;
 	struct resource *res = &pgmap->res;
-	unsigned long pfn, pgoff, order;
+	unsigned long pfn;
 	pgprot_t pgprot = PAGE_KERNEL;
 	int error, nid, is_ram;
 	struct dev_pagemap *conflict_pgmap;
@@ -216,20 +182,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 
 	pgmap->dev = dev;
 
-	mutex_lock(&pgmap_lock);
-	error = 0;
-	foreach_order_pgoff(res, order, pgoff) {
-		error = __radix_tree_insert(&pgmap_radix,
-				PHYS_PFN(res->start) + pgoff, order, pgmap);
-		if (error) {
-			dev_err(dev, "%s: failed: %d\n", __func__, error);
-			break;
-		}
-	}
-	mutex_unlock(&pgmap_lock);
+	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
+				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
 	if (error)
-		goto err_radix;
+		goto err_array;
 
 	nid = dev_to_node(dev);
 	if (nid < 0)
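Worth noting in the hunk above: the pgmap_lock mutex and the per-chunk __radix_tree_insert() loop collapse into a single expression, since the XArray takes its own internal lock and xa_store_range() spans the whole PFN range at once. xa_store_range() returns NULL on success or an error-encoded pointer on failure, and xa_err() turns that into 0 or a negative errno. The idiom, sketched with hypothetical names:

#include <linux/xarray.h>

/* The whole lock-insert-check-unlock sequence becomes one expression. */
static int example_insert(struct xarray *xa, unsigned long first,
		unsigned long last, void *item)
{
	return xa_err(xa_store_range(xa, first, last, item, GFP_KERNEL));
}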
@@ -279,8 +235,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_kasan:
 	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
  err_pfn_remap:
- err_radix:
-	pgmap_radix_release(res, pgoff);
+	pgmap_array_delete(res);
+ err_array:
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL(devm_memremap_pages);
@@ -320,7 +276,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 
 	/* fall back to slow path lookup */
 	rcu_read_lock();
-	pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
+	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
 	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
 		pgmap = NULL;
 	rcu_read_unlock();
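The slow path above keeps its RCU discipline: xa_load() is safe under rcu_read_lock(), and the percpu_ref_tryget_live() must happen before rcu_read_unlock() so the pagemap cannot be freed between lookup and reference acquisition. The same pattern, sketched with hypothetical types and names:

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

struct example_obj {
	struct percpu_ref ref;
	/* ... payload ... */
};

static struct example_obj *example_lookup(struct xarray *xa,
		unsigned long index)
{
	struct example_obj *obj;

	/* Take the reference inside the RCU section, as get_dev_pagemap()
	 * does, so the object cannot go away between load and tryget. */
	rcu_read_lock();
	obj = xa_load(xa, index);
	if (obj && !percpu_ref_tryget_live(&obj->ref))
		obj = NULL;
	rcu_read_unlock();
	return obj;	/* caller drops the reference when done */
}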