Commit a4574f63 authored by Dan Williams, committed by Linus Torvalds

mm/memremap_pages: convert to 'struct range'

The 'struct resource' in 'struct dev_pagemap' is only used for holding
resource span information.  The other fields ('name', 'flags', 'desc',
'parent', 'sibling', and 'child') are all unused, wasted space.
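
For reference, the two types look roughly like this (abridged; 'struct
resource' as declared in include/linux/ioport.h at the time, 'struct
range' from include/linux/range.h):

    struct resource {
            resource_size_t start;
            resource_size_t end;
            const char *name;
            unsigned long flags;
            unsigned long desc;
            struct resource *parent, *sibling, *child;
    };

    struct range {
            u64 start;
            u64 end;
    };

Only 'start' and 'end' carry information that 'struct dev_pagemap'
actually consumes.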

This is in preparation for introducing a multi-range extension of
devm_memremap_pages().
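
For context only, one plausible shape for such a multi-range extension
is sketched below; this is illustrative and not part of this patch, and
the field names ('nr_range', 'ranges') are assumptions:

    struct dev_pagemap {
            ...
            int nr_range;
            union {
                    struct range range;
                    struct range ranges[0];
            };
    };

A caller could then describe several discontiguous spans in a single
devm_memremap_pages() invocation.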

The bulk of this change is unwinding all the places internal to libnvdimm
that used 'struct resource' unnecessarily, and replacing instances of
'struct dev_pagemap'.res with 'struct dev_pagemap'.range.
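
At a typical call site the conversion follows the pattern below (a
minimal sketch with placeholder variables 'start' and 'size'; the real
hunks are in the diff that follows):

    /* before: span carried in a struct resource */
    pgmap->res.start = start;
    pgmap->res.end = start + size - 1;

    /* after: span carried in a struct range */
    pgmap->range.start = start;
    pgmap->range.end = start + size - 1;

Length calculations likewise move from resource_size(&pgmap->res) to
range_len(&pgmap->range), a helper this patch adds to
include/linux/range.h.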

P2PDMA had a minor usage of the resource flags field, but only to report
failures with "%pR".  That is replaced with an open-coded print of the
range.
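
Concretely, the p2pdma hunk below drops the flags assignment and
replaces the "%pR" print with an explicit start/end print:

    pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
             pgmap->range.start, pgmap->range.end);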

[dan.carpenter@oracle.com: mm/hmm/test: use after free in dmirror_allocate_chunk()]
  Link: https://lkml.kernel.org/r/20200926121402.GA7467@kadam
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>	[xen]
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brice Goglin <Brice.Goglin@inria.fr>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hulk Robot <hulkci@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Yan <yanaijie@huawei.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jia He <justin.he@arm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lkml.kernel.org/r/159643103173.4062302.768998885691711532.stgit@dwillia2-desk3.amr.corp.intel.com
Link: https://lkml.kernel.org/r/160106115761.30709.13539840236873663620.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fcffb6a1
@@ -687,9 +687,9 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
         struct kvmppc_uvmem_page_pvt *pvt;
         unsigned long pfn_last, pfn_first;
-        pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
+        pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
         pfn_last = pfn_first +
-                (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);
+                (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
         spin_lock(&kvmppc_uvmem_bitmap_lock);
         bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
@@ -1007,7 +1007,7 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
 static void kvmppc_uvmem_page_free(struct page *page)
 {
         unsigned long pfn = page_to_pfn(page) -
-                        (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
+                        (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
         struct kvmppc_uvmem_page_pvt *pvt;
         spin_lock(&kvmppc_uvmem_bitmap_lock);
@@ -1170,7 +1170,8 @@ int kvmppc_uvmem_init(void)
         }
         kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
-        kvmppc_uvmem_pgmap.res = *res;
+        kvmppc_uvmem_pgmap.range.start = res->start;
+        kvmppc_uvmem_pgmap.range.end = res->end;
         kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
         /* just one global instance: */
         kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
@@ -1205,7 +1206,7 @@ void kvmppc_uvmem_free(void)
                 return;
         memunmap_pages(&kvmppc_uvmem_pgmap);
-        release_mem_region(kvmppc_uvmem_pgmap.res.start,
-                        resource_size(&kvmppc_uvmem_pgmap.res));
+        release_mem_region(kvmppc_uvmem_pgmap.range.start,
+                        range_len(&kvmppc_uvmem_pgmap.range));
         kfree(kvmppc_uvmem_bitmap);
 }
@@ -515,7 +515,7 @@ static void dax_region_unregister(void *region)
 }
 struct dax_region *alloc_dax_region(struct device *parent, int region_id,
-                struct resource *res, int target_node, unsigned int align,
+                struct range *range, int target_node, unsigned int align,
                 unsigned long flags)
 {
         struct dax_region *dax_region;
@@ -530,8 +530,8 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
                 return NULL;
         }
-        if (!IS_ALIGNED(res->start, align)
-                        || !IS_ALIGNED(resource_size(res), align))
+        if (!IS_ALIGNED(range->start, align)
+                        || !IS_ALIGNED(range_len(range), align))
                 return NULL;
         dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
@@ -546,8 +546,8 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
         dax_region->target_node = target_node;
         ida_init(&dax_region->ida);
         dax_region->res = (struct resource) {
-                .start = res->start,
-                .end = res->end,
+                .start = range->start,
+                .end = range->end,
                 .flags = IORESOURCE_MEM | flags,
         };
...
@@ -13,7 +13,7 @@ void dax_region_put(struct dax_region *dax_region);
 #define IORESOURCE_DAX_STATIC (1UL << 0)
 struct dax_region *alloc_dax_region(struct device *parent, int region_id,
-                struct resource *res, int target_node, unsigned int align,
+                struct range *range, int target_node, unsigned int align,
                 unsigned long flags);
 enum dev_dax_subsys {
...
@@ -61,11 +61,6 @@ struct dev_dax {
         struct range range;
 };
-static inline u64 range_len(struct range *range)
-{
-        return range->end - range->start + 1;
-}
 static inline struct dev_dax *to_dev_dax(struct device *dev)
 {
         return container_of(dev, struct dev_dax, dev);
...
@@ -416,8 +416,7 @@ int dev_dax_probe(struct dev_dax *dev_dax)
                 pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
                 if (!pgmap)
                         return -ENOMEM;
-                pgmap->res.start = range->start;
-                pgmap->res.end = range->end;
+                pgmap->range = *range;
         }
         pgmap->type = MEMORY_DEVICE_GENERIC;
         addr = devm_memremap_pages(dev, pgmap);
...
@@ -13,13 +13,16 @@ static int dax_hmem_probe(struct platform_device *pdev)
         struct dev_dax_data data;
         struct dev_dax *dev_dax;
         struct resource *res;
+        struct range range;
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
         if (!res)
                 return -ENOMEM;
         mri = dev->platform_data;
-        dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
+        range.start = res->start;
+        range.end = res->end;
+        dax_region = alloc_dax_region(dev, pdev->id, &range, mri->target_node,
                         PMD_SIZE, 0);
         if (!dax_region)
                 return -ENOMEM;
...
@@ -9,7 +9,7 @@
 struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
 {
-        struct resource res;
+        struct range range;
         int rc, id, region_id;
         resource_size_t offset;
         struct nd_pfn_sb *pfn_sb;
@@ -50,10 +50,10 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
         if (rc != 2)
                 return ERR_PTR(-EINVAL);
-        /* adjust the dax_region resource to the start of data */
-        memcpy(&res, &pgmap.res, sizeof(res));
-        res.start += offset;
-        dax_region = alloc_dax_region(dev, region_id, &res,
+        /* adjust the dax_region range to the start of data */
+        range = pgmap.range;
+        range.start += offset,
+        dax_region = alloc_dax_region(dev, region_id, &range,
                 nd_region->target_node, le32_to_cpu(pfn_sb->align),
                 IORESOURCE_DAX_STATIC);
         if (!dax_region)
@@ -64,7 +64,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
                 .id = id,
                 .pgmap = &pgmap,
                 .subsys = subsys,
-                .size = resource_size(&res),
+                .size = range_len(&range),
         };
         dev_dax = devm_create_dev_dax(&data);
...
@@ -101,7 +101,7 @@ unsigned long nouveau_dmem_page_addr(struct page *page)
 {
         struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
         unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
-                                chunk->pagemap.res.start;
+                                chunk->pagemap.range.start;
         return chunk->bo->offset + off;
 }
@@ -249,7 +249,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
         chunk->drm = drm;
         chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
-        chunk->pagemap.res = *res;
+        chunk->pagemap.range.start = res->start;
+        chunk->pagemap.range.end = res->end;
         chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
         chunk->pagemap.owner = drm->dev;
@@ -273,7 +274,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
         list_add(&chunk->list, &drm->dmem->chunks);
         mutex_unlock(&drm->dmem->mutex);
-        pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
+        pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
         page = pfn_to_page(pfn_first);
         spin_lock(&drm->dmem->lock);
         for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
@@ -294,8 +295,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 out_bo_free:
         nouveau_bo_ref(NULL, &chunk->bo);
 out_release:
-        release_mem_region(chunk->pagemap.res.start,
-                        resource_size(&chunk->pagemap.res));
+        release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
 out_free:
         kfree(chunk);
 out:
@@ -382,8 +382,8 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
                 nouveau_bo_ref(NULL, &chunk->bo);
                 list_del(&chunk->list);
                 memunmap_pages(&chunk->pagemap);
-                release_mem_region(chunk->pagemap.res.start,
-                                resource_size(&chunk->pagemap.res));
+                release_mem_region(chunk->pagemap.range.start,
+                                range_len(&chunk->pagemap.range));
                 kfree(chunk);
         }
...
@@ -211,7 +211,7 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
 }
 static void badblocks_populate(struct badrange *badrange,
-                struct badblocks *bb, const struct resource *res)
+                struct badblocks *bb, const struct range *range)
 {
         struct badrange_entry *bre;
@@ -222,34 +222,34 @@ static void badblocks_populate(struct badrange *badrange,
                 u64 bre_end = bre->start + bre->length - 1;
                 /* Discard intervals with no intersection */
-                if (bre_end < res->start)
+                if (bre_end < range->start)
                         continue;
-                if (bre->start > res->end)
+                if (bre->start > range->end)
                         continue;
                 /* Deal with any overlap after start of the namespace */
-                if (bre->start >= res->start) {
+                if (bre->start >= range->start) {
                         u64 start = bre->start;
                         u64 len;
-                        if (bre_end <= res->end)
+                        if (bre_end <= range->end)
                                 len = bre->length;
                         else
-                                len = res->start + resource_size(res)
+                                len = range->start + range_len(range)
                                         - bre->start;
-                        __add_badblock_range(bb, start - res->start, len);
+                        __add_badblock_range(bb, start - range->start, len);
                         continue;
                 }
                 /*
                  * Deal with overlap for badrange starting before
                  * the namespace.
                  */
-                if (bre->start < res->start) {
+                if (bre->start < range->start) {
                         u64 len;
-                        if (bre_end < res->end)
-                                len = bre->start + bre->length - res->start;
+                        if (bre_end < range->end)
+                                len = bre->start + bre->length - range->start;
                         else
-                                len = resource_size(res);
+                                len = range_len(range);
                         __add_badblock_range(bb, 0, len);
                 }
         }
@@ -267,7 +267,7 @@ static void badblocks_populate(struct badrange *badrange,
  * and add badblocks entries for all matching sub-ranges
  */
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
-                struct badblocks *bb, const struct resource *res)
+                struct badblocks *bb, const struct range *range)
 {
         struct nvdimm_bus *nvdimm_bus;
@@ -279,7 +279,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
         nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
         nvdimm_bus_lock(&nvdimm_bus->dev);
-        badblocks_populate(&nvdimm_bus->badrange, bb, res);
+        badblocks_populate(&nvdimm_bus->badrange, bb, range);
         nvdimm_bus_unlock(&nvdimm_bus->dev);
 }
 EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
@@ -303,13 +303,16 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
                 resource_size_t size)
 {
-        struct resource *res = &nsio->res;
         struct nd_namespace_common *ndns = &nsio->common;
+        struct range range = {
+                .start = nsio->res.start,
+                .end = nsio->res.end,
+        };
         nsio->size = size;
-        if (!devm_request_mem_region(dev, res->start, size,
+        if (!devm_request_mem_region(dev, range.start, size,
                         dev_name(&ndns->dev))) {
-                dev_warn(dev, "could not reserve region %pR\n", res);
+                dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
                 return -EBUSY;
         }
@@ -317,9 +320,9 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
         if (devm_init_badblocks(dev, &nsio->bb))
                 return -ENOMEM;
         nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
-                        &nsio->res);
+                        &range);
-        nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
+        nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);
         return PTR_ERR_OR_ZERO(nsio->addr);
 }
...
@@ -377,8 +377,9 @@ int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                 char *name);
 unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
+struct range;
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
-                struct badblocks *bb, const struct resource *res);
+                struct badblocks *bb, const struct range *range);
 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
                 resource_size_t size);
 void devm_namespace_disable(struct device *dev,
...
@@ -672,7 +672,7 @@ static unsigned long init_altmap_reserve(resource_size_t base)
 static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 {
-        struct resource *res = &pgmap->res;
+        struct range *range = &pgmap->range;
         struct vmem_altmap *altmap = &pgmap->altmap;
         struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
         u64 offset = le64_to_cpu(pfn_sb->dataoff);
@@ -689,16 +689,16 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
                 .end_pfn = PHYS_PFN(end),
         };
-        memcpy(res, &nsio->res, sizeof(*res));
-        res->start += start_pad;
-        res->end -= end_trunc;
+        *range = (struct range) {
+                .start = nsio->res.start + start_pad,
+                .end = nsio->res.end - end_trunc,
+        };
         if (nd_pfn->mode == PFN_MODE_RAM) {
                 if (offset < reserve)
                         return -EINVAL;
                 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
         } else if (nd_pfn->mode == PFN_MODE_PMEM) {
-                nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
+                nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
                 if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
                         dev_info(&nd_pfn->dev,
                                         "number of pfns truncated from %lld to %ld\n",
...
@@ -375,7 +375,7 @@ static int pmem_attach_disk(struct device *dev,
         struct nd_region *nd_region = to_nd_region(dev->parent);
         int nid = dev_to_node(dev), fua;
         struct resource *res = &nsio->res;
-        struct resource bb_res;
+        struct range bb_range;
         struct nd_pfn *nd_pfn = NULL;
         struct dax_device *dax_dev;
         struct nd_pfn_sb *pfn_sb;
@@ -434,24 +434,26 @@ static int pmem_attach_disk(struct device *dev,
                 pfn_sb = nd_pfn->pfn_sb;
                 pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
                 pmem->pfn_pad = resource_size(res) -
-                                resource_size(&pmem->pgmap.res);
+                                range_len(&pmem->pgmap.range);
                 pmem->pfn_flags |= PFN_MAP;
-                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
-                bb_res.start += pmem->data_offset;
+                bb_range = pmem->pgmap.range;
+                bb_range.start += pmem->data_offset;
         } else if (pmem_should_map_pages(dev)) {
-                memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
+                pmem->pgmap.range.start = res->start;
+                pmem->pgmap.range.end = res->end;
                 pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
                 pmem->pgmap.ops = &fsdax_pagemap_ops;
                 addr = devm_memremap_pages(dev, &pmem->pgmap);
                 pmem->pfn_flags |= PFN_MAP;
-                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
+                bb_range = pmem->pgmap.range;
         } else {
                 if (devm_add_action_or_reset(dev, pmem_release_queue,
                                 &pmem->pgmap))
                         return -ENOMEM;
                 addr = devm_memremap(dev, pmem->phys_addr,
                                 pmem->size, ARCH_MEMREMAP_PMEM);
-                memcpy(&bb_res, &nsio->res, sizeof(bb_res));
+                bb_range.start = res->start;
+                bb_range.end = res->end;
         }
         if (IS_ERR(addr))
@@ -480,7 +482,7 @@ static int pmem_attach_disk(struct device *dev,
                         / 512);
         if (devm_init_badblocks(dev, &pmem->bb))
                 return -ENOMEM;
-        nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
+        nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
         disk->bb = &pmem->bb;
         if (is_nvdimm_sync(nd_region))
@@ -591,8 +593,8 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
         resource_size_t offset = 0, end_trunc = 0;
         struct nd_namespace_common *ndns;
         struct nd_namespace_io *nsio;
-        struct resource res;
         struct badblocks *bb;
+        struct range range;
         struct kernfs_node *bb_state;
         if (event != NVDIMM_REVALIDATE_POISON)
@@ -628,9 +630,9 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
                 nsio = to_nd_namespace_io(&ndns->dev);
         }
-        res.start = nsio->res.start + offset;
-        res.end = nsio->res.end - end_trunc;
-        nvdimm_badblocks_populate(nd_region, bb, &res);
+        range.start = nsio->res.start + offset;
+        range.end = nsio->res.end - end_trunc;
+        nvdimm_badblocks_populate(nd_region, bb, &range);
         if (bb_state)
                 sysfs_notify_dirent(bb_state);
 }
...
@@ -35,7 +35,10 @@ static int nd_region_probe(struct device *dev)
                 return rc;
         if (is_memory(&nd_region->dev)) {
-                struct resource ndr_res;
+                struct range range = {
+                        .start = nd_region->ndr_start,
+                        .end = nd_region->ndr_start + nd_region->ndr_size - 1,
+                };
                 if (devm_init_badblocks(dev, &nd_region->bb))
                         return -ENODEV;
@@ -44,9 +47,7 @@ static int nd_region_probe(struct device *dev)
                 if (!nd_region->bb_state)
                         dev_warn(&nd_region->dev,
                                         "'badblocks' notification disabled\n");
-                ndr_res.start = nd_region->ndr_start;
-                ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
-                nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
+                nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
         }
         rc = nd_region_register_namespaces(nd_region, &err);
@@ -121,14 +122,16 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
 {
         if (event == NVDIMM_REVALIDATE_POISON) {
                 struct nd_region *nd_region = to_nd_region(dev);
-                struct resource res;
                 if (is_memory(&nd_region->dev)) {
-                        res.start = nd_region->ndr_start;
-                        res.end = nd_region->ndr_start +
-                                nd_region->ndr_size - 1;
+                        struct range range = {
+                                .start = nd_region->ndr_start,
+                                .end = nd_region->ndr_start +
+                                        nd_region->ndr_size - 1,
+                        };
                         nvdimm_badblocks_populate(nd_region,
-                                        &nd_region->bb, &res);
+                                        &nd_region->bb, &range);
                         if (nd_region->bb_state)
                                 sysfs_notify_dirent(nd_region->bb_state);
                 }
...
@@ -185,9 +185,8 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                 return -ENOMEM;
         pgmap = &p2p_pgmap->pgmap;
-        pgmap->res.start = pci_resource_start(pdev, bar) + offset;
-        pgmap->res.end = pgmap->res.start + size - 1;
-        pgmap->res.flags = pci_resource_flags(pdev, bar);
+        pgmap->range.start = pci_resource_start(pdev, bar) + offset;
+        pgmap->range.end = pgmap->range.start + size - 1;
         pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
         p2p_pgmap->provider = pdev;
@@ -202,13 +201,13 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
         error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
                         pci_bus_address(pdev, bar) + offset,
-                        resource_size(&pgmap->res), dev_to_node(&pdev->dev),
+                        range_len(&pgmap->range), dev_to_node(&pdev->dev),
                         pgmap->ref);
         if (error)
                 goto pages_free;
-        pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
-                 &pgmap->res);
+        pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
+                 pgmap->range.start, pgmap->range.end);
         return 0;
...
@@ -18,27 +18,37 @@ static unsigned int list_count;
 static int fill_list(unsigned int nr_pages)
 {
         struct dev_pagemap *pgmap;
+        struct resource *res;
         void *vaddr;
         unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
-        int ret;
+        int ret = -ENOMEM;
+        res = kzalloc(sizeof(*res), GFP_KERNEL);
+        if (!res)
+                return -ENOMEM;
         pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
         if (!pgmap)
-                return -ENOMEM;
+                goto err_pgmap;
         pgmap->type = MEMORY_DEVICE_GENERIC;
-        pgmap->res.name = "Xen scratch";
-        pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-        ret = allocate_resource(&iomem_resource, &pgmap->res,
+        res->name = "Xen scratch";
+        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+        ret = allocate_resource(&iomem_resource, res,
                                 alloc_pages * PAGE_SIZE, 0, -1,
                                 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
         if (ret < 0) {
                 pr_err("Cannot allocate new IOMEM resource\n");
-                kfree(pgmap);
-                return ret;
+                goto err_resource;
         }
+        pgmap->range = (struct range) {
+                .start = res->start,
+                .end = res->end,
+        };
+        pgmap->owner = res;
 #ifdef CONFIG_XEN_HAVE_PVMMU
         /*
          * memremap will build page tables for the new memory so
@@ -50,14 +60,13 @@ static int fill_list(unsigned int nr_pages)
          * conflict with any devices.
          */
         if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-                xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);
+                xen_pfn_t pfn = PFN_DOWN(res->start);
                 for (i = 0; i < alloc_pages; i++) {
                         if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
                                 pr_warn("set_phys_to_machine() failed, no memory added\n");
-                                release_resource(&pgmap->res);
-                                kfree(pgmap);
-                                return -ENOMEM;
+                                ret = -ENOMEM;
+                                goto err_memremap;
                         }
                 }
         }
@@ -66,9 +75,8 @@ static int fill_list(unsigned int nr_pages)
         vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
         if (IS_ERR(vaddr)) {
                 pr_err("Cannot remap memory range\n");
-                release_resource(&pgmap->res);
-                kfree(pgmap);
-                return PTR_ERR(vaddr);
+                ret = PTR_ERR(vaddr);
+                goto err_memremap;
         }
         for (i = 0; i < alloc_pages; i++) {
@@ -80,6 +88,14 @@ static int fill_list(unsigned int nr_pages)
         }
         return 0;
+err_memremap:
+        release_resource(res);
+err_resource:
+        kfree(pgmap);
+err_pgmap:
+        kfree(res);
+        return ret;
 }
 /**
...
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_MEMREMAP_H_
 #define _LINUX_MEMREMAP_H_
+#include <linux/range.h>
 #include <linux/ioport.h>
 #include <linux/percpu-refcount.h>
@@ -93,7 +94,7 @@ struct dev_pagemap_ops {
 /**
  * struct dev_pagemap - metadata for ZONE_DEVICE mappings
  * @altmap: pre-allocated/reserved memory for vmemmap allocations
- * @res: physical address range covered by @ref
+ * @range: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
  * @internal_ref: internal reference if @ref is not provided by the caller
  * @done: completion for @internal_ref
@@ -106,7 +107,7 @@ struct dev_pagemap_ops {
  */
 struct dev_pagemap {
         struct vmem_altmap altmap;
-        struct resource res;
+        struct range range;
         struct percpu_ref *ref;
         struct percpu_ref internal_ref;
         struct completion done;
...
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_RANGE_H
 #define _LINUX_RANGE_H
+#include <linux/types.h>
 struct range {
         u64 start;
         u64 end;
 };
+static inline u64 range_len(const struct range *range)
+{
+        return range->end - range->start + 1;
+}
 int add_range(struct range *range, int az, int nr_range,
                 u64 start, u64 end);
...
@@ -460,6 +460,21 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
         unsigned long pfn_last;
         void *ptr;
+        devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+        if (!devmem)
+                return -ENOMEM;
+        res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+                                      "hmm_dmirror");
+        if (IS_ERR(res))
+                goto err_devmem;
+        devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+        devmem->pagemap.range.start = res->start;
+        devmem->pagemap.range.end = res->end;
+        devmem->pagemap.ops = &dmirror_devmem_ops;
+        devmem->pagemap.owner = mdevice;
         mutex_lock(&mdevice->devmem_lock);
         if (mdevice->devmem_count == mdevice->devmem_capacity) {
@@ -472,33 +487,18 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
                                 sizeof(new_chunks[0]) * new_capacity,
                                 GFP_KERNEL);
                 if (!new_chunks)
-                        goto err;
+                        goto err_release;
                 mdevice->devmem_capacity = new_capacity;
                 mdevice->devmem_chunks = new_chunks;
         }
-        res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
-                                        "hmm_dmirror");
-        if (IS_ERR(res))
-                goto err;
-        devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
-        if (!devmem)
-                goto err_release;
-        devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-        devmem->pagemap.res = *res;
-        devmem->pagemap.ops = &dmirror_devmem_ops;
-        devmem->pagemap.owner = mdevice;
         ptr = memremap_pages(&devmem->pagemap, numa_node_id());
         if (IS_ERR(ptr))
-                goto err_free;
+                goto err_release;
         devmem->mdevice = mdevice;
-        pfn_first = devmem->pagemap.res.start >> PAGE_SHIFT;
-        pfn_last = pfn_first +
-                (resource_size(&devmem->pagemap.res) >> PAGE_SHIFT);
+        pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
+        pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
         mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;
         mutex_unlock(&mdevice->devmem_lock);
@@ -525,12 +525,12 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
         return true;
-err_free:
-        kfree(devmem);
 err_release:
-        release_mem_region(res->start, resource_size(res));
-err:
         mutex_unlock(&mdevice->devmem_lock);
+        release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+err_devmem:
+        kfree(devmem);
         return false;
 }
@@ -1100,8 +1100,8 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
                                 mdevice->devmem_chunks[i];
                         memunmap_pages(&devmem->pagemap);
-                        release_mem_region(devmem->pagemap.res.start,
-                                        resource_size(&devmem->pagemap.res));
+                        release_mem_region(devmem->pagemap.range.start,
+                                           range_len(&devmem->pagemap.range));
                         kfree(devmem);
                 }
                 kfree(mdevice->devmem_chunks);
...
@@ -70,24 +70,24 @@ static void devmap_managed_enable_put(void)
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
-static void pgmap_array_delete(struct resource *res)
+static void pgmap_array_delete(struct range *range)
 {
-        xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
+        xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
                         NULL, GFP_KERNEL);
         synchronize_rcu();
 }
 static unsigned long pfn_first(struct dev_pagemap *pgmap)
 {
-        return PHYS_PFN(pgmap->res.start) +
+        return PHYS_PFN(pgmap->range.start) +
                 vmem_altmap_offset(pgmap_altmap(pgmap));
 }
 static unsigned long pfn_end(struct dev_pagemap *pgmap)
 {
-        const struct resource *res = &pgmap->res;
-        return (res->start + resource_size(res)) >> PAGE_SHIFT;
+        const struct range *range = &pgmap->range;
+        return (range->start + range_len(range)) >> PAGE_SHIFT;
 }
 static unsigned long pfn_next(unsigned long pfn)
@@ -126,7 +126,7 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
 void memunmap_pages(struct dev_pagemap *pgmap)
 {
-        struct resource *res = &pgmap->res;
+        struct range *range = &pgmap->range;
         struct page *first_page;
         unsigned long pfn;
         int nid;
@@ -143,20 +143,20 @@ void memunmap_pages(struct dev_pagemap *pgmap)
         nid = page_to_nid(first_page);
         mem_hotplug_begin();
-        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(res->start),
-                                   PHYS_PFN(resource_size(res)));
+        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
+                                   PHYS_PFN(range_len(range)));
         if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-                __remove_pages(PHYS_PFN(res->start),
-                               PHYS_PFN(resource_size(res)), NULL);
+                __remove_pages(PHYS_PFN(range->start),
+                               PHYS_PFN(range_len(range)), NULL);
         } else {
-                arch_remove_memory(nid, res->start, resource_size(res),
+                arch_remove_memory(nid, range->start, range_len(range),
                                 pgmap_altmap(pgmap));
-                kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+                kasan_remove_zero_shadow(__va(range->start), range_len(range));
         }
         mem_hotplug_done();
-        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
-        pgmap_array_delete(res);
+        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
+        pgmap_array_delete(range);
         WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
         devmap_managed_enable_put();
 }
@@ -182,7 +182,7 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
  */
 void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 {
-        struct resource *res = &pgmap->res;
+        struct range *range = &pgmap->range;
         struct dev_pagemap *conflict_pgmap;
         struct mhp_params params = {
                 /*
@@ -251,7 +251,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                 return ERR_PTR(error);
         }
-        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
+        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
         if (conflict_pgmap) {
                 WARN(1, "Conflicting mapping in same section\n");
                 put_dev_pagemap(conflict_pgmap);
@@ -259,7 +259,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                 goto err_array;
         }
-        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
+        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
         if (conflict_pgmap) {
                 WARN(1, "Conflicting mapping in same section\n");
                 put_dev_pagemap(conflict_pgmap);
@@ -267,26 +267,27 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                 goto err_array;
         }
-        is_ram = region_intersects(res->start, resource_size(res),
+        is_ram = region_intersects(range->start, range_len(range),
                 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
         if (is_ram != REGION_DISJOINT) {
-                WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
-                        is_ram == REGION_MIXED ? "mixed" : "ram", res);
+                WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
+                                is_ram == REGION_MIXED ? "mixed" : "ram",
+                                range->start, range->end);
                 error = -ENXIO;
                 goto err_array;
         }
-        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
-                                PHYS_PFN(res->end), pgmap, GFP_KERNEL));
+        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
+                                PHYS_PFN(range->end), pgmap, GFP_KERNEL));
         if (error)
                 goto err_array;
         if (nid < 0)
                 nid = numa_mem_id();
-        error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(res->start),
-                        0, resource_size(res));
+        error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(range->start), 0,
+                        range_len(range));
         if (error)
                 goto err_pfn_remap;
@@ -304,16 +305,16 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
          * arch_add_memory().
          */
         if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-                error = add_pages(nid, PHYS_PFN(res->start),
-                                PHYS_PFN(resource_size(res)), &params);
+                error = add_pages(nid, PHYS_PFN(range->start),
+                                PHYS_PFN(range_len(range)), &params);
         } else {
-                error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
+                error = kasan_add_zero_shadow(__va(range->start), range_len(range));
                 if (error) {
                         mem_hotplug_done();
                         goto err_kasan;
                 }
-                error = arch_add_memory(nid, res->start, resource_size(res),
+                error = arch_add_memory(nid, range->start, range_len(range),
                                 &params);
         }
@@ -321,8 +322,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                 struct zone *zone;
                 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
-                move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
-                                PHYS_PFN(resource_size(res)), params.altmap);
+                move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
+                                PHYS_PFN(range_len(range)), params.altmap);
         }
         mem_hotplug_done();
@@ -334,17 +335,17 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
          * to allow us to do the work while not holding the hotplug lock.
          */
         memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-                                PHYS_PFN(res->start),
-                                PHYS_PFN(resource_size(res)), pgmap);
+                                PHYS_PFN(range->start),
+                                PHYS_PFN(range_len(range)), pgmap);
         percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
-        return __va(res->start);
+        return __va(range->start);
 err_add_memory:
-        kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+        kasan_remove_zero_shadow(__va(range->start), range_len(range));
 err_kasan:
-        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
+        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
 err_pfn_remap:
-        pgmap_array_delete(res);
+        pgmap_array_delete(range);
 err_array:
         dev_pagemap_kill(pgmap);
         dev_pagemap_cleanup(pgmap);
@@ -369,7 +370,7 @@ EXPORT_SYMBOL_GPL(memremap_pages);
  * 'live' on entry and will be killed and reaped at
  * devm_memremap_pages_release() time, or if this routine fails.
  *
- * 4/ res is expected to be a host memory range that could feasibly be
+ * 4/ range is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
  *    this is not enforced.
  */
@@ -426,7 +427,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
          * In the cached case we're already holding a live reference.
          */
         if (pgmap) {
-                if (phys >= pgmap->res.start && phys <= pgmap->res.end)
+                if (phys >= pgmap->range.start && phys <= pgmap->range.end)
                         return pgmap;
                 put_dev_pagemap(pgmap);
         }
...
@@ -126,7 +126,7 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 {
         int error;
-        resource_size_t offset = pgmap->res.start;
+        resource_size_t offset = pgmap->range.start;
         struct nfit_test_resource *nfit_res = get_nfit_res(offset);
         if (!nfit_res)
...