Commit 60e93dc0 authored by Dan Williams, committed by Linus Torvalds

device-dax: add dis-contiguous resource support

Break the requirement that device-dax instances are physically contiguous.
With this constraint removed, fragmented available capacity can be fully
allocated.

This capability is useful to mitigate the "noisy neighbor" problem with
memory-side-cache management for virtual machines, or any other scenario
where a platform address boundary also designates a performance boundary.
For example, a direct-mapped memory-side cache might rotate cache colors at
1GB boundaries.  With dis-contiguous allocations, a device-dax instance
could be configured to contain only one cache color.
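
As a worked illustration (hypothetical parameters, not taken from this
patch): assume a 16GB span and a direct-mapped memory-side cache that
rotates through four colors at 1GB granularity.  The 1GB chunks belonging
to any one color then recur every 4GB, so an instance that isolates a
single color is necessarily dis-contiguous:

/*
 * Hypothetical sketch: enumerate the 1GB physical chunks that share one
 * cache color when a direct-mapped memory-side cache rotates colors at
 * 1GB boundaries.  All parameters are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define GB (1ULL << 30)

int main(void)
{
        uint64_t span = 16 * GB;        /* assumed memory span */
        uint64_t colors = 4;            /* assumed number of cache colors */
        uint64_t color = 1;             /* the color to isolate */
        uint64_t start;

        /* color k occupies every colors-th 1GB chunk */
        for (start = color * GB; start < span; start += colors * GB)
                printf("%#llx-%#llx\n", (unsigned long long)start,
                                (unsigned long long)(start + GB - 1));
        return 0;
}

This prints four dis-contiguous 1GB ranges (at 1GB, 5GB, 9GB, and 13GB)
that a single device-dax instance can now carry.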

It also satisfies Joao's use case (see link) for partitioning memory for
exclusive guest access.  It allows for a potential future mode where the
host kernel need not allocate 'struct page' capacity up-front.
Reported-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brice Goglin <Brice.Goglin@inria.fr>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: David Airlie <airlied@linux.ie>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hulk Robot <hulkci@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Yan <yanaijie@huawei.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Jia He <justin.he@arm.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/lkml/20200110190313.17144-1-joao.m.martins@oracle.com/
Link: https://lkml.kernel.org/r/159643104304.4062302.16561669534797528660.stgit@dwillia2-desk3.amr.corp.intel.com
Link: https://lkml.kernel.org/r/160106116875.30709.11456649969327399771.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b7b3c01b
(The diff for drivers/dax/bus.c is collapsed and not shown.)
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -49,7 +49,8 @@ struct dax_region {
  * @id: ida allocated id
  * @dev - device core
  * @pgmap - pgmap for memmap setup / lifetime (driver owned)
- * @range: resource range for the instance
+ * @nr_range: size of @ranges
+ * @ranges: resource-span + pgoff tuples for the instance
  */
 struct dev_dax {
         struct dax_region *region;
@@ -58,7 +59,11 @@ struct dev_dax {
         int id;
         struct device dev;
         struct dev_pagemap *pgmap;
-        struct range range;
+        int nr_range;
+        struct dev_dax_range {
+                unsigned long pgoff;
+                struct range range;
+        } *ranges;
 };
 
 static inline struct dev_dax *to_dev_dax(struct device *dev)
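
For orientation, a minimal sketch (made-up addresses, not from the patch)
of what a two-entry @ranges array looks like: each tuple pairs a cumulative
device page offset with the physical span backing it, so the instance
presents one contiguous pgoff space over dis-contiguous memory:

/*
 * Sketch only, with made-up addresses: two 1GB spans stitched into one
 * contiguous device pgoff space (1GB == 0x40000 pages of 4K).
 */
struct range { unsigned long long start, end; };

struct dev_dax_range_example {
        unsigned long pgoff;    /* cumulative device page offset */
        struct range range;     /* physical span backing that offset */
};

static const struct dev_dax_range_example example_ranges[] = {
        /* device pgoff 0x00000-0x3ffff -> 1GB of physical at 2GB */
        { .pgoff = 0x00000, .range = { 0x080000000ULL, 0x0bfffffffULL } },
        /* device pgoff 0x40000-0x7ffff -> 1GB of physical at 6GB */
        { .pgoff = 0x40000, .range = { 0x180000000ULL, 0x1bfffffffULL } },
};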
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -55,15 +55,22 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
                 unsigned long size)
 {
-        struct range *range = &dev_dax->range;
-        phys_addr_t phys;
-
-        phys = pgoff * PAGE_SIZE + range->start;
-        if (phys >= range->start && phys <= range->end) {
+        int i;
+
+        for (i = 0; i < dev_dax->nr_range; i++) {
+                struct dev_dax_range *dax_range = &dev_dax->ranges[i];
+                struct range *range = &dax_range->range;
+                unsigned long long pgoff_end;
+                phys_addr_t phys;
+
+                pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
+                if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
+                        continue;
+                phys = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
                 if (phys + size - 1 <= range->end)
                         return phys;
+                break;
         }
 
         return -1;
 }
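
To see the lookup in action, here is a standalone userspace
re-implementation of the loop above (same arithmetic, simplified types,
not the kernel code), run against the hypothetical two-range layout
sketched earlier; pgoff 0x40010 lands 0x10 pages into the second span:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct range { uint64_t start, end; };
struct dax_range { unsigned long pgoff; struct range range; };

static const struct dax_range ranges[] = {
        { 0x00000, { 0x080000000ULL, 0x0bfffffffULL } },  /* 1GB at 2GB */
        { 0x40000, { 0x180000000ULL, 0x1bfffffffULL } },  /* 1GB at 6GB */
};

static int64_t pgoff_to_phys(unsigned long pgoff, unsigned long size)
{
        int i, nr = sizeof(ranges) / sizeof(ranges[0]);

        for (i = 0; i < nr; i++) {
                const struct range *r = &ranges[i].range;
                unsigned long pgoff_end = ranges[i].pgoff +
                        ((r->end - r->start + 1) >> PAGE_SHIFT) - 1;
                uint64_t phys;

                /* skip ranges whose pgoff window does not cover pgoff */
                if (pgoff < ranges[i].pgoff || pgoff > pgoff_end)
                        continue;
                phys = ((uint64_t)(pgoff - ranges[i].pgoff) << PAGE_SHIFT)
                        + r->start;
                if (phys + size - 1 <= r->end)
                        return phys;
                break;  /* request straddles the end of the span */
        }
        return -1;
}

int main(void)
{
        /* 0x10 pages into the second span: prints 0x180010000 */
        printf("%#llx\n", (unsigned long long)pgoff_to_phys(0x40010, 4096));
        return 0;
}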
@@ -395,30 +402,40 @@ static void dev_dax_kill(void *dev_dax)
 int dev_dax_probe(struct dev_dax *dev_dax)
 {
         struct dax_device *dax_dev = dev_dax->dax_dev;
-        struct range *range = &dev_dax->range;
         struct device *dev = &dev_dax->dev;
         struct dev_pagemap *pgmap;
         struct inode *inode;
         struct cdev *cdev;
         void *addr;
-        int rc;
-
-        /* 1:1 map region resource range to device-dax instance range */
-        if (!devm_request_mem_region(dev, range->start, range_len(range),
-                                dev_name(dev))) {
-                dev_warn(dev, "could not reserve range: %#llx - %#llx\n",
-                                range->start, range->end);
-                return -EBUSY;
-        }
+        int rc, i;
 
         pgmap = dev_dax->pgmap;
+        if (dev_WARN_ONCE(dev, pgmap && dev_dax->nr_range > 1,
+                        "static pgmap / multi-range device conflict\n"))
+                return -EINVAL;
+
         if (!pgmap) {
-                pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
+                pgmap = devm_kzalloc(dev, sizeof(*pgmap) + sizeof(struct range)
+                                * (dev_dax->nr_range - 1), GFP_KERNEL);
                 if (!pgmap)
                         return -ENOMEM;
-                pgmap->range = *range;
-                pgmap->nr_range = 1;
+                pgmap->nr_range = dev_dax->nr_range;
+        }
+
+        for (i = 0; i < dev_dax->nr_range; i++) {
+                struct range *range = &dev_dax->ranges[i].range;
+
+                if (!devm_request_mem_region(dev, range->start,
+                                        range_len(range), dev_name(dev))) {
+                        dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve range\n",
+                                        i, range->start, range->end);
+                        return -EBUSY;
+                }
+                /* don't update the range for static pgmap */
+                if (!dev_dax->pgmap)
+                        pgmap->ranges[i] = *range;
         }
+
         pgmap->type = MEMORY_DEVICE_GENERIC;
         addr = devm_memremap_pages(dev, pgmap);
         if (IS_ERR(addr))
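
One detail worth noting in the probe path: the sizeof(*pgmap) +
sizeof(struct range) * (dev_dax->nr_range - 1) sizing relies on struct
dev_pagemap (as of this series) embedding its first range, with any
additional ranges tail-allocated behind it.  A sketch of that idiom with a
stand-in struct (names here are hypothetical):

#include <stdlib.h>

struct range { unsigned long long start, end; };

/* stand-in for dev_pagemap's embedded-first-range layout */
struct pagemap_like {
        int nr_range;
        struct range ranges[1];         /* ranges[0] lives in the struct */
};

/* assumes nr_range >= 1, mirroring the kernel sizing above */
static struct pagemap_like *pagemap_like_alloc(int nr_range)
{
        struct pagemap_like *p = calloc(1, sizeof(*p) +
                        sizeof(struct range) * (nr_range - 1));

        if (p)
                p->nr_range = nr_range;
        return p;
}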
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -19,24 +19,28 @@ static const char *kmem_name;
 /* Set if any memory will remain added when the driver will be unloaded. */
 static bool any_hotremove_failed;
 
-static struct range dax_kmem_range(struct dev_dax *dev_dax)
+static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
 {
-        struct range range;
+        struct dev_dax_range *dax_range = &dev_dax->ranges[i];
+        struct range *range = &dax_range->range;
 
         /* memory-block align the hotplug range */
-        range.start = ALIGN(dev_dax->range.start, memory_block_size_bytes());
-        range.end = ALIGN_DOWN(dev_dax->range.end + 1, memory_block_size_bytes()) - 1;
-        return range;
+        r->start = ALIGN(range->start, memory_block_size_bytes());
+        r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;
+        if (r->start >= r->end) {
+                r->start = range->start;
+                r->end = range->end;
+                return -ENOSPC;
+        }
+        return 0;
 }
 
 static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
 {
-        struct range range = dax_kmem_range(dev_dax);
         struct device *dev = &dev_dax->dev;
-        struct resource *res;
+        int i, mapped = 0;
         char *res_name;
         int numa_node;
-        int rc;
 
         /*
          * Ensure good NUMA information for the persistent memory.
@@ -55,31 +59,58 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
         if (!res_name)
                 return -ENOMEM;
 
-        /* Region is permanently reserved if hotremove fails. */
-        res = request_mem_region(range.start, range_len(&range), res_name);
-        if (!res) {
-                dev_warn(dev, "could not reserve region [%#llx-%#llx]\n", range.start, range.end);
-                kfree(res_name);
-                return -EBUSY;
-        }
-
-        /*
-         * Set flags appropriate for System RAM. Leave ..._BUSY clear
-         * so that add_memory() can add a child resource. Do not
-         * inherit flags from the parent since it may set new flags
-         * unknown to us that will break add_memory() below.
-         */
-        res->flags = IORESOURCE_SYSTEM_RAM;
-
-        /*
-         * Ensure that future kexec'd kernels will not treat this as RAM
-         * automatically.
-         */
-        rc = add_memory_driver_managed(numa_node, range.start, range_len(&range), kmem_name);
-        if (rc) {
-                release_mem_region(range.start, range_len(&range));
-                kfree(res_name);
-                return rc;
+        for (i = 0; i < dev_dax->nr_range; i++) {
+                struct resource *res;
+                struct range range;
+                int rc;
+
+                rc = dax_kmem_range(dev_dax, i, &range);
+                if (rc) {
+                        dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
+                                        i, range.start, range.end);
+                        continue;
+                }
+
+                /* Region is permanently reserved if hotremove fails. */
+                res = request_mem_region(range.start, range_len(&range), res_name);
+                if (!res) {
+                        dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve region\n",
+                                        i, range.start, range.end);
+                        /*
+                         * Once some memory has been onlined we can't
+                         * assume that it can be un-onlined safely.
+                         */
+                        if (mapped)
+                                continue;
+                        kfree(res_name);
+                        return -EBUSY;
+                }
+
+                /*
+                 * Set flags appropriate for System RAM. Leave ..._BUSY clear
+                 * so that add_memory() can add a child resource. Do not
+                 * inherit flags from the parent since it may set new flags
+                 * unknown to us that will break add_memory() below.
+                 */
+                res->flags = IORESOURCE_SYSTEM_RAM;
+
+                /*
+                 * Ensure that future kexec'd kernels will not treat
+                 * this as RAM automatically.
+                 */
+                rc = add_memory_driver_managed(numa_node, range.start,
+                                range_len(&range), kmem_name);
+                if (rc) {
+                        dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
+                                        i, range.start, range.end);
+                        release_mem_region(range.start, range_len(&range));
+                        if (mapped)
+                                continue;
+                        kfree(res_name);
+                        return rc;
+                }
+                mapped++;
         }
 
         dev_set_drvdata(dev, res_name);
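
The alignment rules used by dax_kmem_range() can be checked in isolation.
Assuming a 128MB memory block size (typical on x86_64; the kernel queries
memory_block_size_bytes()), a hypothetical 200MB mapping that starts 64MB
into a block shrinks to the single whole block it covers, and a mapping
smaller than one aligned block would collapse and be skipped with -ENOSPC:

#include <stdio.h>
#include <stdint.h>

#define MB              (1ULL << 20)
#define BLOCK           (128 * MB)      /* assumed memory block size */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
        /* hypothetical mapping: 200MB starting 64MB into a block */
        uint64_t start = 4096 * MB + 64 * MB;
        uint64_t end = start + 200 * MB - 1;
        uint64_t s = ALIGN_UP(start, BLOCK);
        uint64_t e = ALIGN_DOWN(end + 1, BLOCK) - 1;

        if (s >= e)
                printf("too small after alignment (-ENOSPC)\n");
        else
                printf("hotplug range: %#llx-%#llx\n",
                                (unsigned long long)s,
                                (unsigned long long)e);
        return 0;
}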
@@ -90,9 +121,8 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
 {
-        int rc;
+        int i, success = 0;
         struct device *dev = &dev_dax->dev;
-        struct range range = dax_kmem_range(dev_dax);
         const char *res_name = dev_get_drvdata(dev);
 
         /*
@@ -101,17 +131,31 @@ static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
          * there is no way to hotremove this memory until reboot because device
          * unbind will succeed even if we return failure.
          */
-        rc = remove_memory(dev_dax->target_node, range.start, range_len(&range));
-        if (rc) {
+        for (i = 0; i < dev_dax->nr_range; i++) {
+                struct range range;
+                int rc;
+
+                rc = dax_kmem_range(dev_dax, i, &range);
+                if (rc)
+                        continue;
+
+                rc = remove_memory(dev_dax->target_node, range.start,
+                                range_len(&range));
+                if (rc == 0) {
+                        release_mem_region(range.start, range_len(&range));
+                        success++;
+                        continue;
+                }
                 any_hotremove_failed = true;
-                dev_err(dev, "%#llx-%#llx cannot be hotremoved until the next reboot\n",
-                                range.start, range.end);
-                return rc;
+                dev_err(dev,
+                        "mapping%d: %#llx-%#llx cannot be hotremoved until the next reboot\n",
+                                i, range.start, range.end);
         }
 
-        /* Release and free dax resources */
-        release_mem_region(range.start, range_len(&range));
-        kfree(res_name);
+        if (success >= dev_dax->nr_range) {
+                kfree(res_name);
+                dev_set_drvdata(dev, NULL);
+        }
 
         return 0;
 }
--- a/tools/testing/nvdimm/dax-dev.c
+++ b/tools/testing/nvdimm/dax-dev.c
@@ -9,11 +9,18 @@
 phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
                 unsigned long size)
 {
-        struct range *range = &dev_dax->range;
-        phys_addr_t addr;
-
-        addr = pgoff * PAGE_SIZE + range->start;
-        if (addr >= range->start && addr <= range->end) {
+        int i;
+
+        for (i = 0; i < dev_dax->nr_range; i++) {
+                struct dev_dax_range *dax_range = &dev_dax->ranges[i];
+                struct range *range = &dax_range->range;
+                unsigned long long pgoff_end;
+                phys_addr_t addr;
+
+                pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
+                if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
+                        continue;
+                addr = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
                 if (addr + size - 1 <= range->end) {
                         if (get_nfit_res(addr)) {
                                 struct page *page;
@@ -23,9 +30,10 @@ phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 
                                 page = vmalloc_to_page((void *)addr);
                                 return PFN_PHYS(page_to_pfn(page));
-                        } else
-                                return addr;
+                        }
+                        return addr;
                 }
+                break;
         }
         return -1;
 }