Commit 68beef57 authored by Linus Torvalds

Merge tag 'for-linus-5.9-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:
 "A small series for fixing a problem with Xen PVH guests when running
  as backends (e.g. as dom0).

  Mapping other guests' memory is now working via ZONE_DEVICE, thus not
  requiring to abuse the memory hotplug functionality for that purpose"

* tag 'for-linus-5.9-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: add helpers to allocate unpopulated memory
  memremap: rename MEMORY_DEVICE_DEVDAX to MEMORY_DEVICE_GENERIC
  xen/balloon: add header guard
parents dd9fb9bb 9e2369c0
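For reference, a minimal caller-side sketch (not part of the commit) of the new helpers this series introduces: xen_alloc_unpopulated_pages()/xen_free_unpopulated_pages() hand out struct pages backed by unpopulated ZONE_DEVICE ranges instead of ballooned-out RAM. The pattern mirrors the gnttab_alloc_pages()/gnttab_free_pages() changes in the diff below; the map/unmap step is only a hypothetical placeholder.

#include <linux/errno.h>
#include <linux/mm.h>
#include <xen/xen.h>

/* Sketch: obtain backing pages for mapping another guest's memory. */
static int backend_get_backing_pages(unsigned int nr_pages, struct page **pages)
{
        int ret;

        ret = xen_alloc_unpopulated_pages(nr_pages, pages);
        if (ret < 0)
                return ret;

        /* ...map grant references or foreign frames onto 'pages' here... */
        return 0;
}

/* Sketch: release the backing pages once the mappings are torn down. */
static void backend_put_backing_pages(unsigned int nr_pages, struct page **pages)
{
        xen_free_unpopulated_pages(nr_pages, pages);
}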
@@ -429,7 +429,7 @@ int dev_dax_probe(struct device *dev)
 		return -EBUSY;
 	}
 
-	dev_dax->pgmap.type = MEMORY_DEVICE_DEVDAX;
+	dev_dax->pgmap.type = MEMORY_DEVICE_GENERIC;
 	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
@@ -18,6 +18,7 @@
 #include <drm/drm_probe_helper.h>
 
 #include <xen/balloon.h>
+#include <xen/xen.h>
 
 #include "xen_drm_front.h"
 #include "xen_drm_front_gem.h"
@@ -99,8 +100,8 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
 		 * allocate ballooned pages which will be used to map
 		 * grant references provided by the backend
 		 */
-		ret = alloc_xenballooned_pages(xen_obj->num_pages,
-					       xen_obj->pages);
+		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
+						  xen_obj->pages);
 		if (ret < 0) {
 			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
 				  xen_obj->num_pages, ret);
@@ -152,8 +153,8 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
 	} else {
 		if (xen_obj->pages) {
 			if (xen_obj->be_alloc) {
-				free_xenballooned_pages(xen_obj->num_pages,
-							xen_obj->pages);
+				xen_free_unpopulated_pages(xen_obj->num_pages,
+							   xen_obj->pages);
 				gem_free_pages_array(xen_obj);
 			} else {
 				drm_gem_put_pages(&xen_obj->base,
@@ -325,4 +325,14 @@ config XEN_HAVE_VPMU
 config XEN_FRONT_PGDIR_SHBUF
 	tristate
 
+config XEN_UNPOPULATED_ALLOC
+	bool "Use unpopulated memory ranges for guest mappings"
+	depends on X86 && ZONE_DEVICE
+	default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
+	help
+	  Use unpopulated memory ranges in order to create mappings for guest
+	  memory regions, including grant maps and foreign pages. This avoids
+	  having to balloon out RAM regions in order to obtain physical memory
+	  space to create such mappings.
+
 endmenu
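With the default above, the option is switched on automatically for backend-style configurations (XEN_BACKEND, XEN_GNTDEV or XEN_DOM0). When it is disabled, the <xen/xen.h> hunk near the end of this diff maps xen_alloc_unpopulated_pages()/xen_free_unpopulated_pages() back to alloc_xenballooned_pages()/free_xenballooned_pages(), so the callers converted by this series keep working without the ZONE_DEVICE-based allocator.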
@@ -41,3 +41,4 @@ xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o
 xen-gntalloc-y				:= gntalloc.o
 xen-privcmd-y				:= privcmd.o privcmd-buf.o
 obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF)	+= xen-front-pgdir-shbuf.o
+obj-$(CONFIG_XEN_UNPOPULATED_ALLOC)	+= unpopulated-alloc.o
@@ -653,7 +653,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 }
 EXPORT_SYMBOL(free_xenballooned_pages);
 
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
 static void __init balloon_add_region(unsigned long start_pfn,
 				      unsigned long pages)
 {
@@ -707,7 +707,7 @@ static int __init balloon_init(void)
 	register_sysctl_table(xen_root);
 #endif
 
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
 	{
 		int i;
 
@@ -801,7 +801,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
 {
 	int ret;
 
-	ret = alloc_xenballooned_pages(nr_pages, pages);
+	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
 	if (ret < 0)
 		return ret;
 
@@ -836,7 +836,7 @@ EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
 void gnttab_free_pages(int nr_pages, struct page **pages)
 {
 	gnttab_pages_clear_private(nr_pages, pages);
-	free_xenballooned_pages(nr_pages, pages);
+	xen_free_unpopulated_pages(nr_pages, pages);
 }
 EXPORT_SYMBOL_GPL(gnttab_free_pages);
@@ -424,7 +424,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
 	if (pages == NULL)
 		return -ENOMEM;
 
-	rc = alloc_xenballooned_pages(numpgs, pages);
+	rc = xen_alloc_unpopulated_pages(numpgs, pages);
 	if (rc != 0) {
 		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
 			numpgs, rc);
@@ -895,7 +895,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
 	if (rc == 0)
-		free_xenballooned_pages(numpgs, pages);
+		xen_free_unpopulated_pages(numpgs, pages);
 	else
 		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
 			numpgs, rc);
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>

static DEFINE_MUTEX(list_lock);
static LIST_HEAD(page_list);
static unsigned int list_count;

static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret;

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->res.name = "Xen scratch";
	pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, &pgmap->res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		kfree(pgmap);
		return ret;
	}

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				release_resource(&pgmap->res);
				kfree(pgmap);
				return -ENOMEM;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		release_resource(&pgmap->res);
		kfree(pgmap);
		return PTR_ERR(vaddr);
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		list_add(&pg->lru, &page_list);
		list_count++;
	}

	return 0;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = list_first_entry_or_null(&page_list,
							   struct page,
							   lru);

		BUG_ON(!pg);
		list_del(&pg->lru);
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				for (j = 0; j <= i; j++) {
					list_add(&pages[j]->lru, &page_list);
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		list_add(&pages[i]->lru, &page_list);
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			list_add(&pg->lru, &page_list);
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif
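A note on granularity (an observation, not part of the commit): fill_list() rounds each request up to PAGES_PER_SECTION, so the pool only ever grows by whole memory sections; on x86-64 with 4 KiB pages and the default 128 MiB section size that is 32768 pages per expansion. Pages returned through xen_free_unpopulated_pages() stay on the list for reuse rather than being handed back to the resource.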
@@ -621,7 +621,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
 	bool leaked = false;
 	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
 
-	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
+	err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
 	if (err)
 		goto out_err;
 
@@ -662,7 +662,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
 		 addr, nr_pages);
 
 out_free_ballooned_pages:
 	if (!leaked)
-		free_xenballooned_pages(nr_pages, node->hvm.pages);
+		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
 out_err:
 	return err;
 }
@@ -858,7 +858,7 @@ static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
			       info.addrs);
 	if (!rv) {
 		vunmap(vaddr);
-		free_xenballooned_pages(nr_pages, node->hvm.pages);
+		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
 	}
 	else
 		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
@@ -232,7 +232,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
 		kfree(pages);
 		return -ENOMEM;
 	}
-	rc = alloc_xenballooned_pages(nr_pages, pages);
+	rc = xen_alloc_unpopulated_pages(nr_pages, pages);
 	if (rc) {
 		pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
 			nr_pages, rc);
@@ -249,7 +249,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
 	if (!vaddr) {
 		pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
 			nr_pages, rc);
-		free_xenballooned_pages(nr_pages, pages);
+		xen_free_unpopulated_pages(nr_pages, pages);
 		kfree(pages);
 		kfree(pfns);
 		return -ENOMEM;
@@ -46,11 +46,10 @@ struct vmem_altmap {
  * wakeup is used to coordinate physical address space management (ex:
  * fs truncate/hole punch) vs pinned pages (ex: device dma).
  *
- * MEMORY_DEVICE_DEVDAX:
+ * MEMORY_DEVICE_GENERIC:
  * Host memory that has similar access semantics as System RAM i.e. DMA
- * coherent and supports page pinning. In contrast to
- * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax
- * character device.
+ * coherent and supports page pinning. This is for example used by DAX devices
+ * that expose memory using a character device.
 *
 * MEMORY_DEVICE_PCI_P2PDMA:
 * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
@@ -60,7 +59,7 @@ enum memory_type {
 	/* 0 is reserved to catch uninitialized type fields */
 	MEMORY_DEVICE_PRIVATE = 1,
 	MEMORY_DEVICE_FS_DAX,
-	MEMORY_DEVICE_DEVDAX,
+	MEMORY_DEVICE_GENERIC,
 	MEMORY_DEVICE_PCI_P2PDMA,
 };
@@ -2,6 +2,8 @@
 /******************************************************************************
  * Xen balloon functionality
  */
+#ifndef _XEN_BALLOON_H
+#define _XEN_BALLOON_H
 
 #define RETRY_UNLIMITED	0
@@ -34,3 +36,5 @@ static inline void xen_balloon_init(void)
 {
 }
 #endif
+
+#endif	/* _XEN_BALLOON_H */
@@ -52,4 +52,13 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 extern u64 xen_saved_max_mem_size;
 #endif
 
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+#else
+#define xen_alloc_unpopulated_pages alloc_xenballooned_pages
+#define xen_free_unpopulated_pages free_xenballooned_pages
+#include <xen/balloon.h>
+#endif
+
 #endif	/* _XEN_XEN_H */
@@ -216,7 +216,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 			return ERR_PTR(-EINVAL);
 		}
 		break;
-	case MEMORY_DEVICE_DEVDAX:
+	case MEMORY_DEVICE_GENERIC:
 		need_devmap_managed = false;
 		break;
 	case MEMORY_DEVICE_PCI_P2PDMA: