Commit 2a11c76e authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio/vhost fixes from Michael Tsirkin:

 - Fixes in the iommu and balloon devices.

 - Disable the meta-data optimization for now - I hope we can get it
   fixed shortly, but there's no point in making users suffer crashes
   while we are working on that.

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vhost: disable metadata prefetch optimization
  iommu/virtio: Update to most recent specification
  balloon: fix up comments
  mm/balloon_compaction: avoid duplicate page removal
parents 45aee68e 73f628ec
drivers/iommu/virtio-iommu.c:

@@ -2,7 +2,7 @@
 /*
  * Virtio driver for the paravirtualized IOMMU
  *
- * Copyright (C) 2018 Arm Limited
+ * Copyright (C) 2019 Arm Limited
  */

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -47,7 +47,10 @@ struct viommu_dev {
 	/* Device configuration */
 	struct iommu_domain_geometry	geometry;
 	u64				pgsize_bitmap;
-	u8				domain_bits;
+	u32				first_domain;
+	u32				last_domain;
+	/* Supported MAP flags */
+	u32				map_flags;
 	u32				probe_size;
 };
@@ -62,6 +65,7 @@ struct viommu_domain {
 	struct viommu_dev		*viommu;
 	struct mutex			mutex; /* protects viommu pointer */
 	unsigned int			id;
+	u32				map_flags;

 	spinlock_t			mappings_lock;
 	struct rb_root_cached		mappings;
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len)
 		return -ENOENT;
 	case VIRTIO_IOMMU_S_FAULT:
 		return -EFAULT;
+	case VIRTIO_IOMMU_S_NOMEM:
+		return -ENOMEM;
 	case VIRTIO_IOMMU_S_IOERR:
 	case VIRTIO_IOMMU_S_DEVERR:
 	default:
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
 {
 	int ret;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
-	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
-				  (1U << viommu->domain_bits) - 1;

 	vdomain->viommu		= viommu;
+	vdomain->map_flags	= viommu->map_flags;
 	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
 	domain->geometry	= viommu->geometry;

-	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
+	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+			      viommu->last_domain, GFP_KERNEL);
 	if (ret >= 0)
 		vdomain->id = (unsigned int)ret;
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 		       phys_addr_t paddr, size_t size, int prot)
 {
 	int ret;
-	int flags;
+	u32 flags;
 	struct virtio_iommu_req_map map;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
 		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

+	if (flags & ~vdomain->map_flags)
+		return -EINVAL;
+
 	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
 	if (ret)
 		return ret;
@@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev)
 		goto err_free_vqs;
 	}

-	viommu->domain_bits = 32;
+	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+	viommu->last_domain = ~0U;

 	/* Optional features */
 	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
@@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev)
 			     struct virtio_iommu_config, input_range.end,
 			     &input_end);

-	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
-			     struct virtio_iommu_config, domain_bits,
-			     &viommu->domain_bits);
+	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+			     struct virtio_iommu_config, domain_range.start,
+			     &viommu->first_domain);
+
+	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+			     struct virtio_iommu_config, domain_range.end,
+			     &viommu->last_domain);

 	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
 			     struct virtio_iommu_config, probe_size,
@@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev)
 		.force_aperture = true,
 	};

+	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
+		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+
 	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

 	virtio_device_ready(vdev);
@@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev)
 static unsigned int features[] = {
 	VIRTIO_IOMMU_F_MAP_UNMAP,
-	VIRTIO_IOMMU_F_DOMAIN_BITS,
 	VIRTIO_IOMMU_F_INPUT_RANGE,
+	VIRTIO_IOMMU_F_DOMAIN_RANGE,
 	VIRTIO_IOMMU_F_PROBE,
+	VIRTIO_IOMMU_F_MMIO,
 };

 static struct virtio_device_id id_table[] = {
...
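The functional core of the driver change above is the new "flags & ~vdomain->map_flags" test in viommu_map(): a map request is now refused with -EINVAL when it asks for a permission the device never advertised, for example an MMIO mapping on a device that did not offer VIRTIO_IOMMU_F_MMIO. The self-contained sketch below only illustrates that flag-masking logic; the macro values mirror the updated UAPI header, but the helper name and the surrounding program are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Values mirroring VIRTIO_IOMMU_MAP_F_* in the updated UAPI header. */
#define MAP_F_READ	(1u << 0)
#define MAP_F_WRITE	(1u << 1)
#define MAP_F_MMIO	(1u << 2)

/* Hypothetical stand-in for the new check in viommu_map(). */
static int check_map_flags(uint32_t requested, uint32_t supported)
{
	/* Any requested bit outside the advertised set -> -EINVAL (-22). */
	return (requested & ~supported) ? -22 : 0;
}

int main(void)
{
	/* A device that negotiated only read/write mappings, no F_MMIO. */
	uint32_t dev_flags = MAP_F_READ | MAP_F_WRITE;

	printf("RW map:   %d\n", check_map_flags(MAP_F_READ | MAP_F_WRITE, dev_flags));
	printf("MMIO map: %d\n", check_map_flags(MAP_F_MMIO, dev_flags));
	return 0;
}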
drivers/vhost/vhost.h:

@@ -96,7 +96,7 @@ struct vhost_uaddr {
 };

 #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 1
+#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #else
 #define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #endif
...
include/uapi/linux/virtio_iommu.h:

 /* SPDX-License-Identifier: BSD-3-Clause */
 /*
- * Virtio-iommu definition v0.9
+ * Virtio-iommu definition v0.12
  *
- * Copyright (C) 2018 Arm Ltd.
+ * Copyright (C) 2019 Arm Ltd.
  */
 #ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
 #define _UAPI_LINUX_VIRTIO_IOMMU_H
@@ -11,26 +11,31 @@
 /* Feature bits */
 #define VIRTIO_IOMMU_F_INPUT_RANGE		0
-#define VIRTIO_IOMMU_F_DOMAIN_BITS		1
+#define VIRTIO_IOMMU_F_DOMAIN_RANGE		1
 #define VIRTIO_IOMMU_F_MAP_UNMAP		2
 #define VIRTIO_IOMMU_F_BYPASS			3
 #define VIRTIO_IOMMU_F_PROBE			4
+#define VIRTIO_IOMMU_F_MMIO			5

-struct virtio_iommu_range {
-	__u64					start;
-	__u64					end;
+struct virtio_iommu_range_64 {
+	__le64					start;
+	__le64					end;
+};
+
+struct virtio_iommu_range_32 {
+	__le32					start;
+	__le32					end;
 };

 struct virtio_iommu_config {
 	/* Supported page sizes */
-	__u64					page_size_mask;
+	__le64					page_size_mask;
 	/* Supported IOVA range */
-	struct virtio_iommu_range		input_range;
+	struct virtio_iommu_range_64		input_range;
 	/* Max domain ID size */
-	__u8					domain_bits;
-	__u8					padding[3];
+	struct virtio_iommu_range_32		domain_range;
 	/* Probe buffer size */
-	__u32					probe_size;
+	__le32					probe_size;
 };

 /* Request types */
@@ -49,6 +54,7 @@ struct virtio_iommu_config {
 #define VIRTIO_IOMMU_S_RANGE			0x05
 #define VIRTIO_IOMMU_S_NOENT			0x06
 #define VIRTIO_IOMMU_S_FAULT			0x07
+#define VIRTIO_IOMMU_S_NOMEM			0x08

 struct virtio_iommu_req_head {
 	__u8					type;
@@ -78,12 +84,10 @@ struct virtio_iommu_req_detach {
 #define VIRTIO_IOMMU_MAP_F_READ			(1 << 0)
 #define VIRTIO_IOMMU_MAP_F_WRITE		(1 << 1)
-#define VIRTIO_IOMMU_MAP_F_EXEC			(1 << 2)
-#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 3)
+#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 2)

 #define VIRTIO_IOMMU_MAP_F_MASK			(VIRTIO_IOMMU_MAP_F_READ |	\
 						 VIRTIO_IOMMU_MAP_F_WRITE |	\
-						 VIRTIO_IOMMU_MAP_F_EXEC |	\
 						 VIRTIO_IOMMU_MAP_F_MMIO)

 struct virtio_iommu_req_map {
...
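The header change above replaces the power-of-two domain_bits limit with an explicit, inclusive domain_range, so a device can both reserve low domain IDs for its own use and advertise a count that is not a power of two; the config fields also become explicitly little-endian (__le32/__le64). A small self-contained sketch of the arithmetic difference between the two encodings, using made-up example values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Old encoding: VIRTIO_IOMMU_F_DOMAIN_BITS, IDs always start at 0. */
	uint8_t domain_bits = 8;
	uint64_t old_ids = 1ULL << domain_bits;		/* IDs 0 .. 255 */

	/* New encoding: VIRTIO_IOMMU_F_DOMAIN_RANGE, inclusive [start, end]. */
	uint32_t first = 16, last = 271;		/* example device values */
	uint64_t new_ids = (uint64_t)last - first + 1;	/* 256 IDs, none below 16 */

	printf("domain_bits = 8  -> %llu usable IDs starting at 0\n",
	       (unsigned long long)old_ids);
	printf("range [16, 271]  -> %llu usable IDs, 0-15 reserved by the device\n",
	       (unsigned long long)new_ids);
	return 0;
}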
mm/balloon_compaction.c:

@@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
 	 * memory corruption is possible and we should stop execution.
 	 */
 	BUG_ON(!trylock_page(page));
-	list_del(&page->lru);
 	balloon_page_insert(b_dev_info, page);
 	unlock_page(page);
 	__count_vm_event(BALLOON_INFLATE);
@@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
  * @b_dev_info: balloon device descriptor where we will insert a new page to
  * @pages: pages to enqueue - allocated using balloon_page_alloc.
  *
- * Driver must call it to properly enqueue a balloon pages before definitively
- * removing it from the guest system.
+ * Driver must call this function to properly enqueue balloon pages before
+ * definitively removing them from the guest system.
  *
  * Return: number of pages that were enqueued.
  */
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
 	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 	list_for_each_entry_safe(page, tmp, pages, lru) {
+		list_del(&page->lru);
 		balloon_page_enqueue_one(b_dev_info, page);
 		n_pages++;
 	}
@@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
  * @n_req_pages: number of requested pages.
  *
  * Driver must call this function to properly de-allocate a previous enlisted
- * balloon pages before definetively releasing it back to the guest system.
+ * balloon pages before definitively releasing it back to the guest system.
  * This function tries to remove @n_req_pages from the ballooned pages and
  * return them to the caller in the @pages list.
  *
- * Note that this function may fail to dequeue some pages temporarily empty due
- * to compaction isolated pages.
+ * Note that this function may fail to dequeue some pages even if the balloon
+ * isn't empty - since the page list can be temporarily empty due to compaction
+ * of isolated pages.
  *
  * Return: number of pages that were added to the @pages list.
  */
@@ -114,10 +115,11 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
  * balloon_page_alloc - allocates a new page for insertion into the balloon
  *			page list.
  *
- * Driver must call it to properly allocate a new enlisted balloon page.
- * Driver must call balloon_page_enqueue before definitively removing it from
- * the guest system. This function returns the page address for the recently
- * allocated page or NULL in the case we fail to allocate a new page this turn.
+ * Driver must call this function to properly allocate a new balloon page.
+ * Driver must call balloon_page_enqueue before definitively removing the page
+ * from the guest system.
+ *
+ * Return: struct page for the allocated page or NULL on allocation failure.
  */
 struct page *balloon_page_alloc(void)
 {
@@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void)
 EXPORT_SYMBOL_GPL(balloon_page_alloc);

 /*
- * balloon_page_enqueue - allocates a new page and inserts it into the balloon
- *			  page list.
- * @b_dev_info: balloon device descriptor where we will insert a new page to
+ * balloon_page_enqueue - inserts a new page into the balloon page list.
+ *
+ * @b_dev_info: balloon device descriptor where we will insert a new page
  * @page: new page to enqueue - allocated using balloon_page_alloc.
  *
- * Driver must call it to properly enqueue a new allocated balloon page
- * before definitively removing it from the guest system.
- * This function returns the page address for the recently enqueued page or
- * NULL in the case we fail to allocate a new page this turn.
+ * Drivers must call this function to properly enqueue a new allocated balloon
+ * page before definitively removing the page from the guest system.
+ *
+ * Drivers must not call balloon_page_enqueue on pages that have been pushed to
+ * a list with balloon_page_push before removing them with balloon_page_pop. To
+ * enqueue a list of pages, use balloon_page_list_enqueue instead.
  */
 void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
 			  struct page *page)
@@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
 /*
  * balloon_page_dequeue - removes a page from balloon's page list and returns
- *			  the its address to allow the driver release the page.
+ *			  its address to allow the driver to release the page.
  * @b_dev_info: balloon device decriptor where we will grab a page from.
  *
- * Driver must call it to properly de-allocate a previous enlisted balloon page
- * before definetively releasing it back to the guest system.
- * This function returns the page address for the recently dequeued page or
- * NULL in the case we find balloon's page list temporarily empty due to
- * compaction isolated pages.
+ * Driver must call this function to properly dequeue a previously enqueued page
+ * before definitively releasing it back to the guest system.
+ *
+ * Caller must perform its own accounting to ensure that this
+ * function is called only if some pages are actually enqueued.
+ *
+ * Note that this function may fail to dequeue some pages even if there are
+ * some enqueued pages - since the page list can be temporarily empty due to
+ * the compaction of isolated pages.
+ *
+ * TODO: remove the caller accounting requirements, and allow caller to wait
+ * until all pages can be dequeued.
+ *
+ * Return: struct page for the dequeued page, or NULL if no page was dequeued.
  */
 struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 {
@@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 	if (n_pages != 1) {
 		/*
 		 * If we are unable to dequeue a balloon page because the page
-		 * list is empty and there is no isolated pages, then something
+		 * list is empty and there are no isolated pages, then something
 		 * went out of track and some balloon pages are lost.
-		 * BUG() here, otherwise the balloon driver may get stuck into
+		 * BUG() here, otherwise the balloon driver may get stuck in
 		 * an infinite loop while attempting to release all its pages.
 		 */
 		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping,
 	/*
 	 * We can not easily support the no copy case here so ignore it as it
-	 * is unlikely to be use with ballon pages. See include/linux/hmm.h for
-	 * user of the MIGRATE_SYNC_NO_COPY mode.
+	 * is unlikely to be used with balloon pages. See include/linux/hmm.h
+	 * for a user of the MIGRATE_SYNC_NO_COPY mode.
 	 */
 	if (mode == MIGRATE_SYNC_NO_COPY)
 		return -EINVAL;
...
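The mm/balloon_compaction change moves the list_del() out of balloon_page_enqueue_one() and into the list-walking caller, so the single-page balloon_page_enqueue() path no longer deletes a page from a list it was never on, and the reworked comments spell out the intended staging pattern. Below is a rough, hypothetical driver-side sketch of that pattern, assuming only the helpers declared in include/linux/balloon_compaction.h; the function name and structure are illustrative, not taken from any real driver.

#include <linux/balloon_compaction.h>
#include <linux/list.h>

/*
 * Hypothetical inflate helper, sketching the usage spelled out by the updated
 * comments: pages are staged on a driver-local list with balloon_page_push()
 * and then handed to the balloon in bulk with balloon_page_list_enqueue(),
 * which does the list_del() itself. Not a real driver function.
 */
static size_t example_inflate(struct balloon_dev_info *b_dev_info,
			      unsigned int nr_pages)
{
	LIST_HEAD(pages);
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = balloon_page_alloc();

		if (!page)
			break;
		/* Stage on a local list; the page is not yet owned by the balloon. */
		balloon_page_push(&pages, page);
	}

	/*
	 * Hand the whole list over. Alternatively, each page could be removed
	 * with balloon_page_pop() and passed to balloon_page_enqueue() one at
	 * a time - but never enqueued while still sitting on @pages.
	 */
	return balloon_page_list_enqueue(b_dev_info, &pages);
}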