Commit abafbc55 authored by Alex Williamson

vfio-pci: Invalidate mmaps and block MMIO access on disabled memory

Accessing the disabled memory space of a PCI device would typically
result in a master abort response on conventional PCI, or an
unsupported request on PCI express.  The user would generally see
these as a -1 response for the read return data and the write would be
silently discarded, possibly with an uncorrected, non-fatal AER error
triggered on the host.  Some systems however take it upon themselves
to bring down the entire system when they see something that might
indicate a loss of data, such as this discarded write to a disabled
memory space.

To avoid this, we want to try to block the user from accessing memory
spaces while they're disabled.  We start with a semaphore around the
memory enable bit, where writers modify the memory enable state and
must be serialized, while readers make use of the memory region and
can access in parallel.  Writers include both direct manipulation via
the command register, as well as any reset path where the internal
mechanics of the reset may both explicitly and implicitly disable
memory access, and manipulation of the MSI-X configuration, where the
MSI-X vector table resides in MMIO space of the device.  Readers
include the read and write file ops to access the vfio device fd
offsets as well as memory mapped access.  In the latter case, we make
use of our new vma list support to zap, or invalidate, those memory
mappings in order to force them to be faulted back in on access.

Our semaphore usage will stall user access to MMIO spaces across
internal operations like reset, but the user might experience new
behavior when trying to access the MMIO space while disabled via the
PCI command register.  Access via read or write while disabled will
return -EIO and access via memory maps will result in a SIGBUS.  This
is expected to be compatible with known use cases and potentially
provides better error handling capabilities than present in the
hardware, while avoiding the more readily accessible and severe
platform error responses that might otherwise occur.

Fixes: CVE-2020-12888
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 11c4cd07
This diff is collapsed.
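The collapsed portion of the diff (presumably drivers/vfio/pci/vfio_pci.c) contains the definitions of the helpers that are declared in the private-header hunk further down. For orientation, here is a simplified sketch of what the commit description implies those helpers do. It is not the patch code: the bodies, the vfio_pci_zap_mmap_vmas() stand-in, and the include are assumptions based on the description above and the declarations below.

/*
 * Illustrative sketch only, not the collapsed patch code.  Helper names
 * come from the declarations added to the private header below; the
 * bodies are assumptions based on the commit description.
 */
#include <linux/pci.h>
#include <linux/rwsem.h>

#include "vfio_pci_private.h"

/* Writer for user-visible disables: invalidate mmaps, then take the lock. */
void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
{
        /*
         * Zap every vma tracked on vdev->vma_list so that later user
         * accesses fault and re-check the memory enable state
         * (hypothetical helper standing in for the real vma_list walk,
         * presumably done under vma_lock).
         */
        vfio_pci_zap_mmap_vmas(vdev);

        down_write(&vdev->memory_lock);
}

/*
 * Writers for internal MMIO users (MSI/MSI-X setup, FLR paths): hold the
 * lock and force the physical memory enable bit on while the kernel
 * touches the device, then restore whatever the user had programmed.
 */
u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
{
        u16 cmd;

        down_write(&vdev->memory_lock);
        pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
        if (!(cmd & PCI_COMMAND_MEMORY))
                pci_write_config_word(vdev->pdev, PCI_COMMAND,
                                      cmd | PCI_COMMAND_MEMORY);

        return cmd;
}

void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
{
        pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
        up_write(&vdev->memory_lock);
}

These are the calls wrapped around pci_alloc_irq_vectors(), request_irq()/free_irq(), pci_free_irq_vectors() and the FLR paths in the hunks that follow.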
@@ -395,6 +395,14 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
         *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
 }
 
+/* Caller should hold memory_lock semaphore */
+bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+{
+        u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+
+        return cmd & PCI_COMMAND_MEMORY;
+}
+
 /*
  * Restore the *real* BARs after we detect a FLR or backdoor reset.
  * (backdoor = some device specific technique that we didn't catch)
@@ -556,13 +564,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
 
                 new_cmd = le32_to_cpu(val);
 
+                phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+                virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+                new_io = !!(new_cmd & PCI_COMMAND_IO);
+
                 phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
                 virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
                 new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
 
-                phys_io = !!(phys_cmd & PCI_COMMAND_IO);
-                virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
-                new_io = !!(new_cmd & PCI_COMMAND_IO);
+                if (!new_mem)
+                        vfio_pci_zap_and_down_write_memory_lock(vdev);
+                else
+                        down_write(&vdev->memory_lock);
 
                 /*
                  * If the user is writing mem/io enable (new_mem/io) and we
@@ -579,8 +592,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
         }
 
         count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
-        if (count < 0)
+        if (count < 0) {
+                if (offset == PCI_COMMAND)
+                        up_write(&vdev->memory_lock);
                 return count;
+        }
 
         /*
          * Save current memory/io enable bits in vconfig to allow for
@@ -591,6 +607,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
 
                 *virt_cmd &= cpu_to_le16(~mask);
                 *virt_cmd |= cpu_to_le16(new_cmd & mask);
+
+                up_write(&vdev->memory_lock);
         }
 
         /* Emulate INTx disable */
@@ -828,8 +846,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
                                                  pos - offset + PCI_EXP_DEVCAP,
                                                  &cap);
 
-                if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+                if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
+                        vfio_pci_zap_and_down_write_memory_lock(vdev);
                         pci_try_reset_function(vdev->pdev);
+                        up_write(&vdev->memory_lock);
+                }
         }
 
         /*
@@ -907,8 +928,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
                                                 pos - offset + PCI_AF_CAP,
                                                 &cap);
 
-                if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+                if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
+                        vfio_pci_zap_and_down_write_memory_lock(vdev);
                         pci_try_reset_function(vdev->pdev);
+                        up_write(&vdev->memory_lock);
+                }
         }
 
         return count;
...
@@ -249,6 +249,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
         struct pci_dev *pdev = vdev->pdev;
         unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
         int ret;
+        u16 cmd;
 
         if (!is_irq_none(vdev))
                 return -EINVAL;
@@ -258,13 +259,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
                 return -ENOMEM;
 
         /* return the number of supported vectors if we can't get all: */
+        cmd = vfio_pci_memory_lock_and_enable(vdev);
         ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
         if (ret < nvec) {
                 if (ret > 0)
                         pci_free_irq_vectors(pdev);
+                vfio_pci_memory_unlock_and_restore(vdev, cmd);
                 kfree(vdev->ctx);
                 return ret;
         }
+        vfio_pci_memory_unlock_and_restore(vdev, cmd);
 
         vdev->num_ctx = nvec;
         vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
@@ -287,6 +291,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
         struct pci_dev *pdev = vdev->pdev;
         struct eventfd_ctx *trigger;
         int irq, ret;
+        u16 cmd;
 
         if (vector < 0 || vector >= vdev->num_ctx)
                 return -EINVAL;
@@ -295,7 +300,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
 
         if (vdev->ctx[vector].trigger) {
                 irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+
+                cmd = vfio_pci_memory_lock_and_enable(vdev);
                 free_irq(irq, vdev->ctx[vector].trigger);
+                vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
                 kfree(vdev->ctx[vector].name);
                 eventfd_ctx_put(vdev->ctx[vector].trigger);
                 vdev->ctx[vector].trigger = NULL;
@@ -323,6 +332,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
          * such a reset it would be unsuccessful. To avoid this, restore the
          * cached value of the message prior to enabling.
          */
+        cmd = vfio_pci_memory_lock_and_enable(vdev);
         if (msix) {
                 struct msi_msg msg;
 
@@ -332,6 +342,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
 
         ret = request_irq(irq, vfio_msihandler, 0,
                           vdev->ctx[vector].name, trigger);
+        vfio_pci_memory_unlock_and_restore(vdev, cmd);
         if (ret) {
                 kfree(vdev->ctx[vector].name);
                 eventfd_ctx_put(trigger);
@@ -376,6 +387,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
 {
         struct pci_dev *pdev = vdev->pdev;
         int i;
+        u16 cmd;
 
         for (i = 0; i < vdev->num_ctx; i++) {
                 vfio_virqfd_disable(&vdev->ctx[i].unmask);
@@ -384,7 +396,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
 
         vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
 
+        cmd = vfio_pci_memory_lock_and_enable(vdev);
         pci_free_irq_vectors(pdev);
+        vfio_pci_memory_unlock_and_restore(vdev, cmd);
 
         /*
          * Both disable paths above use pci_intx_for_msi() to clear DisINTx
...
@@ -139,6 +139,7 @@ struct vfio_pci_device {
         struct notifier_block   nb;
         struct mutex            vma_lock;
         struct list_head        vma_list;
+        struct rw_semaphore     memory_lock;
 };
 
 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -181,6 +182,13 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
 extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
                                     pci_power_t state);
 
+extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
+extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
+                                                    *vdev);
+extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
+extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
+                                               u16 cmd);
+
 #ifdef CONFIG_VFIO_PCI_IGD
 extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
 #else
...
@@ -162,6 +162,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
         size_t x_start = 0, x_end = 0;
         resource_size_t end;
         void __iomem *io;
+        struct resource *res = &vdev->pdev->resource[bar];
         ssize_t done;
 
         if (pci_resource_start(pdev, bar))
@@ -177,6 +178,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 
         count = min(count, (size_t)(end - pos));
 
+        if (res->flags & IORESOURCE_MEM) {
+                down_read(&vdev->memory_lock);
+                if (!__vfio_pci_memory_enabled(vdev)) {
+                        up_read(&vdev->memory_lock);
+                        return -EIO;
+                }
+        }
+
         if (bar == PCI_ROM_RESOURCE) {
                 /*
                  * The ROM can fill less space than the BAR, so we start the
@@ -184,13 +193,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
                  * filling large ROM BARs much faster.
                  */
                 io = pci_map_rom(pdev, &x_start);
-                if (!io)
-                        return -ENOMEM;
+                if (!io) {
+                        done = -ENOMEM;
+                        goto out;
+                }
                 x_end = end;
         } else {
                 int ret = vfio_pci_setup_barmap(vdev, bar);
-                if (ret)
-                        return ret;
+                if (ret) {
+                        done = ret;
+                        goto out;
+                }
 
                 io = vdev->barmap[bar];
         }
@@ -207,6 +220,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
         if (bar == PCI_ROM_RESOURCE)
                 pci_unmap_rom(pdev, io);
 
+out:
+        if (res->flags & IORESOURCE_MEM)
+                up_read(&vdev->memory_lock);
 
         return done;
 }
...
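To make the user-visible behavior described in the commit message concrete, a minimal userspace sketch follows. It is not part of the patch; it assumes device_fd, bar0_offset and bar0_size were already obtained through the usual VFIO container/group setup and VFIO_DEVICE_GET_REGION_INFO on VFIO_PCI_BAR0_REGION_INDEX, and that BAR0 is a memory BAR whose decode has been cleared through the virtual PCI command register.

/* Hypothetical userspace probe of the new error behavior (not patch code). */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

static void probe_disabled_bar(int device_fd, off_t bar0_offset, size_t bar0_size)
{
        uint32_t val;
        void *map;

        /* read()/write() on the region now fail instead of returning -1 data. */
        if (pread(device_fd, &val, sizeof(val), bar0_offset) < 0 &&
            errno == EIO)
                printf("BAR0 read blocked: memory decode is disabled\n");

        /* mmap() itself still succeeds; the fault path enforces the check. */
        map = mmap(NULL, bar0_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   device_fd, bar0_offset);
        if (map != MAP_FAILED) {
                /*
                 * Dereferencing the mapping here would raise SIGBUS while
                 * memory space is disabled; it faults back in once the user
                 * re-enables memory decode.
                 */
                munmap(map, bar0_size);
        }
}

Previously such a read would have returned all-ones data and a write would have been silently dropped; with this change the user gets an explicit error it can handle.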