Commit fbbea389 authored by Linus Torvalds

Merge tag 'vfio-v4.9-rc1' of git://github.com/awilliam/linux-vfio

Pull VFIO updates from Alex Williamson:
 - comment fixes (Wei Jiangang)
 - static symbols (Baoyou Xie)
 - FLR virtualization (Alex Williamson)
 - catching INTx enabling after MSI/X teardown (Alex Williamson)
 - update to pci_alloc_irq_vectors helpers (Christoph Hellwig)

* tag 'vfio-v4.9-rc1' of git://github.com/awilliam/linux-vfio:
  vfio_pci: use pci_alloc_irq_vectors
  vfio-pci: Disable INTx after MSI/X teardown
  vfio-pci: Virtualize PCIe & AF FLR
  vfio: platform: mark symbols static where possible
  vfio/pci: Fix typos in comments
parents c23112e0 61771468
drivers/vfio/pci/vfio_pci_config.c
@@ -70,7 +70,7 @@ static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = {
 /*
  * Lengths of PCIe/PCI-X Extended Config Capabilities
- * 0: Removed or masked from the user visible capabilty list
+ * 0: Removed or masked from the user visible capability list
  * FF: Variable length
  */
 static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
@@ -355,7 +355,7 @@ static int alloc_perm_bits(struct perm_bits *perm, int size)
      * ignore whether a read/write exceeds the defined capability
      * structure.  We can do this because:
      * - Standard config space is already dword aligned
-     * - Capabilities are all dword alinged (bits 0:1 of next reserved)
+     * - Capabilities are all dword aligned (bits 0:1 of next reserved)
      * - Express capabilities defined as dword aligned
      */
     size = round_up(size, 4);
@@ -804,6 +804,40 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
     return 0;
 }
 
+static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
+                                 int count, struct perm_bits *perm,
+                                 int offset, __le32 val)
+{
+    __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
+                              offset + PCI_EXP_DEVCTL);
+
+    count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+    if (count < 0)
+        return count;
+
+    /*
+     * The FLR bit is virtualized, if set and the device supports PCIe
+     * FLR, issue a reset_function.  Regardless, clear the bit, the spec
+     * requires it to be always read as zero.  NB, reset_function might
+     * not use a PCIe FLR, we don't have that level of granularity.
+     */
+    if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
+        u32 cap;
+        int ret;
+
+        *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
+
+        ret = pci_user_read_config_dword(vdev->pdev,
+                                         pos - offset + PCI_EXP_DEVCAP,
+                                         &cap);
+
+        if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+            pci_try_reset_function(vdev->pdev);
+    }
+
+    return count;
+}
+
 /* Permissions for PCI Express capability */
 static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
 {
@@ -811,26 +845,64 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
     if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
         return -ENOMEM;
 
+    perm->writefn = vfio_exp_config_write;
+
     p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
 
     /*
-     * Allow writes to device control fields (includes FLR!)
-     * but not to devctl_phantom which could confuse IOMMU
-     * or to the ARI bit in devctl2 which is set at probe time
+     * Allow writes to device control fields, except devctl_phantom,
+     * which could confuse IOMMU, and the ARI bit in devctl2, which
+     * is set at probe time.  FLR gets virtualized via our writefn.
      */
-    p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM);
+    p_setw(perm, PCI_EXP_DEVCTL,
+           PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM);
     p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
     return 0;
 }
 
+static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
+                                int count, struct perm_bits *perm,
+                                int offset, __le32 val)
+{
+    u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
+
+    count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+    if (count < 0)
+        return count;
+
+    /*
+     * The FLR bit is virtualized, if set and the device supports AF
+     * FLR, issue a reset_function.  Regardless, clear the bit, the spec
+     * requires it to be always read as zero.  NB, reset_function might
+     * not use an AF FLR, we don't have that level of granularity.
+     */
+    if (*ctrl & PCI_AF_CTRL_FLR) {
+        u8 cap;
+        int ret;
+
+        *ctrl &= ~PCI_AF_CTRL_FLR;
+
+        ret = pci_user_read_config_byte(vdev->pdev,
+                                        pos - offset + PCI_AF_CAP,
+                                        &cap);
+
+        if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+            pci_try_reset_function(vdev->pdev);
+    }
+
+    return count;
+}
+
 /* Permissions for Advanced Function capability */
 static int __init init_pci_cap_af_perm(struct perm_bits *perm)
 {
     if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
         return -ENOMEM;
 
+    perm->writefn = vfio_af_config_write;
+
     p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
-    p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR);
+    p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
     return 0;
 }
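To illustrate what the newly virtualized FLR bit enables, here is a hypothetical userspace sketch (not part of this commit, and not QEMU code): it sets the Initiate Function Level Reset bit through the vfio-pci config space region, which the new write handlers trap and turn into a function reset, with the bit always reading back as zero. The demo_request_flr() name, the already-open vfio device fd, and the exp_cap capability offset are assumptions; a little-endian host is assumed for brevity.

#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pci_regs.h>
#include <linux/vfio.h>

/* device: open vfio device fd; exp_cap: offset of the PCI Express capability */
static int demo_request_flr(int device, uint16_t exp_cap)
{
    struct vfio_region_info info = {
        .argsz = sizeof(info),
        .index = VFIO_PCI_CONFIG_REGION_INDEX,
    };
    uint16_t devctl;

    /* find where config space lives within the device fd */
    if (ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &info))
        return -1;

    /* read Device Control and set Initiate Function Level Reset */
    if (pread(device, &devctl, sizeof(devctl),
              info.offset + exp_cap + PCI_EXP_DEVCTL) != sizeof(devctl))
        return -1;

    devctl |= PCI_EXP_DEVCTL_BCR_FLR;

    /*
     * With this series, vfio-pci traps the write, issues a function reset
     * if the device advertises FLR, and the bit reads back as zero.
     */
    if (pwrite(device, &devctl, sizeof(devctl),
               info.offset + exp_cap + PCI_EXP_DEVCTL) != sizeof(devctl))
        return -1;

    return 0;
}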
@@ -1516,10 +1588,10 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
  * space which tracks reads and writes to bits that we emulate for
  * the user.  Initial values filled from device.
  *
- * Using shared stuct perm_bits between all vfio-pci devices saves
+ * Using shared struct perm_bits between all vfio-pci devices saves
  * us from allocating cfg_size buffers for virt and write for every
  * device.  We could remove vconfig and allocate individual buffers
- * for each area requring emulated bits, but the array of pointers
+ * for each area requiring emulated bits, but the array of pointers
  * would be comparable in size (at least for standard config space).
  */
 int vfio_config_init(struct vfio_pci_device *vdev)
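As background for the virt/write masks set up by p_setb()/p_setw() above, here is a simplified sketch of the idea, not the exact vfio_default_config_write() implementation; demo_apply_write_masks() is an invented name. User-writable virtualized bits land only in the emulated shadow copy, user-writable non-virtualized bits are forwarded to the device, and everything else is dropped.

#include <stdint.h>

static void demo_apply_write_masks(uint32_t *shadow, uint32_t *hw,
                                   uint32_t virt, uint32_t write, uint32_t val)
{
    /* virtualized + writable: merge into the emulated (shadow) config */
    *shadow = (*shadow & ~(write & virt)) | (val & write & virt);

    /* non-virtualized + writable: merge into the value sent to hardware */
    *hw = (*hw & ~(write & ~virt)) | (val & write & ~virt);
}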
drivers/vfio/pci/vfio_pci_intrs.c
@@ -250,6 +250,7 @@ static irqreturn_t vfio_msihandler(int irq, void *arg)
 static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
 {
     struct pci_dev *pdev = vdev->pdev;
+    unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
     int ret;
 
     if (!is_irq_none(vdev))
@@ -259,35 +260,13 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
     if (!vdev->ctx)
         return -ENOMEM;
 
-    if (msix) {
-        int i;
-
-        vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
-                             GFP_KERNEL);
-        if (!vdev->msix) {
-            kfree(vdev->ctx);
-            return -ENOMEM;
-        }
-
-        for (i = 0; i < nvec; i++)
-            vdev->msix[i].entry = i;
-
-        ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
-        if (ret < nvec) {
-            if (ret > 0)
-                pci_disable_msix(pdev);
-            kfree(vdev->msix);
-            kfree(vdev->ctx);
-            return ret;
-        }
-    } else {
-        ret = pci_enable_msi_range(pdev, 1, nvec);
-        if (ret < nvec) {
-            if (ret > 0)
-                pci_disable_msi(pdev);
-            kfree(vdev->ctx);
-            return ret;
-        }
-    }
+    /* return the number of supported vectors if we can't get all: */
+    ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
+    if (ret < nvec) {
+        if (ret > 0)
+            pci_free_irq_vectors(pdev);
+        kfree(vdev->ctx);
+        return ret;
+    }
 
     vdev->num_ctx = nvec;
@@ -315,7 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
     if (vector < 0 || vector >= vdev->num_ctx)
         return -EINVAL;
 
-    irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
+    irq = pci_irq_vector(pdev, vector);
 
     if (vdev->ctx[vector].trigger) {
         free_irq(irq, vdev->ctx[vector].trigger);
@@ -408,11 +387,14 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
     vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
 
-    if (msix) {
-        pci_disable_msix(vdev->pdev);
-        kfree(vdev->msix);
-    } else
-        pci_disable_msi(pdev);
+    pci_free_irq_vectors(pdev);
+
+    /*
+     * Both disable paths above use pci_intx_for_msi() to clear DisINTx
+     * via their shutdown paths.  Restore for NoINTx devices.
+     */
+    if (vdev->nointx)
+        pci_intx(pdev, 0);
 
     vdev->irq_type = VFIO_PCI_NUM_IRQS;
     vdev->num_ctx = 0;
drivers/vfio/pci/vfio_pci_private.h
@@ -72,7 +72,6 @@ struct vfio_pci_device {
     struct perm_bits *msi_perm;
     spinlock_t irqlock;
     struct mutex igate;
-    struct msix_entry *msix;
     struct vfio_pci_irq_ctx *ctx;
     int num_ctx;
     int irq_type;
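The conversion above uses the generic MSI/MSI-X allocation helpers. Below is a minimal, hypothetical driver sketch (not vfio code; demo_handler() and demo_setup_irqs() are invented names) of the pci_alloc_irq_vectors() pattern: one call covers both MSI-X and MSI, pci_irq_vector() maps a vector index to a Linux IRQ number, and pci_free_irq_vectors() tears everything down, which is why the driver-maintained msix_entry array can be dropped from struct vfio_pci_device above.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t demo_handler(int irq, void *arg)
{
    return IRQ_HANDLED;
}

static int demo_setup_irqs(struct pci_dev *pdev, int nvec)
{
    int i, ret;

    /* try MSI-X first, fall back to MSI; returns the number of vectors */
    ret = pci_alloc_irq_vectors(pdev, 1, nvec,
                                PCI_IRQ_MSIX | PCI_IRQ_MSI);
    if (ret < 0)
        return ret;

    for (i = 0; i < ret; i++) {
        int err = request_irq(pci_irq_vector(pdev, i), demo_handler,
                              0, "demo", pdev);
        if (err) {
            while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), pdev);
            pci_free_irq_vectors(pdev);
            return err;
        }
    }

    return ret; /* number of vectors actually in use */
}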
drivers/vfio/platform/reset/vfio_platform_amdxgbe.c
@@ -56,7 +56,7 @@ static void xmdio_write(void *ioaddr, unsigned int mmd,
     iowrite32(value, ioaddr + ((mmd_address & 0xff) << 2));
 }
 
-int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
+static int vfio_platform_amdxgbe_reset(struct vfio_platform_device *vdev)
 {
     struct vfio_platform_region *xgmac_regs = &vdev->regions[0];
     struct vfio_platform_region *xpcs_regs = &vdev->regions[1];
drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c
@@ -57,7 +57,7 @@ static inline void xgmac_mac_disable(void __iomem *ioaddr)
     writel(value, ioaddr + XGMAC_CONTROL);
 }
 
-int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev)
+static int vfio_platform_calxedaxgmac_reset(struct vfio_platform_device *vdev)
 {
     struct vfio_platform_region *reg = &vdev->regions[0];
drivers/vfio/platform/vfio_platform_common.c
@@ -73,7 +73,7 @@ static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
     return WARN_ON(!vdev->acpihid) ? -EINVAL : 0;
 }
 
-int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
+static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
                                   const char **extra_dbg)
 {
 #ifdef CONFIG_ACPI
@@ -95,7 +95,7 @@ int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
 #endif
 }
 
-bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
+static bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
 {
 #ifdef CONFIG_ACPI
     struct device *dev = vdev->device;
@@ -637,7 +637,7 @@ static const struct vfio_device_ops vfio_platform_ops = {
     .mmap = vfio_platform_mmap,
 };
 
-int vfio_platform_of_probe(struct vfio_platform_device *vdev,
+static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
                            struct device *dev)
 {
     int ret;