Commit 5e49e0be authored by Linus Torvalds

Merge tag 'iommu-fixes-v4.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:
 "These fixes are all for the AMD IOMMU driver:

   - A regression with HSA caused by the conversion of the driver to
     default domains.  The fixes make sure that an HSA device can still
     be attached to an IOMMUv2 domain and that these domains also allow
     non-IOMMUv2 capable devices.

   - Fix iommu=pt mode, which did not work because the dma_ops were set
     to nommu_ops, breaking devices that can only do 32-bit DMA.

   - Fix an issue with non-PCI devices not working because no dma_ops
     are set for them.  This issue was only discovered recently, as new
     AMD x86 platforms also have non-PCI devices"
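
As a rough illustration of where these fixes leave dma_ops selection, here
is a standalone C sketch (not kernel code: the helper names are invented
for this example, and the returned strings stand in for the kernel's
swiotlb bounce-buffer path, nommu_dma_ops, and amd_iommu_dma_ops):

	#include <stdbool.h>
	#include <stdio.h>

	/* Sketch only: models the fallback logic described above. */
	static const char *global_dma_ops(bool iommu_pass_through)
	{
		/* iommu=pt keeps swiotlb enabled so devices limited to
		 * 32-bit DMA still get bounce buffering. */
		if (iommu_pass_through)
			return "swiotlb";

		/* Otherwise set a global fallback for devices this
		 * driver never handles, e.g. non-PCI devices. */
		return "nommu_dma_ops";
	}

	static const char *device_dma_ops(bool identity_mapped,
					  bool iommu_pass_through)
	{
		/* Identity-mapped devices fall through to the global
		 * ops; translated devices use the IOMMU mapping path. */
		if (identity_mapped)
			return global_dma_ops(iommu_pass_through);
		return "amd_iommu_dma_ops";
	}

	int main(void)
	{
		printf("iommu=pt, identity device:   %s\n",
		       device_dma_ops(true, true));
		printf("default, translated device:  %s\n",
		       device_dma_ops(false, false));
		printf("default, global fallback:    %s\n",
		       device_dma_ops(true, false));
		return 0;
	}

The second branch of global_dma_ops is the point of the third fix above:
without a global fallback, a device the driver never claims ends up with
no dma_ops at all.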

* tag 'iommu-fixes-v4.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Allow non-ATS devices in IOMMUv2 domains
  iommu/amd: Set global dma_ops if swiotlb is disabled
  iommu/amd: Use swiotlb in passthrough mode
  iommu/amd: Allow non-IOMMUv2 devices in IOMMUv2 domains
  iommu/amd: Use iommu core for passthrough mode
  iommu/amd: Use iommu_attach_group()
parents 23ff9e19 1c1cc454
drivers/iommu/amd_iommu.c
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
  */
-static struct protection_domain *pt_domain;
-
 static const struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
         struct protection_domain *domain; /* Domain the device is bound to */
         u16 devid;                        /* PCI Device ID */
         bool iommu_v2;                    /* Device can make use of IOMMUv2 */
-        bool passthrough;                 /* Default for device is pt_domain */
+        bool passthrough;                 /* Device is identity mapped */
         struct {
                 bool enabled;
                 int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
-static int alloc_passthrough_domain(void);
 static int protection_domain_init(struct protection_domain *domain);
 
 /****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
         dev_data = get_dev_data(dev);
 
         if (domain->flags & PD_IOMMUV2_MASK) {
-                if (!dev_data->iommu_v2 || !dev_data->passthrough)
+                if (!dev_data->passthrough)
                         return -EINVAL;
 
-                if (pdev_iommuv2_enable(pdev) != 0)
-                        return -EINVAL;
-
-                dev_data->ats.enabled = true;
-                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-                dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+                if (dev_data->iommu_v2) {
+                        if (pdev_iommuv2_enable(pdev) != 0)
+                                return -EINVAL;
+
+                        dev_data->ats.enabled = true;
+                        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+                        dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+                }
         } else if (amd_iommu_iotlb_sup &&
                    pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
                 dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
         do_detach(head);
 
         spin_unlock_irqrestore(&domain->lock, flags);
-
-        /*
-         * If we run in passthrough mode the device must be assigned to the
-         * passthrough domain if it is detached from any other domain.
-         * Make sure we can deassign from the pt_domain itself.
-         */
-        if (dev_data->passthrough &&
-            (dev_data->domain == NULL && domain != pt_domain))
-                __attach_device(dev_data, pt_domain);
 }
 
 /*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
         __detach_device(dev_data);
         write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-        if (domain->flags & PD_IOMMUV2_MASK)
+        if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
                 pdev_iommuv2_disable(to_pci_dev(dev));
         else if (dev_data->ats.enabled)
                 pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
         BUG_ON(!dev_data);
 
-        if (dev_data->iommu_v2)
+        if (iommu_pass_through || dev_data->iommu_v2)
                 iommu_request_dm_for_dev(dev);
 
         /* Domains are initialized for this device - have a look what we ended up with */
         domain = iommu_get_domain_for_dev(dev);
-        if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+        if (domain->type == IOMMU_DOMAIN_IDENTITY)
                 dev_data->passthrough = true;
-                dev->archdata.dma_ops = &nommu_dma_ops;
-        } else {
+        else
                 dev->archdata.dma_ops = &amd_iommu_dma_ops;
-        }
 
 out:
         iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
+        swiotlb        = iommu_pass_through ? 1 : 0;
         iommu_detected = 1;
-        swiotlb = 0;
+
+        /*
+         * In case we don't initialize SWIOTLB (actually the common case
+         * when AMD IOMMU is enabled), make sure there are global
+         * dma_ops set as a fall-back for devices not handled by this
+         * driver (for example non-PCI devices).
+         */
+        if (!swiotlb)
+                dma_ops = &nommu_dma_ops;
 
         amd_iommu_stats_init();
@@ -2947,21 +2944,6 @@ static struct protection_domain *protection_domain_alloc(void)
 
         return NULL;
 }
 
-static int alloc_passthrough_domain(void)
-{
-        if (pt_domain != NULL)
-                return 0;
-
-        /* allocate passthrough domain */
-        pt_domain = protection_domain_alloc();
-        if (!pt_domain)
-                return -ENOMEM;
-
-        pt_domain->mode = PAGE_MODE_NONE;
-
-        return 0;
-}
-
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
         struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
  *
  *****************************************************************************/
 
-int __init amd_iommu_init_passthrough(void)
-{
-        struct iommu_dev_data *dev_data;
-        struct pci_dev *dev = NULL;
-        int ret;
-
-        ret = alloc_passthrough_domain();
-        if (ret)
-                return ret;
-
-        for_each_pci_dev(dev) {
-                if (!check_device(&dev->dev))
-                        continue;
-
-                dev_data = get_dev_data(&dev->dev);
-                dev_data->passthrough = true;
-
-                attach_device(&dev->dev, pt_domain);
-        }
-
-        amd_iommu_stats_init();
-
-        pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
-
-        return 0;
-}
-
 /* IOMMUv2 specific functions */
 int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
 {
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
                 struct amd_iommu *iommu;
                 int qdep;
 
-                BUG_ON(!dev_data->ats.enabled);
+                /*
+                 * There might be non-IOMMUv2 capable devices in an IOMMUv2
+                 * domain.
+                 */
+                if (!dev_data->ats.enabled)
+                        continue;
 
                 qdep  = dev_data->ats.qdep;
                 iommu = amd_iommu_rlookup_table[dev_data->devid];
drivers/iommu/amd_iommu_init.c
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
         return true;
 }
 
-static int amd_iommu_init_dma(void)
-{
-        if (iommu_pass_through)
-                return amd_iommu_init_passthrough();
-        else
-                return amd_iommu_init_dma_ops();
-}
-
 /****************************************************************************
  *
  * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
                 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
                 break;
         case IOMMU_INTERRUPTS_EN:
-                ret = amd_iommu_init_dma();
+                ret = amd_iommu_init_dma_ops();
                 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
                 break;
         case IOMMU_DMA_OPS:
drivers/iommu/amd_iommu_v2.c
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
 
 static void free_device_state(struct device_state *dev_state)
 {
+        struct iommu_group *group;
+
         /*
          * First detach device from domain - No more PRI requests will arrive
          * from that device after it is unbound from the IOMMUv2 domain.
         */
-        iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+        group = iommu_group_get(&dev_state->pdev->dev);
+        if (WARN_ON(!group))
+                return;
+
+        iommu_detach_group(dev_state->domain, group);
+
+        iommu_group_put(group);
 
         /* Everything is down now, free the IOMMUv2 domain */
         iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
 int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 {
         struct device_state *dev_state;
+        struct iommu_group *group;
         unsigned long flags;
         int ret, tmp;
         u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
         if (ret)
                 goto out_free_domain;
 
-        ret = iommu_attach_device(dev_state->domain, &pdev->dev);
-        if (ret != 0)
+        group = iommu_group_get(&pdev->dev);
+        if (!group)
                 goto out_free_domain;
 
+        ret = iommu_attach_group(dev_state->domain, group);
+        if (ret != 0)
+                goto out_drop_group;
+
+        iommu_group_put(group);
+
         spin_lock_irqsave(&state_lock, flags);
 
         if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
         return 0;
 
+out_drop_group:
+        iommu_group_put(group);
+
 out_free_domain:
         iommu_domain_free(dev_state->domain);