Commit fa954e68 authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Delegate the dma domain to upper layer

This allows the generic iommu layer to allocate a DMA domain and
attach it to a device through the iommu APIs. With all domain types
delegated to the upper layer, we can remove the internal flag that
was used to distinguish domains managed internally from those
managed externally.
Signed-off-by: James Sewart <jamessewart@arista.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 4de354ec
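
For context, the upper-layer path this patch enables is the iommu core's default-domain setup: when a device is added to a group, the core allocates a DMA-type domain through the driver's ->domain_alloc() callback and attaches it through the regular attach path. The sketch below condenses that flow; the helper name iommu_setup_default_domain() is invented here for illustration, and upstream the logic is spread across iommu_group_get_for_dev() and __iommu_attach_device() in drivers/iommu/iommu.c, with error handling and group sharing omitted.

    /*
     * Condensed sketch of the iommu core's default-domain setup, not the
     * verbatim upstream code. The helper name is hypothetical; upstream,
     * this logic lives in iommu_group_get_for_dev() and
     * __iommu_attach_device() in drivers/iommu/iommu.c.
     */
    static int iommu_setup_default_domain(struct iommu_group *group,
                                          struct device *dev)
    {
            struct iommu_domain *dom;

            /* Ask the driver for a DMA API domain; with this patch,
             * intel-iommu's ->domain_alloc() no longer refuses this type. */
            dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
            if (!dom)
                    return -ENOMEM;

            group->default_domain = dom;
            group->domain = dom;

            /* Attach through the same ->attach_dev() path that unmanaged
             * (e.g. VFIO) domains already use. */
            return __iommu_attach_device(dom, dev);
    }
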
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
@@ -302,14 +302,8 @@ static inline void context_clear_entry(struct context_entry *context)
 static struct dmar_domain *si_domain;
 static int hw_pass_through = 1;
 
-/*
- * Domain represents a virtual machine, more than one devices
- * across iommus may be owned in one domain, e.g. kvm guest.
- */
-#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)
-
 /* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)
+#define DOMAIN_FLAG_STATIC_IDENTITY	BIT(0)
 
 #define for_each_domain_iommu(idx, domain)			\
 	for (idx = 0; idx < g_num_of_iommus; idx++)		\
@@ -540,22 +534,11 @@ static inline void free_devinfo_mem(void *vaddr)
 	kmem_cache_free(iommu_devinfo_cache, vaddr);
 }
 
-static inline int domain_type_is_vm(struct dmar_domain *domain)
-{
-	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
-}
-
 static inline int domain_type_is_si(struct dmar_domain *domain)
 {
 	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
 }
 
-static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
-{
-	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
-				DOMAIN_FLAG_STATIC_IDENTITY);
-}
-
 static inline int domain_pfn_supported(struct dmar_domain *domain,
 				       unsigned long pfn)
 {
@@ -603,7 +586,9 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 	int iommu_id;
 
 	/* si_domain and vm domain should not get here. */
-	BUG_ON(domain_type_is_vm_or_si(domain));
+	if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+		return NULL;
+
 	for_each_domain_iommu(iommu_id, domain)
 		break;
@@ -1651,7 +1636,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 	if (!iommu->domains || !iommu->domain_ids)
 		return;
 
-again:
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
 		struct dmar_domain *domain;
@@ -1665,18 +1649,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 		domain = info->domain;
 
 		__dmar_remove_one_dev_info(info);
-
-		if (!domain_type_is_vm_or_si(domain)) {
-			/*
-			 * The domain_exit() function can't be called under
-			 * device_domain_lock, as it takes this lock itself.
-			 * So release the lock here and re-run the loop
-			 * afterwards.
-			 */
-			spin_unlock_irqrestore(&device_domain_lock, flags);
-			domain_exit(domain);
-			goto again;
-		}
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -2339,7 +2311,7 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			 struct scatterlist *sg, unsigned long phys_pfn,
 			 unsigned long nr_pages, int prot)
 {
-	int ret;
+	int iommu_id, ret;
 	struct intel_iommu *iommu;
 
 	/* Do the real mapping first */
@@ -2347,18 +2319,8 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	if (ret)
 		return ret;
 
-	/* Notify about the new mapping */
-	if (domain_type_is_vm(domain)) {
-		/* VM typed domains can have more than one IOMMUs */
-		int iommu_id;
-
-		for_each_domain_iommu(iommu_id, domain) {
-			iommu = g_iommus[iommu_id];
-			__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-		}
-	} else {
-		/* General domains only have one IOMMU */
-		iommu = domain_get_iommu(domain);
+	for_each_domain_iommu(iommu_id, domain) {
+		iommu = g_iommus[iommu_id];
 		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
 	}
@@ -4599,9 +4561,6 @@ static int device_notifier(struct notifier_block *nb,
 			return 0;
 
 		dmar_remove_one_dev_info(dev);
-		if (!domain_type_is_vm_or_si(domain) &&
-		    list_empty(&domain->devices))
-			domain_exit(domain);
 	} else if (action == BUS_NOTIFY_ADD_DEVICE) {
 		if (iommu_should_identity_map(dev, 1))
 			domain_add_dev_info(si_domain, dev);
@@ -5070,8 +5029,10 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 	struct iommu_domain *domain;
 
 	switch (type) {
+	case IOMMU_DOMAIN_DMA:
+	/* fallthrough */
 	case IOMMU_DOMAIN_UNMANAGED:
-		dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
+		dmar_domain = alloc_domain(0);
 		if (!dmar_domain) {
 			pr_err("Can't allocate dmar_domain\n");
 			return NULL;
@@ -5081,6 +5042,14 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 			domain_exit(dmar_domain);
 			return NULL;
 		}
+
+		if (type == IOMMU_DOMAIN_DMA &&
+		    init_iova_flush_queue(&dmar_domain->iovad,
+					  iommu_flush_iova, iova_entry_free)) {
+			pr_warn("iova flush queue initialization failed\n");
+			intel_iommu_strict = 1;
+		}
+
 		domain_update_iommu_cap(dmar_domain);
 		domain = &dmar_domain->domain;
@@ -5291,13 +5260,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		struct dmar_domain *old_domain;
 
 		old_domain = find_domain(dev);
-		if (old_domain) {
+		if (old_domain)
 			dmar_remove_one_dev_info(dev);
-
-			if (!domain_type_is_vm_or_si(old_domain) &&
-			    list_empty(&old_domain->devices))
-				domain_exit(old_domain);
-		}
 	}
 
 	ret = prepare_domain_attach_device(domain, dev);
...
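
Reassembling the two intel_iommu_domain_alloc() hunks above, the allocation path after this patch reads roughly as follows. Unchanged code between the hunks (domain_init(), the aperture setup, the remaining switch cases) is abridged with "..." comments rather than quoted.

    /* Reassembled from the two intel_iommu_domain_alloc() hunks above;
     * elided unchanged code is marked with "..." comments. */
    static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
    {
            struct dmar_domain *dmar_domain;
            struct iommu_domain *domain;

            switch (type) {
            case IOMMU_DOMAIN_DMA:
            /* fallthrough */
            case IOMMU_DOMAIN_UNMANAGED:
                    /* Both types are plain domains now; no VM flag. */
                    dmar_domain = alloc_domain(0);
                    if (!dmar_domain) {
                            pr_err("Can't allocate dmar_domain\n");
                            return NULL;
                    }
                    /* ... domain_init(); on failure, domain_exit() and
                     * return NULL ... */

                    /* DMA domains get a flush queue for deferred IOTLB
                     * invalidation; if it cannot be set up, fall back to
                     * strict invalidation globally. */
                    if (type == IOMMU_DOMAIN_DMA &&
                        init_iova_flush_queue(&dmar_domain->iovad,
                                              iommu_flush_iova, iova_entry_free)) {
                            pr_warn("iova flush queue initialization failed\n");
                            intel_iommu_strict = 1;
                    }

                    domain_update_iommu_cap(dmar_domain);
                    domain = &dmar_domain->domain;
                    /* ... aperture/geometry setup ... */
                    return domain;
            /* ... IOMMU_DOMAIN_IDENTITY and default cases unchanged ... */
            }
    }

Note that the flush-queue setup is keyed on the domain type: deferred invalidation only makes sense for DMA API domains, so unmanaged (e.g. VFIO) domains keep their existing strict-invalidation behavior.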