Commit fdbe574e authored by Robin Murphy, committed by Will Deacon

iommu/dma: Allow MSI-only cookies

IOMMU domain users such as VFIO face a similar problem to DMA API ops
with regard to mapping MSI messages in systems where the MSI write is
subject to IOMMU translation. With the relevant infrastructure now in
place for managed DMA domains, it's actually really simple for other
users to piggyback off that and reap the benefits without giving up
their own IOVA management, and without having to reinvent their own
wheel in the MSI layer.

Allow such users to opt into automatic MSI remapping by dedicating a
region of their IOVA space to a managed cookie, and extend the mapping
routine to implement a trivial linear allocator in such cases, to avoid
the needless overhead of a full-blown IOVA domain.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Tomasz Nowicki <tomasz.nowicki@caviumnetworks.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Tomasz Nowicki <tomasz.nowicki@caviumnetworks.com>
Tested-by: Bharat Bhushan <bharat.bhushan@nxp.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent a121103c
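For context (not part of the patch itself): a minimal sketch of how an unmanaged-domain user such as VFIO could opt in, following the iommu_get_msi_cookie() kerneldoc added below. The example_enable_msi_remap() helper and the EXAMPLE_MSI_IOVA_* values are hypothetical; the only real requirement is that the caller reserves that region out of its own IOVA management before handing the base address to the DMA layer.

/* Illustrative sketch only: helper name and addresses below are hypothetical. */
#include <linux/iommu.h>
#include <linux/dma-iommu.h>

#define EXAMPLE_MSI_IOVA_BASE	0x8000000UL	/* reserved out of the caller's own IOVA space */
#define EXAMPLE_MSI_IOVA_LEN	0x100000UL	/* room for 256 PAGE_SIZE doorbell mappings (4K pages) */

static int example_enable_msi_remap(struct iommu_domain *domain)
{
	/* MSI-only cookies are for unmanaged (caller-managed) domains */
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	/*
	 * The caller must carve [base, base + len) out of its own IOVA
	 * allocator first; the DMA layer then places MSI doorbell pages
	 * in that region via its trivial linear allocator.
	 */
	return iommu_get_msi_cookie(domain, EXAMPLE_MSI_IOVA_BASE);
}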
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
 	phys_addr_t		phys;
 };
 
+enum iommu_dma_cookie_type {
+	IOMMU_DMA_IOVA_COOKIE,
+	IOMMU_DMA_MSI_COOKIE,
+};
+
 struct iommu_dma_cookie {
-	struct iova_domain	iovad;
-	struct list_head	msi_page_list;
-	spinlock_t		msi_lock;
+	enum iommu_dma_cookie_type	type;
+	union {
+		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+		struct iova_domain	iovad;
+		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+		dma_addr_t		msi_iova;
+	};
+	struct list_head		msi_page_list;
+	spinlock_t			msi_lock;
 };
 
+static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
+{
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return cookie->iovad.granule;
+	return PAGE_SIZE;
+}
+
 static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
 {
-	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+		return &cookie->iovad;
+	return NULL;
+}
+
+static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+{
+	struct iommu_dma_cookie *cookie;
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (cookie) {
+		spin_lock_init(&cookie->msi_lock);
+		INIT_LIST_HEAD(&cookie->msi_page_list);
+		cookie->type = type;
+	}
+	return cookie;
 }
 
 int iommu_dma_init(void)
@@ -61,26 +96,54 @@ int iommu_dma_init(void)
  * callback when domain->type == IOMMU_DOMAIN_DMA.
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+	if (domain->iova_cookie)
+		return -EEXIST;
+
+	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+	if (!domain->iova_cookie)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(iommu_get_dma_cookie);
+
+/**
+ * iommu_get_msi_cookie - Acquire just MSI remapping resources
+ * @domain: IOMMU domain to prepare
+ * @base: Start address of IOVA region for MSI mappings
+ *
+ * Users who manage their own IOVA allocation and do not want DMA API support,
+ * but would still like to take advantage of automatic MSI remapping, can use
+ * this to initialise their own domain appropriately. Users should reserve a
+ * contiguous IOVA region, starting at @base, large enough to accommodate the
+ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 {
 	struct iommu_dma_cookie *cookie;
 
+	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+		return -EINVAL;
+
 	if (domain->iova_cookie)
 		return -EEXIST;
 
-	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
 	if (!cookie)
 		return -ENOMEM;
 
-	spin_lock_init(&cookie->msi_lock);
-	INIT_LIST_HEAD(&cookie->msi_page_list);
+	cookie->msi_iova = base;
 	domain->iova_cookie = cookie;
 	return 0;
 }
-EXPORT_SYMBOL(iommu_get_dma_cookie);
+EXPORT_SYMBOL(iommu_get_msi_cookie);
 
 /**
  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
+ *          iommu_get_msi_cookie()
  *
  * IOMMU drivers should normally call this from their domain_free callback.
  */
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 	if (!cookie)
 		return;
 
-	if (cookie->iovad.granule)
+	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
 		put_iova_domain(&cookie->iovad);
 
 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -137,11 +200,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		u64 size, struct device *dev)
 {
-	struct iova_domain *iovad = cookie_iovad(domain);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long order, base_pfn, end_pfn;
 
-	if (!iovad)
-		return -ENODEV;
+	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+		return -EINVAL;
 
 	/* Use the smallest supported page size for IOVA granularity */
 	order = __ffs(domain->pgsize_bitmap);
@@ -662,11 +726,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iommu_dma_msi_page *msi_page;
-	struct iova_domain *iovad = &cookie->iovad;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	struct iova *iova;
 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+	size_t size = cookie_msi_granule(cookie);
 
-	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+	msi_addr &= ~(phys_addr_t)(size - 1);
 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
 		if (msi_page->phys == msi_addr)
 			return msi_page;
@@ -675,13 +740,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
-	if (!iova)
-		goto out_free_page;
-
-	msi_page->phys = msi_addr;
-	msi_page->iova = iova_dma_addr(iovad, iova);
-	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+	msi_page->phys = msi_addr;
+	if (iovad) {
+		iova = __alloc_iova(domain, size, dma_get_mask(dev));
+		if (!iova)
+			goto out_free_page;
+		msi_page->iova = iova_dma_addr(iovad, iova);
+	} else {
+		msi_page->iova = cookie->msi_iova;
+		cookie->msi_iova += size;
+	}
+
+	if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
 		goto out_free_iova;
 
 	INIT_LIST_HEAD(&msi_page->list);
@@ -689,7 +759,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	return msi_page;
 
 out_free_iova:
-	__free_iova(iovad, iova);
+	if (iovad)
+		__free_iova(iovad, iova);
+	else
+		cookie->msi_iova -= size;
 out_free_page:
 	kfree(msi_page);
 	return NULL;
@@ -730,7 +803,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
 		msg->data = ~0U;
 	} else {
 		msg->address_hi = upper_32_bits(msi_page->iova);
-		msg->address_lo &= iova_mask(&cookie->iovad);
+		msg->address_lo &= cookie_msi_granule(cookie) - 1;
 		msg->address_lo += lower_32_bits(msi_page->iova);
 	}
 }
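To make the address rewrite above concrete, here is a small, self-contained worked example (illustrative only, not kernel code) of the arithmetic iommu_dma_map_msi_msg() performs for an MSI-only cookie: with a PAGE_SIZE granule, the page offset of the doorbell is kept and the rest of the address is replaced by the IOVA page handed out by the linear allocator. The doorbell and IOVA values are made up.

/* Standalone userspace sketch of the MSI address composition. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t granule   = 0x1000;      /* PAGE_SIZE granule of an MSI-only cookie */
	uint64_t doorbell  = 0x12345678;  /* hypothetical physical MSI doorbell address */
	uint64_t page_iova = 0x8000000;   /* IOVA page mapped to (doorbell & ~(granule - 1)) */

	uint32_t address_lo = (uint32_t)doorbell;

	address_lo &= granule - 1;                  /* keep the offset within the doorbell page */
	address_lo += (uint32_t)page_iova;          /* add lower_32_bits() of the mapped IOVA */

	/* Prints address_hi=0x0 address_lo=0x8000678: the device now targets the IOVA */
	printf("address_hi=0x%x address_lo=0x%x\n",
	       (uint32_t)(page_iova >> 32), address_lo);
	return 0;
}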
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
 
 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
 	return -ENODEV;
 }
 
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+	return -ENODEV;
+}
+
 static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
 }