Commit d5c383f2 authored by Robin Murphy's avatar Robin Murphy Committed by Joerg Roedel

iommu/iova: Squash entry_dtor abstraction

All flush queues are driven by iommu-dma now, so there is no need to
abstract entry_dtor or its data any more. Squash the now-canonical
implementation directly into the IOVA code to get it out of the way.
Reviewed-by: default avatarJohn Garry <john.garry@huawei.com>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Signed-off-by: default avatarRobin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/2260f8de00ab5e0f9d2a1cf8978e6ae7cd4f182c.1639753638.git.robin.murphy@arm.com
Signed-off-by: default avatarJoerg Roedel <jroedel@suse.de>
parent d7061627
...@@ -64,18 +64,6 @@ static int __init iommu_dma_forcedac_setup(char *str) ...@@ -64,18 +64,6 @@ static int __init iommu_dma_forcedac_setup(char *str)
} }
early_param("iommu.forcedac", iommu_dma_forcedac_setup); early_param("iommu.forcedac", iommu_dma_forcedac_setup);
/*
 * Flush-queue entry destructor: @data carries a chain of pages linked
 * through page->freelist; return each one to the page allocator.
 */
static void iommu_dma_entry_dtor(unsigned long data)
{
	struct page *page = (struct page *)data;

	while (page) {
		struct page *next = page->freelist;

		free_page((unsigned long)page_address(page));
		page = next;
	}
}
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{ {
if (cookie->type == IOMMU_DMA_IOVA_COOKIE) if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
...@@ -324,8 +312,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain) ...@@ -324,8 +312,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
if (cookie->fq_domain) if (cookie->fq_domain)
return 0; return 0;
ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all, ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all);
iommu_dma_entry_dtor);
if (ret) { if (ret) {
pr_warn("iova flush queue initialization failed\n"); pr_warn("iova flush queue initialization failed\n");
return ret; return ret;
...@@ -471,7 +458,7 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, ...@@ -471,7 +458,7 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
else if (gather && gather->queued) else if (gather && gather->queued)
queue_iova(iovad, iova_pfn(iovad, iova), queue_iova(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad), size >> iova_shift(iovad),
(unsigned long)gather->freelist); gather->freelist);
else else
free_iova_fast(iovad, iova_pfn(iovad, iova), free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad)); size >> iova_shift(iovad));
......
...@@ -91,11 +91,9 @@ static void free_iova_flush_queue(struct iova_domain *iovad) ...@@ -91,11 +91,9 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
iovad->fq = NULL; iovad->fq = NULL;
iovad->flush_cb = NULL; iovad->flush_cb = NULL;
iovad->entry_dtor = NULL;
} }
int init_iova_flush_queue(struct iova_domain *iovad, int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{ {
struct iova_fq __percpu *queue; struct iova_fq __percpu *queue;
int cpu; int cpu;
...@@ -108,7 +106,6 @@ int init_iova_flush_queue(struct iova_domain *iovad, ...@@ -108,7 +106,6 @@ int init_iova_flush_queue(struct iova_domain *iovad,
return -ENOMEM; return -ENOMEM;
iovad->flush_cb = flush_cb; iovad->flush_cb = flush_cb;
iovad->entry_dtor = entry_dtor;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct iova_fq *fq; struct iova_fq *fq;
...@@ -547,6 +544,16 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) ...@@ -547,6 +544,16 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
} }
EXPORT_SYMBOL_GPL(free_iova_fast); EXPORT_SYMBOL_GPL(free_iova_fast);
/* Free every page in the chain linked through page->freelist. */
static void fq_entry_dtor(struct page *freelist)
{
	struct page *next;

	for (; freelist; freelist = next) {
		next = freelist->freelist;
		free_page((unsigned long)page_address(freelist));
	}
}
#define fq_ring_for_each(i, fq) \ #define fq_ring_for_each(i, fq) \
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE) for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
...@@ -579,9 +586,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq) ...@@ -579,9 +586,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
if (fq->entries[idx].counter >= counter) if (fq->entries[idx].counter >= counter)
break; break;
if (iovad->entry_dtor) fq_entry_dtor(fq->entries[idx].freelist);
iovad->entry_dtor(fq->entries[idx].data);
free_iova_fast(iovad, free_iova_fast(iovad,
fq->entries[idx].iova_pfn, fq->entries[idx].iova_pfn,
fq->entries[idx].pages); fq->entries[idx].pages);
...@@ -606,15 +611,12 @@ static void fq_destroy_all_entries(struct iova_domain *iovad) ...@@ -606,15 +611,12 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
* bother to free iovas, just call the entry_dtor on all remaining * bother to free iovas, just call the entry_dtor on all remaining
* entries. * entries.
*/ */
if (!iovad->entry_dtor)
return;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu); struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
int idx; int idx;
fq_ring_for_each(idx, fq) fq_ring_for_each(idx, fq)
iovad->entry_dtor(fq->entries[idx].data); fq_entry_dtor(fq->entries[idx].freelist);
} }
} }
...@@ -639,7 +641,7 @@ static void fq_flush_timeout(struct timer_list *t) ...@@ -639,7 +641,7 @@ static void fq_flush_timeout(struct timer_list *t)
void queue_iova(struct iova_domain *iovad, void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages, unsigned long pfn, unsigned long pages,
unsigned long data) struct page *freelist)
{ {
struct iova_fq *fq; struct iova_fq *fq;
unsigned long flags; unsigned long flags;
...@@ -673,7 +675,7 @@ void queue_iova(struct iova_domain *iovad, ...@@ -673,7 +675,7 @@ void queue_iova(struct iova_domain *iovad,
fq->entries[idx].iova_pfn = pfn; fq->entries[idx].iova_pfn = pfn;
fq->entries[idx].pages = pages; fq->entries[idx].pages = pages;
fq->entries[idx].data = data; fq->entries[idx].freelist = freelist;
fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt); fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
spin_unlock_irqrestore(&fq->lock, flags); spin_unlock_irqrestore(&fq->lock, flags);
......
...@@ -40,9 +40,6 @@ struct iova_domain; ...@@ -40,9 +40,6 @@ struct iova_domain;
/* Call-Back from IOVA code into IOMMU drivers */ /* Call-Back from IOVA code into IOMMU drivers */
typedef void (* iova_flush_cb)(struct iova_domain *domain); typedef void (* iova_flush_cb)(struct iova_domain *domain);
/* Destructor for per-entry data */
typedef void (* iova_entry_dtor)(unsigned long data);
/* Number of entries per Flush Queue */ /* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE 256 #define IOVA_FQ_SIZE 256
...@@ -53,7 +50,7 @@ typedef void (* iova_entry_dtor)(unsigned long data); ...@@ -53,7 +50,7 @@ typedef void (* iova_entry_dtor)(unsigned long data);
struct iova_fq_entry { struct iova_fq_entry {
unsigned long iova_pfn; unsigned long iova_pfn;
unsigned long pages; unsigned long pages;
unsigned long data; struct page *freelist;
u64 counter; /* Flush counter when this entry was added */ u64 counter; /* Flush counter when this entry was added */
}; };
...@@ -88,9 +85,6 @@ struct iova_domain { ...@@ -88,9 +85,6 @@ struct iova_domain {
iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
TLBs */ TLBs */
iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for
iova entry */
struct timer_list fq_timer; /* Timer to regularily empty the struct timer_list fq_timer; /* Timer to regularily empty the
flush-queues */ flush-queues */
atomic_t fq_timer_on; /* 1 when timer is active, 0 atomic_t fq_timer_on; /* 1 when timer is active, 0
...@@ -146,15 +140,14 @@ void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, ...@@ -146,15 +140,14 @@ void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size); unsigned long size);
void queue_iova(struct iova_domain *iovad, void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages, unsigned long pfn, unsigned long pages,
unsigned long data); struct page *freelist);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache); unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi); unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule, void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn); unsigned long start_pfn);
int init_iova_flush_queue(struct iova_domain *iovad, int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb);
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad); void put_iova_domain(struct iova_domain *iovad);
#else #else
...@@ -189,12 +182,6 @@ static inline void free_iova_fast(struct iova_domain *iovad, ...@@ -189,12 +182,6 @@ static inline void free_iova_fast(struct iova_domain *iovad,
{ {
} }
static inline void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages,
unsigned long data)
{
}
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad, static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size, unsigned long size,
unsigned long limit_pfn, unsigned long limit_pfn,
...@@ -216,13 +203,6 @@ static inline void init_iova_domain(struct iova_domain *iovad, ...@@ -216,13 +203,6 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{ {
} }
static inline int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb,
iova_entry_dtor entry_dtor)
{
return -ENODEV;
}
static inline struct iova *find_iova(struct iova_domain *iovad, static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn) unsigned long pfn)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment