Commit e241f8e7 authored by Joerg Roedel

iommu/amd: Add locking to per-domain flush-queue

With locking we can safely access the flush-queues of other CPUs.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent fd62190a
@@ -146,6 +146,7 @@ struct flush_queue_entry {
 struct flush_queue {
 	struct flush_queue_entry *entries;
 	unsigned head, tail;
+	spinlock_t lock;
 };
 
 /*
@@ -1801,6 +1802,8 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
 			dma_ops_domain_free_flush_queue(dom);
 			return -ENOMEM;
 		}
+
+		spin_lock_init(&queue->lock);
 	}
 
 	return 0;
@@ -1808,6 +1811,8 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
 
 static inline bool queue_ring_full(struct flush_queue *queue)
 {
+	assert_spin_locked(&queue->lock);
+
 	return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
 }
 
@@ -1819,6 +1824,8 @@ static void queue_release(struct dma_ops_domain *dom,
 {
 	unsigned i;
 
+	assert_spin_locked(&queue->lock);
+
 	queue_ring_for_each(i, queue)
 		free_iova_fast(&dom->iovad,
 			       queue->entries[i].iova_pfn,
@@ -1831,6 +1838,7 @@ static inline unsigned queue_ring_add(struct flush_queue *queue)
 {
 	unsigned idx = queue->tail;
 
+	assert_spin_locked(&queue->lock);
 	queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
 
 	return idx;
@@ -1840,12 +1848,14 @@ static void queue_add(struct dma_ops_domain *dom,
 		      unsigned long address, unsigned long pages)
 {
 	struct flush_queue *queue;
+	unsigned long flags;
 	int idx;
 
 	pages = __roundup_pow_of_two(pages);
 	address >>= PAGE_SHIFT;
 
 	queue = get_cpu_ptr(dom->flush_queue);
+	spin_lock_irqsave(&queue->lock, flags);
 
 	if (queue_ring_full(queue)) {
 		domain_flush_tlb(&dom->domain);
@@ -1858,6 +1868,7 @@ static void queue_add(struct dma_ops_domain *dom,
 	queue->entries[idx].iova_pfn = address;
 	queue->entries[idx].pages = pages;
 
+	spin_unlock_irqrestore(&queue->lock, flags);
 	put_cpu_ptr(dom->flush_queue);
 }
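The point of the new per-queue lock, per the commit message, is that a flush queue no longer belongs exclusively to the CPU that fills it: any context that takes queue->lock may inspect or drain it. Below is a minimal sketch of what such a cross-CPU drain could look like. The dma_ops_domain_flush_queues() wrapper is hypothetical and not part of this commit; it assumes queue_release() takes the domain and the queue, as the hunk above suggests, and uses domain_flush_complete() to wait for the IOTLB flush before releasing IOVAs.

/*
 * Hypothetical illustration only: drain every per-CPU flush queue of a
 * domain from a single context. This relies on the per-queue spinlock
 * added by this commit; queue_release() asserts that the lock is held.
 */
static void dma_ops_domain_flush_queues(struct dma_ops_domain *dom)
{
	int cpu;

	/* Invalidate the domain's IOTLB once ... */
	domain_flush_tlb(&dom->domain);
	domain_flush_complete(&dom->domain);

	/* ... then release the queued IOVA ranges of every CPU. */
	for_each_possible_cpu(cpu) {
		struct flush_queue *queue;
		unsigned long flags;

		queue = per_cpu_ptr(dom->flush_queue, cpu);

		spin_lock_irqsave(&queue->lock, flags);
		queue_release(dom, queue);
		spin_unlock_irqrestore(&queue->lock, flags);
	}
}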