Commit 8109c2a2 authored by Joerg Roedel

iommu/iova: Add locking to Flush-Queues

The lock is taken on the same CPU most of the time, but
having it also allows the queue to be flushed from another
CPU if necessary.

This will be used by a timer to regularly flush any pending
IOVAs from the Flush-Queues.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent fb418dab
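
For context, the sketch below shows how a timer callback running on any CPU could drain every per-CPU Flush-Queue once this lock is in place. Only struct iova_fq, fq->lock and fq_ring_free() come from this patch; the function name fq_flush_timer_fn() and how the timer is armed are illustrative assumptions, not part of the commit.

/*
 * Sketch only: drain all per-CPU flush queues from timer context.
 * fq_ring_free() and fq->lock are introduced by this patch; the
 * function name and its caller are assumptions for illustration.
 */
static void fq_flush_timer_fn(struct iova_domain *iovad)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		unsigned long flags;

		/* Same lock queue_iova() takes, so flushing from another CPU is safe. */
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}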
drivers/iommu/iova.c

@@ -91,6 +91,8 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 		fq = per_cpu_ptr(iovad->fq, cpu);
 		fq->head = 0;
 		fq->tail = 0;
+
+		spin_lock_init(&fq->lock);
 	}
 
 	return 0;
@@ -471,6 +473,7 @@ EXPORT_SYMBOL_GPL(free_iova_fast);
 static inline bool fq_full(struct iova_fq *fq)
 {
+	assert_spin_locked(&fq->lock);
 	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
 }
@@ -478,6 +481,8 @@ static inline unsigned fq_ring_add(struct iova_fq *fq)
 {
 	unsigned idx = fq->tail;
 
+	assert_spin_locked(&fq->lock);
+
 	fq->tail = (idx + 1) % IOVA_FQ_SIZE;
 
 	return idx;
@@ -488,6 +493,8 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
 	unsigned idx;
 
+	assert_spin_locked(&fq->lock);
+
 	fq_ring_for_each(idx, fq) {
 		if (fq->entries[idx].counter >= counter)
@@ -537,8 +544,11 @@ void queue_iova(struct iova_domain *iovad,
 		unsigned long data)
 {
 	struct iova_fq *fq = get_cpu_ptr(iovad->fq);
+	unsigned long flags;
 	unsigned idx;
 
+	spin_lock_irqsave(&fq->lock, flags);
+
 	/*
 	 * First remove all entries from the flush queue that have already been
 	 * flushed out on another CPU. This makes the fq_full() check below less
@@ -558,6 +568,7 @@ void queue_iova(struct iova_domain *iovad,
 	fq->entries[idx].data = data;
 	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
 
+	spin_unlock_irqrestore(&fq->lock, flags);
 	put_cpu_ptr(iovad->fq);
 }
 EXPORT_SYMBOL_GPL(queue_iova);
include/linux/iova.h

@@ -60,6 +60,7 @@ struct iova_fq_entry {
 struct iova_fq {
 	struct iova_fq_entry entries[IOVA_FQ_SIZE];
 	unsigned head, tail;
+	spinlock_t lock;
 };
 
 /* holds all the iova translations for a domain */