Commit 4900dda9 authored by Lucas Stach

drm/etnaviv: replace MMU flush marker with flush sequence

If a MMU is shared between multiple GPUs, all of them need to flush their
TLBs, so a single marker that gets reset on the first flush won't do.
Replace the flush marker with a sequence number, so that it's possible to
check if the TLB is in sync with the current page table state for each GPU.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
parent bffe5db8
...@@ -315,6 +315,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, ...@@ -315,6 +315,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
u32 return_target, return_dwords; u32 return_target, return_dwords;
u32 link_target, link_dwords; u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state; bool switch_context = gpu->exec_state != exec_state;
unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
bool need_flush = gpu->flush_seq != new_flush_seq;
lockdep_assert_held(&gpu->lock); lockdep_assert_held(&gpu->lock);
...@@ -329,14 +331,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, ...@@ -329,14 +331,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
* need to append a mmu flush load state, followed by a new * need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words. * link to this buffer - a total of four additional words.
*/ */
if (gpu->mmu->need_flush || switch_context) { if (need_flush || switch_context) {
u32 target, extra_dwords; u32 target, extra_dwords;
/* link command */ /* link command */
extra_dwords = 1; extra_dwords = 1;
/* flush command */ /* flush command */
if (gpu->mmu->need_flush) { if (need_flush) {
if (gpu->mmu->version == ETNAVIV_IOMMU_V1) if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
extra_dwords += 1; extra_dwords += 1;
else else
...@@ -349,7 +351,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, ...@@ -349,7 +351,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords); target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
if (gpu->mmu->need_flush) { if (need_flush) {
/* Add the MMU flush */ /* Add the MMU flush */
if (gpu->mmu->version == ETNAVIV_IOMMU_V1) { if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU, CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
...@@ -369,7 +371,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, ...@@ -369,7 +371,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
SYNC_RECIPIENT_PE); SYNC_RECIPIENT_PE);
} }
gpu->mmu->need_flush = false; gpu->flush_seq = new_flush_seq;
} }
if (switch_context) { if (switch_context) {
......
...@@ -137,6 +137,7 @@ struct etnaviv_gpu { ...@@ -137,6 +137,7 @@ struct etnaviv_gpu {
int irq; int irq;
struct etnaviv_iommu *mmu; struct etnaviv_iommu *mmu;
unsigned int flush_seq;
/* Power Control: */ /* Power Control: */
struct clk *clk_bus; struct clk *clk_bus;
......
...@@ -263,7 +263,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu, ...@@ -263,7 +263,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
} }
list_add_tail(&mapping->mmu_node, &mmu->mappings); list_add_tail(&mapping->mmu_node, &mmu->mappings);
mmu->need_flush = true; mmu->flush_seq++;
unlock: unlock:
mutex_unlock(&mmu->lock); mutex_unlock(&mmu->lock);
...@@ -282,7 +282,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu, ...@@ -282,7 +282,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
etnaviv_iommu_remove_mapping(mmu, mapping); etnaviv_iommu_remove_mapping(mmu, mapping);
list_del(&mapping->mmu_node); list_del(&mapping->mmu_node);
mmu->need_flush = true; mmu->flush_seq++;
mutex_unlock(&mmu->lock); mutex_unlock(&mmu->lock);
} }
...@@ -369,7 +369,7 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu, ...@@ -369,7 +369,7 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
return ret; return ret;
} }
mmu->need_flush = true; mmu->flush_seq++;
} }
list_add_tail(&mapping->mmu_node, &mmu->mappings); list_add_tail(&mapping->mmu_node, &mmu->mappings);
......
...@@ -48,7 +48,7 @@ struct etnaviv_iommu { ...@@ -48,7 +48,7 @@ struct etnaviv_iommu {
struct mutex lock; struct mutex lock;
struct list_head mappings; struct list_head mappings;
struct drm_mm mm; struct drm_mm mm;
bool need_flush; unsigned int flush_seq;
}; };
struct etnaviv_gem_object; struct etnaviv_gem_object;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment