Commit d6408538 authored by Lucas Stach

drm/etnaviv: reference MMU context when setting up hardware state

Move the refcount manipulation of the MMU context to the point where the
hardware state is programmed. At that point it is also known whether a
previous MMU state is still in place or the state needs to be reprogrammed
with a potentially different context.

Cc: stable@vger.kernel.org # 5.4
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Tested-by: Michael Walle <michael@walle.cc>
Tested-by: Marek Vasut <marex@denx.de>
Reviewed-by: Christian Gmeiner <christian.gmeiner@gmail.com>
parent f978a530
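The diff below moves the get/put of gpu->mmu_context into the etnaviv_iommu_restore() paths. As a rough illustration of that ownership pattern, here is a minimal, self-contained user-space model; struct gpu, struct mmu_context, context_get() and context_put() are hypothetical stand-ins for the etnaviv equivalents, not the driver's actual types or API.

```c
/* Illustrative model only, not driver code. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct mmu_context {
	int refcount;
};

struct gpu {
	struct mmu_context *mmu_context;	/* context currently programmed into the MMU */
};

static struct mmu_context *context_get(struct mmu_context *ctx)
{
	ctx->refcount++;
	return ctx;
}

static void context_put(struct mmu_context *ctx)
{
	if (--ctx->refcount == 0)
		free(ctx);
}

/*
 * Models the restore paths after this change: the function that programs the
 * hardware state drops the reference on whatever context was active before
 * and takes a reference on the context it is about to program.
 */
static void restore(struct gpu *gpu, struct mmu_context *ctx)
{
	if (gpu->mmu_context)
		context_put(gpu->mmu_context);
	gpu->mmu_context = context_get(ctx);
	/* ... actual hardware MMU programming would happen here ... */
}

int main(void)
{
	struct gpu gpu = { .mmu_context = NULL };
	struct mmu_context *a = calloc(1, sizeof(*a));
	struct mmu_context *b = calloc(1, sizeof(*b));

	assert(a && b);
	a->refcount = 1;	/* caller's initial reference */
	b->refcount = 1;

	restore(&gpu, a);	/* gpu takes a reference on a */
	restore(&gpu, b);	/* gpu drops a, takes a reference on b */

	assert(a->refcount == 1);	/* only the caller's reference is left */
	assert(b->refcount == 2);	/* caller + gpu */

	context_put(gpu.mmu_context);	/* gpu-side teardown */
	context_put(a);
	context_put(b);

	printf("refcount model OK\n");
	return 0;
}
```

With this ownership in the restore paths, any caller that programs a context into the hardware no longer has to juggle the gpu->mmu_context reference itself, which is exactly what the etnaviv_gpu_submit() hunk below simplifies.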
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -647,17 +647,19 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
 	gpu->fe_running = true;
 }
 
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+					  struct etnaviv_iommu_context *context)
 {
-	u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
-				&gpu->mmu_context->cmdbuf_mapping);
 	u16 prefetch;
+	u32 address;
 
 	/* setup the MMU */
-	etnaviv_iommu_restore(gpu, gpu->mmu_context);
+	etnaviv_iommu_restore(gpu, context);
 
 	/* Start command processor */
 	prefetch = etnaviv_buffer_init(gpu);
+	address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+				&gpu->mmu_context->cmdbuf_mapping);
 
 	etnaviv_gpu_start_fe(gpu, address, prefetch);
 }
@@ -1375,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
 		goto out_unlock;
 	}
 
-	if (!gpu->fe_running) {
-		gpu->mmu_context = etnaviv_iommu_context_get(submit->mmu_context);
-		etnaviv_gpu_start_fe_idleloop(gpu);
-	} else {
-		if (submit->prev_mmu_context)
-			etnaviv_iommu_context_put(submit->prev_mmu_context);
-		submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
-	}
+	if (!gpu->fe_running)
+		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+	if (submit->prev_mmu_context)
+		etnaviv_iommu_context_put(submit->prev_mmu_context);
+	submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
 
 	if (submit->nr_pmrs) {
 		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
 	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 	u32 pgtable;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	/* set base addresses */
 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
 	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
 		return;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
 				(u32)v2_context->mtlb_dma,
 				(u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
 	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
 		return;
 
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = etnaviv_iommu_context_get(context);
+
 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
 		  lower_32_bits(context->global->v2.pta_dma));
 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,