Commit 5ec69c91 authored by Ben Skeggs

drm/nouveau/mmu: serialise mmu invalidations with private mutex

nvkm_subdev.mutex is going away, so serialise MMU invalidations with a mutex private to struct nvkm_mmu instead.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent dbffdff7
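
The change follows one pattern throughout: give struct nvkm_mmu its own mutex, initialise and destroy it with the object, and take it around the invalidate/flush register sequence instead of the shared nvkm_subdev.mutex. A minimal sketch of that pattern, using a hypothetical example_mmu type rather than the actual driver structures:

/* Sketch only: example_mmu stands in for struct nvkm_mmu. */
#include <linux/mutex.h>

struct example_mmu {
	struct mutex mutex;	/* serialises invalidations */
};

static void example_mmu_ctor(struct example_mmu *mmu)
{
	mutex_init(&mmu->mutex);
}

static void example_mmu_dtor(struct example_mmu *mmu)
{
	mutex_destroy(&mmu->mutex);
}

static void example_mmu_flush(struct example_mmu *mmu)
{
	mutex_lock(&mmu->mutex);
	/* ... write the invalidate registers and wait for completion ... */
	mutex_unlock(&mmu->mutex);
}

Only the locking granularity changes: concurrent flushes on the same MMU are still serialised against each other, but they no longer contend with unrelated users of the subdev mutex.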
@@ -117,6 +117,8 @@ struct nvkm_mmu {
 		struct list_head list;
 	} ptc, ptp;
 
+	struct mutex mutex; /* serialises mmu invalidations */
+
 	struct nvkm_device_oclass user;
 };
 
@@ -402,6 +402,7 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 	nvkm_vmm_unref(&mmu->vmm);
 
 	nvkm_mmu_ptc_fini(mmu);
+	mutex_destroy(&mmu->mutex);
 	return mmu;
 }
 
@@ -420,6 +421,7 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
 	mmu->func = func;
 	mmu->dma_bits = func->dma_bits;
 	nvkm_mmu_ptc_init(mmu);
+	mutex_init(&mmu->mutex);
 	mmu->user.ctor = nvkm_ummu_new;
 	mmu->user.base = func->mmu.user;
 }
@@ -187,12 +187,11 @@ gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
 void
 gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
 {
-	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
 	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
 	u64 addr = 0;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	/* Looks like maybe a "free flush slots" counter, the
 	 * faster you write to 0x100cbc to more it decreases.
 	 */
@@ -222,7 +221,7 @@ gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
 		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
 			break;
 	);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 void
@@ -80,17 +80,16 @@ nv41_vmm_desc_12[] = {
 static void
 nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
 {
-	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	nvkm_wr32(device, 0x100810, 0x00000022);
 	nvkm_msec(device, 2000,
 		if (nvkm_rd32(device, 0x100810) & 0x00000020)
 			break;
 	);
 	nvkm_wr32(device, 0x100810, 0x00000000);
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 static const struct nvkm_vmm_func
@@ -184,7 +184,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
 	struct nvkm_device *device = subdev->device;
 	int i, id;
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
 		if (!atomic_read(&vmm->engref[i]))
 			continue;
@@ -220,7 +220,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
 			nvkm_error(subdev, "%s mmu invalidate timeout\n",
 				   nvkm_subdev_name[i]);
 	}
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 int
@@ -26,15 +26,14 @@
 static void
 tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 {
-	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
-	struct nvkm_device *device = subdev->device;
+	struct nvkm_device *device = vmm->mmu->subdev.device;
 	u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
 
 	type |= 0x00000001; /* PAGE_ALL */
 	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
 		type |= 0x00000004; /* HUB_ONLY */
 
-	mutex_lock(&subdev->mutex);
+	mutex_lock(&vmm->mmu->mutex);
 
 	nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
 	nvkm_wr32(device, 0xb830a4, 0x00000000);
@@ -46,7 +45,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 			break;
 	);
 
-	mutex_unlock(&subdev->mutex);
+	mutex_unlock(&vmm->mmu->mutex);
 }
 
 static const struct nvkm_vmm_func