Commit c625c274 authored by Jason Gunthorpe

nouveau: use mmu_notifier directly for invalidate_range_start

There is no reason to get the invalidate_range_start() callback via an
indirection through hmm_mirror, just register a normal notifier directly.

Link: https://lore.kernel.org/r/20191112202231.3856-9-jgg@ziepe.ca
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 3506ff69
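The shape of the change is easier to see outside the diff. Below is a minimal, self-contained sketch of the direct-registration pattern this commit adopts, assuming the v5.4-era mmu_notifier API this series targets; the example_* names are hypothetical and not part of the patch:

/*
 * Sketch only: embed a struct mmu_notifier in the driver object and
 * register it directly, instead of routing the callback through
 * hmm_mirror.
 */
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_ctx {
	struct mmu_notifier notifier;	/* embedded, as nouveau_svmm does */
	struct mutex mutex;		/* serializes against invalidation */
};

static int
example_invalidate_range_start(struct mmu_notifier *mn,
			       const struct mmu_notifier_range *range)
{
	struct example_ctx *ctx =
		container_of(mn, struct example_ctx, notifier);

	mutex_lock(&ctx->mutex);
	/* shoot down device mappings for [range->start, range->end) */
	mutex_unlock(&ctx->mutex);
	return 0;
}

/* Runs via SRCU after the last reference from mmu_notifier_put() drops,
 * so the final kfree() lives here rather than in the teardown path. */
static void example_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct example_ctx, notifier));
}

static const struct mmu_notifier_ops example_mn_ops = {
	.invalidate_range_start = example_invalidate_range_start,
	.free_notifier = example_free_notifier,
};

static int example_register(struct example_ctx *ctx)
{
	int ret;

	mutex_init(&ctx->mutex);
	ctx->notifier.ops = &example_mn_ops;

	/* __mmu_notifier_register() expects mmap_sem held for write;
	 * plain mmu_notifier_register() would take it internally. */
	down_write(&current->mm->mmap_sem);
	ret = __mmu_notifier_register(&ctx->notifier, current->mm);
	up_write(&current->mm->mmap_sem);
	return ret;
}

Teardown then becomes mmu_notifier_put(&ctx->notifier) rather than a direct kfree(), which is exactly the swap nouveau_svmm_fini() makes in the diff below.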
drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -88,6 +88,7 @@ nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
 }
 
 struct nouveau_svmm {
+	struct mmu_notifier notifier;
 	struct nouveau_vmm *vmm;
 	struct {
 		unsigned long start;
@@ -96,7 +97,6 @@ struct nouveau_svmm {
 
 	struct mutex mutex;
 
-	struct mm_struct *mm;
 	struct hmm_mirror mirror;
 };
 
@@ -251,10 +251,11 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
 }
 
 static int
-nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
+nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
 					const struct mmu_notifier_range *update)
 {
-	struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
+	struct nouveau_svmm *svmm =
+		container_of(mn, struct nouveau_svmm, notifier);
 	unsigned long start = update->start;
 	unsigned long limit = update->end;
 
@@ -264,6 +265,9 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
 
 	mutex_lock(&svmm->mutex);
+	if (unlikely(!svmm->vmm))
+		goto out;
+
 	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
 		if (start < svmm->unmanaged.start) {
 			nouveau_svmm_invalidate(svmm, start,
@@ -273,19 +277,31 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 	}
 
 	nouveau_svmm_invalidate(svmm, start, limit);
+
+out:
 	mutex_unlock(&svmm->mutex);
 	return 0;
 }
 
-static void
-nouveau_svmm_release(struct hmm_mirror *mirror)
+static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
+{
+	kfree(container_of(mn, struct nouveau_svmm, notifier));
+}
+
+static const struct mmu_notifier_ops nouveau_mn_ops = {
+	.invalidate_range_start = nouveau_svmm_invalidate_range_start,
+	.free_notifier = nouveau_svmm_free_notifier,
+};
+
+static int
+nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
+					const struct mmu_notifier_range *update)
 {
+	return 0;
 }
 
-static const struct hmm_mirror_ops
-nouveau_svmm = {
+static const struct hmm_mirror_ops nouveau_svmm = {
 	.sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
-	.release = nouveau_svmm_release,
 };
 
 void
@@ -294,7 +310,10 @@ nouveau_svmm_fini(struct nouveau_svmm **psvmm)
 	struct nouveau_svmm *svmm = *psvmm;
 	if (svmm) {
 		hmm_mirror_unregister(&svmm->mirror);
-		kfree(*psvmm);
+		mutex_lock(&svmm->mutex);
+		svmm->vmm = NULL;
+		mutex_unlock(&svmm->mutex);
+		mmu_notifier_put(&svmm->notifier);
 		*psvmm = NULL;
 	}
 }
@@ -320,7 +339,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
 	mutex_lock(&cli->mutex);
 	if (cli->svm.cli) {
 		ret = -EBUSY;
-		goto done;
+		goto out_free;
 	}
 
 	/* Allocate a new GPU VMM that can support SVM (managed by the
@@ -335,24 +354,33 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
 			.fault_replay = true,
 		}, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
 	if (ret)
-		goto done;
+		goto out_free;
 
-	/* Enable HMM mirroring of CPU address-space to VMM. */
-	svmm->mm = get_task_mm(current);
-	down_write(&svmm->mm->mmap_sem);
+	down_write(&current->mm->mmap_sem);
 	svmm->mirror.ops = &nouveau_svmm;
-	ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
-	if (ret == 0) {
-		cli->svm.svmm = svmm;
-		cli->svm.cli = cli;
-	}
-	up_write(&svmm->mm->mmap_sem);
-	mmput(svmm->mm);
+	ret = hmm_mirror_register(&svmm->mirror, current->mm);
+	if (ret)
+		goto out_mm_unlock;
 
-done:
+	svmm->notifier.ops = &nouveau_mn_ops;
+	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
 	if (ret)
-		nouveau_svmm_fini(&svmm);
+		goto out_hmm_unregister;
+	/* Note, ownership of svmm transfers to mmu_notifier */
+
+	cli->svm.svmm = svmm;
+	cli->svm.cli = cli;
+	up_write(&current->mm->mmap_sem);
 	mutex_unlock(&cli->mutex);
+	return 0;
+
+out_hmm_unregister:
+	hmm_mirror_unregister(&svmm->mirror);
+out_mm_unlock:
+	up_write(&current->mm->mmap_sem);
+out_free:
+	mutex_unlock(&cli->mutex);
+	kfree(svmm);
 	return ret;
 }
 
@@ -494,12 +522,12 @@ nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
 	ret = hmm_range_register(range, &svmm->mirror);
 	if (ret) {
-		up_read(&svmm->mm->mmap_sem);
+		up_read(&svmm->notifier.mm->mmap_sem);
 		return (int)ret;
 	}
 
 	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
-		up_read(&svmm->mm->mmap_sem);
+		up_read(&svmm->notifier.mm->mmap_sem);
 		return -EBUSY;
 	}
 
@@ -507,7 +535,7 @@ nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
 	if (ret <= 0) {
 		if (ret == 0)
 			ret = -EBUSY;
-		up_read(&svmm->mm->mmap_sem);
+		up_read(&svmm->notifier.mm->mmap_sem);
 		hmm_range_unregister(range);
 		return ret;
 	}
@@ -587,12 +615,15 @@ nouveau_svm_fault(struct nvif_notify *notify)
 	args.i.p.version = 0;
 
 	for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
+		struct mm_struct *mm;
+
 		/* Cancel any faults from non-SVM channels. */
 		if (!(svmm = buffer->fault[fi]->svmm)) {
 			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
 			continue;
 		}
 		SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
+		mm = svmm->notifier.mm;
 
 		/* We try and group handling of faults within a small
 		 * window into a single update.
@@ -609,11 +640,11 @@ nouveau_svm_fault(struct nvif_notify *notify)
 		/* Intersect fault window with the CPU VMA, cancelling
 		 * the fault if the address is invalid.
 		 */
-		down_read(&svmm->mm->mmap_sem);
-		vma = find_vma_intersection(svmm->mm, start, limit);
+		down_read(&mm->mmap_sem);
+		vma = find_vma_intersection(mm, start, limit);
 		if (!vma) {
 			SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
-			up_read(&svmm->mm->mmap_sem);
+			up_read(&mm->mmap_sem);
 			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
 			continue;
 		}
@@ -623,7 +654,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
 		if (buffer->fault[fi]->addr != start) {
 			SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
-			up_read(&svmm->mm->mmap_sem);
+			up_read(&mm->mmap_sem);
 			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
 			continue;
 		}
@@ -704,7 +735,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
 				NULL);
 			svmm->vmm->vmm.object.client->super = false;
 			mutex_unlock(&svmm->mutex);
-			up_read(&svmm->mm->mmap_sem);
+			up_read(&mm->mmap_sem);
 		}
 
 		/* Cancel any faults in the window whose pages didn't manage
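One subtlety in the teardown above: mmu_notifier_put() only drops the reference; the actual kfree() is deferred to the .free_notifier callback via SRCU, so an invalidate_range_start() can still arrive after nouveau_svmm_fini() has run. That is why fini clears svmm->vmm under the mutex, and the new unlikely(!svmm->vmm) check lets such a late invalidation bail out harmlessly instead of touching a dead VMM.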