Commit 42d5b846 authored by Paul Burton's avatar Paul Burton

MIPS: mm: Unify ASID version checks

Introduce a new check_mmu_context() function to check an mm's ASID
version & get a new one if it's outdated, and a
check_switch_mmu_context() function which additionally sets up the new
ASID & page directory. Simplify switch_mm() & various
get_new_mmu_context() callsites in MIPS KVM by making use of the new
functions, which will help reduce the amount of code that requires
modification to gain MMID support.
Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
parent 4ebea49c
...@@ -98,6 +98,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) ...@@ -98,6 +98,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
} }
extern void get_new_mmu_context(struct mm_struct *mm); extern void get_new_mmu_context(struct mm_struct *mm);
extern void check_mmu_context(struct mm_struct *mm);
extern void check_switch_mmu_context(struct mm_struct *mm);
/* /*
* Initialize the context related info for a new mm_struct * Initialize the context related info for a new mm_struct
...@@ -126,11 +128,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -126,11 +128,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
local_irq_save(flags); local_irq_save(flags);
htw_stop(); htw_stop();
/* Check if our ASID is of an older version and thus invalid */ check_switch_mmu_context(next);
if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
get_new_mmu_context(next);
write_c0_entryhi(cpu_asid(cpu, next));
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* /*
* Mark current->active_mm as not "active" anymore. * Mark current->active_mm as not "active" anymore.
......
...@@ -1056,11 +1056,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) ...@@ -1056,11 +1056,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
*/ */
if (current->flags & PF_VCPU) { if (current->flags & PF_VCPU) {
mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & check_switch_mmu_context(mm);
asid_version_mask(cpu))
get_new_mmu_context(mm);
write_c0_entryhi(cpu_asid(cpu, mm));
TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
kvm_mips_suspend_mm(cpu); kvm_mips_suspend_mm(cpu);
ehb(); ehb();
} }
...@@ -1074,11 +1070,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu) ...@@ -1074,11 +1070,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
if (current->flags & PF_VCPU) { if (current->flags & PF_VCPU) {
/* Restore normal Linux process memory map */ /* Restore normal Linux process memory map */
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & check_switch_mmu_context(current->mm);
asid_version_mask(cpu)))
get_new_mmu_context(current->mm);
write_c0_entryhi(cpu_asid(cpu, current->mm));
TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
kvm_mips_resume_mm(cpu); kvm_mips_resume_mm(cpu);
ehb(); ehb();
} }
...@@ -1228,9 +1220,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run, ...@@ -1228,9 +1220,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
* Check if ASID is stale. This may happen due to a TLB flush request or * Check if ASID is stale. This may happen due to a TLB flush request or
* a lazy user MM invalidation. * a lazy user MM invalidation.
*/ */
if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & check_mmu_context(mm);
asid_version_mask(cpu))
get_new_mmu_context(mm);
} }
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
...@@ -1266,11 +1256,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -1266,11 +1256,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
cpu = smp_processor_id(); cpu = smp_processor_id();
/* Restore normal Linux process memory map */ /* Restore normal Linux process memory map */
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & check_switch_mmu_context(current->mm);
asid_version_mask(cpu)))
get_new_mmu_context(current->mm);
write_c0_entryhi(cpu_asid(cpu, current->mm));
TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
kvm_mips_resume_mm(cpu); kvm_mips_resume_mm(cpu);
htw_start(); htw_start();
......
...@@ -2454,10 +2454,10 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu) ...@@ -2454,10 +2454,10 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
* Root ASID dealiases guest GPA mappings in the root TLB. * Root ASID dealiases guest GPA mappings in the root TLB.
* Allocate new root ASID if needed. * Allocate new root ASID if needed.
*/ */
if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask) if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
|| (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu))
get_new_mmu_context(gpa_mm); get_new_mmu_context(gpa_mm);
else
check_mmu_context(gpa_mm);
} }
} }
......
...@@ -17,3 +17,21 @@ void get_new_mmu_context(struct mm_struct *mm) ...@@ -17,3 +17,21 @@ void get_new_mmu_context(struct mm_struct *mm)
cpu_context(cpu, mm) = asid_cache(cpu) = asid; cpu_context(cpu, mm) = asid_cache(cpu) = asid;
} }
void check_mmu_context(struct mm_struct *mm)
{
	unsigned int this_cpu = smp_processor_id();

	/*
	 * Compare the generation (version) bits of this mm's ASID with the
	 * per-CPU ASID cache; any mismatch means the ASID belongs to an
	 * older generation and must be replaced with a fresh one.
	 */
	if (asid_version_mask(this_cpu) &
	    (cpu_context(this_cpu, mm) ^ asid_cache(this_cpu)))
		get_new_mmu_context(mm);
}
void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int this_cpu = smp_processor_id();

	/* Refresh the ASID first if its version is stale. */
	check_mmu_context(mm);

	/*
	 * Activate the (possibly newly allocated) ASID in EntryHi and point
	 * the TLB refill handler at this mm's page directory.
	 */
	write_c0_entryhi(cpu_asid(this_cpu, mm));
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment