Commit 4739f7dd authored by Paul Burton

MIPS: mm: Remove redundant get_new_mmu_context() cpu argument

get_new_mmu_context() accepts a cpu argument, but implicitly assumes
that this is always equal to smp_processor_id() by operating on the
local CPU's TLB & icache.

Remove the cpu argument and have get_new_mmu_context() call
smp_processor_id() instead.
Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
parent 9a27324f
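
For context, here is a small stand-alone C sketch (not part of this patch; current_cpu() and the get_new_context_*() helpers are made-up names for illustration) of the pattern the commit applies: a helper that only ever acts on the local CPU can look that CPU up itself instead of taking it as an argument.

#include <stdio.h>

/* Stand-in for smp_processor_id(); always "CPU 0" in this sketch. */
static unsigned int current_cpu(void)
{
	return 0;
}

/* Before: the caller must pass a cpu, which is required to be the local one. */
static void get_new_context_old(unsigned long *asid_cache, unsigned long cpu)
{
	asid_cache[cpu]++;
}

/* After: the helper derives the local CPU itself, so the argument goes away. */
static void get_new_context_new(unsigned long *asid_cache)
{
	unsigned int cpu = current_cpu();

	asid_cache[cpu]++;
}

int main(void)
{
	unsigned long asid_cache[4] = { 0 };

	get_new_context_old(asid_cache, current_cpu());	/* old-style call site */
	get_new_context_new(asid_cache);			/* new-style call site */
	printf("asid_cache[0] = %lu\n", asid_cache[0]);
	return 0;
}

In the patch below, smp_processor_id() plays the role of current_cpu(); callers of get_new_mmu_context() are expected to run with preemption disabled, so the CPU cannot change underneath the function.
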
@@ -100,9 +100,13 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+get_new_mmu_context(struct mm_struct *mm)
 {
-	u64 asid = asid_cache(cpu);
+	unsigned int cpu;
+	u64 asid;
+
+	cpu = smp_processor_id();
+	asid = asid_cache(cpu);
 
 	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
@@ -142,7 +146,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	htw_stop();
 
 	/* Check if our ASID is of an older version and thus invalid */
 	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
-		get_new_mmu_context(next, cpu);
+		get_new_mmu_context(next);
 	write_c0_entryhi(cpu_asid(cpu, next));
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
@@ -184,7 +188,7 @@ drop_mmu_context(struct mm_struct *mm)
 	cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
-		get_new_mmu_context(mm, cpu);
+		get_new_mmu_context(mm);
 		write_c0_entryhi(cpu_asid(cpu, mm));
 	} else {
 		/* will get a new context next time */
...
@@ -1016,7 +1016,7 @@ static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
 	 */
 	preempt_disable();
 	cpu = smp_processor_id();
-	get_new_mmu_context(kern_mm, cpu);
+	get_new_mmu_context(kern_mm);
 	for_each_possible_cpu(i)
 		if (i != cpu)
 			cpu_context(i, kern_mm) = 0;
...
@@ -1058,7 +1058,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
 		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
 		    asid_version_mask(cpu))
-			get_new_mmu_context(mm, cpu);
+			get_new_mmu_context(mm);
 		write_c0_entryhi(cpu_asid(cpu, mm));
 		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
 		kvm_mips_suspend_mm(cpu);
@@ -1076,7 +1076,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 		/* Restore normal Linux process memory map */
 		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
 		     asid_version_mask(cpu)))
-			get_new_mmu_context(current->mm, cpu);
+			get_new_mmu_context(current->mm);
 		write_c0_entryhi(cpu_asid(cpu, current->mm));
 		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
 		kvm_mips_resume_mm(cpu);
@@ -1113,7 +1113,7 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
 		/* Generate new ASID for current mode */
 		if (reload_asid) {
 			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
-			get_new_mmu_context(mm, cpu);
+			get_new_mmu_context(mm);
 			htw_stop();
 			write_c0_entryhi(cpu_asid(cpu, mm));
 			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
@@ -1230,7 +1230,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
 	 */
 	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
 	    asid_version_mask(cpu))
-		get_new_mmu_context(mm, cpu);
+		get_new_mmu_context(mm);
 }
 
 static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
@@ -1268,7 +1268,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Restore normal Linux process memory map */
 	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
 	     asid_version_mask(cpu)))
-		get_new_mmu_context(current->mm, cpu);
+		get_new_mmu_context(current->mm);
 	write_c0_entryhi(cpu_asid(cpu, current->mm));
 	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
 	kvm_mips_resume_mm(cpu);
...
@@ -2457,7 +2457,7 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
 		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
 		    || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
 			    asid_version_mask(cpu))
-			get_new_mmu_context(gpa_mm, cpu);
+			get_new_mmu_context(gpa_mm);
 	}
 }
...