Commit 2faf153b authored by Thomas Gleixner, committed by Borislav Petkov

x86/tlb: Move __flush_tlb() out of line

cpu_tlbstate is exported because various TLB-related functions need
access to it, but cpu_tlbstate is sensitive information which should
only be accessed by well-contained kernel functions and not be directly
exposed to modules.

As a first step, move __flush_tlb() out of line and hide the native
function. The latter can be static when CONFIG_PARAVIRT is disabled.

Consolidate the namespace while at it and remove the pointless extra
wrapper in the paravirt code.

No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200421092559.246130908@linutronix.de
parent 9020d395
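For illustration only: a minimal user-space sketch of the STATIC_NOPV pattern this patch introduces (see the hunk adding STATIC_NOPV below). The names STATIC_NOPV, __flush_tlb_local, native_flush_tlb_local and flush_tlb_local come from the patch; the stdio stand-in body and main() are hypothetical additions. It models the !CONFIG_PARAVIRT case, where the native function can be static; with CONFIG_PARAVIRT the __flush_tlb_local() indirection comes from paravirt.h instead.

/* Sketch only: build with `cc -o tlb_sketch tlb_sketch.c` (CONFIG_PARAVIRT left undefined). */
#include <stdio.h>

#ifdef CONFIG_PARAVIRT
# define STATIC_NOPV
#else
/* No paravirt: the native implementation can be static and is called directly. */
# define STATIC_NOPV		static
# define __flush_tlb_local	native_flush_tlb_local
#endif

/* Stand-in for the real CR3-write based flush in the kernel. */
STATIC_NOPV void native_flush_tlb_local(void)
{
	puts("native local TLB flush");
}

/* The single out-of-line entry point callers use; the kernel exports it GPL-only. */
void flush_tlb_local(void)
{
	__flush_tlb_local();
}

int main(void)
{
	flush_tlb_local();
	return 0;
}

With CONFIG_PARAVIRT=y the same flush_tlb_local() body compiles against paravirt.h, where __flush_tlb_local() dispatches through pv_ops via PVOP_VCALL0(mmu.flush_tlb_user), which the patch points at native_flush_tlb_local() by default.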
@@ -47,7 +47,9 @@ static inline void slow_down_io(void)
 #endif
 }
 
-static inline void __flush_tlb(void)
+void native_flush_tlb_local(void);
+
+static inline void __flush_tlb_local(void)
 {
 	PVOP_VCALL0(mmu.flush_tlb_user);
 }
@@ -140,12 +140,13 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
 }
 
+void flush_tlb_local(void);
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define __flush_tlb() __native_flush_tlb()
 #define __flush_tlb_global() __native_flush_tlb_global()
 #define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
 #endif
 
 struct tlb_context {
@@ -370,24 +371,6 @@ static inline void invalidate_user_asid(u16 asid)
 		(unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
 }
 
-/*
- * flush the entire current user mapping
- */
-static inline void __native_flush_tlb(void)
-{
-	/*
-	 * Preemption or interrupts must be disabled to protect the access
-	 * to the per CPU variable and to prevent being preempted between
-	 * read_cr3() and write_cr3().
-	 */
-	WARN_ON_ONCE(preemptible());
-
-	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
-
-	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
-	native_write_cr3(__native_read_cr3());
-}
-
 /*
  * flush everything
  */
@@ -461,7 +444,7 @@ static inline void __flush_tlb_all(void)
 		/*
 		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
 		 */
-		__flush_tlb();
+		flush_tlb_local();
 	}
 }
@@ -537,8 +520,6 @@ struct flush_tlb_info {
 	bool			freed_tables;
 };
 
-#define local_flush_tlb() __flush_tlb()
-
 #define flush_tlb_mm(mm)	\
 		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
@@ -761,7 +761,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
+	flush_tlb_local();
 
 	/* Save MTRR state */
 	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
@@ -778,7 +778,7 @@ static void post_set(void) __releases(set_atomicity_lock)
 {
 	/* Flush TLBs (no need to flush caches - they are disabled) */
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
+	flush_tlb_local();
 
 	/* Intel (P6) standard MTRRs */
 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
...@@ -160,11 +160,6 @@ unsigned paravirt_patch_insns(void *insn_buff, unsigned len, ...@@ -160,11 +160,6 @@ unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
return insn_len; return insn_len;
} }
static void native_flush_tlb(void)
{
__native_flush_tlb();
}
/* /*
* Global pages have to be flushed a bit differently. Not a real * Global pages have to be flushed a bit differently. Not a real
* performance problem because this does not happen often. * performance problem because this does not happen often.
...@@ -359,7 +354,7 @@ struct paravirt_patch_template pv_ops = { ...@@ -359,7 +354,7 @@ struct paravirt_patch_template pv_ops = {
#endif /* CONFIG_PARAVIRT_XXL */ #endif /* CONFIG_PARAVIRT_XXL */
/* Mmu ops. */ /* Mmu ops. */
.mmu.flush_tlb_user = native_flush_tlb, .mmu.flush_tlb_user = native_flush_tlb_local,
.mmu.flush_tlb_kernel = native_flush_tlb_global, .mmu.flush_tlb_kernel = native_flush_tlb_global,
.mmu.flush_tlb_one_user = native_flush_tlb_one_user, .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
.mmu.flush_tlb_others = native_flush_tlb_others, .mmu.flush_tlb_others = native_flush_tlb_others,
......
@@ -134,7 +134,7 @@ static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
 		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
 	} while (size);
 
-	__native_flush_tlb();
+	flush_tlb_local();
 }
 
 void __init sme_unmap_bootdata(char *real_mode_data)
@@ -18,6 +18,13 @@
 
 #include "mm_internal.h"
 
+#ifdef CONFIG_PARAVIRT
+# define STATIC_NOPV
+#else
+# define STATIC_NOPV		static
+# define __flush_tlb_local	native_flush_tlb_local
+#endif
+
 /*
  *	TLB flushing, formerly SMP-only
  *		c/o Linus Torvalds.
@@ -645,7 +652,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 		trace_tlb_flush(reason, nr_invalidate);
 	} else {
 		/* Full flush. */
-		local_flush_tlb();
+		flush_tlb_local();
 		if (local)
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		trace_tlb_flush(reason, TLB_FLUSH_ALL);
@@ -883,6 +890,30 @@ unsigned long __get_current_cr3_fast(void)
 }
 EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
 
+/*
+ * Flush the entire current user mapping
+ */
+STATIC_NOPV void native_flush_tlb_local(void)
+{
+	/*
+	 * Preemption or interrupts must be disabled to protect the access
+	 * to the per CPU variable and to prevent being preempted between
+	 * read_cr3() and write_cr3().
+	 */
+	WARN_ON_ONCE(preemptible());
+
+	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+
+	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
+	native_write_cr3(__native_read_cr3());
+}
+
+void flush_tlb_local(void)
+{
+	__flush_tlb_local();
+}
+EXPORT_SYMBOL_GPL(flush_tlb_local);
+
 /*
  * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
  * This means that the 'struct flush_tlb_info' that describes which mappings to
@@ -293,7 +293,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
 	 * This must be a normal message, or retry of a normal message
 	 */
 	if (msg->address == TLB_FLUSH_ALL) {
-		local_flush_tlb();
+		flush_tlb_local();
 		stat->d_alltlb++;
 	} else {
 		__flush_tlb_one_user(msg->address);