Commit 4cb38750 authored by Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86/mm changes from Peter Anvin:
 "The big change here is the patchset by Alex Shi to use INVLPG to flush
  only the affected pages when we only need to flush a small page range.

  It also removes the special INVALIDATE_TLB_VECTOR interrupts (32
  vectors!) and replaces them with an ordinary IPI function call."

Fix up trivial conflicts in arch/x86/include/asm/apic.h (added code next
to changed line)

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/tlb: Fix build warning and crash when building for !SMP
  x86/tlb: do flush_tlb_kernel_range by 'invlpg'
  x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR
  x86/tlb: enable tlb flush range support for x86
  mm/mmu_gather: enable tlb flush range in generic mmu_gather
  x86/tlb: add tlb_flushall_shift knob into debugfs
  x86/tlb: add tlb_flushall_shift for specific CPU
  x86/tlb: fall back to flush all when meet a THP large page
  x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range
  x86/tlb_info: get last level TLB entry number of CPU
  x86: Add read_mostly declaration/definition to variables from smp.h
  x86: Define early read-mostly per-cpu macros
parents 0a2fe19c 7efa1c87
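
The flush-range heuristic described in the pull summary above is, at its core, a size check: small ranges are flushed page by page with INVLPG, large ones with a full TLB flush. Below is a minimal sketch under stated assumptions — flush_everything(), flush_one_page() and the SKETCH_PAGE_* macros are hypothetical stand-ins, not kernel API; the real logic (flush_tlb_mm_range() in arch/x86/mm/tlb.c) additionally handles THP pages and the per-CPU TLB sizes detected below.

/* Illustrative sketch only -- not the kernel's actual implementation. */
#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)

static void flush_everything(void) { /* hypothetical: full flush, e.g. CR3 reload */ }
static void flush_one_page(unsigned long addr) { /* hypothetical: one INVLPG */ (void)addr; }

static void flush_range_sketch(unsigned long start, unsigned long end,
			       unsigned int tlb_entries, int flushall_shift)
{
	unsigned long npages = (end - start) >> SKETCH_PAGE_SHIFT;

	/* A shift of -1 disables per-page flushing; big ranges also fall back. */
	if (flushall_shift < 0 || npages > (tlb_entries >> flushall_shift)) {
		flush_everything();
	} else {
		unsigned long addr;

		for (addr = start; addr < end; addr += SKETCH_PAGE_SIZE)
			flush_one_page(addr);
	}
}
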
@@ -129,6 +129,25 @@ config DOUBLEFAULT
option saves about 4k and might cause you much additional grey
hair.
config DEBUG_TLBFLUSH
bool "Set upper limit of TLB entries to flush one-by-one"
depends on DEBUG_KERNEL && (X86_64 || X86_INVLPG)
---help---
X86-only for now.
This option allows the user to tune the amount of TLB entries the
kernel flushes one-by-one instead of doing a full TLB flush. In
certain situations, the former is cheaper. This is controlled by the
tlb_flushall_shift knob under /sys/kernel/debug/x86. If you set it
to -1, the code flushes the whole TLB unconditionally. Otherwise,
for positive values of it, the kernel will use single TLB entry
invalidating instructions according to the following formula:
flush_entries <= active_tlb_entries / 2^tlb_flushall_shift
If in doubt, say "N".
config IOMMU_DEBUG
bool "Enable IOMMU debugging"
depends on GART_IOMMU && DEBUG_KERNEL
......
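
To make the formula in the new DEBUG_TLBFLUSH help text concrete (the numbers are only an example): with 512 last-level DTLB entries and tlb_flushall_shift set to 5, a flush of up to 512 / 2^5 = 16 pages is done entry by entry with INVLPG, while larger ranges (or a shift of -1) result in a full TLB flush.
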
@@ -546,7 +546,7 @@ static inline const struct cpumask *online_target_cpus(void)
return cpu_online_mask;
}
-DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
static inline unsigned int read_apic_id(void)
......
@@ -15,15 +15,6 @@ BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.if NUM_INVALIDATE_TLB_VECTORS > \idx
BUILD_INTERRUPT3(invalidate_interrupt\idx,
(INVALIDATE_TLB_VECTOR_START)+\idx,
smp_invalidate_interrupt)
.endif
.endr
#endif
BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
......
@@ -119,17 +119,6 @@
*/
#define LOCAL_TIMER_VECTOR 0xef
/* up to 32 vectors used for spreading out TLB flushes: */
#if NR_CPUS <= 32
# define NUM_INVALIDATE_TLB_VECTORS (NR_CPUS)
#else
# define NUM_INVALIDATE_TLB_VECTORS (32)
#endif
#define INVALIDATE_TLB_VECTOR_END (0xee)
#define INVALIDATE_TLB_VECTOR_START \
(INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
#define NR_VECTORS 256
#define FPU_IRQ 13
......
@@ -360,9 +360,10 @@ static inline void __flush_tlb_single(unsigned long addr)
static inline void flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
-unsigned long va)
+unsigned long start,
+unsigned long end)
{
-PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
+PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
......
@@ -248,7 +248,8 @@ struct pv_mmu_ops {
void (*flush_tlb_single)(unsigned long addr);
void (*flush_tlb_others)(const struct cpumask *cpus,
struct mm_struct *mm,
-unsigned long va);
+unsigned long start,
+unsigned long end);
/* Hooks for allocating and freeing a pagetable top-level */
int (*pgd_alloc)(struct mm_struct *mm);
......
@@ -551,6 +551,12 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
{ [0 ... NR_CPUS-1] = _initvalue }; \
__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
{ [0 ... NR_CPUS-1] = _initvalue }; \
__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
@@ -559,6 +565,11 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
extern __typeof__(_type) *_name##_early_ptr; \
extern __typeof__(_type) _name##_early_map[]
#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
extern __typeof__(_type) *_name##_early_ptr; \
extern __typeof__(_type) _name##_early_map[]
#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
@@ -570,12 +581,18 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off);
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
DEFINE_PER_CPU(_type, _name) = _initvalue
#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
EXPORT_PER_CPU_SYMBOL(_name)
#define DECLARE_EARLY_PER_CPU(_type, _name) \
DECLARE_PER_CPU(_type, _name)
#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */
......
@@ -61,6 +61,19 @@ static inline void *current_text_addr(void)
# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
enum tlb_infos {
ENTRIES,
NR_INFO
};
extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern s8 __read_mostly tlb_flushall_shift;
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
* Members of this structure are referenced in head.S, so think twice
......
@@ -31,12 +31,12 @@ static inline bool cpu_has_ht_siblings(void)
return has_siblings;
}
-DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
-DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
-DECLARE_PER_CPU(u16, cpu_llc_id);
-DECLARE_PER_CPU(int, cpu_number);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
+DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
@@ -53,10 +53,10 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
return per_cpu(cpu_llc_shared_map, cpu);
}
-DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
-DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
-DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif
/* Static state in head.S used to set up a CPU */
......
@@ -4,7 +4,14 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#define tlb_flush(tlb) \
{ \
if (tlb->fullmm == 0) \
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
else \
flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
}
#include <asm-generic/tlb.h>
......
@@ -73,14 +73,10 @@ static inline void __flush_tlb_one(unsigned long addr)
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
-* - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
+* - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
*
* x86-64 can only flush individual pages or full VMs. For a range flush
* we always do the full VM. Might be worth trying if for a small
* range a few INVLPGs in a row are a win.
*/
#ifndef CONFIG_SMP
@@ -109,9 +105,17 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
__flush_tlb();
}
static inline void flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end, unsigned long vmflag)
{
if (mm == current->active_mm)
__flush_tlb();
}
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
-unsigned long va)
+unsigned long start,
+unsigned long end)
{
}
@@ -119,27 +123,35 @@ static inline void reset_lazy_tlbstate(void)
{
}
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
flush_tlb_all();
}
#else /* SMP */
#include <asm/smp.h>
#define local_flush_tlb() __flush_tlb()
#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
#define flush_tlb_range(vma, start, end) \
flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#define flush_tlb() flush_tlb_current_task()
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
flush_tlb_mm(vma->vm_mm);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
-struct mm_struct *mm, unsigned long va);
+struct mm_struct *mm,
+unsigned long start, unsigned long end);
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
@@ -159,13 +171,8 @@ static inline void reset_lazy_tlbstate(void)
#endif /* SMP */
#ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
+#define flush_tlb_others(mask, mm, start, end) \
+native_flush_tlb_others(mask, mm, start, end)
#endif
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
flush_tlb_all();
}
#endif /* _ASM_X86_TLBFLUSH_H */
@@ -15,7 +15,8 @@ extern void uv_nmi_init(void);
extern void uv_system_init(void);
extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
-unsigned long va,
+unsigned long start,
+unsigned end,
unsigned int cpu);
#else /* X86_UV */
@@ -26,7 +27,7 @@ static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
static inline const struct cpumask *
uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
-unsigned long va, unsigned int cpu)
+unsigned long start, unsigned long end, unsigned int cpu)
{ return cpumask; }
#endif /* X86_UV */
......
@@ -75,8 +75,8 @@ physid_mask_t phys_cpu_present_map;
/*
* Map cpu index to physical APIC ID
*/
-DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
-DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
@@ -88,7 +88,7 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
* used for the mapping. This is where the behaviors of x86_64 and 32
* actually diverge. Let's keep it ugly for now.
*/
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);
/*
* Knob to control our willingness to enable the local APIC.
......
@@ -452,6 +452,35 @@ void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
c->x86_cache_size = l2size;
}
u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
/*
* tlb_flushall_shift shows the balance point in replacing cr3 write
* with multiple 'invlpg'. It will do this replacement when
* flush_tlb_lines <= active_lines/2^tlb_flushall_shift.
* If tlb_flushall_shift is -1, means the replacement will be disabled.
*/
s8 __read_mostly tlb_flushall_shift = -1;
void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
{
if (this_cpu->c_detect_tlb)
this_cpu->c_detect_tlb(c);
printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
"Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
"tlb_flushall_shift is 0x%x\n",
tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
tlb_flushall_shift);
}
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
@@ -911,6 +940,8 @@ void __init identify_boot_cpu(void)
#else
vgetcpu_set_mode();
#endif
if (boot_cpu_data.cpuid_level >= 2)
cpu_detect_tlb(&boot_cpu_data);
}
void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
......
@@ -20,10 +20,19 @@ struct cpu_dev {
void (*c_bsp_init)(struct cpuinfo_x86 *);
void (*c_init)(struct cpuinfo_x86 *);
void (*c_identify)(struct cpuinfo_x86 *);
void (*c_detect_tlb)(struct cpuinfo_x86 *);
unsigned int (*c_size_cache)(struct cpuinfo_x86 *, unsigned int);
int c_x86_vendor;
};
struct _tlb_table {
unsigned char descriptor;
char tlb_type;
unsigned int entries;
/* unsigned int ways; */
char info[128];
};
#define cpu_dev_register(cpu_devX) \
static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
__attribute__((__section__(".x86_cpu_dev.init"))) = \
......
@@ -491,6 +491,181 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
}
#endif
#define TLB_INST_4K 0x01
#define TLB_INST_4M 0x02
#define TLB_INST_2M_4M 0x03
#define TLB_INST_ALL 0x05
#define TLB_INST_1G 0x06
#define TLB_DATA_4K 0x11
#define TLB_DATA_4M 0x12
#define TLB_DATA_2M_4M 0x13
#define TLB_DATA_4K_4M 0x14
#define TLB_DATA_1G 0x16
#define TLB_DATA0_4K 0x21
#define TLB_DATA0_4M 0x22
#define TLB_DATA0_2M_4M 0x23
#define STLB_4K 0x41
static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
{ 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
{ 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
{ 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
{ 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
{ 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
{ 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages */" },
{ 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
{ 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
{ 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
{ 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
{ 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
{ 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
{ 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" },
{ 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
{ 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
{ 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
{ 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
{ 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
{ 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
{ 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
{ 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
{ 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
{ 0x00, 0, 0 }
};
static void __cpuinit intel_tlb_lookup(const unsigned char desc)
{
unsigned char k;
if (desc == 0)
return;
/* look up this descriptor in the table */
for (k = 0; intel_tlb_table[k].descriptor != desc && \
intel_tlb_table[k].descriptor != 0; k++)
;
if (intel_tlb_table[k].tlb_type == 0)
return;
switch (intel_tlb_table[k].tlb_type) {
case STLB_4K:
if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
break;
case TLB_INST_ALL:
if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
case TLB_INST_4K:
if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
break;
case TLB_INST_4M:
if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
case TLB_INST_2M_4M:
if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
case TLB_DATA_4K:
case TLB_DATA0_4K:
if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
break;
case TLB_DATA_4M:
case TLB_DATA0_4M:
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
case TLB_DATA_2M_4M:
case TLB_DATA0_2M_4M:
if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
case TLB_DATA_4K_4M:
if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
}
}
static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
{
if (!cpu_has_invlpg) {
tlb_flushall_shift = -1;
return;
}
switch ((c->x86 << 8) + c->x86_model) {
case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
case 0x61d: /* six-core 45 nm xeon "Dunnington" */
tlb_flushall_shift = -1;
break;
case 0x61a: /* 45 nm nehalem, "Bloomfield" */
case 0x61e: /* 45 nm nehalem, "Lynnfield" */
case 0x625: /* 32 nm nehalem, "Clarkdale" */
case 0x62c: /* 32 nm nehalem, "Gulftown" */
case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
case 0x62f: /* 32 nm Xeon E7 */
tlb_flushall_shift = 6;
break;
case 0x62a: /* SandyBridge */
case 0x62d: /* SandyBridge, "Romely-EP" */
tlb_flushall_shift = 5;
break;
case 0x63a: /* Ivybridge */
tlb_flushall_shift = 1;
break;
default:
tlb_flushall_shift = 6;
}
}
static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
{
int i, j, n;
unsigned int regs[4];
unsigned char *desc = (unsigned char *)regs;
/* Number of times to iterate */
n = cpuid_eax(2) & 0xFF;
for (i = 0 ; i < n ; i++) {
cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
/* If bit 31 is set, this is an unknown format */
for (j = 0 ; j < 3 ; j++)
if (regs[j] & (1 << 31))
regs[j] = 0;
/* Byte 0 is level count, not a descriptor */
for (j = 1 ; j < 16 ; j++)
intel_tlb_lookup(desc[j]);
}
intel_tlb_flushall_shift_set(c);
}
static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
@@ -546,6 +721,7 @@ static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
},
.c_size_cache = intel_size_cache,
#endif
.c_detect_tlb = intel_detect_tlb,
.c_early_init = early_init_intel,
.c_init = init_intel,
.c_x86_vendor = X86_VENDOR_INTEL,
......
@@ -1048,24 +1048,6 @@ apicinterrupt LOCAL_TIMER_VECTOR \
apicinterrupt X86_PLATFORM_IPI_VECTOR \
x86_platform_ipi smp_x86_platform_ipi
#ifdef CONFIG_SMP
ALIGN
INTR_FRAME
.irp idx,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.if NUM_INVALIDATE_TLB_VECTORS > \idx
ENTRY(invalidate_interrupt\idx)
pushq_cfi $~(INVALIDATE_TLB_VECTOR_START+\idx)
jmp .Lcommon_invalidate_interrupt0
CFI_ADJUST_CFA_OFFSET -8
END(invalidate_interrupt\idx)
.endif
.endr
CFI_ENDPROC
apicinterrupt INVALIDATE_TLB_VECTOR_START, \
invalidate_interrupt0, smp_invalidate_interrupt
#endif
apicinterrupt THRESHOLD_APIC_VECTOR \
threshold_interrupt smp_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
......
@@ -171,79 +171,6 @@ static void __init smp_intr_init(void)
*/
alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
/* IPIs for invalidation */
#define ALLOC_INVTLB_VEC(NR) \
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
invalidate_interrupt##NR)
switch (NUM_INVALIDATE_TLB_VECTORS) {
default:
ALLOC_INVTLB_VEC(31);
case 31:
ALLOC_INVTLB_VEC(30);
case 30:
ALLOC_INVTLB_VEC(29);
case 29:
ALLOC_INVTLB_VEC(28);
case 28:
ALLOC_INVTLB_VEC(27);
case 27:
ALLOC_INVTLB_VEC(26);
case 26:
ALLOC_INVTLB_VEC(25);
case 25:
ALLOC_INVTLB_VEC(24);
case 24:
ALLOC_INVTLB_VEC(23);
case 23:
ALLOC_INVTLB_VEC(22);
case 22:
ALLOC_INVTLB_VEC(21);
case 21:
ALLOC_INVTLB_VEC(20);
case 20:
ALLOC_INVTLB_VEC(19);
case 19:
ALLOC_INVTLB_VEC(18);
case 18:
ALLOC_INVTLB_VEC(17);
case 17:
ALLOC_INVTLB_VEC(16);
case 16:
ALLOC_INVTLB_VEC(15);
case 15:
ALLOC_INVTLB_VEC(14);
case 14:
ALLOC_INVTLB_VEC(13);
case 13:
ALLOC_INVTLB_VEC(12);
case 12:
ALLOC_INVTLB_VEC(11);
case 11:
ALLOC_INVTLB_VEC(10);
case 10:
ALLOC_INVTLB_VEC(9);
case 9:
ALLOC_INVTLB_VEC(8);
case 8:
ALLOC_INVTLB_VEC(7);
case 7:
ALLOC_INVTLB_VEC(6);
case 6:
ALLOC_INVTLB_VEC(5);
case 5:
ALLOC_INVTLB_VEC(4);
case 4:
ALLOC_INVTLB_VEC(3);
case 3:
ALLOC_INVTLB_VEC(2);
case 2:
ALLOC_INVTLB_VEC(1);
case 1:
ALLOC_INVTLB_VEC(0);
break;
}
/* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
......
@@ -21,7 +21,7 @@
#include <asm/cpu.h>
#include <asm/stackprotector.h>
-DEFINE_PER_CPU(int, cpu_number);
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#ifdef CONFIG_X86_64
......
@@ -106,17 +106,17 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */
-DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
+DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
/* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
-DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
......
(diff collapsed: arch/x86/mm/tlb.c)
@@ -1055,8 +1055,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
* done. The returned pointer is valid till preemption is re-enabled.
*/
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-struct mm_struct *mm, unsigned long va,
-unsigned int cpu)
+struct mm_struct *mm, unsigned long start,
+unsigned end, unsigned int cpu)
{
int locals = 0;
int remotes = 0;
@@ -1113,7 +1113,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
record_send_statistics(stat, locals, hubs, remotes, bau_desc);
-bau_desc->payload.address = va;
+bau_desc->payload.address = start;
bau_desc->payload.sending_cpu = cpu;
/*
* uv_flush_send_and_wait returns 0 if all cpu's were messaged,
......
@@ -1256,7 +1256,8 @@ static void xen_flush_tlb_single(unsigned long addr)
}
static void xen_flush_tlb_others(const struct cpumask *cpus,
-struct mm_struct *mm, unsigned long va)
+struct mm_struct *mm, unsigned long start,
+unsigned long end)
{
struct {
struct mmuext_op op;
@@ -1268,7 +1269,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
} *args;
struct multicall_space mcs;
-trace_xen_mmu_flush_tlb_others(cpus, mm, va);
+trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
if (cpumask_empty(cpus))
return; /* nothing to do */
@@ -1281,11 +1282,10 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
-if (va == TLB_FLUSH_ALL) {
-args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-} else {
+args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
args->op.cmd = MMUEXT_INVLPG_MULTI;
-args->op.arg1.linear_addr = va;
+args->op.arg1.linear_addr = start;
}
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
......
@@ -86,6 +86,8 @@ struct mmu_gather {
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch *batch;
#endif
unsigned long start;
unsigned long end;
unsigned int need_flush : 1, /* Did free PTEs */
fast_mode : 1; /* No batching */
@@ -113,7 +115,8 @@ static inline int tlb_fast_mode(struct mmu_gather *tlb)
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
+unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
/* tlb_remove_page
......
@@ -397,18 +397,20 @@ TRACE_EVENT(xen_mmu_flush_tlb_single,
TRACE_EVENT(xen_mmu_flush_tlb_others,
TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
-unsigned long addr),
-TP_ARGS(cpus, mm, addr),
+unsigned long addr, unsigned long end),
+TP_ARGS(cpus, mm, addr, end),
TP_STRUCT__entry(
__field(unsigned, ncpus)
__field(struct mm_struct *, mm)
__field(unsigned long, addr)
+__field(unsigned long, end)
),
TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
__entry->mm = mm;
-__entry->addr = addr),
-TP_printk("ncpus %d mm %p addr %lx",
-__entry->ncpus, __entry->mm, __entry->addr)
+__entry->addr = addr,
+__entry->end = end),
+TP_printk("ncpus %d mm %p addr %lx, end %lx",
+__entry->ncpus, __entry->mm, __entry->addr, __entry->end)
);
TRACE_EVENT(xen_mmu_write_cr3,
......
@@ -206,6 +206,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
tlb->mm = mm;
tlb->fullmm = fullmm;
tlb->start = -1UL;
tlb->end = 0;
tlb->need_flush = 0;
tlb->fast_mode = (num_possible_cpus() == 1);
tlb->local.next = NULL;
@@ -248,6 +250,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
{
struct mmu_gather_batch *batch, *next;
tlb->start = start;
tlb->end = end;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
@@ -1204,6 +1208,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
*/
if (force_flush) {
force_flush = 0;
#ifdef HAVE_GENERIC_MMU_GATHER
tlb->start = addr;
tlb->end = end;
#endif
tlb_flush_mmu(tlb);
if (addr != end)
goto again;
......