Commit 161aa772 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Peter Anvin:
 "A collection of small fixes:

   - There still seem to be problems with asm goto which requires the
     empty asm hack.
   - If SMAP is disabled at compile time, don't enable it nor try to
     interpret a page fault as an SMAP violation.
   - Fix a case of unbounded recursion while tracing"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, smap: smap_violation() is bogus if CONFIG_X86_SMAP is off
  x86, smap: Don't enable SMAP if CONFIG_X86_SMAP is disabled
  compiler/gcc4: Make quirk for asm_volatile_goto() unconditional
  x86: Use preempt_disable_notrace() in cycles_2_ns()
parents eef445ee 4640c7ee
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 	raw_local_save_flags(eflags);
 	BUG_ON(eflags & X86_EFLAGS_AC);
 
-	if (cpu_has(c, X86_FEATURE_SMAP))
+	if (cpu_has(c, X86_FEATURE_SMAP)) {
+#ifdef CONFIG_X86_SMAP
 		set_in_cr4(X86_CR4_SMAP);
+#else
+		clear_in_cr4(X86_CR4_SMAP);
+#endif
+	}
 }
 
 /*
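With this hunk, setup_smap() no longer turns on CR4.SMAP when the kernel was built without CONFIG_X86_SMAP; it explicitly clears the bit instead, so a CPU that advertises SMAP is not left enforcing it while the page-fault code (see the fault-handler hunks below) refuses to interpret such faults. A rough sketch of how the function reads after the change, reconstructed from the hunk above:

/* Sketch reconstructed from the diff; the EFLAGS.AC sanity check comes
 * from the unchanged context lines. */
static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
        unsigned long eflags;

        /* The kernel must never be running with EFLAGS.AC set here. */
        raw_local_save_flags(eflags);
        BUG_ON(eflags & X86_EFLAGS_AC);

        if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
                set_in_cr4(X86_CR4_SMAP);       /* enable SMAP enforcement */
#else
                clear_in_cr4(X86_CR4_SMAP);     /* built without SMAP: keep it off */
#endif
        }
}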
@@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 	 * dance when its actually needed.
 	 */
 
-	preempt_disable();
+	preempt_disable_notrace();
 	data = this_cpu_read(cyc2ns.head);
 	tail = this_cpu_read(cyc2ns.tail);
 
@@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 		if (!--data->__count)
 			this_cpu_write(cyc2ns.tail, data);
 	}
-	preempt_enable();
+	preempt_enable_notrace();
 
 	return ns;
 }
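This is the unbounded-recursion fix from the merge message: cycles_2_ns() sits on the sched_clock() path that the function tracer uses for its timestamps, so if preempt_disable()/preempt_enable() are themselves traced, every traced call re-enters cycles_2_ns(). The _notrace variants update the preempt count without going through instrumented helpers. An illustrative (not exact) picture of the loop being broken:

/*
 * Illustrative recursion, not a literal backtrace:
 *
 *   tracer callback needs a timestamp
 *     -> sched_clock() -> cycles_2_ns()
 *       -> preempt_disable()            <- itself traced
 *         -> tracer callback -> ...     <- and so on, without bound
 *
 * preempt_disable_notrace()/preempt_enable_notrace() adjust the preempt
 * count directly and are not instrumented, so the clock read can be
 * used safely from inside the tracer.
 */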
@@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address)
 
 static inline bool smap_violation(int error_code, struct pt_regs *regs)
 {
+	if (!IS_ENABLED(CONFIG_X86_SMAP))
+		return false;
+
+	if (!static_cpu_has(X86_FEATURE_SMAP))
+		return false;
+
 	if (error_code & PF_USER)
 		return false;
 
@@ -1087,12 +1093,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
-	if (static_cpu_has(X86_FEATURE_SMAP)) {
-		if (unlikely(smap_violation(error_code, regs))) {
-			bad_area_nosemaphore(regs, error_code, address);
-			return;
-		}
-	}
+	if (unlikely(smap_violation(error_code, regs))) {
+		bad_area_nosemaphore(regs, error_code, address);
+		return;
+	}
 
 	/*
 	 * If we're in an interrupt, have no user context or are running
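The point of these two hunks is that smap_violation() now bails out on its own when SMAP support is compiled out or the CPU lacks the feature, so __do_page_fault() no longer needs the static_cpu_has() wrapper around the call. IS_ENABLED(CONFIG_X86_SMAP) is a compile-time constant (1 when the option is set, 0 otherwise), so with SMAP disabled the compiler can drop the rest of the function as dead code. A minimal sketch of the pattern, with a hypothetical helper name (not kernel code):

#include <linux/kconfig.h>      /* for IS_ENABLED() */

/* Hypothetical helper mirroring the smap_violation() pattern above. */
static inline bool example_smap_check(void)
{
        if (!IS_ENABLED(CONFIG_X86_SMAP))
                return false;   /* whole body folds away when SMAP is off */

        /* ... the real feature and error-code checks would follow here ... */
        return true;
}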
@@ -75,11 +75,7 @@
  *
  * (asm goto is automatically volatile - the naming reflects this.)
  */
-#if GCC_VERSION <= 40801
-# define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
-#else
-# define asm_volatile_goto(x...)	do { asm goto(x); } while (0)
-#endif
+#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
 
 #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
 #if GCC_VERSION >= 40400
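The quirk works around a gcc code-generation problem with asm goto: appending an empty asm("") after the asm goto statement avoids the miscompilation. The workaround had been limited to GCC_VERSION <= 40801, but since problems were still being seen (per the merge message above), it is now applied unconditionally for gcc 4. A hypothetical, deliberately trivial user of the macro, in the spirit of the jump-label code that relies on asm goto (simplified, not the kernel's actual implementation):

/* Trivial example: the asm unconditionally jumps to l_yes; real users
 * patch a NOP into a jump at runtime.  What matters here is the asm
 * goto syntax and the trailing empty asm("") that asm_volatile_goto()
 * appends to keep affected gcc versions from miscompiling the branch. */
static __always_inline bool example_branch(void)
{
	asm_volatile_goto("jmp %l[l_yes]\n\t"
			  : : : : l_yes);
	return false;
l_yes:
	return true;
}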