Commit 9b3499d7 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Two fixes:

   - A PCID related revert that fixes power management and performance
     regressions.

   - The module loader robustization and sanity check commit is rather
     fresh, but it looked like a good idea to apply because of the
     hidden data corruption problem such invalid modules could cause"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/module: Detect and skip invalid relocations
  Revert "x86/mm: Stop calling leave_mm() in idle code"
parents b21172cf eda9cec4
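
Editor's note: the module-loader fix below is easy to misread from the diff alone. Every relocation target in a well-formed, not-yet-relocated module is zero, so a nonzero target suggests the relocation has already been applied (or the image is corrupt), and writing over it again is exactly the "hidden data corruption" the merge message mentions. Here is a minimal userspace sketch of that zero-target check, under the assumption that this captures the intent of the module.c hunk below; the function names and toy image are invented for illustration, not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Model of the zero-target sanity check: refuse to apply a
 * relocation whose target bytes are already nonzero, since that
 * indicates a duplicate or corrupt relocation entry.
 * apply_reloc_64() is a made-up name for illustration.
 */
static int apply_reloc_64(uint8_t *loc, uint64_t val)
{
	uint64_t old;

	memcpy(&old, loc, sizeof(old));
	if (old != 0)
		return -1;		/* invalid relocation target */
	memcpy(loc, &val, sizeof(val));
	return 0;
}

int main(void)
{
	uint8_t image[8] = { 0 };

	printf("first apply:  %d\n", apply_reloc_64(image, 0x1234));	/* 0: ok */
	printf("second apply: %d\n", apply_reloc_64(image, 0x1234));	/* -1: rejected */
	return 0;
}
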
arch/ia64/include/asm/acpi.h
@@ -112,6 +112,8 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
 	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
 }
 
+#define acpi_unlazy_tlb(x)
+
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)	\
arch/x86/include/asm/acpi.h
@@ -150,6 +150,8 @@ static inline void disable_acpi(void) { }
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
+#define acpi_unlazy_tlb(x)	leave_mm(x)
+
 #ifdef CONFIG_ACPI_APEI
 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 {
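
Editor's note: the two header hunks above define the same hook with per-architecture behaviour: acpi_unlazy_tlb() expands to nothing on ia64 and to leave_mm() on x86. A small compile-and-run model of that dispatch pattern follows; FAKE_IA64 and the stub leave_mm() are stand-ins invented for illustration (build with and without -DFAKE_IA64 to see both expansions).

#include <stdio.h>

static void leave_mm(int cpu)			/* illustrative stub only */
{
	printf("cpu %d: switching to init_mm before idle\n", cpu);
}

#ifdef FAKE_IA64
# define acpi_unlazy_tlb(x)			/* no-op on this arch */
#else
# define acpi_unlazy_tlb(x)	leave_mm(x)	/* the x86 behaviour */
#endif

int main(void)
{
	acpi_unlazy_tlb(0);	/* generic caller, per-arch expansion */
	return 0;
}
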
arch/x86/kernel/module.c
@@ -172,19 +172,27 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		case R_X86_64_NONE:
 			break;
 		case R_X86_64_64:
+			if (*(u64 *)loc != 0)
+				goto invalid_relocation;
 			*(u64 *)loc = val;
 			break;
 		case R_X86_64_32:
+			if (*(u32 *)loc != 0)
+				goto invalid_relocation;
 			*(u32 *)loc = val;
 			if (val != *(u32 *)loc)
 				goto overflow;
 			break;
 		case R_X86_64_32S:
+			if (*(s32 *)loc != 0)
+				goto invalid_relocation;
 			*(s32 *)loc = val;
 			if ((s64)val != *(s32 *)loc)
 				goto overflow;
 			break;
 		case R_X86_64_PC32:
+			if (*(u32 *)loc != 0)
+				goto invalid_relocation;
 			val -= (u64)loc;
 			*(u32 *)loc = val;
 #if 0
@@ -200,6 +208,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 	}
 	return 0;
 
+invalid_relocation:
+	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
+	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
+	return -ENOEXEC;
+
 overflow:
 	pr_err("overflow in relocation type %d val %Lx\n",
 	       (int)ELF64_R_TYPE(rel[i].r_info), val);
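
Editor's note: apart from the new zero-target check, the 32-bit cases keep their pre-existing overflow detection: the 64-bit value is stored into 32 bits, read back, and compared, so any truncation is caught (with sign extension for the signed R_X86_64_32S case). A standalone sketch of that round-trip check; the helper names are hypothetical, and the signed narrowing conversion is implementation-defined in ISO C (it wraps on the compilers the kernel targets).

#include <stdint.h>
#include <stdio.h>

/* Unsigned 32-bit store: fails if the value has high bits set. */
static int store32_checked(uint32_t *loc, uint64_t val)
{
	*loc = (uint32_t)val;
	return (val != *loc) ? -1 : 0;		/* -1: overflow */
}

/* Signed 32-bit store: the read-back is sign-extended to 64 bits. */
static int store32s_checked(int32_t *loc, uint64_t val)
{
	*loc = (int32_t)val;
	return ((int64_t)val != *loc) ? -1 : 0;
}

int main(void)
{
	uint32_t u;
	int32_t s;

	printf("%d\n", store32_checked(&u, 0xffffffffULL));	/* 0: fits */
	printf("%d\n", store32_checked(&u, 0x100000000ULL));	/* -1: overflow */
	printf("%d\n", store32s_checked(&s, (uint64_t)-4));	/* 0: sign-extends */
	printf("%d\n", store32s_checked(&s, 0x80000000ULL));	/* -1: > INT32_MAX */
	return 0;
}
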
arch/x86/mm/tlb.c
@@ -85,6 +85,7 @@ void leave_mm(int cpu)
 
 	switch_mm(NULL, &init_mm, NULL);
 }
+EXPORT_SYMBOL_GPL(leave_mm);
 
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
@@ -195,12 +196,22 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 		write_cr3(build_cr3(next, new_asid));
-		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
-				TLB_FLUSH_ALL);
+
+		/*
+		 * NB: This gets called via leave_mm() in the idle path
+		 * where RCU functions differently.  Tracing normally
+		 * uses RCU, so we need to use the _rcuidle variant.
+		 *
+		 * (There is no good reason for this.  The idle code should
+		 *  be rearranged to call this before rcu_idle_enter().)
+		 */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	} else {
 		/* The new ASID is already up to date. */
 		write_cr3(build_cr3_noflush(next, new_asid));
-		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
+
+		/* See above wrt _rcuidle. */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
 	}
 
 	this_cpu_write(cpu_tlbstate.loaded_mm, next);
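
Editor's note: the comment in the hunk above is the revert's one subtlety. leave_mm() can now run from the idle loop after rcu_idle_enter(), where RCU is not watching, yet ordinary tracepoints depend on RCU; the _rcuidle variants temporarily re-enter the watched state around the event. A toy userspace model of that guard; every name here (rcu_watching, trace_event_rcuidle, ...) is invented for illustration and only mimics the shape of the kernel mechanism.

#include <stdbool.h>
#include <stdio.h>

static bool rcu_watching = true;

static void trace_event(const char *msg)
{
	if (!rcu_watching) {
		printf("BUG: tracepoint used while RCU is idle\n");
		return;
	}
	printf("trace: %s\n", msg);
}

static void trace_event_rcuidle(const char *msg)
{
	bool was_watching = rcu_watching;

	rcu_watching = true;		/* model: re-enter the watched state */
	trace_event(msg);
	rcu_watching = was_watching;	/* model: restore the idle state */
}

int main(void)
{
	rcu_watching = false;			/* idle loop, post rcu_idle_enter() */
	trace_event("tlb_flush");		/* unsafe: would splat in the kernel */
	trace_event_rcuidle("tlb_flush");	/* safe variant */
	return 0;
}
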
drivers/acpi/processor_idle.c
@@ -710,6 +710,8 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 static void acpi_idle_enter_bm(struct acpi_processor *pr,
 			       struct acpi_processor_cx *cx, bool timer_bc)
 {
+	acpi_unlazy_tlb(smp_processor_id());
+
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
drivers/idle/intel_idle.c
@@ -913,15 +913,16 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
 	struct cpuidle_state *state = &drv->states[index];
 	unsigned long eax = flg2MWAIT(state->flags);
 	unsigned int cstate;
+	int cpu = smp_processor_id();
 
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
 	/*
-	 * NB: if CPUIDLE_FLAG_TLB_FLUSHED is set, this idle transition
-	 * will probably flush the TLB.  It's not guaranteed to flush
-	 * the TLB, though, so it's not clear that we can do anything
-	 * useful with this knowledge.
+	 * leave_mm() to avoid costly and often unnecessary wakeups
+	 * for flushing the user TLB's associated with the active mm.
 	 */
+	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+		leave_mm(cpu);
 
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		tick_broadcast_enter();
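
Editor's note: the restored leave_mm() call is what buys back the power and performance the merge message refers to. A CPU that switches to init_mm before a deep, TLB-flushing C-state no longer caches user translations for its old mm, so later TLB shootdowns for that mm can skip it instead of waking it with an IPI. A toy model of that bookkeeping; struct cpu, flush_mm() and NCPUS are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

struct cpu {
	int loaded_mm;		/* 0 == init_mm, i.e. lazy-TLB */
	bool idle;
};

/* Send a flush IPI only to CPUs still holding translations for mm. */
static void flush_mm(struct cpu cpus[], int mm)
{
	for (int i = 0; i < NCPUS; i++) {
		if (cpus[i].loaded_mm != mm)
			continue;	/* nothing cached, no IPI needed */
		printf("IPI -> cpu %d%s\n", i,
		       cpus[i].idle ? " (woken from idle!)" : "");
	}
}

int main(void)
{
	struct cpu cpus[NCPUS] = {
		{ .loaded_mm = 1 },			/* running mm 1 */
		{ .loaded_mm = 1, .idle = true },	/* idled without leave_mm() */
		{ .loaded_mm = 0, .idle = true },	/* idled after leave_mm() */
		{ .loaded_mm = 2 },			/* unrelated mm */
	};

	flush_mm(cpus, 1);	/* wakes cpu 1; cpu 2 is skipped */
	return 0;
}
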