Commit 8a304e5d authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Fix call to flush_ptrace_hw_breakpoint()
  powerpc/kexec: Restore ppc_md.machine_kexec
  powerpc/mm: Make hpte_need_flush() safe for preemption
parents c7b01d3d e0780b72
...@@ -240,6 +240,12 @@ struct machdep_calls { ...@@ -240,6 +240,12 @@ struct machdep_calls {
* claims to support kexec. * claims to support kexec.
*/ */
int (*machine_kexec_prepare)(struct kimage *image); int (*machine_kexec_prepare)(struct kimage *image);
/* Called to perform the _real_ kexec.
* Do NOT allocate memory or fail here. We are past the point of
* no return.
*/
void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */ #endif /* CONFIG_KEXEC */
#ifdef CONFIG_SUSPEND #ifdef CONFIG_SUSPEND
......
...@@ -87,6 +87,9 @@ void machine_kexec(struct kimage *image) ...@@ -87,6 +87,9 @@ void machine_kexec(struct kimage *image)
save_ftrace_enabled = __ftrace_enabled_save(); save_ftrace_enabled = __ftrace_enabled_save();
if (ppc_md.machine_kexec)
ppc_md.machine_kexec(image);
else
default_machine_kexec(image); default_machine_kexec(image);
__ftrace_enabled_restore(save_ftrace_enabled); __ftrace_enabled_restore(save_ftrace_enabled);
......
...@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread) ...@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
prime_debug_regs(new_thread); prime_debug_regs(new_thread);
} }
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */ #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread) static void set_debug_reg_defaults(struct thread_struct *thread)
{ {
if (thread->dabr) { if (thread->dabr) {
...@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) ...@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
set_dabr(0); set_dabr(0);
} }
} }
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
int set_dabr(unsigned long dabr) int set_dabr(unsigned long dabr)
...@@ -670,11 +672,11 @@ void flush_thread(void) ...@@ -670,11 +672,11 @@ void flush_thread(void)
{ {
discard_lazy_cpu_state(); discard_lazy_cpu_state();
#ifdef CONFIG_HAVE_HW_BREAKPOINTS #ifdef CONFIG_HAVE_HW_BREAKPOINT
flush_ptrace_hw_breakpoint(current); flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINTS */ #else /* CONFIG_HAVE_HW_BREAKPOINT */
set_debug_reg_defaults(&current->thread); set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINTS */ #endif /* CONFIG_HAVE_HW_BREAKPOINT */
} }
void void
......
...@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); ...@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
* needs to be flushed. This function will either perform the flush * needs to be flushed. This function will either perform the flush
* immediately or will batch it up if the current CPU has an active * immediately or will batch it up if the current CPU has an active
* batch on it. * batch on it.
*
* Must be called from within some kind of spinlock/non-preempt region...
*/ */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr, void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge) pte_t *ptep, unsigned long pte, int huge)
{ {
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
unsigned long vsid, vaddr; unsigned long vsid, vaddr;
unsigned int psize; unsigned int psize;
int ssize; int ssize;
...@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, ...@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/ */
if (!batch->active) { if (!batch->active) {
flush_hash_page(vaddr, rpte, psize, ssize, 0); flush_hash_page(vaddr, rpte, psize, ssize, 0);
put_cpu_var(ppc64_tlb_batch);
return; return;
} }
...@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, ...@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
batch->index = ++i; batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR) if (i >= PPC64_TLB_BATCH_NR)
__flush_tlb_pending(batch); __flush_tlb_pending(batch);
put_cpu_var(ppc64_tlb_batch);
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment