Commit 8404410b authored by Michael Ellerman

Merge branch 'topic/livepatch' into next

Merge the support for live patching on ppc64le using mprofile-kernel.
This branch has also been merged into the livepatching tree for v4.7.
parents 1050e689 85baa095
@@ -160,6 +160,7 @@ config PPC
	select HAVE_ARCH_SECCOMP_FILTER
	select ARCH_HAS_UBSAN_SANITIZE_ALL
	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS

config GENERIC_CSUM
	def_bool CPU_LITTLE_ENDIAN

@@ -1107,3 +1108,5 @@ config PPC_LIB_RHEAP
	bool

source "arch/powerpc/kvm/Kconfig"

source "kernel/livepatch/Kconfig"
/*
* livepatch.h - powerpc-specific Kernel Live Patching Core
*
* Copyright (C) 2015-2016, SUSE, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASM_POWERPC_LIVEPATCH_H
#define _ASM_POWERPC_LIVEPATCH_H

#include <linux/module.h>
#include <linux/ftrace.h>

#ifdef CONFIG_LIVEPATCH
static inline int klp_check_compiler_support(void)
{
	return 0;
}

static inline int klp_write_module_reloc(struct module *mod, unsigned long
					 type, unsigned long loc, unsigned long value)
{
	/* This requires infrastructure changes; we need the loadinfos. */
	return -ENOSYS;
}

static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->nip = ip;
}

#define klp_get_ftrace_location klp_get_ftrace_location
static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	/*
	 * Live patch works only with -mprofile-kernel on PPC. In this case,
	 * the ftrace location is always within the first 16 bytes.
	 */
	return ftrace_location_range(faddr, faddr + 16);
}
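
/*
 * Illustrative sketch, not part of this commit: a typical -mprofile-kernel
 * function entry places the profiling call inside that 16-byte window, e.g.:
 *
 *	addis	r2,r12,...	# global entry / TOC setup, faddr + 0
 *	addi	r2,r2,...	# faddr + 4
 *	mflr	r0		# faddr + 8
 *	bl	_mcount		# faddr + 12, the ftrace location
 *
 * (Depending on the toolchain, a std of r0 may precede the bl, pushing the
 * call site to faddr + 16, which is still within the inclusive range.)
 */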

static inline void klp_init_thread_info(struct thread_info *ti)
{
	/* + 1 to account for STACK_END_MAGIC */
	ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
}
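
/*
 * Resulting layout of the stack page (low addresses at the bottom),
 * sketched for illustration only:
 *
 *	+----------------------------+ <- top of stack page
 *	| kernel stack, grows down   |
 *	.                            .
 *	| livepatch stack, grows up  |
 *	| in 24-byte frames          |
 *	+----------------------------+ <- initial ti->livepatch_sp
 *	| STACK_END_MAGIC (8 bytes)  |    skipped by the "+ 1" above
 *	+----------------------------+ <- (ti + 1)
 *	| struct thread_info         |
 *	+----------------------------+ <- task_stack_page()
 */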
#else
static void klp_init_thread_info(struct thread_info *ti) { }
#endif /* CONFIG_LIVEPATCH */
#endif /* _ASM_POWERPC_LIVEPATCH_H */
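
For orientation, the consumer of this arch glue is an ordinary livepatch module. Below is a minimal sketch modelled on the kernel's livepatch-sample; it assumes the v4.7-era klp API (klp_register_patch()/klp_enable_patch()), and the patched function and message are purely illustrative:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

/* Illustrative replacement for the kernel's cmdline_proc_show() */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name being NULL means vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;
	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}
	return 0;
}

static void livepatch_exit(void)
{
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");

On ppc64le such a module only works when the kernel is built with -mprofile-kernel, which is what the ftrace_caller changes below cater for.
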
@@ -43,7 +43,9 @@ struct thread_info {
	int		preempt_count;		/* 0 => preemptable,
						   <0 => BUG */
	unsigned long	local_flags;		/* private flags for thread */
#ifdef CONFIG_LIVEPATCH
	unsigned long *livepatch_sp;
#endif
	/* low level flags - has atomic operations done on it */
	unsigned long	flags ____cacheline_aligned_in_smp;
};
...
@@ -86,6 +86,10 @@ int main(void)
	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_LIVEPATCH
	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
#endif

	DEFINE(KSP, offsetof(struct thread_struct, ksp));
	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
...
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/magic.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -1248,6 +1249,9 @@ _GLOBAL(ftrace_caller)
	addi	r3,r3,function_trace_op@toc@l
	ld	r5,0(r3)

#ifdef CONFIG_LIVEPATCH
	mr	r14,r7		/* remember old NIP */
#endif
	/* Calculate ip from nip-4 into r3 for call below */
	subi	r3, r7, MCOUNT_INSN_SIZE
@@ -1272,6 +1276,9 @@ ftrace_call:
	/* Load ctr with the possibly modified NIP */
	ld	r3, _NIP(r1)
	mtctr	r3
#ifdef CONFIG_LIVEPATCH
	cmpd	r14,r3		/* has NIP been altered? */
#endif

	/* Restore gprs */
	REST_8GPRS(0,r1)
@@ -1289,6 +1296,11 @@ ftrace_call:
	ld	r0, LRSAVE(r1)
	mtlr	r0

#ifdef CONFIG_LIVEPATCH
	/* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne-	livepatch_handler
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	stdu	r1, -112(r1)
.globl ftrace_graph_call
@@ -1305,6 +1317,91 @@ _GLOBAL(ftrace_graph_stub)
_GLOBAL(ftrace_stub)
	blr
#ifdef CONFIG_LIVEPATCH
/*
* This function runs in the mcount context, between two functions. As
* such it can only clobber registers which are volatile and used in
* function linkage.
*
* We get here when a function A, calls another function B, but B has
* been live patched with a new function C.
*
* On entry:
* - we have no stack frame and can not allocate one
* - LR points back to the original caller (in A)
* - CTR holds the new NIP in C
* - r0 & r12 are free
*
* r0 can't be used as the base register for a DS-form load or store, so
* we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
*/
livepatch_handler:
	CURRENT_THREAD_INFO(r12, r1)

	/* Save stack pointer into r0 */
	mr	r0, r1

	/* Allocate 3 x 8 bytes */
	ld	r1, TI_livepatch_sp(r12)
	addi	r1, r1, 24
	std	r1, TI_livepatch_sp(r12)

	/* Save toc & real LR on livepatch stack */
	std	r2,  -24(r1)
	mflr	r12
	std	r12, -16(r1)

	/* Store stack end marker */
	lis	r12, STACK_END_MAGIC@h
	ori	r12, r12, STACK_END_MAGIC@l
	std	r12, -8(r1)

	/* Restore real stack pointer */
	mr	r1, r0

	/* Put ctr in r12 for global entry and branch there */
	mfctr	r12
	bctrl

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r0 and r12, and we can use r2 until we
	 * restore it.
	 */
	CURRENT_THREAD_INFO(r12, r1)

	/* Save stack pointer into r0 */
	mr	r0, r1

	ld	r1, TI_livepatch_sp(r12)

	/* Check stack marker hasn't been trashed */
	lis	r2,  STACK_END_MAGIC@h
	ori	r2,  r2, STACK_END_MAGIC@l
	ld	r12, -8(r1)
1:	tdne	r12, r2
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld	r12, -16(r1)
	mtlr	r12
	ld	r2,  -24(r1)

	/* Pop livepatch stack frame */
	CURRENT_THREAD_INFO(r12, r0)
	subi	r1, r1, 24
	std	r1, TI_livepatch_sp(r12)

	/* Restore real stack pointer */
	mr	r1, r0

	/* Return to original caller of live patched function */
	blr
#endif
#else
_GLOBAL_TOC(_mcount)
	/* Taken from output of objdump from lib64/glibc */
...
@@ -66,6 +66,7 @@
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>
#include <asm/livepatch.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -607,10 +608,12 @@ void irq_ctx_init(void)
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);
	}
}
...
@@ -57,6 +57,8 @@
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -1402,13 +1404,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	struct thread_info *ti = task_thread_info(p);

	klp_init_thread_info(ti);

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
-		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
...
@@ -69,6 +69,7 @@
#include <asm/kvm_ppc.h>
#include <asm/hugetlb.h>
#include <asm/epapr_hcalls.h>
#include <asm/livepatch.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -667,16 +668,16 @@ static void __init emergency_stack_init(void)
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
-		unsigned long sp;
-		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].emergency_sp = __va(sp);
+		struct thread_info *ti;
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for machine check exception handling. */
-		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].mc_emergency_sp = __va(sp);
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}
@@ -700,6 +701,8 @@ void __init setup_arch(char **cmdline_p)
	if (ppc_md.panic)
		setup_panic();

	klp_init_thread_info(&init_thread_info);

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
...
@@ -455,6 +455,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
...
@@ -298,6 +298,19 @@ static void notrace klp_ftrace_handler(unsigned long ip,
	rcu_read_unlock();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
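
/*
 * Illustrative note, not part of this commit: an architecture opts out of
 * the fallback above by defining klp_get_ftrace_location as a macro in its
 * asm/livepatch.h (as the powerpc header earlier in this commit does),
 * which makes the #ifndef compile this generic version out.
 */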

static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;
@@ -312,8 +325,14 @@ static void klp_disable_func(struct klp_func *func)
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
-		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
+		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
@@ -338,6 +357,15 @@ static int klp_enable_func(struct klp_func *func)
	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;
@@ -352,7 +380,7 @@ static int klp_enable_func(struct klp_func *func)
	INIT_LIST_HEAD(&ops->func_stack);
	list_add_rcu(&func->stack_node, &ops->func_stack);

-	ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+	ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
	if (ret) {
		pr_err("failed to set ftrace filter for function '%s' (%d)\n",
		       func->old_name, ret);
@@ -363,7 +391,7 @@ static int klp_enable_func(struct klp_func *func)
	if (ret) {
		pr_err("failed to register ftrace handler for function '%s' (%d)\n",
		       func->old_name, ret);
-		ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+		ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
		goto err;
	}
...
@@ -1530,7 +1530,19 @@ static int ftrace_cmp_recs(const void *a, const void *b)
	return 0;
}

-static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+/**
+ * ftrace_location_range - return the first address of a traced location
+ *	if it touches the given ip range
+ * @start: start of range to search.
+ * @end: end of range to search (inclusive). @end points to the last byte
+ *	to check.
+ *
+ * Returns rec->ip if the related ftrace location is at least partly within
+ * the given address range. That is, the first address of the instruction
+ * that is either a NOP or call to the function tracer. It checks the ftrace
+ * internal tables to determine if the address belongs or not.
+ */
+unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
...
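
A hedged usage sketch, not part of this commit: with ftrace_location_range() now visible to the rest of the kernel, a caller can probe whether a function entry has a traceable site in its first bytes, mirroring what the powerpc klp_get_ftrace_location() above does (the helper name and 16-byte window are illustrative):

#include <linux/types.h>
#include <linux/ftrace.h>

/* Hypothetical helper: does an ftrace NOP/call site start in the window? */
static bool patch_target_has_ftrace_site(unsigned long faddr)
{
	/* Non-zero means an ftrace location lies within [faddr, faddr + 16]. */
	return ftrace_location_range(faddr, faddr + 16) != 0;
}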