Commit 77d64656 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-v4.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull ftrace fixes from Steven Rostedt:
 "There have been a few memory issues found with ftrace.

  One was simply a memory leak where not all was being freed that should
  have been in releasing a file pointer on set_graph_function.

  Then Thomas found that the ftrace trampolines were marked for
  read/write as well as execute. To shrink the possible attack surface,
  he added calls to set them to ro. Which also uncovered some other
  issues with freeing module allocated memory that had its permissions
  changed.

  Kprobes had a similar issue which is fixed and a selftest was added to
  trigger that issue again"

* tag 'trace-v4.12-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  x86/ftrace: Make sure that ftrace trampolines are not RWX
  x86/mm/ftrace: Do not bug in early boot on irqs_disabled in cpu_flush_range()
  selftests/ftrace: Add a testcase for many kprobe events
  kprobes/x86: Fix to set RWX bits correctly before releasing trampoline
  ftrace: Fix memory leak in ftrace_graph_release()
parents c86daad2 6ee98ffe
...@@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size) ...@@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size)
{ {
return module_alloc(size); return module_alloc(size);
} }
static inline void tramp_free(void *tramp) static inline void tramp_free(void *tramp, int size)
{ {
int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
set_memory_nx((unsigned long)tramp, npages);
set_memory_rw((unsigned long)tramp, npages);
module_memfree(tramp); module_memfree(tramp);
} }
#else #else
...@@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size) ...@@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size)
{ {
return NULL; return NULL;
} }
static inline void tramp_free(void *tramp) { } static inline void tramp_free(void *tramp, int size) { }
#endif #endif
/* Defined as markers to the end of the ftrace default trampolines */ /* Defined as markers to the end of the ftrace default trampolines */
...@@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) ...@@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* Copy ftrace_caller onto the trampoline memory */ /* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size); ret = probe_kernel_read(trampoline, (void *)start_offset, size);
if (WARN_ON(ret < 0)) { if (WARN_ON(ret < 0)) {
tramp_free(trampoline); tramp_free(trampoline, *tramp_size);
return 0; return 0;
} }
...@@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) ...@@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* Are we pointing to the reference? */ /* Are we pointing to the reference? */
if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
tramp_free(trampoline); tramp_free(trampoline, *tramp_size);
return 0; return 0;
} }
...@@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) ...@@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
unsigned long offset; unsigned long offset;
unsigned long ip; unsigned long ip;
unsigned int size; unsigned int size;
int ret; int ret, npages;
if (ops->trampoline) { if (ops->trampoline) {
/* /*
...@@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) ...@@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
*/ */
if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return; return;
npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
set_memory_rw(ops->trampoline, npages);
} else { } else {
ops->trampoline = create_trampoline(ops, &size); ops->trampoline = create_trampoline(ops, &size);
if (!ops->trampoline) if (!ops->trampoline)
return; return;
ops->trampoline_size = size; ops->trampoline_size = size;
npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
} }
offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
...@@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) ...@@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
/* Do a safe modify in case the trampoline is executing */ /* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func); new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new); ret = update_ftrace_func(ip, new);
set_memory_ro(ops->trampoline, npages);
/* The update should never fail */ /* The update should never fail */
WARN_ON(ret); WARN_ON(ret);
...@@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops) ...@@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return; return;
tramp_free((void *)ops->trampoline); tramp_free((void *)ops->trampoline, ops->trampoline_size);
ops->trampoline = 0; ops->trampoline = 0;
} }
......
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/frame.h> #include <linux/frame.h>
#include <linux/kasan.h> #include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <asm/text-patching.h> #include <asm/text-patching.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn) ...@@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn)
} }
} }
/*
 * Restore default (writable, non-executable) permissions on an
 * instruction page before handing it back to the module allocator.
 */
void free_insn_page(void *page)
{
	unsigned long addr = (unsigned long)page & PAGE_MASK;

	/* Clear NX first, then make the page writable again. */
	set_memory_nx(addr, 1);
	set_memory_rw(addr, 1);
	module_memfree(page);
}
static int arch_copy_kprobe(struct kprobe *p) static int arch_copy_kprobe(struct kprobe *p)
{ {
struct insn insn; struct insn insn;
......
...@@ -186,7 +186,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache) ...@@ -186,7 +186,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
unsigned int i, level; unsigned int i, level;
unsigned long addr; unsigned long addr;
BUG_ON(irqs_disabled()); BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
WARN_ON(PAGE_ALIGN(start) != start); WARN_ON(PAGE_ALIGN(start) != start);
on_each_cpu(__cpa_flush_range, NULL, 1); on_each_cpu(__cpa_flush_range, NULL, 1);
......
...@@ -122,7 +122,7 @@ static void *alloc_insn_page(void) ...@@ -122,7 +122,7 @@ static void *alloc_insn_page(void)
return module_alloc(PAGE_SIZE); return module_alloc(PAGE_SIZE);
} }
static void free_insn_page(void *page) void __weak free_insn_page(void *page)
{ {
module_memfree(page); module_memfree(page);
} }
......
...@@ -5063,7 +5063,7 @@ ftrace_graph_release(struct inode *inode, struct file *file) ...@@ -5063,7 +5063,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
} }
out: out:
kfree(fgd->new_hash); free_ftrace_hash(fgd->new_hash);
kfree(fgd); kfree(fgd);
return ret; return ret;
......
#!/bin/sh
# description: Register/unregister many kprobe events

# ftrace fentry skip size depends on the machine architecture.
# Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc
case `uname -m` in
  x86_64|i[3456]86) OFFS=5;;
  ppc*) OFFS=4;;
  *) OFFS=0;;
esac

echo "Setup up to 256 kprobes"
# Pick up to 256 text symbols from kallsyms, skipping compiler-generated
# symbols that contain a dot (e.g. .cold/.isra clones), and register a
# kprobe at each fentry offset.  "||:" ignores symbols that fail to probe.
# NOTE: the grep pattern is quoted so the shell cannot glob-expand it
# against files in the current directory before grep sees it.
grep t /proc/kallsyms | cut -f3 -d" " | grep -v '.*\..*' | \
head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||:

echo 1 > events/kprobes/enable
echo 0 > events/kprobes/enable
echo > kprobe_events
echo "Waiting for unoptimizing & freeing"
sleep 5
echo "Done"
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment