Commit 54faf77d authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Three small fixlets"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  hw_breakpoint: Use cpu_possible_mask in {reserve,release}_bp_slot()
  hw_breakpoint: Fix cpu check in task_bp_pinned(cpu)
  kprobes: Fix arch_prepare_kprobe to handle copy insn failures
parents e3ff9114 c790b0ad
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
 	return insn.length;
 }
 
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+static int __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+	int ret;
+
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
-	__copy_instruction(p->ainsn.insn, p->addr);
+	ret = __copy_instruction(p->ainsn.insn, p->addr);
+	if (!ret)
+		return -EINVAL;
 
 	/*
 	 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 	/* Also, displacement change doesn't affect the first byte */
 	p->opcode = p->ainsn.insn[0];
+
+	return 0;
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	p->ainsn.insn = get_insn_slot();
 	if (!p->ainsn.insn)
 		return -ENOMEM;
-	arch_copy_kprobe(p);
-	return 0;
+
+	return arch_copy_kprobe(p);
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
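For context, the kprobes fixlet makes the x86 copy path report failure instead of silently ignoring it: __copy_instruction() returns the copied instruction length, with 0 meaning the copy/decode failed, and arch_prepare_kprobe() now propagates arch_copy_kprobe()'s result rather than returning 0 unconditionally. A minimal user-space sketch of the same return-value pattern (names and bodies here are invented for illustration; this is not the kernel code):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for __copy_instruction(): returns the number of bytes
 * copied, or 0 on failure -- the convention the kernel helper uses. */
static int copy_instruction(char *dest, const char *src)
{
	size_t len = strlen(src);

	if (len == 0)
		return 0;		/* decode/copy failed */
	memcpy(dest, src, len + 1);
	return (int)len;
}

/* After the fix: the copy step reports failure to its caller. */
static int arch_copy_probe(char *dest, const char *src)
{
	int ret = copy_instruction(dest, src);

	if (!ret)
		return -EINVAL;
	return 0;
}

static int arch_prepare_probe(char *dest, const char *src)
{
	/* Before the fix this was effectively
	 * "arch_copy_probe(dest, src); return 0;", masking the error. */
	return arch_copy_probe(dest, src);
}

int main(void)
{
	char buf[64];

	printf("good insn: %d\n", arch_prepare_probe(buf, "nop"));	/* 0 */
	printf("bad insn:  %d\n", arch_prepare_probe(buf, ""));		/* -22 (-EINVAL) */
	return 0;
}

The remaining two fixlets are in kernel/events/hw_breakpoint.c: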
@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
 		if (iter->hw.bp_target == tsk &&
 		    find_slot_idx(iter) == type &&
-		    cpu == iter->cpu)
+		    (iter->cpu < 0 || cpu == iter->cpu))
 			count += hw_breakpoint_weight(iter);
 	}
 
@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		return;
 	}
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
 
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	if (cpu >= 0) {
 		toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	} else {
-		for_each_online_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	}
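Two notes on the hw_breakpoint hunks above. In task_bp_pinned(), a per-task breakpoint created with cpu == -1 follows its task to whichever CPU it runs on, so it occupies a slot on every CPU; the old test "cpu == iter->cpu" therefore under-counted whenever a concrete CPU was passed in. A stand-alone sketch of the corrected filter (the struct and values are invented for illustration):

#include <stdio.h>

struct bp {
	int cpu;	/* -1: pinned on every CPU; >= 0: that CPU only */
	int weight;	/* slots this breakpoint consumes */
};

/* The corrected check: a cpu == -1 breakpoint contributes to the
 * pinned count on every CPU, not only when the caller passes -1. */
static int counts_on_cpu(const struct bp *bp, int cpu)
{
	return bp->cpu < 0 || bp->cpu == cpu;
}

int main(void)
{
	const struct bp list[] = { { -1, 1 }, { 0, 1 }, { 2, 1 } };
	int cpu, count;
	unsigned int i;

	for (cpu = 0; cpu < 4; cpu++) {
		count = 0;
		for (i = 0; i < sizeof(list) / sizeof(list[0]); i++)
			if (counts_on_cpu(&list[i], cpu))
				count += list[i].weight;
		printf("cpu %d: %d slot(s) pinned\n", cpu, count);
	}
	return 0;
}

The for_each_online_cpu() -> for_each_possible_cpu() switch is the other fixlet: breakpoint slots are accounted per CPU, and reserving or releasing them only on the CPUs that happen to be online at the time lets a CPU that is hotplugged in later end up with over-committed debug registers. Iterating the possible mask keeps the accounting stable across hotplug.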