Commit ebd912ff authored by Heiko Carstens, committed by Alexander Gordeev

s390/stacktrace: Merge perf_callchain_user() and arch_stack_walk_user()

The two functions perf_callchain_user() and arch_stack_walk_user() are
nearly identical. Reduce code duplication and add a common helper which can
be called by both functions.

Fixes: aa44433a ("s390: add USER_STACKTRACE support")
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 185445c7
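
In outline (a condensed sketch of the diff below, not the verbatim kernel code), the merge leaves one common walker, arch_stack_walk_user_common(), plus two thin entry points. The new store_ip() helper routes each resolved instruction pointer either to perf_callchain_store() or to the generic consume_entry callback, selected by the perf flag:

	/* Condensed from the diff below -- illustrative only. */
	static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
				    struct perf_callchain_entry_ctx *entry, bool perf,
				    unsigned long ip)
	{
	#ifdef CONFIG_PERF_EVENTS
		if (perf)	/* perf_callchain_store() returns nonzero when full */
			return !perf_callchain_store(entry, ip);
	#endif
		return consume_entry(cookie, ip);
	}

	void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				 struct pt_regs *regs)
	{
		arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
	}

	void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
				  const struct pt_regs *regs)
	{
		arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
	}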
arch/s390/include/asm/stacktrace.h

@@ -2,6 +2,7 @@
 #ifndef _ASM_S390_STACKTRACE_H
 #define _ASM_S390_STACKTRACE_H
 
+#include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
 
@@ -12,6 +13,12 @@ struct stack_frame_user {
 	unsigned long empty2[4];
 };
 
+struct perf_callchain_entry_ctx;
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+				 struct perf_callchain_entry_ctx *entry,
+				 const struct pt_regs *regs, bool perf);
+
 enum stack_type {
 	STACK_TYPE_UNKNOWN,
 	STACK_TYPE_TASK,
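
A note on the header change above: the bare struct perf_callchain_entry_ctx; line is a forward declaration. Since arch_stack_walk_user_common() only receives a pointer to that struct, an incomplete type is enough for the prototype and asm/stacktrace.h does not have to include the perf headers. The same pattern in isolation (hypothetical names):

	struct ctx;			/* incomplete type: layout not needed here */
	void walk(struct ctx *c);	/* fine: only a pointer crosses the interface */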
arch/s390/kernel/perf_event.c

@@ -218,39 +218,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
-	struct stack_frame_user __user *sf;
-	unsigned long ip, sp;
-	bool first = true;
-
-	if (is_compat_task())
-		return;
-	perf_callchain_store(entry, instruction_pointer(regs));
-	sf = (void __user *)user_stack_pointer(regs);
-	pagefault_disable();
-	while (entry->nr < entry->max_stack) {
-		if (__get_user(sp, &sf->back_chain))
-			break;
-		if (__get_user(ip, &sf->gprs[8]))
-			break;
-		if (ip & 0x1) {
-			/*
-			 * If the instruction address is invalid, and this
-			 * is the first stack frame, assume r14 has not
-			 * been written to the stack yet. Otherwise exit.
-			 */
-			if (first && !(regs->gprs[14] & 0x1))
-				ip = regs->gprs[14];
-			else
-				break;
-		}
-		perf_callchain_store(entry, ip);
-		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
-		if (!sp || sp & 0x7)
-			break;
-		sf = (void __user *)sp;
-		first = false;
-	}
-	pagefault_enable();
+	arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
 }
 
 /* Perf definitions for PMU event attributes in sysfs */
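
(The old explicit while (entry->nr < entry->max_stack) bound is not lost in the merge: perf_callchain_store() itself refuses to store once the entry buffer is full and returns nonzero, so the common walker stops as soon as store_ip() reports false.)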
arch/s390/kernel/stacktrace.c

@@ -5,6 +5,7 @@
  * Copyright IBM Corp. 2006
  */
 
+#include <linux/perf_event.h>
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/compat.h>
@@ -62,8 +63,23 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 	return 0;
 }
 
-void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
-			  const struct pt_regs *regs)
+static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
+			    struct perf_callchain_entry_ctx *entry, bool perf,
+			    unsigned long ip)
+{
+#ifdef CONFIG_PERF_EVENTS
+	if (perf) {
+		if (perf_callchain_store(entry, ip))
+			return false;
+		return true;
+	}
+#endif
+	return consume_entry(cookie, ip);
+}
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+				 struct perf_callchain_entry_ctx *entry,
+				 const struct pt_regs *regs, bool perf)
 {
 	struct stack_frame_user __user *sf;
 	unsigned long ip, sp;
@@ -71,7 +87,8 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 
 	if (is_compat_task())
 		return;
-	if (!consume_entry(cookie, instruction_pointer(regs)))
+	ip = instruction_pointer(regs);
+	if (!store_ip(consume_entry, cookie, entry, perf, ip))
 		return;
 	sf = (void __user *)user_stack_pointer(regs);
 	pagefault_disable();
@@ -91,8 +108,8 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 			else
 				break;
 		}
-		if (!consume_entry(cookie, ip))
-			break;
+		if (!store_ip(consume_entry, cookie, entry, perf, ip))
+			return;
 		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
 		if (!sp || sp & 0x7)
 			break;
@@ -102,6 +119,12 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 	pagefault_enable();
 }
 
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+			  const struct pt_regs *regs)
+{
+	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
+}
+
 unsigned long return_address(unsigned int n)
 {
 	struct unwind_state state;
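
For completeness, the non-perf path is driven by the generic stacktrace code: stack_trace_save_user() passes a callback matching stack_trace_consume_fn (bool (*)(void *cookie, unsigned long addr)) into arch_stack_walk_user(). An illustrative consumer, with hypothetical names and not part of this commit, could look like:

	struct trace_buf {			/* hypothetical collector */
		unsigned long *entries;
		unsigned int len, max;
	};

	/* stack_trace_consume_fn: returning false stops the walk early. */
	static bool collect_entry(void *cookie, unsigned long addr)
	{
		struct trace_buf *buf = cookie;

		if (buf->len >= buf->max)
			return false;		/* buffer full: stop walking */
		buf->entries[buf->len++] = addr;
		return true;			/* continue with the next frame */
	}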