Commit 5e9a2692 authored by Martin Schwidefsky, committed by Martin Schwidefsky

[S390] ptrace cleanup

Overhaul program event recording and the code dealing with the ptrace
user space interface.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent da7f51c1
@@ -81,7 +81,8 @@ struct thread_struct {
 	mm_segment_t mm_segment;
 	unsigned long prot_addr;	/* address of protection-excep. */
 	unsigned int trap_no;
-	per_struct per_info;
+	struct per_regs per_user;	/* User specified PER registers */
+	struct per_event per_event;	/* Cause of the last PER trap */
 	/* pfault_wait is used to block the process on a pfault event */
 	unsigned long pfault_wait;
 };
......
@@ -331,10 +331,60 @@ struct pt_regs
 	unsigned short ilc;
 	unsigned short svcnr;
 };
+
+/*
+ * Program event recording (PER) register set.
+ */
+struct per_regs {
+	unsigned long control;		/* PER control bits */
+	unsigned long start;		/* PER starting address */
+	unsigned long end;		/* PER ending address */
+};
+
+/*
+ * PER event contains information about the cause of the last PER exception.
+ */
+struct per_event {
+	unsigned short cause;		/* PER code, ATMID and AI */
+	unsigned long address;		/* PER address */
+	unsigned char paid;		/* PER access identification */
+};
+
+/*
+ * Simplified per_info structure used to decode the ptrace user space ABI.
+ */
+struct per_struct_kernel {
+	unsigned long cr9;		/* PER control bits */
+	unsigned long cr10;		/* PER starting address */
+	unsigned long cr11;		/* PER ending address */
+	unsigned long bits;		/* Obsolete software bits */
+	unsigned long starting_addr;	/* User specified start address */
+	unsigned long ending_addr;	/* User specified end address */
+	unsigned short perc_atmid;	/* PER trap ATMID */
+	unsigned long address;		/* PER trap instruction address */
+	unsigned char access_id;	/* PER trap access identification */
+};
+
+#define PER_EVENT_MASK			0xE9000000UL
+
+#define PER_EVENT_BRANCH		0x80000000UL
+#define PER_EVENT_IFETCH		0x40000000UL
+#define PER_EVENT_STORE			0x20000000UL
+#define PER_EVENT_STORE_REAL		0x08000000UL
+#define PER_EVENT_NULLIFICATION		0x01000000UL
+
+#define PER_CONTROL_MASK		0x00a00000UL
+
+#define PER_CONTROL_BRANCH_ADDRESS	0x00800000UL
+#define PER_CONTROL_ALTERATION		0x00200000UL
+
 #endif
 
 /*
- * Now for the program event recording (trace) definitions.
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface, don't
+ * touch or even look at it if you don't want to modify the user-space
+ * ptrace interface. In particular stay away from it for in-kernel PER.
  */
 typedef struct
 {
......
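The struct per_regs added above is a direct image of control registers %cr9-%cr11. As a quick illustration of how the in-kernel consumers introduced later in this commit use it, the sketch below fills in an instruction-fetch range the same way the kprobes hunk further down does; the helper name per_fill_ifetch_range is invented for the example, only struct per_regs and PER_EVENT_IFETCH come from the patch.

#include <asm/ptrace.h>	/* struct per_regs and PER_EVENT_* from this patch */

/*
 * Hypothetical helper: request a PER instruction-fetch event for every
 * instruction between start and end. The caller would load the result
 * into %cr9-%cr11, e.g. with __ctl_load(), as enable_singlestep() does.
 */
static void per_fill_ifetch_range(struct per_regs *regs,
				  unsigned long start, unsigned long end)
{
	regs->control = PER_EVENT_IFETCH;	/* event mask, goes to %cr9 */
	regs->start = start;			/* %cr10: first monitored address */
	regs->end = end;			/* %cr11: last monitored address */
}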
@@ -20,6 +20,7 @@
 struct task_struct;
 
 extern struct task_struct *__switch_to(void *, void *);
+extern void update_per_regs(struct task_struct *task);
 
 static inline void save_fp_regs(s390_fp_regs *fpregs)
 {
@@ -93,6 +94,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 	if (next->mm) {						\
 		restore_fp_regs(&next->thread.fp_regs);		\
 		restore_access_regs(&next->thread.acrs[0]);	\
+		update_per_regs(next);				\
 	}							\
 	prev = __switch_to(prev,next);				\
 } while (0)
......
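update_per_regs() itself lives in arch/s390/kernel/ptrace.c, whose diff is collapsed in this view. A minimal sketch of the idea, assuming only the fields added above and the existing __ctl_store()/__ctl_load() primitives (the real function may differ in detail):

#include <linux/sched.h>
#include <linux/string.h>
#include <asm/ptrace.h>
#include <asm/system.h>

void update_per_regs(struct task_struct *task)
{
	struct per_regs old, new;

	/* Start from what the debugger requested via the ptrace interface. */
	new = task->thread.per_user;

	/* Full single-stepping means "report every instruction fetch". */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
		new.control = PER_EVENT_IFETCH;
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}

	/* Only reload %cr9-%cr11 if the wanted contents actually changed. */
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}

Calling this from the switch_to() macro keeps the PER control registers in sync with whatever the incoming task was asked to trace, without reloading them on every context switch.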
@@ -88,7 +88,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_RESTART_SVC		4	/* restart svc with new svc number */
-#define TIF_SINGLE_STEP		6	/* deliver sigtrap on return to user */
+#define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
@@ -99,14 +99,15 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_31BIT		17	/* 32bit process */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
-#define TIF_FREEZE		20	/* thread is freezing for suspend */
+#define TIF_SINGLE_STEP		20	/* This task is single stepped */
+#define TIF_FREEZE		21	/* thread is freezing for suspend */
 
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_RESTART_SVC	(1<<TIF_RESTART_SVC)
-#define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
+#define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
+#define _TIF_SINGLE_STEP	(1<<TIF_FREEZE)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
......
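After this split, TIF_PER_TRAP only means "deliver the pending PER event as a SIGTRAP on the way back to user space", while TIF_SINGLE_STEP marks a task that a tracer has put into single-step mode. A hedged sketch of how the ptrace hooks (in the collapsed ptrace.c diff) could drive the new flag; the bodies below are illustrative only:

#include <linux/ptrace.h>
#include <linux/sched.h>

/* Illustration only: the real versions live in arch/s390/kernel/ptrace.c. */
void user_enable_single_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	if (task == current)
		update_per_regs(task);	/* reload %cr9-%cr11 immediately */
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	if (task == current)
		update_per_regs(task);
}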
@@ -23,14 +23,16 @@ int main(void)
 {
 	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
 	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-	DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
 	DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
 	BLANK();
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid));
-	DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address));
-	DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id));
+	DEFINE(__THREAD_per_cause,
+	       offsetof(struct task_struct, thread.per_event.cause));
+	DEFINE(__THREAD_per_address,
+	       offsetof(struct task_struct, thread.per_event.address));
+	DEFINE(__THREAD_per_paid,
+	       offsetof(struct task_struct, thread.per_event.paid));
 	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -85,9 +87,9 @@ int main(void)
 	DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
 	DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
 	DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
-	DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
+	DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
 	DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
-	DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
+	DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
 	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
 	DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
 	DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
......
@@ -4,40 +4,19 @@
 #include <asm/ptrace.h>		/* needed for NUM_CR_WORDS */
 #include "compat_linux.h"	/* needed for psw_compat_t */
 
-typedef struct {
-	__u32 cr[NUM_CR_WORDS];
-} per_cr_words32;
-
-typedef struct {
-	__u16	perc_atmid;	/* 0x096 */
-	__u32	address;	/* 0x098 */
-	__u8	access_id;	/* 0x0a1 */
-} per_lowcore_words32;
-
-typedef struct {
-	union {
-		per_cr_words32	words;
-	} control_regs;
-	/*
-	 * Use these flags instead of setting em_instruction_fetch
-	 * directly they are used so that single stepping can be
-	 * switched on & off while not affecting other tracing
-	 */
-	unsigned single_step       : 1;
-	unsigned instruction_fetch : 1;
-	unsigned                   : 30;
-	/*
-	 * These addresses are copied into cr10 & cr11 if single
-	 * stepping is switched off
-	 */
-	__u32	starting_addr;
-	__u32	ending_addr;
-	union {
-		per_lowcore_words32 words;
-	} lowcore;
-} per_struct32;
-
-struct user_regs_struct32
+struct compat_per_struct_kernel {
+	__u32 cr9;		/* PER control bits */
+	__u32 cr10;		/* PER starting address */
+	__u32 cr11;		/* PER ending address */
+	__u32 bits;		/* Obsolete software bits */
+	__u32 starting_addr;	/* User specified start address */
+	__u32 ending_addr;	/* User specified end address */
+	__u16 perc_atmid;	/* PER trap ATMID */
+	__u32 address;		/* PER trap instruction address */
+	__u8 access_id;		/* PER trap access identification */
+};
+
+struct compat_user_regs_struct
 {
 	psw_compat_t psw;
 	u32 gprs[NUM_GPRS];
@@ -50,14 +29,14 @@ struct user_regs_struct32
  * itself as there is no "official" ptrace interface for hardware
  * watchpoints. This is the way intel does it.
  */
-	per_struct32 per_info;
+	struct compat_per_struct_kernel per_info;
 	u32  ieee_instruction_pointer;	/* obsolete, always 0 */
 };
 
-struct user32 {
+struct compat_user {
 	/* We start with the registers, to mimic the way that "memory"
 	   is returned from the ptrace(3,...) function.  */
-	struct user_regs_struct32 regs;	/* Where the registers are actually stored */
+	struct compat_user_regs_struct regs;
 	/* The rest of this junk is to help gdb figure out what goes where */
 	u32 u_tsize;		/* Text segment size (pages). */
 	u32 u_dsize;		/* Data segment size (pages). */
@@ -79,6 +58,6 @@ typedef struct
 	__u32 len;
 	__u32 kernel_addr;
 	__u32 process_addr;
-} ptrace_area_emu31;
+} compat_ptrace_area;
 
 #endif /* _PTRACE32_H */
@@ -48,7 +48,7 @@ SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
 SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
 _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -200,31 +200,21 @@ STACK_SIZE  = 1 << STACK_SHIFT
 	.globl	__switch_to
 __switch_to:
 	basr	%r1,0
-__switch_to_base:
-	tm	__THREAD_per(%r3),0xe8		# new process is using per ?
-	bz	__switch_to_noper-__switch_to_base(%r1)	# if not we're fine
-	stctl	%c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
-	clc	__THREAD_per(12,%r3),__SF_EMPTY(%r15)
-	be	__switch_to_noper-__switch_to_base(%r1)	# we got away w/o bashing TLB's
-	lctl	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
-__switch_to_noper:
-	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
+0:	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
+	l	%r5,__THREAD_info(%r3)		# get thread_info of next
 	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
-	bz	__switch_to_no_mcck-__switch_to_base(%r1)
+	bz	1f-0b(%r1)
 	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-	l	%r4,__THREAD_info(%r3)		# get thread_info of next
-	oi	__TI_flags+3(%r4),_TIF_MCCK_PENDING	# set it in next
-__switch_to_no_mcck:
-	stm	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
-	st	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
-	l	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
-	lm	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
-	st	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
-	lctl	%c4,%c4,__TASK_pid(%r3)	# load pid to control reg. 4
-	l	%r3,__THREAD_info(%r3)	# load thread_info from task struct
-	st	%r3,__LC_THREAD_INFO
-	ahi	%r3,STACK_SIZE
-	st	%r3,__LC_KERNEL_STACK		# __LC_KERNEL_STACK = new kernel stack
+	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
+1:	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
+	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
+	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
+	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
+	st	%r3,__LC_CURRENT		# store task struct of next
+	st	%r5,__LC_THREAD_INFO		# store thread info of next
+	ahi	%r5,STACK_SIZE			# end of kernel stack of next
+	st	%r5,__LC_KERNEL_STACK		# store end of kernel stack
 	br	%r14
 
 __critical_start:
@@ -297,7 +287,7 @@ sysc_work_tif:
 	bo	BASED(sysc_notify_resume)
 	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
-	tm	__TI_flags+3(%r12),_TIF_SINGLE_STEP
+	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
 	bo	BASED(sysc_singlestep)
 	b	BASED(sysc_return)	# beware of critical section cleanup
 
@@ -321,13 +311,13 @@ sysc_mcck_pending:
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
-	ni	__TI_flags+3(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP	# clear TIF_PER_TRAP
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ldo_signal)
 	basr	%r14,%r1		# call do_signal
 	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
-	tm	__TI_flags+3(%r12),_TIF_SINGLE_STEP
+	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
 	bo	BASED(sysc_singlestep)
 	b	BASED(sysc_return)
 
@@ -353,15 +343,15 @@ sysc_restart:
 	b	BASED(sysc_nr_ok)	# restart svc
 
 #
-# _TIF_SINGLE_STEP is set, call do_single_step
+# _TIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
-	ni	__TI_flags+3(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP	# clear TIF_PER_TRAP
 	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)		# clear svc number
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
 	la	%r14,BASED(sysc_return)	# load adr. of system return
-	br	%r1			# branch to do_single_step
+	br	%r1			# branch to do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -520,10 +510,10 @@ pgm_no_vtime2:
 	l	%r1,__TI_task(%r12)
 	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
 	bz	BASED(kernel_per)
-	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
-	oi	__TI_flags+3(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
+	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	l	%r4,__LC_TRANS_EXC_CODE
 	REENABLE_IRQS
@@ -551,10 +541,10 @@ pgm_svcper:
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 	l	%r8,__TI_task(%r12)
-	mvc	__THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
-	oi	__TI_flags+3(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
+	mvc	__THREAD_per_address(4,%r8),__LC_PER_ADDRESS
+	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
+	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
 	b	BASED(sysc_do_svc)
@@ -1056,7 +1046,7 @@ cleanup_io_restore_insn:
 .Ldo_signal:	.long	do_signal
 .Ldo_notify_resume:
 		.long	do_notify_resume
-.Lhandle_per:	.long	do_single_step
+.Lhandle_per:	.long	do_per_trap
 .Ldo_execve:	.long	do_execve
 .Lexecve_tail:	.long	execve_tail
 .Ljump_table:	.long	pgm_check_table
......
@@ -12,7 +12,7 @@ pgm_check_handler_t do_dat_exception;
 
 extern int sysctl_userprocess_debug;
 
-void do_single_step(struct pt_regs *regs);
+void do_per_trap(struct pt_regs *regs);
 void syscall_trace(struct pt_regs *regs, int entryexit);
 void kernel_stack_overflow(struct pt_regs * regs);
 void do_signal(struct pt_regs *regs);
......
@@ -51,7 +51,7 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
 _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -208,30 +208,21 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
 */
 	.globl	__switch_to
 __switch_to:
-	tm	__THREAD_per+4(%r3),0xe8	# is the new process using per ?
-	jz	__switch_to_noper		# if not we're fine
-	stctg	%c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
-	clc	__THREAD_per(24,%r3),__SF_EMPTY(%r15)
-	je	__switch_to_noper		# we got away without bashing TLB's
-	lctlg	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
-__switch_to_noper:
-	lg	%r4,__THREAD_info(%r2)	# get thread_info of prev
+	lg	%r4,__THREAD_info(%r2)	# get thread_info of prev
+	lg	%r5,__THREAD_info(%r3)	# get thread_info of next
 	tm	__TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
-	jz	__switch_to_no_mcck
+	jz	0f
 	ni	__TI_flags+7(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
-	lg	%r4,__THREAD_info(%r3)	# get thread_info of next
-	oi	__TI_flags+7(%r4),_TIF_MCCK_PENDING	# set it in next
-__switch_to_no_mcck:
-	stmg	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
-	stg	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
-	lg	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
-	lmg	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
-	stg	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
-	lctl	%c4,%c4,__TASK_pid(%r3)	# load pid to control reg. 4
-	lg	%r3,__THREAD_info(%r3)	# load thread_info from task struct
-	stg	%r3,__LC_THREAD_INFO
-	aghi	%r3,STACK_SIZE
-	stg	%r3,__LC_KERNEL_STACK		# __LC_KERNEL_STACK = new kernel stack
+	oi	__TI_flags+7(%r5),_TIF_MCCK_PENDING	# set it in next
+0:	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
+	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
+	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
+	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
+	stg	%r3,__LC_CURRENT		# store task struct of next
+	stg	%r5,__LC_THREAD_INFO		# store thread info of next
+	aghi	%r5,STACK_SIZE			# end of kernel stack of next
+	stg	%r5,__LC_KERNEL_STACK		# store end of kernel stack
 	br	%r14
 
 __critical_start:
@@ -311,7 +302,7 @@ sysc_work_tif:
 	jo	sysc_notify_resume
 	tm	__TI_flags+7(%r12),_TIF_RESTART_SVC
 	jo	sysc_restart
-	tm	__TI_flags+7(%r12),_TIF_SINGLE_STEP
+	tm	__TI_flags+7(%r12),_TIF_PER_TRAP
 	jo	sysc_singlestep
 	j	sysc_return		# beware of critical section cleanup
 
@@ -333,12 +324,12 @@ sysc_mcck_pending:
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
-	ni	__TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	ni	__TI_flags+7(%r12),255-_TIF_PER_TRAP	# clear TIF_PER_TRAP
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	brasl	%r14,do_signal		# call do_signal
 	tm	__TI_flags+7(%r12),_TIF_RESTART_SVC
 	jo	sysc_restart
-	tm	__TI_flags+7(%r12),_TIF_SINGLE_STEP
+	tm	__TI_flags+7(%r12),_TIF_PER_TRAP
 	jo	sysc_singlestep
 	j	sysc_return
 
@@ -363,14 +354,14 @@ sysc_restart:
 	j	sysc_nr_ok		# restart svc
 
 #
-# _TIF_SINGLE_STEP is set, call do_single_step
+# _TIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
-	ni	__TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	ni	__TI_flags+7(%r12),255-_TIF_PER_TRAP	# clear TIF_PER_TRAP
 	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)		# clear svc number
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	larl	%r14,sysc_return	# load adr. of system return
-	jg	do_single_step		# branch to do_sigtrap
+	jg	do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -526,10 +517,10 @@ pgm_no_vtime2:
 	lg	%r1,__TI_task(%r12)
 	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
 	jz	kernel_per
-	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
-	oi	__TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+	mvc	__THREAD_per_address(8,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
+	oi	__TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 	lgf	%r3,__LC_PGM_ILC	# load program interruption code
 	lg	%r4,__LC_TRANS_EXC_CODE
 	REENABLE_IRQS
@@ -558,10 +549,10 @@ pgm_svcper:
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 	LAST_BREAK
 	lg	%r8,__TI_task(%r12)
-	mvc	__THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
-	oi	__TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
+	mvc	__THREAD_per_address(8,%r8),__LC_PER_ADDRESS
+	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
+	oi	__TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	lmg	%r2,%r6,SP_R2(%r15)	# load svc arguments
 	j	sysc_do_svc
@@ -573,7 +564,7 @@ kernel_per:
 	REENABLE_IRQS
 	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)	# clear svc number
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	brasl	%r14,do_single_step
+	brasl	%r14,do_per_trap
 	j	pgm_exit
 
 /*
......
@@ -175,13 +175,12 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
 					struct pt_regs *regs,
 					unsigned long ip)
 {
-	per_cr_bits kprobe_per_regs[1];
+	struct per_regs per_kprobe;
 
-	/* Set up the per control reg info, will pass to lctl */
-	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
-	kprobe_per_regs[0].em_instruction_fetch = 1;
-	kprobe_per_regs[0].starting_addr = ip;
-	kprobe_per_regs[0].ending_addr = ip;
+	/* Set up the PER control registers %cr9-%cr11 */
+	per_kprobe.control = PER_EVENT_IFETCH;
+	per_kprobe.start = ip;
+	per_kprobe.end = ip;
 
 	/* Save control regs and psw mask */
 	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
@@ -189,7 +188,7 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
 		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
 
 	/* Set PER control regs, turns on single step for the given address */
-	__ctl_load(kprobe_per_regs, 9, 11);
+	__ctl_load(per_kprobe, 9, 11);
 	regs->psw.mask |= PSW_MASK_PER;
 	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
 	regs->psw.addr = ip | PSW_ADDR_AMODE;
......
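enable_singlestep() now simply fills a struct per_regs and loads it with __ctl_load(); the saved control registers and PSW mask are presumably restored by a matching teardown path. A sketch of what that counterpart could look like, assuming the kprobe_saved_ctl field seen in this hunk and a kprobe_saved_imask field holding the saved PSW mask bits (both names outside this hunk are assumptions):

/* Sketch of a counterpart to enable_singlestep(): undo the PER setup. */
static void disable_singlestep_sketch(struct kprobe_ctlblk *kcb,
				      struct pt_regs *regs,
				      unsigned long ip)
{
	/* Put back the %cr9-%cr11 contents saved by enable_singlestep(). */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	/* Drop the PER bit and restore the saved interrupt mask bits. */
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}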
@@ -213,8 +213,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	/* start new process with ar4 pointing to the correct address space */
 	p->thread.mm_segment = get_fs();
 	/* Don't copy debug registers */
-	memset(&p->thread.per_info, 0, sizeof(p->thread.per_info));
+	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
+	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(p, TIF_PER_TRAP);
 	/* Initialize per thread user and system timer values */
 	ti = task_thread_info(p);
 	ti->user_timer = 0;
......
This diff is collapsed.
@@ -505,7 +505,7 @@ void do_signal(struct pt_regs *regs)
 		 * Let tracing know that we've done the handler setup.
 		 */
 		tracehook_signal_handler(signr, &info, &ka, regs,
-					 current->thread.per_info.single_step);
+					 test_thread_flag(TIF_SINGLE_STEP));
 	}
 	return;
 }
......
@@ -365,12 +365,10 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
 		((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
 }
 
-void __kprobes do_single_step(struct pt_regs *regs)
+void __kprobes do_per_trap(struct pt_regs *regs)
 {
-	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
-		       SIGTRAP) == NOTIFY_STOP){
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
 		return;
-	}
 	if (tracehook_consider_fatal_signal(current, SIGTRAP))
 		force_sig(SIGTRAP, current);
 }
......
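From user space nothing changes: a tracer still observes the PER event as a SIGTRAP stop, whether it came from single-stepping or a watchpoint range. A minimal, generic tracer snippet that exercises this path through the standard ptrace single-step request (plain illustration, not part of the patch):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Single-step an already-stopped tracee once and report the stop signal. */
static int step_once(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_SINGLESTEP, pid, 0, 0) == -1)
		return -1;
	if (waitpid(pid, &status, 0) == -1)
		return -1;
	if (WIFSTOPPED(status))
		printf("tracee stopped by signal %d\n", WSTOPSIG(status));
	return 0;
}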
@@ -235,13 +235,13 @@ static noinline int signal_return(struct pt_regs *regs, long int_code,
 	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
 
 	if (!rc && instruction == 0x0a77) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_sigreturn();
 		else
 			sys_sigreturn();
 	} else if (!rc && instruction == 0x0aad) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_rt_sigreturn();
 		else
@@ -379,7 +379,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
-	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
 	fault = 0;
 out_up:
 	up_read(&mm->mmap_sem);
......