Commit 5e9a2692 authored by Martin Schwidefsky, committed by Martin Schwidefsky

[S390] ptrace cleanup

Overhaul program event recording and the code dealing with the ptrace
user space interface.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent da7f51c1
@@ -81,7 +81,8 @@ struct thread_struct {
mm_segment_t mm_segment; mm_segment_t mm_segment;
unsigned long prot_addr; /* address of protection-excep. */ unsigned long prot_addr; /* address of protection-excep. */
unsigned int trap_no; unsigned int trap_no;
per_struct per_info; struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
/* pfault_wait is used to block the process on a pfault event */ /* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait; unsigned long pfault_wait;
}; };
......
@@ -331,10 +331,60 @@ struct pt_regs
unsigned short ilc; unsigned short ilc;
unsigned short svcnr; unsigned short svcnr;
}; };
/*
* Program event recording (PER) register set.
*/
struct per_regs {
unsigned long control; /* PER control bits */
unsigned long start; /* PER starting address */
unsigned long end; /* PER ending address */
};
/*
* PER event contains information about the cause of the last PER exception.
*/
struct per_event {
unsigned short cause; /* PER code, ATMID and AI */
unsigned long address; /* PER address */
unsigned char paid; /* PER access identification */
};
/*
* Simplified per_info structure used to decode the ptrace user space ABI.
*/
struct per_struct_kernel {
unsigned long cr9; /* PER control bits */
unsigned long cr10; /* PER starting address */
unsigned long cr11; /* PER ending address */
unsigned long bits; /* Obsolete software bits */
unsigned long starting_addr; /* User specified start address */
unsigned long ending_addr; /* User specified end address */
unsigned short perc_atmid; /* PER trap ATMID */
unsigned long address; /* PER trap instruction address */
unsigned char access_id; /* PER trap access identification */
};
#define PER_EVENT_MASK 0xE9000000UL
#define PER_EVENT_BRANCH 0x80000000UL
#define PER_EVENT_IFETCH 0x40000000UL
#define PER_EVENT_STORE 0x20000000UL
#define PER_EVENT_STORE_REAL 0x08000000UL
#define PER_EVENT_NULLIFICATION 0x01000000UL
#define PER_CONTROL_MASK 0x00a00000UL
#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL
#define PER_CONTROL_ALTERATION 0x00200000UL
#endif #endif
/* /*
* Now for the program event recording (trace) definitions. * Now for the user space program event recording (trace) definitions.
* The following structures are used only for the ptrace interface, don't
* touch or even look at it if you don't want to modify the user-space
* ptrace interface. In particular stay away from it for in-kernel PER.
*/ */
typedef struct typedef struct
{ {
......
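The PER_EVENT_* and PER_CONTROL_* masks above describe which bits of the control word (loaded into %cr9) arm which PER event. As a reading aid, here is a minimal user-space sketch, not part of the commit, that decodes such a control word; the mask values are copied from the hunk above and describe_per_control() is a made-up helper name.

#include <stdio.h>

/* Mask values as defined in the new <asm/ptrace.h> hunk above. */
#define PER_EVENT_MASK			0xE9000000UL
#define PER_EVENT_BRANCH		0x80000000UL
#define PER_EVENT_IFETCH		0x40000000UL
#define PER_EVENT_STORE			0x20000000UL
#define PER_EVENT_STORE_REAL		0x08000000UL
#define PER_EVENT_NULLIFICATION		0x01000000UL
#define PER_CONTROL_BRANCH_ADDRESS	0x00800000UL
#define PER_CONTROL_ALTERATION		0x00200000UL

static void describe_per_control(unsigned long control)
{
	if (!(control & PER_EVENT_MASK)) {
		/* update_per_regs() clears PSW_MASK_PER in this case. */
		printf("no PER event enabled\n");
		return;
	}
	if (control & PER_EVENT_BRANCH)
		printf("successful-branching event\n");
	if (control & PER_EVENT_IFETCH)
		printf("instruction-fetch event\n");
	if (control & PER_EVENT_STORE)
		printf("storage-alteration event\n");
	if (control & PER_EVENT_STORE_REAL)
		printf("store-using-real-address event\n");
	if (control & PER_EVENT_NULLIFICATION)
		printf("event nullification requested\n");
	if (control & PER_CONTROL_BRANCH_ADDRESS)
		printf("branch-address control set\n");
	if (control & PER_CONTROL_ALTERATION)
		printf("alteration control set\n");
}

int main(void)
{
	describe_per_control(PER_EVENT_IFETCH);	/* what single stepping uses */
	return 0;
}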
@@ -20,6 +20,7 @@
struct task_struct; struct task_struct;
extern struct task_struct *__switch_to(void *, void *); extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);
static inline void save_fp_regs(s390_fp_regs *fpregs) static inline void save_fp_regs(s390_fp_regs *fpregs)
{ {
@@ -93,6 +94,7 @@ static inline void restore_access_regs(unsigned int *acrs)
if (next->mm) { \ if (next->mm) { \
restore_fp_regs(&next->thread.fp_regs); \ restore_fp_regs(&next->thread.fp_regs); \
restore_access_regs(&next->thread.acrs[0]); \ restore_access_regs(&next->thread.acrs[0]); \
update_per_regs(next); \
} \ } \
prev = __switch_to(prev,next); \ prev = __switch_to(prev,next); \
} while (0) } while (0)
......
@@ -88,7 +88,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SIGPENDING 2 /* signal pending */ #define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_RESTART_SVC 4 /* restart svc with new svc number */ #define TIF_RESTART_SVC 4 /* restart svc with new svc number */
#define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ #define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */
#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ #define TIF_MCCK_PENDING 7 /* machine check handling is pending */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
@@ -99,14 +99,15 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_31BIT 17 /* 32bit process */ #define TIF_31BIT 17 /* 32bit process */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ #define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
#define TIF_FREEZE 20 /* thread is freezing for suspend */ #define TIF_SINGLE_STEP 20 /* This task is single stepped */
#define TIF_FREEZE 21 /* thread is freezing for suspend */
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) #define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) #define _TIF_PER_TRAP (1<<TIF_PER_TRAP)
#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) #define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT (1<<TIF_31BIT) #define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
#define _TIF_FREEZE (1<<TIF_FREEZE) #define _TIF_FREEZE (1<<TIF_FREEZE)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
@@ -23,14 +23,16 @@ int main(void)
{ {
DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment)); DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
BLANK(); BLANK();
DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
BLANK(); BLANK();
DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid)); DEFINE(__THREAD_per_cause,
DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address)); offsetof(struct task_struct, thread.per_event.cause));
DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id)); DEFINE(__THREAD_per_address,
offsetof(struct task_struct, thread.per_event.address));
DEFINE(__THREAD_per_paid,
offsetof(struct task_struct, thread.per_event.paid));
BLANK(); BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task)); DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -85,9 +87,9 @@ int main(void)
DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid)); DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id)); DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
......
@@ -4,40 +4,19 @@
#include <asm/ptrace.h> /* needed for NUM_CR_WORDS */ #include <asm/ptrace.h> /* needed for NUM_CR_WORDS */
#include "compat_linux.h" /* needed for psw_compat_t */ #include "compat_linux.h" /* needed for psw_compat_t */
typedef struct { struct compat_per_struct_kernel {
__u32 cr[NUM_CR_WORDS]; __u32 cr9; /* PER control bits */
} per_cr_words32; __u32 cr10; /* PER starting address */
__u32 cr11; /* PER ending address */
typedef struct { __u32 bits; /* Obsolete software bits */
__u16 perc_atmid; /* 0x096 */ __u32 starting_addr; /* User specified start address */
__u32 address; /* 0x098 */ __u32 ending_addr; /* User specified end address */
__u8 access_id; /* 0x0a1 */ __u16 perc_atmid; /* PER trap ATMID */
} per_lowcore_words32; __u32 address; /* PER trap instruction address */
__u8 access_id; /* PER trap access identification */
typedef struct { };
union {
per_cr_words32 words;
} control_regs;
/*
* Use these flags instead of setting em_instruction_fetch
* directly they are used so that single stepping can be
* switched on & off while not affecting other tracing
*/
unsigned single_step : 1;
unsigned instruction_fetch : 1;
unsigned : 30;
/*
* These addresses are copied into cr10 & cr11 if single
* stepping is switched off
*/
__u32 starting_addr;
__u32 ending_addr;
union {
per_lowcore_words32 words;
} lowcore;
} per_struct32;
struct user_regs_struct32 struct compat_user_regs_struct
{ {
psw_compat_t psw; psw_compat_t psw;
u32 gprs[NUM_GPRS]; u32 gprs[NUM_GPRS];
@@ -50,14 +29,14 @@ struct user_regs_struct32
* itself as there is no "official" ptrace interface for hardware * itself as there is no "official" ptrace interface for hardware
* watchpoints. This is the way intel does it. * watchpoints. This is the way intel does it.
*/ */
per_struct32 per_info; struct compat_per_struct_kernel per_info;
u32 ieee_instruction_pointer; /* obsolete, always 0 */ u32 ieee_instruction_pointer; /* obsolete, always 0 */
}; };
struct user32 { struct compat_user {
/* We start with the registers, to mimic the way that "memory" /* We start with the registers, to mimic the way that "memory"
is returned from the ptrace(3,...) function. */ is returned from the ptrace(3,...) function. */
struct user_regs_struct32 regs; /* Where the registers are actually stored */ struct compat_user_regs_struct regs;
/* The rest of this junk is to help gdb figure out what goes where */ /* The rest of this junk is to help gdb figure out what goes where */
u32 u_tsize; /* Text segment size (pages). */ u32 u_tsize; /* Text segment size (pages). */
u32 u_dsize; /* Data segment size (pages). */ u32 u_dsize; /* Data segment size (pages). */
@@ -79,6 +58,6 @@ typedef struct
__u32 len; __u32 len;
__u32 kernel_addr; __u32 kernel_addr;
__u32 process_addr; __u32 process_addr;
} ptrace_area_emu31; } compat_ptrace_area;
#endif /* _PTRACE32_H */ #endif /* _PTRACE32_H */
@@ -48,7 +48,7 @@ SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING) _TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -200,31 +200,21 @@ STACK_SIZE = 1 << STACK_SHIFT
.globl __switch_to .globl __switch_to
__switch_to: __switch_to:
basr %r1,0 basr %r1,0
__switch_to_base: 0: l %r4,__THREAD_info(%r2) # get thread_info of prev
tm __THREAD_per(%r3),0xe8 # new process is using per ? l %r5,__THREAD_info(%r3) # get thread_info of next
bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine
stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
clc __THREAD_per(12,%r3),__SF_EMPTY(%r15)
be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's
lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
__switch_to_noper:
l %r4,__THREAD_info(%r2) # get thread_info of prev
tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
bz __switch_to_no_mcck-__switch_to_base(%r1) bz 1f-0b(%r1)
ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
l %r4,__THREAD_info(%r3) # get thread_info of next oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next
oi __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next 1: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
__switch_to_no_mcck: st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task l %r15,__THREAD_ksp(%r3) # load kernel stack of next
st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task st %r3,__LC_CURRENT # store task struct of next
st %r3,__LC_CURRENT # __LC_CURRENT = current task struct st %r5,__LC_THREAD_INFO # store thread info of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 ahi %r5,STACK_SIZE # end of kernel stack of next
l %r3,__THREAD_info(%r3) # load thread_info from task struct st %r5,__LC_KERNEL_STACK # store end of kernel stack
st %r3,__LC_THREAD_INFO
ahi %r3,STACK_SIZE
st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
br %r14 br %r14
__critical_start: __critical_start:
@@ -297,7 +287,7 @@ sysc_work_tif:
bo BASED(sysc_notify_resume) bo BASED(sysc_notify_resume)
tm __TI_flags+3(%r12),_TIF_RESTART_SVC tm __TI_flags+3(%r12),_TIF_RESTART_SVC
bo BASED(sysc_restart) bo BASED(sysc_restart)
tm __TI_flags+3(%r12),_TIF_SINGLE_STEP tm __TI_flags+3(%r12),_TIF_PER_TRAP
bo BASED(sysc_singlestep) bo BASED(sysc_singlestep)
b BASED(sysc_return) # beware of critical section cleanup b BASED(sysc_return) # beware of critical section cleanup
@@ -321,13 +311,13 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal # _TIF_SIGPENDING is set, call do_signal
# #
sysc_sigpending: sysc_sigpending:
ni __TI_flags+3(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # load pt_regs la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_signal) l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal basr %r14,%r1 # call do_signal
tm __TI_flags+3(%r12),_TIF_RESTART_SVC tm __TI_flags+3(%r12),_TIF_RESTART_SVC
bo BASED(sysc_restart) bo BASED(sysc_restart)
tm __TI_flags+3(%r12),_TIF_SINGLE_STEP tm __TI_flags+3(%r12),_TIF_PER_TRAP
bo BASED(sysc_singlestep) bo BASED(sysc_singlestep)
b BASED(sysc_return) b BASED(sysc_return)
@@ -353,15 +343,15 @@ sysc_restart:
b BASED(sysc_nr_ok) # restart svc b BASED(sysc_nr_ok) # restart svc
# #
# _TIF_SINGLE_STEP is set, call do_single_step # _TIF_PER_TRAP is set, call do_per_trap
# #
sysc_singlestep: sysc_singlestep:
ni __TI_flags+3(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area la %r2,SP_PTREGS(%r15) # address of register-save area
l %r1,BASED(.Lhandle_per) # load adr. of per handler l %r1,BASED(.Lhandle_per) # load adr. of per handler
la %r14,BASED(sysc_return) # load adr. of system return la %r14,BASED(sysc_return) # load adr. of system return
br %r1 # branch to do_single_step br %r1 # branch to do_per_trap
# #
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -520,10 +510,10 @@ pgm_no_vtime2:
l %r1,__TI_task(%r12) l %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ? tm SP_PSW+1(%r15),0x01 # kernel per event ?
bz BASED(kernel_per) bz BASED(kernel_per)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
oi __TI_flags+3(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
l %r3,__LC_PGM_ILC # load program interruption code l %r3,__LC_PGM_ILC # load program interruption code
l %r4,__LC_TRANS_EXC_CODE l %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS REENABLE_IRQS
@@ -551,10 +541,10 @@ pgm_svcper:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
l %r8,__TI_task(%r12) l %r8,__TI_task(%r12)
mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
oi __TI_flags+3(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lm %r2,%r6,SP_R2(%r15) # load svc arguments lm %r2,%r6,SP_R2(%r15) # load svc arguments
b BASED(sysc_do_svc) b BASED(sysc_do_svc)
@@ -1056,7 +1046,7 @@ cleanup_io_restore_insn:
.Ldo_signal: .long do_signal .Ldo_signal: .long do_signal
.Ldo_notify_resume: .Ldo_notify_resume:
.long do_notify_resume .long do_notify_resume
.Lhandle_per: .long do_single_step .Lhandle_per: .long do_per_trap
.Ldo_execve: .long do_execve .Ldo_execve: .long do_execve
.Lexecve_tail: .long execve_tail .Lexecve_tail: .long execve_tail
.Ljump_table: .long pgm_check_table .Ljump_table: .long pgm_check_table
......
@@ -12,7 +12,7 @@ pgm_check_handler_t do_dat_exception;
extern int sysctl_userprocess_debug; extern int sysctl_userprocess_debug;
void do_single_step(struct pt_regs *regs); void do_per_trap(struct pt_regs *regs);
void syscall_trace(struct pt_regs *regs, int entryexit); void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs); void kernel_stack_overflow(struct pt_regs * regs);
void do_signal(struct pt_regs *regs); void do_signal(struct pt_regs *regs);
......
@@ -51,7 +51,7 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT STACK_SIZE = 1 << STACK_SHIFT
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING) _TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -208,30 +208,21 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
*/ */
.globl __switch_to .globl __switch_to
__switch_to: __switch_to:
tm __THREAD_per+4(%r3),0xe8 # is the new process using per ? lg %r4,__THREAD_info(%r2) # get thread_info of prev
jz __switch_to_noper # if not we're fine lg %r5,__THREAD_info(%r3) # get thread_info of next
stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
je __switch_to_noper # we got away without bashing TLB's
lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
__switch_to_noper:
lg %r4,__THREAD_info(%r2) # get thread_info of prev
tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
jz __switch_to_no_mcck jz 0f
ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
lg %r4,__THREAD_info(%r3) # get thread_info of next oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next
oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next 0: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
__switch_to_no_mcck: stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task stg %r3,__LC_CURRENT # store task struct of next
stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct stg %r5,__LC_THREAD_INFO # store thread info of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 aghi %r5,STACK_SIZE # end of kernel stack of next
lg %r3,__THREAD_info(%r3) # load thread_info from task struct stg %r5,__LC_KERNEL_STACK # store end of kernel stack
stg %r3,__LC_THREAD_INFO
aghi %r3,STACK_SIZE
stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
br %r14 br %r14
__critical_start: __critical_start:
@@ -311,7 +302,7 @@ sysc_work_tif:
jo sysc_notify_resume jo sysc_notify_resume
tm __TI_flags+7(%r12),_TIF_RESTART_SVC tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart jo sysc_restart
tm __TI_flags+7(%r12),_TIF_SINGLE_STEP tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep jo sysc_singlestep
j sysc_return # beware of critical section cleanup j sysc_return # beware of critical section cleanup
@@ -333,12 +324,12 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal # _TIF_SIGPENDING is set, call do_signal
# #
sysc_sigpending: sysc_sigpending:
ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # load pt_regs la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_signal # call do_signal brasl %r14,do_signal # call do_signal
tm __TI_flags+7(%r12),_TIF_RESTART_SVC tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart jo sysc_restart
tm __TI_flags+7(%r12),_TIF_SINGLE_STEP tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep jo sysc_singlestep
j sysc_return j sysc_return
@@ -363,14 +354,14 @@ sysc_restart:
j sysc_nr_ok # restart svc j sysc_nr_ok # restart svc
# #
# _TIF_SINGLE_STEP is set, call do_single_step # _TIF_PER_TRAP is set, call do_per_trap
# #
sysc_singlestep: sysc_singlestep:
ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area la %r2,SP_PTREGS(%r15) # address of register-save area
larl %r14,sysc_return # load adr. of system return larl %r14,sysc_return # load adr. of system return
jg do_single_step # branch to do_sigtrap jg do_per_trap
# #
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -526,10 +517,10 @@ pgm_no_vtime2:
lg %r1,__TI_task(%r12) lg %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ? tm SP_PSW+1(%r15),0x01 # kernel per event ?
jz kernel_per jz kernel_per
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
lgf %r3,__LC_PGM_ILC # load program interruption code lgf %r3,__LC_PGM_ILC # load program interruption code
lg %r4,__LC_TRANS_EXC_CODE lg %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS REENABLE_IRQS
@@ -558,10 +549,10 @@ pgm_svcper:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK LAST_BREAK
lg %r8,__TI_task(%r12) lg %r8,__TI_task(%r12)
mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lmg %r2,%r6,SP_R2(%r15) # load svc arguments lmg %r2,%r6,SP_R2(%r15) # load svc arguments
j sysc_do_svc j sysc_do_svc
@@ -573,7 +564,7 @@ kernel_per:
REENABLE_IRQS REENABLE_IRQS
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_single_step brasl %r14,do_per_trap
j pgm_exit j pgm_exit
/* /*
......
@@ -175,13 +175,12 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
struct pt_regs *regs, struct pt_regs *regs,
unsigned long ip) unsigned long ip)
{ {
per_cr_bits kprobe_per_regs[1]; struct per_regs per_kprobe;
/* Set up the per control reg info, will pass to lctl */ /* Set up the PER control registers %cr9-%cr11 */
memset(kprobe_per_regs, 0, sizeof(per_cr_bits)); per_kprobe.control = PER_EVENT_IFETCH;
kprobe_per_regs[0].em_instruction_fetch = 1; per_kprobe.start = ip;
kprobe_per_regs[0].starting_addr = ip; per_kprobe.end = ip;
kprobe_per_regs[0].ending_addr = ip;
/* Save control regs and psw mask */ /* Save control regs and psw mask */
__ctl_store(kcb->kprobe_saved_ctl, 9, 11); __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
@@ -189,7 +188,7 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT); (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
/* Set PER control regs, turns on single step for the given address */ /* Set PER control regs, turns on single step for the given address */
__ctl_load(kprobe_per_regs, 9, 11); __ctl_load(per_kprobe, 9, 11);
regs->psw.mask |= PSW_MASK_PER; regs->psw.mask |= PSW_MASK_PER;
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
regs->psw.addr = ip | PSW_ADDR_AMODE; regs->psw.addr = ip | PSW_ADDR_AMODE;
......
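With the new struct per_regs, arming PER from kernel code boils down to filling three words and loading them into %cr9-%cr11, which is what enable_singlestep() above now does for the kprobe address (start == end covers exactly one instruction). A hedged sketch of that pattern, assuming kernel context where the header hunks above and __ctl_load() are available; arm_per_ifetch() is a hypothetical helper, not part of the commit:

/* Trap the next fetch of the instruction at "ip" by arming an
 * instruction-fetch PER range that covers exactly that address. */
static void arm_per_ifetch(unsigned long ip)
{
	struct per_regs regs = {
		.control = PER_EVENT_IFETCH,	/* instruction-fetch events only */
		.start   = ip,			/* first address of the range... */
		.end     = ip,			/* ...and the last: one instruction */
	};

	__ctl_load(regs, 9, 11);		/* load %cr9-%cr11 */
	/* The caller still has to set PSW_MASK_PER in the PSW, as the
	 * kprobes code and update_per_regs() do. */
}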
@@ -213,8 +213,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
/* start new process with ar4 pointing to the correct address space */ /* start new process with ar4 pointing to the correct address space */
p->thread.mm_segment = get_fs(); p->thread.mm_segment = get_fs();
/* Don't copy debug registers */ /* Don't copy debug registers */
memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP); clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
clear_tsk_thread_flag(p, TIF_PER_TRAP);
/* Initialize per thread user and system timer values */ /* Initialize per thread user and system timer values */
ti = task_thread_info(p); ti = task_thread_info(p);
ti->user_timer = 0; ti->user_timer = 0;
......
/* /*
* arch/s390/kernel/ptrace.c * Ptrace user space interface.
* *
* S390 version * Copyright IBM Corp. 1999,2010
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Denis Joseph Barrow
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Based on PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/m68k/kernel/ptrace.c"
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* Modified by Cort Dougan (cort@cs.nmt.edu)
*
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file README.legal in the main directory of
* this archive for more details.
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
@@ -61,76 +45,58 @@ enum s390_regset {
REGSET_GENERAL_EXTENDED, REGSET_GENERAL_EXTENDED,
}; };
static void void update_per_regs(struct task_struct *task)
FixPerRegisters(struct task_struct *task)
{ {
struct pt_regs *regs; static const struct per_regs per_single_step = {
per_struct *per_info; .control = PER_EVENT_IFETCH,
per_cr_words cr_words; .start = 0,
.end = PSW_ADDR_INSN,
regs = task_pt_regs(task); };
per_info = (per_struct *) &task->thread.per_info; struct pt_regs *regs = task_pt_regs(task);
per_info->control_regs.bits.em_instruction_fetch = struct thread_struct *thread = &task->thread;
per_info->single_step | per_info->instruction_fetch; const struct per_regs *new;
struct per_regs old;
if (per_info->single_step) {
per_info->control_regs.bits.starting_addr = 0; /* TIF_SINGLE_STEP overrides the user specified PER registers. */
#ifdef CONFIG_COMPAT new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
if (is_compat_task()) &per_single_step : &thread->per_user;
per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
else /* Take care of the PER enablement bit in the PSW. */
#endif if (!(new->control & PER_EVENT_MASK)) {
per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
} else {
per_info->control_regs.bits.starting_addr =
per_info->starting_addr;
per_info->control_regs.bits.ending_addr =
per_info->ending_addr;
}
/*
* if any of the control reg tracing bits are on
* we switch on per in the psw
*/
if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
regs->psw.mask |= PSW_MASK_PER;
else
regs->psw.mask &= ~PSW_MASK_PER; regs->psw.mask &= ~PSW_MASK_PER;
return;
if (per_info->control_regs.bits.em_storage_alteration)
per_info->control_regs.bits.storage_alt_space_ctl = 1;
else
per_info->control_regs.bits.storage_alt_space_ctl = 0;
if (task == current) {
__ctl_store(cr_words, 9, 11);
if (memcmp(&cr_words, &per_info->control_regs.words,
sizeof(cr_words)) != 0)
__ctl_load(per_info->control_regs.words, 9, 11);
} }
regs->psw.mask |= PSW_MASK_PER;
__ctl_store(old, 9, 11);
if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
__ctl_load(*new, 9, 11);
} }
void user_enable_single_step(struct task_struct *task) void user_enable_single_step(struct task_struct *task)
{ {
task->thread.per_info.single_step = 1; set_tsk_thread_flag(task, TIF_SINGLE_STEP);
FixPerRegisters(task); if (task == current)
update_per_regs(task);
} }
void user_disable_single_step(struct task_struct *task) void user_disable_single_step(struct task_struct *task)
{ {
task->thread.per_info.single_step = 0; clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
FixPerRegisters(task); if (task == current)
update_per_regs(task);
} }
/* /*
* Called by kernel/ptrace.c when detaching.. * Called by kernel/ptrace.c when detaching..
* *
* Make sure single step bits etc are not set. * Clear all debugging related fields.
*/ */
void void ptrace_disable(struct task_struct *task)
ptrace_disable(struct task_struct *child)
{ {
/* make sure the single step bit is not set. */ memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
user_disable_single_step(child); memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
clear_tsk_thread_flag(task, TIF_PER_TRAP);
} }
#ifndef CONFIG_64BIT #ifndef CONFIG_64BIT
@@ -139,6 +105,47 @@ ptrace_disable(struct task_struct *child)
# define __ADDR_MASK 7 # define __ADDR_MASK 7
#endif #endif
static inline unsigned long __peek_user_per(struct task_struct *child,
addr_t addr)
{
struct per_struct_kernel *dummy = NULL;
if (addr == (addr_t) &dummy->cr9)
/* Control bits of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control;
else if (addr == (addr_t) &dummy->cr10)
/* Start address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start;
else if (addr == (addr_t) &dummy->cr11)
/* End address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PSW_ADDR_INSN : child->thread.per_user.end;
else if (addr == (addr_t) &dummy->bits)
/* Single-step bit. */
return test_thread_flag(TIF_SINGLE_STEP) ?
(1UL << (BITS_PER_LONG - 1)) : 0;
else if (addr == (addr_t) &dummy->starting_addr)
/* Start address of the user specified per set. */
return child->thread.per_user.start;
else if (addr == (addr_t) &dummy->ending_addr)
/* End address of the user specified per set. */
return child->thread.per_user.end;
else if (addr == (addr_t) &dummy->perc_atmid)
/* PER code, ATMID and AI of the last PER trap */
return (unsigned long)
child->thread.per_event.cause << (BITS_PER_LONG - 16);
else if (addr == (addr_t) &dummy->address)
/* Address of the last PER trap */
return child->thread.per_event.address;
else if (addr == (addr_t) &dummy->access_id)
/* Access id of the last PER trap */
return (unsigned long)
child->thread.per_event.paid << (BITS_PER_LONG - 8);
return 0;
}
/* /*
* Read the word at offset addr from the user area of a process. The * Read the word at offset addr from the user area of a process. The
* trouble here is that the information is littered over different * trouble here is that the information is littered over different
@@ -204,10 +211,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
/* /*
* per_info is found in the thread structure * Handle access to the per_info structure.
*/ */
offset = addr - (addr_t) &dummy->regs.per_info; addr -= (addr_t) &dummy->regs.per_info;
tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset); tmp = __peek_user_per(child, addr);
} else } else
tmp = 0; tmp = 0;
@@ -237,6 +244,35 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
return put_user(tmp, (addr_t __user *) data); return put_user(tmp, (addr_t __user *) data);
} }
static inline void __poke_user_per(struct task_struct *child,
addr_t addr, addr_t data)
{
struct per_struct_kernel *dummy = NULL;
/*
* There are only three fields in the per_info struct that the
* debugger user can write to.
* 1) cr9: the debugger wants to set a new PER event mask
* 2) starting_addr: the debugger wants to set a new starting
* address to use with the PER event mask.
* 3) ending_addr: the debugger wants to set a new ending
* address to use with the PER event mask.
* The user specified PER event mask and the start and end
* addresses are used only if single stepping is not in effect.
* Writes to any other field in per_info are ignored.
*/
if (addr == (addr_t) &dummy->cr9)
/* PER event mask of the user specified per set. */
child->thread.per_user.control =
data & (PER_EVENT_MASK | PER_CONTROL_MASK);
else if (addr == (addr_t) &dummy->starting_addr)
/* Starting address of the user specified per set. */
child->thread.per_user.start = data;
else if (addr == (addr_t) &dummy->ending_addr)
/* Ending address of the user specified per set. */
child->thread.per_user.end = data;
}
/* /*
* Write a word to the user area of a process at location addr. This * Write a word to the user area of a process at location addr. This
* operation does have an additional problem compared to peek_user. * operation does have an additional problem compared to peek_user.
@@ -311,19 +347,17 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
/* /*
* per_info is found in the thread structure * Handle access to the per_info structure.
*/ */
offset = addr - (addr_t) &dummy->regs.per_info; addr -= (addr_t) &dummy->regs.per_info;
*(addr_t *)((addr_t) &child->thread.per_info + offset) = data; __poke_user_per(child, addr, data);
} }
FixPerRegisters(child);
return 0; return 0;
} }
static int static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
poke_user(struct task_struct *child, addr_t addr, addr_t data)
{ {
addr_t mask; addr_t mask;
@@ -409,13 +443,54 @@ long arch_ptrace(struct task_struct *child, long request,
* a 64 bit program is a no-no. * a 64 bit program is a no-no.
*/ */
/*
* Same as peek_user_per but for a 31 bit program.
*/
static inline __u32 __peek_user_per_compat(struct task_struct *child,
addr_t addr)
{
struct compat_per_struct_kernel *dummy32 = NULL;
if (addr == (addr_t) &dummy32->cr9)
/* Control bits of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control;
else if (addr == (addr_t) &dummy32->cr10)
/* Start address of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start;
else if (addr == (addr_t) &dummy32->cr11)
/* End address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PSW32_ADDR_INSN : child->thread.per_user.end;
else if (addr == (addr_t) &dummy32->bits)
/* Single-step bit. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0x80000000 : 0;
else if (addr == (addr_t) &dummy32->starting_addr)
/* Start address of the user specified per set. */
return (__u32) child->thread.per_user.start;
else if (addr == (addr_t) &dummy32->ending_addr)
/* End address of the user specified per set. */
return (__u32) child->thread.per_user.end;
else if (addr == (addr_t) &dummy32->perc_atmid)
/* PER code, ATMID and AI of the last PER trap */
return (__u32) child->thread.per_event.cause << 16;
else if (addr == (addr_t) &dummy32->address)
/* Address of the last PER trap */
return (__u32) child->thread.per_event.address;
else if (addr == (addr_t) &dummy32->access_id)
/* Access id of the last PER trap */
return (__u32) child->thread.per_event.paid << 24;
return 0;
}
/* /*
* Same as peek_user but for a 31 bit program. * Same as peek_user but for a 31 bit program.
*/ */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr) static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{ {
struct user32 *dummy32 = NULL; struct compat_user *dummy32 = NULL;
per_struct32 *dummy_per32 = NULL;
addr_t offset; addr_t offset;
__u32 tmp; __u32 tmp;
@@ -465,19 +540,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
/* /*
* per_info is found in the thread structure * Handle access to the per_info structure.
*/ */
offset = addr - (addr_t) &dummy32->regs.per_info; addr -= (addr_t) &dummy32->regs.per_info;
/* This is magic. See per_struct and per_struct32. */ tmp = __peek_user_per_compat(child, addr);
if ((offset >= (addr_t) &dummy_per32->control_regs &&
offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
(offset >= (addr_t) &dummy_per32->starting_addr &&
offset <= (addr_t) &dummy_per32->ending_addr) ||
offset == (addr_t) &dummy_per32->lowcore.words.address)
offset = offset*2 + 4;
else
offset = offset*2;
tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
} else } else
tmp = 0; tmp = 0;
@@ -497,14 +563,33 @@ static int peek_user_compat(struct task_struct *child,
return put_user(tmp, (__u32 __user *) data); return put_user(tmp, (__u32 __user *) data);
} }
/*
* Same as poke_user_per but for a 31 bit program.
*/
static inline void __poke_user_per_compat(struct task_struct *child,
addr_t addr, __u32 data)
{
struct compat_per_struct_kernel *dummy32 = NULL;
if (addr == (addr_t) &dummy32->cr9)
/* PER event mask of the user specified per set. */
child->thread.per_user.control =
data & (PER_EVENT_MASK | PER_CONTROL_MASK);
else if (addr == (addr_t) &dummy32->starting_addr)
/* Starting address of the user specified per set. */
child->thread.per_user.start = data;
else if (addr == (addr_t) &dummy32->ending_addr)
/* Ending address of the user specified per set. */
child->thread.per_user.end = data;
}
/* /*
* Same as poke_user but for a 31 bit program. * Same as poke_user but for a 31 bit program.
*/ */
static int __poke_user_compat(struct task_struct *child, static int __poke_user_compat(struct task_struct *child,
addr_t addr, addr_t data) addr_t addr, addr_t data)
{ {
struct user32 *dummy32 = NULL; struct compat_user *dummy32 = NULL;
per_struct32 *dummy_per32 = NULL;
__u32 tmp = (__u32) data; __u32 tmp = (__u32) data;
addr_t offset; addr_t offset;
@@ -561,37 +646,20 @@ static int __poke_user_compat(struct task_struct *child,
} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
/* /*
* per_info is found in the thread structure. * Handle access to the per_info structure.
*/
offset = addr - (addr_t) &dummy32->regs.per_info;
/*
* This is magic. See per_struct and per_struct32.
* By incident the offsets in per_struct are exactly
* twice the offsets in per_struct32 for all fields.
* The 8 byte fields need special handling though,
* because the second half (bytes 4-7) is needed and
* not the first half.
*/ */
if ((offset >= (addr_t) &dummy_per32->control_regs && addr -= (addr_t) &dummy32->regs.per_info;
offset < (addr_t) (&dummy_per32->control_regs + 1)) || __poke_user_per_compat(child, addr, data);
(offset >= (addr_t) &dummy_per32->starting_addr &&
offset <= (addr_t) &dummy_per32->ending_addr) ||
offset == (addr_t) &dummy_per32->lowcore.words.address)
offset = offset*2 + 4;
else
offset = offset*2;
*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
} }
FixPerRegisters(child);
return 0; return 0;
} }
static int poke_user_compat(struct task_struct *child, static int poke_user_compat(struct task_struct *child,
addr_t addr, addr_t data) addr_t addr, addr_t data)
{ {
if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3) if (!is_compat_task() || (addr & 3) ||
addr > sizeof(struct compat_user) - 3)
return -EIO; return -EIO;
return __poke_user_compat(child, addr, data); return __poke_user_compat(child, addr, data);
@@ -602,7 +670,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
{ {
unsigned long addr = caddr; unsigned long addr = caddr;
unsigned long data = cdata; unsigned long data = cdata;
ptrace_area_emu31 parea; compat_ptrace_area parea;
int copied, ret; int copied, ret;
switch (request) { switch (request) {
......
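For the user-space side of the ABI handled above, a minimal tracer sketch (not part of the commit): PTRACE_SINGLESTEP makes the kernel call user_enable_single_step(), which sets TIF_SINGLE_STEP, and after the child stops with SIGTRAP the address of the PER trap can be read back through PTRACE_PEEKUSER, which __peek_user_per() answers from thread.per_event.address. PER_ADDRESS_OFFSET is a placeholder; the real offset has to be taken from the per_info layout inside struct user in the unchanged user-space <asm/ptrace.h>.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Placeholder offset of the PER trap address within the user area. */
#define PER_ADDRESS_OFFSET	0UL

int main(void)
{
	pid_t pid = fork();
	long addr;

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* hand control to the tracer */
		for (;;)
			getppid();		/* something to single-step */
	}
	waitpid(pid, NULL, 0);			/* child stopped itself */
	ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
	waitpid(pid, NULL, 0);			/* child stops with SIGTRAP */
	addr = ptrace(PTRACE_PEEKUSER, pid, (void *) PER_ADDRESS_OFFSET, NULL);
	printf("last PER trap at %#lx\n", addr);
	ptrace(PTRACE_KILL, pid, NULL, NULL);
	return 0;
}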
@@ -505,7 +505,7 @@ void do_signal(struct pt_regs *regs)
* Let tracing know that we've done the handler setup. * Let tracing know that we've done the handler setup.
*/ */
tracehook_signal_handler(signr, &info, &ka, regs, tracehook_signal_handler(signr, &info, &ka, regs,
current->thread.per_info.single_step); test_thread_flag(TIF_SINGLE_STEP));
} }
return; return;
} }
......
@@ -365,12 +365,10 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN); ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
} }
void __kprobes do_single_step(struct pt_regs *regs) void __kprobes do_per_trap(struct pt_regs *regs)
{ {
if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
SIGTRAP) == NOTIFY_STOP){
return; return;
}
if (tracehook_consider_fatal_signal(current, SIGTRAP)) if (tracehook_consider_fatal_signal(current, SIGTRAP))
force_sig(SIGTRAP, current); force_sig(SIGTRAP, current);
} }
......
@@ -235,13 +235,13 @@ static noinline int signal_return(struct pt_regs *regs, long int_code,
rc = __get_user(instruction, (u16 __user *) regs->psw.addr); rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
if (!rc && instruction == 0x0a77) { if (!rc && instruction == 0x0a77) {
clear_tsk_thread_flag(current, TIF_SINGLE_STEP); clear_tsk_thread_flag(current, TIF_PER_TRAP);
if (is_compat_task()) if (is_compat_task())
sys32_sigreturn(); sys32_sigreturn();
else else
sys_sigreturn(); sys_sigreturn();
} else if (!rc && instruction == 0x0aad) { } else if (!rc && instruction == 0x0aad) {
clear_tsk_thread_flag(current, TIF_SINGLE_STEP); clear_tsk_thread_flag(current, TIF_PER_TRAP);
if (is_compat_task()) if (is_compat_task())
sys32_rt_sigreturn(); sys32_rt_sigreturn();
else else
@@ -379,7 +379,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
* The instruction that caused the program check will * The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP. * be repeated. Don't signal single step via SIGTRAP.
*/ */
clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP); clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0; fault = 0;
out_up: out_up:
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
......