Commit 470e6173 authored by David Mosberger, committed by Tony Luck

[IA64] minimal sparse-enablement; add __user annotations

This enables sparse for ia64 and adds a basic set of __user
annotations.  Apart from sys_execve() and the uaccess.h changes, the
patch is trivially safe.  Also note that in gcc_intrin.h, I
changed "asm __volatile" to "asm volatile" since sparse didn't
like the old version (and it's a "strange" version anyhow).
The patch has been (boot) tested.
Signed-off-by: David Mosberger-Tang <davidm@hpl.hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 1335183b
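
For context (not part of this patch): the __user annotation only has teeth when sparse runs with __CHECKER__ defined; under plain gcc it expands to nothing.  A minimal sketch of how the annotation is defined and what sparse flags — the macro mirrors the kernel's compiler.h of this era, and the function name is invented for illustration:

#ifdef __CHECKER__
# define __user	__attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* sparse flags this: direct dereference of a noderef __user pointer */
static int peek_user_flag (const int __user *uptr)
{
	return *uptr;	/* warning: dereference of noderef expression */
}

/* the clean pattern, used throughout this patch: hand the annotated
 * pointer to get_user()/copy_from_user() instead of dereferencing it */
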
......@@ -13,6 +13,8 @@ READELF := $(CROSS_COMPILE)readelf
export AWK
CHECKFLAGS += -m64 -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
OBJCOPYFLAGS := --strip-all
LDFLAGS_vmlinux := -static
LDFLAGS_MODULE += -T $(srctree)/arch/ia64/module.lds
......
......@@ -966,7 +966,7 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
return len;
}
static int irq_affinity_write_proc (struct file *file, const char *buffer,
static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
unsigned int irq = (unsigned long) data;
......
......@@ -1513,7 +1513,7 @@ exit_pfm_fs(void)
}
static ssize_t
pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
pfm_context_t *ctx;
pfm_msg_t *msg;
......@@ -1606,7 +1606,7 @@ pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
}
static ssize_t
pfm_write(struct file *file, const char *ubuf,
pfm_write(struct file *file, const char __user *ubuf,
size_t size, loff_t *ppos)
{
DPRINT(("pfm_write called\n"));
......@@ -4797,7 +4797,7 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
* system-call entry point (must return long)
*/
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, long arg7,
sys_perfmonctl (int fd, int cmd, void __user *arg, int count, long arg5, long arg6, long arg7,
long arg8, long stack)
{
struct pt_regs *regs = (struct pt_regs *)&stack;
......
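
The sys_perfmonctl() hunk above only changes the prototype; the body already funnels arg through the uaccess helpers.  As a simplified, hedged sketch (the function name and request layout are invented — this is not perfmon code), the usual shape of a syscall that takes a void __user * argument is:

#include <linux/linkage.h>	/* asmlinkage */
#include <linux/errno.h>	/* -EINVAL, -EFAULT */
#include <asm/uaccess.h>	/* copy_from_user() */

struct sketch_req {		/* hypothetical request layout */
	int		cmd;
	unsigned long	val;
};

asmlinkage long
sketch_ctl (int fd, int cmd, void __user *arg, int count)
{
	struct sketch_req req;

	if (count != 1)
		return -EINVAL;
	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;		/* bad or inaccessible user pointer */
	/* ... operate on the kernel copy `req' ... */
	return 0;
}
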
......@@ -138,7 +138,7 @@ show_regs (struct pt_regs *regs)
ndirty = (regs->loadrs >> 19);
bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
for (i = 0; i < sof; ++i) {
get_user(val, ia64_rse_skip_regs(bsp, i));
get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
}
......@@ -610,16 +610,18 @@ dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
}
asmlinkage long
sys_execve (char *filename, char **argv, char **envp, struct pt_regs *regs)
sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp,
struct pt_regs *regs)
{
char *fname;
int error;
filename = getname(filename);
error = PTR_ERR(filename);
if (IS_ERR(filename))
fname = getname(filename);
error = PTR_ERR(fname);
if (IS_ERR(fname))
goto out;
error = do_execve(filename, argv, envp, regs);
putname(filename);
error = do_execve(fname, argv, envp, regs);
putname(fname);
out:
return error;
}
......
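
The sys_execve() change is the one the commit message calls out: the old code stored the kernel pointer returned by getname() back into the __user-annotated filename parameter.  That works at run time (the representations are identical), but it mixes address spaces as far as sparse is concerned, hence the separate fname.  The fixed flow, restated with the type movement spelled out (the wrapper name is invented; getname(), putname(), and do_execve() are the real interfaces used above):

#include <linux/fs.h>	/* getname(), putname() */
#include <linux/err.h>	/* IS_ERR(), PTR_ERR() */

/* __user string in, kernel-space copy out, kernel copy released at the end */
static long
sketch_exec_path (char __user *upath)
{
	char *kpath;			/* kernel-space copy of the path */
	long err;

	kpath = getname(upath);		/* copies the __user string into kernel memory */
	err = PTR_ERR(kpath);
	if (IS_ERR(kpath))
		return err;
	/* ... the real code hands kpath to do_execve(kpath, argv, envp, regs) ... */
	putname(kpath);
	return 0;
}
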
......@@ -152,7 +152,7 @@ ia64_increment_ip (struct pt_regs *regs)
ri = 0;
regs->cr_iip += 16;
} else if (ri == 2) {
get_user(w0, (char *) regs->cr_iip + 0);
get_user(w0, (char __user *) regs->cr_iip + 0);
if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
/*
* rfi'ing to slot 2 of an MLX bundle causes
......@@ -174,7 +174,7 @@ ia64_decrement_ip (struct pt_regs *regs)
if (ia64_psr(regs)->ri == 0) {
regs->cr_iip -= 16;
ri = 2;
get_user(w0, (char *) regs->cr_iip + 0);
get_user(w0, (char __user *) regs->cr_iip + 0);
if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
/*
* rfi'ing to slot 2 of an MLX bundle causes
......@@ -1458,11 +1458,11 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
goto out_tsk;
case PTRACE_GETREGS:
ret = ptrace_getregs(child, (struct pt_all_user_regs*) data);
ret = ptrace_getregs(child, (struct pt_all_user_regs __user *) data);
goto out_tsk;
case PTRACE_SETREGS:
ret = ptrace_setregs(child, (struct pt_all_user_regs*) data);
ret = ptrace_setregs(child, (struct pt_all_user_regs __user *) data);
goto out_tsk;
default:
......
......@@ -268,7 +268,7 @@ salinfo_event_open(struct inode *inode, struct file *file)
}
static ssize_t
salinfo_event_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode;
struct proc_dir_entry *entry = PDE(inode);
......@@ -426,7 +426,7 @@ salinfo_log_new_read(int cpu, struct salinfo_data *data)
}
static ssize_t
salinfo_log_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode;
struct proc_dir_entry *entry = PDE(inode);
......@@ -483,7 +483,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
}
static ssize_t
salinfo_log_write(struct file *file, const char *buffer, size_t count, loff_t *ppos)
salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode;
struct proc_dir_entry *entry = PDE(inode);
......
......@@ -17,7 +17,7 @@ struct sigframe {
* End of architected state.
*/
void *handler; /* pointer to the plabel of the signal handler */
void __user *handler; /* pointer to the plabel of the signal handler */
struct siginfo info;
struct sigcontext sc;
};
......
......@@ -43,7 +43,7 @@
#endif
long
ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
ia64_rt_sigsuspend (sigset_t __user *uset, size_t sigsetsize, struct sigscratch *scr)
{
sigset_t oldset, set;
......@@ -84,7 +84,7 @@ ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
}
asmlinkage long
sys_sigaltstack (const stack_t *uss, stack_t *uoss, long arg2, long arg3, long arg4,
sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2, long arg3, long arg4,
long arg5, long arg6, long arg7, long stack)
{
struct pt_regs *pt = (struct pt_regs *) &stack;
......@@ -93,7 +93,7 @@ sys_sigaltstack (const stack_t *uss, stack_t *uoss, long arg2, long arg3, long a
}
static long
restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
{
unsigned long ip, flags, nat, um, cfm;
long err;
......@@ -155,7 +155,7 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
}
int
copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from)
{
if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
return -EFAULT;
......@@ -211,12 +211,12 @@ long
ia64_rt_sigreturn (struct sigscratch *scr)
{
extern char ia64_strace_leave_kernel, ia64_leave_kernel;
struct sigcontext *sc;
struct sigcontext __user *sc;
struct siginfo si;
sigset_t set;
long retval;
sc = &((struct sigframe *) (scr->pt.r12 + 16))->sc;
sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc;
/*
* When we return to the previously executing context, r8 and r10 have already
......@@ -281,7 +281,7 @@ ia64_rt_sigreturn (struct sigscratch *scr)
* trampoline starts. Everything else is done at the user-level.
*/
static long
setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr)
{
unsigned long flags = 0, ifs, cfm, nat;
long err;
......@@ -352,7 +352,7 @@ rbs_on_sig_stack (unsigned long bsp)
}
static long
force_sigsegv_info (int sig, void *addr)
force_sigsegv_info (int sig, void __user *addr)
{
unsigned long flags;
struct siginfo si;
......@@ -387,14 +387,14 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
{
extern char __kernel_sigtramp[];
unsigned long tramp_addr, new_rbs = 0;
struct sigframe *frame;
struct sigframe __user *frame;
long err;
frame = (void *) scr->pt.r12;
frame = (void __user *) scr->pt.r12;
tramp_addr = (unsigned long) __kernel_sigtramp;
if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags((unsigned long) frame) == 0) {
frame = (void *) ((current->sas_ss_sp + current->sas_ss_size)
& ~(STACK_ALIGN - 1));
frame = (void __user *) ((current->sas_ss_sp + current->sas_ss_size)
& ~(STACK_ALIGN - 1));
/*
* We need to check for the register stack being on the signal stack
* separately, because it's switched separately (memory stack is switched
......@@ -403,7 +403,7 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
}
frame = (void *) frame - ((sizeof(*frame) + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1));
frame = (void __user *) frame - ((sizeof(*frame) + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return force_sigsegv_info(sig, frame);
......
......@@ -93,7 +93,7 @@ sys_getpagesize (void)
}
asmlinkage unsigned long
ia64_shmat (int shmid, void *shmaddr, int shmflg)
ia64_shmat (int shmid, void __user *shmaddr, int shmflg)
{
unsigned long raddr;
int retval;
......
......@@ -112,7 +112,7 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
int sig, code;
/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = break_num;
siginfo.si_flags = 0; /* clear __ISR_VALID */
siginfo.si_isr = 0;
......@@ -282,7 +282,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
fault_ip = regs->cr_iip;
if (!fp_fault && (ia64_psr(regs)->ri == 0))
fault_ip -= 16;
if (copy_from_user(bundle, (void *) fault_ip, sizeof(bundle)))
if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
return -1;
if (jiffies - last_time > 5*HZ)
......@@ -312,7 +312,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
siginfo.si_signo = SIGFPE;
siginfo.si_errno = 0;
siginfo.si_code = __SI_FAULT; /* default code */
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
if (isr & 0x11) {
siginfo.si_code = FPE_FLTINV;
} else if (isr & 0x22) {
......@@ -336,7 +336,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
siginfo.si_signo = SIGFPE;
siginfo.si_errno = 0;
siginfo.si_code = __SI_FAULT; /* default code */
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
if (isr & 0x880) {
siginfo.si_code = FPE_FLTOVF;
} else if (isr & 0x1100) {
......@@ -383,7 +383,7 @@ ia64_illegal_op_fault (unsigned long ec, unsigned long arg1, unsigned long arg2,
memset(&si, 0, sizeof(si));
si.si_signo = SIGILL;
si.si_code = ILL_ILLOPC;
si.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
si.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
force_sig_info(SIGILL, &si, current);
rv.fkt = 0;
return rv;
......@@ -445,18 +445,18 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 26: /* NaT Consumption */
if (user_mode(regs)) {
void *addr;
void __user *addr;
if (((isr >> 4) & 0xf) == 2) {
/* NaT page consumption */
sig = SIGSEGV;
code = SEGV_ACCERR;
addr = (void *) ifa;
addr = (void __user *) ifa;
} else {
/* register NaT consumption */
sig = SIGILL;
code = ILL_ILLOPN;
addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
}
siginfo.si_signo = sig;
siginfo.si_code = code;
......@@ -477,7 +477,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
siginfo.si_signo = SIGILL;
siginfo.si_code = ILL_ILLOPN;
siginfo.si_errno = 0;
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = vector;
siginfo.si_flags = __ISR_VALID;
siginfo.si_isr = isr;
......@@ -524,7 +524,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
}
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0;
siginfo.si_addr = (void *) ifa;
siginfo.si_addr = (void __user *) ifa;
siginfo.si_imm = 0;
siginfo.si_flags = __ISR_VALID;
siginfo.si_isr = isr;
......@@ -538,7 +538,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
siginfo.si_signo = SIGFPE;
siginfo.si_errno = 0;
siginfo.si_code = FPE_FLTINV;
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_flags = __ISR_VALID;
siginfo.si_isr = isr;
siginfo.si_imm = 0;
......@@ -565,7 +565,8 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
siginfo.si_flags = 0;
siginfo.si_isr = 0;
siginfo.si_imm = 0;
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_addr = (void __user *)
(regs->cr_iip + ia64_psr(regs)->ri);
force_sig_info(SIGILL, &siginfo, current);
return;
}
......
......@@ -760,7 +760,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
return -1;
}
/* this assumes little-endian byte-order: */
if (copy_from_user(&val, (void *) ifa, len))
if (copy_from_user(&val, (void __user *) ifa, len))
return -1;
setreg(ld.r1, val, 0, regs);
......@@ -887,7 +887,7 @@ emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
}
/* this assumes little-endian byte-order: */
if (copy_to_user((void *) ifa, &r2, len))
if (copy_to_user((void __user *) ifa, &r2, len))
return -1;
/*
......@@ -1036,8 +1036,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
* This assumes little-endian byte-order. Note that there is no "ldfpe"
* instruction:
*/
if (copy_from_user(&fpr_init[0], (void *) ifa, len)
|| copy_from_user(&fpr_init[1], (void *) (ifa + len), len))
if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
|| copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
return -1;
DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
......@@ -1138,7 +1138,7 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
* See comments in ldX for descriptions on how the various loads are handled.
*/
if (ld.x6_op != 0x2) {
if (copy_from_user(&fpr_init, (void *) ifa, len))
if (copy_from_user(&fpr_init, (void __user *) ifa, len))
return -1;
DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
......@@ -1230,7 +1230,7 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
DDUMP("fpr_init =", &fpr_init, len);
DDUMP("fpr_final =", &fpr_final, len);
if (copy_to_user((void *) ifa, &fpr_final, len))
if (copy_to_user((void __user *) ifa, &fpr_final, len))
return -1;
/*
......@@ -1351,7 +1351,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
if (__copy_from_user(bundle, (void *) regs->cr_iip, 16))
if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
goto failure;
/*
......@@ -1496,7 +1496,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRALN;
si.si_addr = (void *) ifa;
si.si_addr = (void __user *) ifa;
si.si_flags = 0;
si.si_isr = 0;
si.si_imm = 0;
......
......@@ -2299,7 +2299,7 @@ unw_init (void)
* EFAULT BUF points outside your accessible address space.
*/
asmlinkage long
sys_getunwind (void *buf, size_t buf_size)
sys_getunwind (void __user *buf, size_t buf_size)
{
if (buf && buf_size >= unw.gate_table_size)
if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
......
......@@ -105,7 +105,7 @@ unsigned long do_csum_c(const unsigned char * buff, int len, unsigned int psum)
extern unsigned long do_csum(const unsigned char *, long);
static unsigned int
do_csum_partial_copy_from_user (const char *src, char *dst, int len,
do_csum_partial_copy_from_user (const char __user *src, char *dst, int len,
unsigned int psum, int *errp)
{
unsigned long result;
......@@ -142,7 +142,7 @@ csum_partial_copy_from_user (const char __user *src, char *dst, int len,
}
unsigned int
csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum)
csum_partial_copy_nocheck(const char __user *src, char *dst, int len, unsigned int sum)
{
return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
}
......
......@@ -196,7 +196,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
si.si_signo = signal;
si.si_errno = 0;
si.si_code = code;
si.si_addr = (void *) address;
si.si_addr = (void __user *) address;
si.si_isr = isr;
si.si_flags = __ISR_VALID;
force_sig_info(signal, &si, current);
......
......@@ -460,9 +460,9 @@ ia64_pfn_valid (unsigned long pfn)
char byte;
struct page *pg = pfn_to_page(pfn);
return (__get_user(byte, (char *) pg) == 0)
return (__get_user(byte, (char __user *) pg) == 0)
&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
|| (__get_user(byte, (char *) (pg + 1) - 1) == 0));
|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);
......
......@@ -182,17 +182,17 @@ struct compat_shmid64_ds {
*/
typedef u32 compat_uptr_t;
static inline void *
static inline void __user *
compat_ptr (compat_uptr_t uptr)
{
return (void *) (unsigned long) uptr;
return (void __user *) (unsigned long) uptr;
}
static __inline__ void *
static __inline__ void __user *
compat_alloc_user_space (long len)
{
struct pt_regs *regs = ia64_task_regs(current);
return (void *) (((regs->r12 & 0xffffffff) & -16) - len);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
}
#endif /* _ASM_IA64_COMPAT_H */
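
compat_ptr() and compat_alloc_user_space() now return properly annotated pointers.  A hedged illustration of the typical consumer (the handler and structure layout are invented, not part of this patch): a 32-bit compat path receives a user pointer as a bare u32 and must convert it before touching the uaccess helpers:

#include <linux/compat.h>	/* compat_uptr_t, compat_ptr() */
#include <linux/errno.h>	/* -EFAULT */
#include <asm/uaccess.h>	/* copy_from_user() */

struct sketch_args {		/* hypothetical 32-bit-compatible layout */
	int		fd;
	unsigned int	len;
};

static long
sketch_compat_ioctl (unsigned int cmd, compat_uptr_t uarg32)
{
	struct sketch_args args;
	void __user *uarg = compat_ptr(uarg32);	/* u32 -> void __user * */

	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;
	/* ... act on the kernel copy ... */
	return 0;
}
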
......@@ -259,35 +259,35 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
ia64_intri_res; \
})
#define ia64_xchg1(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm __volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
#define ia64_xchg1(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm volatile ("xchg1 %0=[%1],%2" \
: "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg2(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm __volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg4(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm __volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
#define ia64_xchg8(ptr,x) \
({ \
__u64 ia64_intri_res; \
asm __volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
: "r" (ptr), "r" (x) : "memory"); \
ia64_intri_res; \
})
......
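
The gcc_intrin.h hunks are a pure spelling change: gcc builds the "asm __volatile" form, but (per the commit message) sparse did not accept it, and plain "asm volatile" satisfies both tools.  Restated here as an inline function rather than a statement-expression macro, only so the accepted spelling is visible in isolation (ia64-only, of course — xchg8 is an Itanium atomic exchange):

/* functionally the same as the ia64_xchg8() macro above */
static inline unsigned long
sketch_xchg8 (unsigned long *ptr, unsigned long val)
{
	unsigned long old;

	asm volatile ("xchg8 %0=[%1],%2"
		      : "=r" (old)
		      : "r" (ptr), "r" (val)
		      : "memory");
	return old;
}
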
......@@ -200,7 +200,7 @@ typedef struct {
#define GET_UNALIGN_CTL(task,addr) \
({ \
put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \
(int *) (addr)); \
(int __user *) (addr)); \
})
#define SET_FPEMU_CTL(task,value) \
......@@ -212,7 +212,7 @@ typedef struct {
#define GET_FPEMU_CTL(task,addr) \
({ \
put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \
(int *) (addr)); \
(int __user *) (addr)); \
})
#ifdef CONFIG_IA32_SUPPORT
......
......@@ -60,7 +60,7 @@ typedef struct siginfo {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
void *_addr; /* faulting insn/memory ref. */
void __user *_addr; /* faulting insn/memory ref. */
int _imm; /* immediate value for "break" */
unsigned int _flags; /* see below */
unsigned long _isr; /* isr */
......
......@@ -144,10 +144,10 @@
struct siginfo;
/* Type of a signal handler. */
typedef void (*__sighandler_t)(int);
typedef void __user (*__sighandler_t)(int);
typedef struct sigaltstack {
void *ss_sp;
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
......
......@@ -60,14 +60,17 @@
* address TASK_SIZE is never valid. We also need to make sure that the address doesn't
* point inside the virtually mapped linear page table.
*/
#define __access_ok(addr, size, segment) \
(likely((unsigned long) (addr) <= (segment).seg) \
&& ((segment).seg == KERNEL_DS.seg \
|| likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)))
#define __access_ok(addr, size, segment) \
({ \
__chk_user_ptr(addr); \
(likely((unsigned long) (addr) <= (segment).seg) \
&& ((segment).seg == KERNEL_DS.seg \
|| likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
})
#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
static inline int
verify_area (int type, const void *addr, unsigned long size)
verify_area (int type, const void __user *addr, unsigned long size)
{
return access_ok(type, addr, size) ? 0 : -EFAULT;
}
......@@ -185,11 +188,11 @@ extern void __get_user_unknown (void);
*/
#define __do_get_user(check, x, ptr, size, segment) \
({ \
const __typeof__(*(ptr)) *__gu_ptr = (ptr); \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
__typeof__ (size) __gu_size = (size); \
long __gu_err = -EFAULT, __gu_val = 0; \
\
if (!check || __access_ok((long) __gu_ptr, size, segment)) \
if (!check || __access_ok(__gu_ptr, size, segment)) \
switch (__gu_size) { \
case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
......@@ -213,11 +216,11 @@ extern void __put_user_unknown (void);
#define __do_put_user(check, x, ptr, size, segment) \
({ \
__typeof__ (x) __pu_x = (x); \
__typeof__ (*(ptr)) *__pu_ptr = (ptr); \
__typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \
__typeof__ (size) __pu_size = (size); \
long __pu_err = -EFAULT; \
\
if (!check || __access_ok((long) __pu_ptr, __pu_size, segment)) \
if (!check || __access_ok(__pu_ptr, __pu_size, segment)) \
switch (__pu_size) { \
case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \
case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \
......@@ -234,44 +237,64 @@ extern void __put_user_unknown (void);
/*
* Complex access routines
*/
extern unsigned long __copy_user (void *to, const void *from, unsigned long count);
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
unsigned long count);
#define __copy_to_user(to, from, n) __copy_user((to), (from), (n))
#define __copy_from_user(to, from, n) __copy_user((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define copy_to_user(to, from, n) __copy_tofrom_user((to), (from), (n), 1)
#define copy_from_user(to, from, n) __copy_tofrom_user((to), (from), (n), 0)
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
return __copy_user(to, (void __user *) from, count);
}
static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
return __copy_user((void __user *) to, from, count);
}
#define __copy_tofrom_user(to, from, n, check_to) \
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define copy_to_user(to, from, n) \
({ \
void *__cu_to = (to); \
void __user *__cu_to = (to); \
const void *__cu_from = (from); \
long __cu_len = (n); \
\
if (__access_ok((long) ((check_to) ? __cu_to : __cu_from), __cu_len, get_fs())) \
__cu_len = __copy_user(__cu_to, __cu_from, __cu_len); \
if (__access_ok(__cu_to, __cu_len, get_fs())) \
__cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len); \
__cu_len; \
})
#define copy_from_user(to, from, n) \
({ \
void *__cu_to = (to); \
const void __user *__cu_from = (from); \
long __cu_len = (n); \
\
__chk_user_ptr(__cu_from); \
if (__access_ok(__cu_from, __cu_len, get_fs())) \
__cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len); \
__cu_len; \
})
#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
static inline unsigned long
copy_in_user (void *to, const void *from, unsigned long n)
copy_in_user (void __user *to, const void __user *from, unsigned long n)
{
if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
n = __copy_user(to, from, n);
return n;
}
extern unsigned long __do_clear_user (void *, unsigned long);
extern unsigned long __do_clear_user (void __user *, unsigned long);
#define __clear_user(to, n) __do_clear_user(to, n)
#define clear_user(to, n) \
({ \
unsigned long __cu_len = (n); \
if (__access_ok((long) to, __cu_len, get_fs())) \
if (__access_ok(to, __cu_len, get_fs())) \
__cu_len = __do_clear_user(to, __cu_len); \
__cu_len; \
})
......@@ -281,25 +304,25 @@ extern unsigned long __do_clear_user (void *, unsigned long);
* Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
* strlen.
*/
extern long __strncpy_from_user (char *to, const char *from, long to_len);
extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
#define strncpy_from_user(to, from, n) \
({ \
const char * __sfu_from = (from); \
const char __user * __sfu_from = (from); \
long __sfu_ret = -EFAULT; \
if (__access_ok((long) __sfu_from, 0, get_fs())) \
if (__access_ok(__sfu_from, 0, get_fs())) \
__sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
__sfu_ret; \
})
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern unsigned long __strlen_user (const char *);
extern unsigned long __strlen_user (const char __user *);
#define strlen_user(str) \
({ \
const char *__su_str = (str); \
const char __user *__su_str = (str); \
unsigned long __su_ret = 0; \
if (__access_ok((long) __su_str, 0, get_fs())) \
if (__access_ok(__su_str, 0, get_fs())) \
__su_ret = __strlen_user(__su_str); \
__su_ret; \
})
......@@ -309,13 +332,13 @@ extern unsigned long __strlen_user (const char *);
* (N), a value greater than N if the limit would be exceeded, else
* strlen.
*/
extern unsigned long __strnlen_user (const char *, long);
extern unsigned long __strnlen_user (const char __user *, long);
#define strnlen_user(str, len) \
({ \
const char *__su_str = (str); \
const char __user *__su_str = (str); \
unsigned long __su_ret = 0; \
if (__access_ok((long) __su_str, 0, get_fs())) \
if (__access_ok(__su_str, 0, get_fs())) \
__su_ret = __strnlen_user(__su_str, len); \
__su_ret; \
})
......
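
The uaccess.h rework is what gives the __user annotations on the file_operations handlers earlier in the patch (pfm_read(), salinfo_log_read(), and friends) their bite: __chk_user_ptr() and the annotated copy helpers make sparse complain whenever a plain kernel pointer is passed where a user pointer is expected.  A generic, hedged sketch of a consumer written against the annotated API (the buffer and handler are invented, not from this patch):

#include <linux/fs.h>		/* struct file, loff_t */
#include <linux/errno.h>	/* -EFAULT */
#include <asm/uaccess.h>	/* copy_to_user() */

static char sketch_buf[64];	/* hypothetical device data */

static ssize_t
sketch_read (struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
{
	size_t avail = sizeof(sketch_buf);

	if (*ppos >= avail)
		return 0;				/* EOF */
	if (count > avail - *ppos)
		count = avail - *ppos;
	if (copy_to_user(ubuf, sketch_buf + *ppos, count))
		return -EFAULT;				/* unwritable user buffer */
	*ppos += count;
	return count;
}
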
......@@ -369,8 +369,8 @@ asmlinkage unsigned long sys_mmap2(
int fd, long pgoff);
struct pt_regs;
struct sigaction;
asmlinkage long sys_execve(char *filename, char **argv, char **envp,
struct pt_regs *regs);
asmlinkage long sys_execve(char __user *filename, char __user * __user *argv,
char __user * __user *envp, struct pt_regs *regs);
asmlinkage long sys_pipe(long arg0, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6, long arg7, long stack);
asmlinkage long sys_ptrace(long request, pid_t pid,
......