Commit 904818e2 authored by Hendrik Brueckner, committed by Martin Schwidefsky

s390/kernel: introduce fpu-internal.h with fpu helper functions

Introduce a new structure to manage FP and VX registers. Refactor the
saving and restoring of floating-point and vector registers into a set
of helper functions in fpu-internal.h.
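
The resulting interface is one per-task save area plus a pair of entry
points that hide the FP/VX distinction from callers. A condensed sketch
(the full definitions follow in fpu-internal.h; prev/next stand for the
outgoing and incoming tasks as in switch_to()):

    struct fpu {
        __u32 fpc;                 /* floating-point control */
        __u32 pad;
        freg_t fprs[__NUM_FPRS];   /* FP register save area */
        __vector128 *vxrs;         /* VX save area, NULL until the task uses VX */
    };

    /* callers no longer test thread.vxrs themselves */
    save_fpu_regs(&prev->thread.fpu);      /* stores FPC, then VXRs or FPRs */
    restore_fpu_regs(&next->thread.fpu);   /* reloads them on switch-in */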
Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 96b2d7a8
/*
* General floating-point and vector register helpers
*
* Copyright IBM Corp. 2015
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
#ifndef _ASM_S390_FPU_INTERNAL_H
#define _ASM_S390_FPU_INTERNAL_H
#include <linux/errno.h>
#include <linux/string.h>
#include <asm/linkage.h>
#include <asm/ctl_reg.h>
#include <asm/sigcontext.h>
struct fpu {
__u32 fpc; /* Floating-point control */
__u32 pad;
freg_t fprs[__NUM_FPRS]; /* Floating-point register save area */
__vector128 *vxrs; /* Vector register save area */
};
#define is_vx_fpu(fpu) (!!(fpu)->vxrs)
#define is_vx_task(tsk) (!!(tsk)->thread.fpu.vxrs)
static inline int test_fp_ctl(u32 fpc)
{
u32 orig_fpc;
int rc;
asm volatile(
" efpc %1\n"
" sfpc %2\n"
"0: sfpc %1\n"
" la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (rc), "=d" (orig_fpc)
: "d" (fpc), "0" (-EINVAL));
return rc;
}
static inline void save_fp_ctl(u32 *fpc)
{
asm volatile(
" stfpc %0\n"
: "+Q" (*fpc));
}
static inline int restore_fp_ctl(u32 *fpc)
{
int rc;
asm volatile(
" lfpc %1\n"
"0: la %0,0\n"
"1:\n"
: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
return rc;
}
static inline void save_fp_regs(freg_t *fprs)
{
asm volatile("std 0,%0" : "=Q" (fprs[0]));
asm volatile("std 2,%0" : "=Q" (fprs[2]));
asm volatile("std 4,%0" : "=Q" (fprs[4]));
asm volatile("std 6,%0" : "=Q" (fprs[6]));
asm volatile("std 1,%0" : "=Q" (fprs[1]));
asm volatile("std 3,%0" : "=Q" (fprs[3]));
asm volatile("std 5,%0" : "=Q" (fprs[5]));
asm volatile("std 7,%0" : "=Q" (fprs[7]));
asm volatile("std 8,%0" : "=Q" (fprs[8]));
asm volatile("std 9,%0" : "=Q" (fprs[9]));
asm volatile("std 10,%0" : "=Q" (fprs[10]));
asm volatile("std 11,%0" : "=Q" (fprs[11]));
asm volatile("std 12,%0" : "=Q" (fprs[12]));
asm volatile("std 13,%0" : "=Q" (fprs[13]));
asm volatile("std 14,%0" : "=Q" (fprs[14]));
asm volatile("std 15,%0" : "=Q" (fprs[15]));
}
static inline void restore_fp_regs(freg_t *fprs)
{
asm volatile("ld 0,%0" : : "Q" (fprs[0]));
asm volatile("ld 2,%0" : : "Q" (fprs[2]));
asm volatile("ld 4,%0" : : "Q" (fprs[4]));
asm volatile("ld 6,%0" : : "Q" (fprs[6]));
asm volatile("ld 1,%0" : : "Q" (fprs[1]));
asm volatile("ld 3,%0" : : "Q" (fprs[3]));
asm volatile("ld 5,%0" : : "Q" (fprs[5]));
asm volatile("ld 7,%0" : : "Q" (fprs[7]));
asm volatile("ld 8,%0" : : "Q" (fprs[8]));
asm volatile("ld 9,%0" : : "Q" (fprs[9]));
asm volatile("ld 10,%0" : : "Q" (fprs[10]));
asm volatile("ld 11,%0" : : "Q" (fprs[11]));
asm volatile("ld 12,%0" : : "Q" (fprs[12]));
asm volatile("ld 13,%0" : : "Q" (fprs[13]));
asm volatile("ld 14,%0" : : "Q" (fprs[14]));
asm volatile("ld 15,%0" : : "Q" (fprs[15]));
}
static inline void save_vx_regs(__vector128 *vxrs)
{
typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
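/* The vstm opcodes are emitted as raw .word constants; this avoids
 * requiring vector-instruction support in the assembler. */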
asm volatile(
" la 1,%0\n"
" .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
: "=Q" (*(addrtype *) vxrs) : : "1");
}
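/*
 * Variant for contexts that cannot assume the vector facility is usable
 * (e.g. machine-check handling): CR0 is saved and the AFP-register and
 * vector-enablement controls are set around the store, with interrupts
 * disabled for the duration.
 */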
static inline void save_vx_regs_safe(__vector128 *vxrs)
{
unsigned long cr0, flags;
flags = arch_local_irq_save();
__ctl_store(cr0, 0, 0);
__ctl_set_bit(0, 17);
__ctl_set_bit(0, 18);
save_vx_regs(vxrs);
__ctl_load(cr0, 0, 0);
arch_local_irq_restore(flags);
}
static inline void restore_vx_regs(__vector128 *vxrs)
{
typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
asm volatile(
" la 1,%0\n"
" .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
: : "Q" (*(addrtype *) vxrs) : "1");
}
static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
{
int i;
for (i = 0; i < __NUM_FPRS; i++)
fprs[i] = *(freg_t *)(vxrs + i);
}
static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
{
int i;
for (i = 0; i < __NUM_FPRS; i++)
*(freg_t *)(vxrs + i) = fprs[i];
}
static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
{
fpregs->pad = 0;
if (is_vx_fpu(fpu))
convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
else
memcpy((freg_t *)&fpregs->fprs, fpu->fprs,
sizeof(fpregs->fprs));
}
static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
{
if (is_vx_fpu(fpu))
convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
else
memcpy(fpu->fprs, (freg_t *)&fpregs->fprs,
sizeof(fpregs->fprs));
}
static inline void save_fpu_regs(struct fpu *fpu)
{
save_fp_ctl(&fpu->fpc);
if (is_vx_fpu(fpu))
save_vx_regs(fpu->vxrs);
else
save_fp_regs(fpu->fprs);
}
static inline void restore_fpu_regs(struct fpu *fpu)
{
restore_fp_ctl(&fpu->fpc);
if (is_vx_fpu(fpu))
restore_vx_regs(fpu->vxrs);
else
restore_fp_regs(fpu->fprs);
}
#endif /* _ASM_S390_FPU_INTERNAL_H */
@@ -28,6 +28,7 @@
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
#include <asm/fpu-internal.h>
static inline void set_cpu_flag(int flag)
{
@@ -85,7 +86,7 @@ typedef struct {
* Thread structure
*/
struct thread_struct {
s390_fp_regs fp_regs;
struct fpu fpu; /* FP and VX register save area */
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
mm_segment_t mm_segment;
@@ -101,7 +102,6 @@ struct thread_struct {
struct runtime_instr_cb *ri_cb;
int ri_signum;
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
__vector128 *vxrs; /* Vector register save area */
};
/* Flag to disable transactions. */
@@ -8,138 +8,12 @@
#define __ASM_SWITCH_TO_H
#include <linux/thread_info.h>
#include <asm/fpu-internal.h>
#include <asm/ptrace.h>
extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);
static inline int test_fp_ctl(u32 fpc)
{
u32 orig_fpc;
int rc;
asm volatile(
" efpc %1\n"
" sfpc %2\n"
"0: sfpc %1\n"
" la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (rc), "=d" (orig_fpc)
: "d" (fpc), "0" (-EINVAL));
return rc;
}
static inline void save_fp_ctl(u32 *fpc)
{
asm volatile(
" stfpc %0\n"
: "+Q" (*fpc));
}
static inline int restore_fp_ctl(u32 *fpc)
{
int rc;
asm volatile(
" lfpc %1\n"
"0: la %0,0\n"
"1:\n"
: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
return rc;
}
static inline void save_fp_regs(freg_t *fprs)
{
asm volatile("std 0,%0" : "=Q" (fprs[0]));
asm volatile("std 2,%0" : "=Q" (fprs[2]));
asm volatile("std 4,%0" : "=Q" (fprs[4]));
asm volatile("std 6,%0" : "=Q" (fprs[6]));
asm volatile("std 1,%0" : "=Q" (fprs[1]));
asm volatile("std 3,%0" : "=Q" (fprs[3]));
asm volatile("std 5,%0" : "=Q" (fprs[5]));
asm volatile("std 7,%0" : "=Q" (fprs[7]));
asm volatile("std 8,%0" : "=Q" (fprs[8]));
asm volatile("std 9,%0" : "=Q" (fprs[9]));
asm volatile("std 10,%0" : "=Q" (fprs[10]));
asm volatile("std 11,%0" : "=Q" (fprs[11]));
asm volatile("std 12,%0" : "=Q" (fprs[12]));
asm volatile("std 13,%0" : "=Q" (fprs[13]));
asm volatile("std 14,%0" : "=Q" (fprs[14]));
asm volatile("std 15,%0" : "=Q" (fprs[15]));
}
static inline void restore_fp_regs(freg_t *fprs)
{
asm volatile("ld 0,%0" : : "Q" (fprs[0]));
asm volatile("ld 2,%0" : : "Q" (fprs[2]));
asm volatile("ld 4,%0" : : "Q" (fprs[4]));
asm volatile("ld 6,%0" : : "Q" (fprs[6]));
asm volatile("ld 1,%0" : : "Q" (fprs[1]));
asm volatile("ld 3,%0" : : "Q" (fprs[3]));
asm volatile("ld 5,%0" : : "Q" (fprs[5]));
asm volatile("ld 7,%0" : : "Q" (fprs[7]));
asm volatile("ld 8,%0" : : "Q" (fprs[8]));
asm volatile("ld 9,%0" : : "Q" (fprs[9]));
asm volatile("ld 10,%0" : : "Q" (fprs[10]));
asm volatile("ld 11,%0" : : "Q" (fprs[11]));
asm volatile("ld 12,%0" : : "Q" (fprs[12]));
asm volatile("ld 13,%0" : : "Q" (fprs[13]));
asm volatile("ld 14,%0" : : "Q" (fprs[14]));
asm volatile("ld 15,%0" : : "Q" (fprs[15]));
}
static inline void save_vx_regs(__vector128 *vxrs)
{
typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
asm volatile(
" la 1,%0\n"
" .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
: "=Q" (*(addrtype *) vxrs) : : "1");
}
static inline void save_vx_regs_safe(__vector128 *vxrs)
{
unsigned long cr0, flags;
flags = arch_local_irq_save();
__ctl_store(cr0, 0, 0);
__ctl_set_bit(0, 17);
__ctl_set_bit(0, 18);
save_vx_regs(vxrs);
__ctl_load(cr0, 0, 0);
arch_local_irq_restore(flags);
}
static inline void restore_vx_regs(__vector128 *vxrs)
{
typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
asm volatile(
" la 1,%0\n"
" .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
: : "Q" (*(addrtype *) vxrs) : "1");
}
static inline void save_fp_vx_regs(struct task_struct *task)
{
if (task->thread.vxrs)
save_vx_regs(task->thread.vxrs);
else
save_fp_regs(task->thread.fp_regs.fprs);
}
static inline void restore_fp_vx_regs(struct task_struct *task)
{
if (task->thread.vxrs)
restore_vx_regs(task->thread.vxrs);
else
restore_fp_regs(task->thread.fp_regs.fprs);
}
static inline void save_access_regs(unsigned int *acrs)
{
typedef struct { int _[NUM_ACRS]; } acrstype;
@@ -156,15 +30,13 @@ static inline void restore_access_regs(unsigned int *acrs)
#define switch_to(prev,next,last) do { \
if (prev->mm) { \
save_fp_ctl(&prev->thread.fp_regs.fpc); \
save_fp_vx_regs(prev); \
save_fpu_regs(&prev->thread.fpu); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
if (next->mm) { \
update_cr_regs(next); \
restore_fp_ctl(&next->thread.fp_regs.fpc); \
restore_fp_vx_regs(next); \
restore_fpu_regs(&next->thread.fpu); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
} \
@@ -153,33 +153,15 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
/* Store registers needed to create the signal frame */
static void store_sigregs(void)
{
int i;
save_access_regs(current->thread.acrs);
save_fp_ctl(&current->thread.fp_regs.fpc);
if (current->thread.vxrs) {
save_vx_regs(current->thread.vxrs);
for (i = 0; i < __NUM_FPRS; i++)
current->thread.fp_regs.fprs[i] =
*(freg_t *)(current->thread.vxrs + i);
} else
save_fp_regs(current->thread.fp_regs.fprs);
save_fpu_regs(&current->thread.fpu);
}
/* Load registers after signal return */
static void load_sigregs(void)
{
int i;
restore_access_regs(current->thread.acrs);
restore_fp_ctl(&current->thread.fp_regs.fpc);
if (current->thread.vxrs) {
for (i = 0; i < __NUM_FPRS; i++)
*(freg_t *)(current->thread.vxrs + i) =
current->thread.fp_regs.fprs[i];
restore_vx_regs(current->thread.vxrs);
} else
restore_fp_regs(current->thread.fp_regs.fprs);
restore_fpu_regs(&current->thread.fpu);
}
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
@@ -196,8 +178,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
sizeof(user_sregs.regs.acrs));
memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
sizeof(user_sregs.fpregs));
fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
return -EFAULT;
return 0;
@@ -235,9 +216,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
sizeof(current->thread.acrs));
memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
sizeof(current->thread.fp_regs));
fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0;
@@ -258,13 +237,13 @@ static int save_sigregs_ext32(struct pt_regs *regs,
return -EFAULT;
/* Save vector registers to signal stack */
if (current->thread.vxrs) {
if (is_vx_task(current)) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
sizeof(sregs_ext->vxrs_low)) ||
__copy_to_user(&sregs_ext->vxrs_high,
current->thread.vxrs + __NUM_VXRS_LOW,
current->thread.fpu.vxrs + __NUM_VXRS_LOW,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
@@ -286,15 +265,15 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
*(__u32 *)&regs->gprs[i] = gprs_high[i];
/* Restore vector registers from signal stack */
if (current->thread.vxrs) {
if (is_vx_task(current)) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
__copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
__copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
&sregs_ext->vxrs_high,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
for (i = 0; i < __NUM_VXRS_LOW; i++)
*((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
*((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i];
}
return 0;
}
@@ -472,7 +451,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
*/
uc_flags = UC_GPRS_HIGH;
if (MACHINE_HAS_VX) {
if (current->thread.vxrs)
if (is_vx_task(current))
uc_flags |= UC_VXRS;
} else
frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
@@ -21,6 +21,7 @@
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
#include <asm/fpu-internal.h>
#include <asm/ctl_reg.h>
struct mcck_struct {
@@ -81,8 +81,8 @@ void release_thread(struct task_struct *dead_task)
void arch_release_task_struct(struct task_struct *tsk)
{
if (tsk->thread.vxrs)
kfree(tsk->thread.vxrs);
if (is_vx_task(tsk))
kfree(tsk->thread.fpu.vxrs);
}
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
@@ -143,10 +143,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
frame->childregs.psw.mask &= ~PSW_MASK_RI;
/* Save the fpu registers to new thread structure. */
save_fp_ctl(&p->thread.fp_regs.fpc);
save_fp_regs(p->thread.fp_regs.fprs);
p->thread.fp_regs.pad = 0;
p->thread.vxrs = NULL;
save_fp_ctl(&p->thread.fpu.fpc);
save_fp_regs(p->thread.fpu.fprs);
p->thread.fpu.pad = 0;
p->thread.fpu.vxrs = NULL;
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
unsigned long tls = frame->childregs.gprs[6];
@@ -162,7 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
asmlinkage void execve_tail(void)
{
current->thread.fp_regs.fpc = 0;
current->thread.fpu.fpc = 0;
asm volatile("sfpc %0" : : "d" (0));
}
@@ -59,7 +59,7 @@ void update_cr_regs(struct task_struct *task)
if (MACHINE_HAS_VX) {
/* Enable/disable of vector extension */
cr_new &= ~(1UL << 17);
if (task->thread.vxrs)
if (task->thread.fpu.vxrs)
cr_new |= (1UL << 17);
}
if (cr_new != cr)
@@ -242,21 +242,21 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
/*
* floating point control reg. is in the thread structure
*/
tmp = child->thread.fp_regs.fpc;
tmp = child->thread.fpu.fpc;
tmp <<= BITS_PER_LONG - 32;
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
/*
* floating point regs. are either in child->thread.fp_regs
* or the child->thread.vxrs array
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
if (child->thread.vxrs)
if (is_vx_task(child))
tmp = *(addr_t *)
((addr_t) child->thread.vxrs + 2*offset);
((addr_t) child->thread.fpu.vxrs + 2*offset);
else
tmp = *(addr_t *)
((addr_t) &child->thread.fp_regs.fprs + offset);
((addr_t) &child->thread.fpu.fprs + offset);
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
/*
@@ -387,20 +387,20 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
if ((unsigned int) data != 0 ||
test_fp_ctl(data >> (BITS_PER_LONG - 32)))
return -EINVAL;
child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32);
child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
/*
* floating point regs. are either in child->thread.fp_regs
* or the child->thread.vxrs array
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
if (child->thread.vxrs)
if (is_vx_task(child))
*(addr_t *)((addr_t)
child->thread.vxrs + 2*offset) = data;
child->thread.fpu.vxrs + 2*offset) = data;
else
*(addr_t *)((addr_t)
&child->thread.fp_regs.fprs + offset) = data;
&child->thread.fpu.fprs + offset) = data;
} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
/*
@@ -621,20 +621,20 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
/*
* floating point control reg. is in the thread structure
*/
tmp = child->thread.fp_regs.fpc;
tmp = child->thread.fpu.fpc;
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
/*
* floating point regs. are either in child->thread.fp_regs
* or the child->thread.vxrs array
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
if (child->thread.vxrs)
if (is_vx_task(child))
tmp = *(__u32 *)
((addr_t) child->thread.vxrs + 2*offset);
((addr_t) child->thread.fpu.vxrs + 2*offset);
else
tmp = *(__u32 *)
((addr_t) &child->thread.fp_regs.fprs + offset);
((addr_t) &child->thread.fpu.fprs + offset);
} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
/*
@@ -746,20 +746,20 @@ static int __poke_user_compat(struct task_struct *child,
*/
if (test_fp_ctl(tmp))
return -EINVAL;
child->thread.fp_regs.fpc = data;
child->thread.fpu.fpc = data;
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
/*
* floating point regs. are either in child->thread.fp_regs
* or the child->thread.vxrs array
* floating point regs. are either in child->thread.fpu
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
if (child->thread.vxrs)
if (is_vx_task(child))
*(__u32 *)((addr_t)
child->thread.vxrs + 2*offset) = tmp;
child->thread.fpu.vxrs + 2*offset) = tmp;
else
*(__u32 *)((addr_t)
&child->thread.fp_regs.fprs + offset) = tmp;
&child->thread.fpu.fprs + offset) = tmp;
} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
/*
@@ -952,18 +952,16 @@ static int s390_fpregs_get(struct task_struct *target,
const struct user_regset *regset, unsigned int pos,
unsigned int count, void *kbuf, void __user *ubuf)
{
if (target == current) {
save_fp_ctl(&target->thread.fp_regs.fpc);
save_fp_regs(target->thread.fp_regs.fprs);
} else if (target->thread.vxrs) {
int i;
_s390_fp_regs fp_regs;
if (target == current)
save_fpu_regs(&target->thread.fpu);
fp_regs.fpc = target->thread.fpu.fpc;
fpregs_store(&fp_regs, &target->thread.fpu);
for (i = 0; i < __NUM_VXRS_LOW; i++)
target->thread.fp_regs.fprs[i] =
*(freg_t *)(target->thread.vxrs + i);
}
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_regs, 0, -1);
&fp_regs, 0, -1);
}
static int s390_fpregs_set(struct task_struct *target,
@@ -972,41 +970,36 @@ static int s390_fpregs_set(struct task_struct *target,
const void __user *ubuf)
{
int rc = 0;
freg_t fprs[__NUM_FPRS];
if (target == current) {
save_fp_ctl(&target->thread.fp_regs.fpc);
save_fp_regs(target->thread.fp_regs.fprs);
}
if (target == current)
save_fpu_regs(&target->thread.fpu);
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
0, offsetof(s390_fp_regs, fprs));
if (rc)
return rc;
if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
return -EINVAL;
target->thread.fp_regs.fpc = ufpc[0];
target->thread.fpu.fpc = ufpc[0];
}
if (rc == 0 && count > 0)
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.fp_regs.fprs,
offsetof(s390_fp_regs, fprs), -1);
fprs, offsetof(s390_fp_regs, fprs), -1);
if (rc)
return rc;
if (rc == 0) {
if (target == current) {
restore_fp_ctl(&target->thread.fp_regs.fpc);
restore_fp_regs(target->thread.fp_regs.fprs);
} else if (target->thread.vxrs) {
int i;
if (is_vx_task(target))
convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
else
memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
for (i = 0; i < __NUM_VXRS_LOW; i++)
*(freg_t *)(target->thread.vxrs + i) =
target->thread.fp_regs.fprs[i];
}
}
if (target == current)
restore_fpu_regs(&target->thread.fpu);
return rc;
}
@@ -1069,11 +1062,11 @@ static int s390_vxrs_low_get(struct task_struct *target,
if (!MACHINE_HAS_VX)
return -ENODEV;
if (target->thread.vxrs) {
if (is_vx_task(target)) {
if (target == current)
save_vx_regs(target->thread.vxrs);
save_fpu_regs(&target->thread.fpu);
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1);
vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
} else
memset(vxrs, 0, sizeof(vxrs));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
@@ -1089,19 +1082,19 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (!MACHINE_HAS_VX)
return -ENODEV;
if (!target->thread.vxrs) {
if (!is_vx_task(target)) {
rc = alloc_vector_registers(target);
if (rc)
return rc;
} else if (target == current)
save_vx_regs(target->thread.vxrs);
save_fpu_regs(&target->thread.fpu);
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
*((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i];
*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
if (target == current)
restore_vx_regs(target->thread.vxrs);
restore_fpu_regs(&target->thread.fpu);
}
return rc;
@@ -1116,10 +1109,10 @@ static int s390_vxrs_high_get(struct task_struct *target,
if (!MACHINE_HAS_VX)
return -ENODEV;
if (target->thread.vxrs) {
if (is_vx_task(target)) {
if (target == current)
save_vx_regs(target->thread.vxrs);
memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW,
save_fpu_regs(&target->thread.fpu);
memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
sizeof(vxrs));
} else
memset(vxrs, 0, sizeof(vxrs));
@@ -1135,17 +1128,17 @@ static int s390_vxrs_high_set(struct task_struct *target,
if (!MACHINE_HAS_VX)
return -ENODEV;
if (!target->thread.vxrs) {
if (!is_vx_task(target)) {
rc = alloc_vector_registers(target);
if (rc)
return rc;
} else if (target == current)
save_vx_regs(target->thread.vxrs);
save_fpu_regs(&target->thread.fpu);
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.vxrs + __NUM_VXRS_LOW, 0, -1);
target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
if (rc == 0 && target == current)
restore_vx_regs(target->thread.vxrs);
restore_vx_regs(target->thread.fpu.vxrs);
return rc;
}
@@ -105,32 +105,14 @@ struct rt_sigframe
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
save_fp_ctl(&current->thread.fp_regs.fpc);
if (current->thread.vxrs) {
int i;
save_vx_regs(current->thread.vxrs);
for (i = 0; i < __NUM_FPRS; i++)
current->thread.fp_regs.fprs[i] =
*(freg_t *)(current->thread.vxrs + i);
} else
save_fp_regs(current->thread.fp_regs.fprs);
save_fpu_regs(&current->thread.fpu);
}
/* Load registers after signal return */
static void load_sigregs(void)
{
restore_access_regs(current->thread.acrs);
restore_fp_ctl(&current->thread.fp_regs.fpc);
if (current->thread.vxrs) {
int i;
for (i = 0; i < __NUM_FPRS; i++)
*(freg_t *)(current->thread.vxrs + i) =
current->thread.fp_regs.fprs[i];
restore_vx_regs(current->thread.vxrs);
} else
restore_fp_regs(current->thread.fp_regs.fprs);
restore_fpu_regs(&current->thread.fpu);
}
/* Returns non-zero on fault. */
@@ -146,8 +128,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
sizeof(user_sregs.regs.acrs));
memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
sizeof(user_sregs.fpregs));
fpregs_store(&user_sregs.fpregs, &current->thread.fpu);
if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
return -EFAULT;
return 0;
@@ -185,8 +166,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
sizeof(current->thread.acrs));
memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
sizeof(current->thread.fp_regs));
fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0;
@@ -200,13 +180,13 @@ static int save_sigregs_ext(struct pt_regs *regs,
int i;
/* Save vector registers to signal stack */
if (current->thread.vxrs) {
if (is_vx_task(current)) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
sizeof(sregs_ext->vxrs_low)) ||
__copy_to_user(&sregs_ext->vxrs_high,
current->thread.vxrs + __NUM_VXRS_LOW,
current->thread.fpu.vxrs + __NUM_VXRS_LOW,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
@@ -220,15 +200,15 @@ static int restore_sigregs_ext(struct pt_regs *regs,
int i;
/* Restore vector registers from signal stack */
if (current->thread.vxrs) {
if (is_vx_task(current)) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
__copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
__copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
&sregs_ext->vxrs_high,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
for (i = 0; i < __NUM_VXRS_LOW; i++)
*((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
*((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i];
}
return 0;
}
@@ -400,7 +380,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
uc_flags = 0;
if (MACHINE_HAS_VX) {
frame_size += sizeof(_sigregs_ext);
if (current->thread.vxrs)
if (is_vx_task(current))
uc_flags |= UC_VXRS;
}
frame = get_sigframe(&ksig->ka, regs, frame_size);
@@ -19,7 +19,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/switch_to.h>
#include <asm/fpu-internal.h>
#include "entry.h"
int show_unhandled_signals = 1;
@@ -227,7 +227,6 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
int alloc_vector_registers(struct task_struct *tsk)
{
__vector128 *vxrs;
int i;
/* Allocate vector register save area. */
vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS,
@@ -236,11 +235,10 @@ int alloc_vector_registers(struct task_struct *tsk)
return -ENOMEM;
preempt_disable();
if (tsk == current)
save_fp_regs(tsk->thread.fp_regs.fprs);
save_fp_regs(tsk->thread.fpu.fprs);
/* Copy the 16 floating point registers */
for (i = 0; i < 16; i++)
*(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i];
tsk->thread.vxrs = vxrs;
convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs);
tsk->thread.fpu.vxrs = vxrs;
if (tsk == current) {
__ctl_set_bit(0, 17);
restore_vx_regs(vxrs);
@@ -259,8 +257,8 @@ void vector_exception(struct pt_regs *regs)
}
/* get vector interrupt code from fpc */
asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
asm volatile("stfpc %0" : "=Q" (current->thread.fpu.fpc));
vic = (current->thread.fpu.fpc & 0xf00) >> 8;
switch (vic) {
case 1: /* invalid vector operation */
si_code = FPE_FLTINV;
@@ -297,22 +295,22 @@ void data_exception(struct pt_regs *regs)
location = get_trap_ip(regs);
asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
asm volatile("stfpc %0" : "=Q" (current->thread.fpu.fpc));
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !current->thread.vxrs &&
(current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
if (MACHINE_HAS_VX && !is_vx_task(current) &&
(current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) {
alloc_vector_registers(current);
/* Vector data exception is suppressing, rewind psw. */
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
clear_pt_regs_flag(regs, PIF_PER_TRAP);
return;
}
if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
if (current->thread.fpu.fpc & FPC_DXC_MASK)
signal = SIGFPE;
else
signal = SIGILL;
if (signal == SIGFPE)
do_fp_trap(regs, current->thread.fp_regs.fpc);
do_fp_trap(regs, current->thread.fpu.fpc);
else if (signal)
do_trap(regs, signal, ILL_ILLOPN, "data exception");
}