Commit 280c84d1 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "The bulk of the patches for the 3.13 merge window.

  Heiko put quite a bit of work into improving the code generation for
  the kernel.  That includes the exploitation of the interlocked-access
  facility for the atomics and bitops implementations and improvements
  to the -march and -mtune compiler settings.

  Another important change is the removal of the user_mode=home option;
  user processes now always run in primary space.  The storage keys are
  no longer initialized at system startup; with that, the storage key
  removal work is complete.  For the PCI support, the hibernation hooks
  have been implemented.

  And as usual cleanup and fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (62 commits)
  s390/scm_blk: fix endless loop for requests != REQ_TYPE_FS
  s390/mm,tlb: correct tlb flush on page table upgrade
  s390/mm: page_table_realloc returns failure
  s390: allow to set gcc -mtune flag
  s390/percpu: remove this_cpu_xor() implementation
  s390/vtime: correct idle time calculation
  s390/time: fix get_tod_clock_ext inline assembly
  tty/hvc_iucv: remove redundant NULL check
  s390/dasd: Write to profile data area only if it is available
  s390: convert use of typedef ctl_table to struct ctl_table
  s390/pci: cleanup function information block
  s390/pci: remove CONFIG_PCI_DEBUG dependancy
  s390/pci: message cleanup
  Update default configuration
  s390: add a couple of useful defconfigs
  s390/percpu: make use of interlocked-access facility 1 instructions
  s390/percpu: use generic percpu ops for CONFIG_32BIT
  s390/compat: make psw32_user_bits a constant value again
  s390: fix handling of runtime instrumentation psw bit
  s390: fix save and restore of the floating-point-control register
  ...
parents 8efdf2b7 de9587a2
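
The headline change above is the use of the interlocked-access facility for
atomics, bitops and percpu ops. As a rough stand-alone illustration (not part
of the patches below), the GCC __atomic builtins show the same difference:
built with -march=z196 on s390x, the fetch-and-add becomes a single
interlocked "laa" instruction, while older machines have to emulate it with a
compare-and-swap retry loop, which is what the old __CS_LOOP macro in the
atomic.h diff below did:

#include <stdio.h>

/* The old __CS_LOOP approach: load, compute, compare-and-swap, retry. */
static int cas_fetch_add(int *p, int val)
{
	int old = __atomic_load_n(p, __ATOMIC_RELAXED);

	/* on failure "old" is refreshed with the current value */
	while (!__atomic_compare_exchange_n(p, &old, old + val, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
		;
	return old;
}

int main(void)
{
	int counter = 40;

	/* one interlocked instruction on z196 and newer */
	__atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
	cas_fetch_add(&counter, 1);
	printf("%d\n", counter);	/* prints 42 */
	return 0;
}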
@@ -157,6 +157,16 @@ Return Value: none
 Description:  Sets new actual debug level if new_level is valid.
 
+---------------------------------------------------------------------------
+bool debug_level_enabled (debug_info_t * id, int level);
+
+Parameter:    id:    handle for debug log
+              level: debug level
+
+Return Value: True if level is less or equal to the current debug level.
+
+Description:  Returns true if debug events for the specified level would be
+              logged. Otherwise returns false.
 ---------------------------------------------------------------------------
 void debug_stop_all(void);
...
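
A hypothetical usage sketch for the new debug_level_enabled() helper (driver
name, level numbers and entry sizes are invented; the other calls are the
existing s390 debug feature API):

#include <asm/debug.h>

static debug_info_t *my_dbf;

static int my_init(void)
{
	/* 4 pages per area, 1 area, up to 200 bytes per event */
	my_dbf = debug_register("mydrv", 4, 1, 200);
	if (!my_dbf)
		return -ENOMEM;
	debug_register_view(my_dbf, &debug_sprintf_view);
	debug_set_level(my_dbf, 2);

	/* with the level set to 2, a level 3 event would be dropped, so
	 * this check lets callers skip building the event data entirely */
	if (debug_level_enabled(my_dbf, 3))
		debug_sprintf_event(my_dbf, 3, "init done rc=%d", 0);
	return 0;
}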
@@ -99,6 +99,7 @@ config S390
 	select CLONE_BACKWARDS2
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CPU_DEVICES if !SMP
+	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL_OLD
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
@@ -237,6 +238,67 @@ config MARCH_ZEC12
 
 endchoice
 
+config MARCH_G5_TUNE
+	def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT
+
+config MARCH_Z900_TUNE
+	def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT
+
+config MARCH_Z990_TUNE
+	def_bool TUNE_Z990 || MARCH_Z990 && TUNE_DEFAULT
+
+config MARCH_Z9_109_TUNE
+	def_bool TUNE_Z9_109 || MARCH_Z9_109 && TUNE_DEFAULT
+
+config MARCH_Z10_TUNE
+	def_bool TUNE_Z10 || MARCH_Z10 && TUNE_DEFAULT
+
+config MARCH_Z196_TUNE
+	def_bool TUNE_Z196 || MARCH_Z196 && TUNE_DEFAULT
+
+config MARCH_ZEC12_TUNE
+	def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT
+
+choice
+	prompt "Tune code generation"
+	default TUNE_DEFAULT
+	help
+	  Cause the compiler to tune (-mtune) the generated code for a machine.
+	  This will make the code run faster on the selected machine but
+	  somewhat slower on other machines.
+	  This option only changes how the compiler emits instructions, not the
+	  selection of instructions itself, so the resulting kernel will run on
+	  all other machines.
+
+config TUNE_DEFAULT
+	bool "Default"
+	help
+	  Tune the generated code for the target processor for which the kernel
+	  will be compiled.
+
+config TUNE_G5
+	bool "System/390 model G5 and G6"
+
+config TUNE_Z900
+	bool "IBM zSeries model z800 and z900"
+
+config TUNE_Z990
+	bool "IBM zSeries model z890 and z990"
+
+config TUNE_Z9_109
+	bool "IBM System z9"
+
+config TUNE_Z10
+	bool "IBM System z10"
+
+config TUNE_Z196
+	bool "IBM zEnterprise 114 and 196"
+
+config TUNE_ZEC12
+	bool "IBM zBC12 and zEC12"
+
+endchoice
+
 config 64BIT
 	def_bool y
 	prompt "64 bit kernel"
...
@@ -35,13 +35,21 @@ endif
 
 export LD_BFD
 
-cflags-$(CONFIG_MARCH_G5)     += $(call cc-option,-march=g5)
-cflags-$(CONFIG_MARCH_Z900)   += $(call cc-option,-march=z900)
-cflags-$(CONFIG_MARCH_Z990)   += $(call cc-option,-march=z990)
-cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
-cflags-$(CONFIG_MARCH_Z10)    += $(call cc-option,-march=z10)
-cflags-$(CONFIG_MARCH_Z196)   += $(call cc-option,-march=z196)
-cflags-$(CONFIG_MARCH_ZEC12)  += $(call cc-option,-march=zEC12)
+cflags-$(CONFIG_MARCH_G5)     += -march=g5
+cflags-$(CONFIG_MARCH_Z900)   += -march=z900
+cflags-$(CONFIG_MARCH_Z990)   += -march=z990
+cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109
+cflags-$(CONFIG_MARCH_Z10)    += -march=z10
+cflags-$(CONFIG_MARCH_Z196)   += -march=z196
+cflags-$(CONFIG_MARCH_ZEC12)  += -march=zEC12
+
+cflags-$(CONFIG_MARCH_G5_TUNE)     += -mtune=g5
+cflags-$(CONFIG_MARCH_Z900_TUNE)   += -mtune=z900
+cflags-$(CONFIG_MARCH_Z990_TUNE)   += -mtune=z990
+cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
+cflags-$(CONFIG_MARCH_Z10_TUNE)    += -mtune=z10
+cflags-$(CONFIG_MARCH_Z196_TUNE)   += -mtune=z196
+cflags-$(CONFIG_MARCH_ZEC12_TUNE)  += -mtune=zEC12
 
 #KBUILD_IMAGE is necessary for make rpm
 KBUILD_IMAGE	:=arch/s390/boot/image
...
@@ -48,9 +48,9 @@ static struct platform_device *appldata_pdev;
  * /proc entries (sysctl)
  */
 static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
-static int appldata_timer_handler(ctl_table *ctl, int write,
+static int appldata_timer_handler(struct ctl_table *ctl, int write,
 				  void __user *buffer, size_t *lenp, loff_t *ppos);
-static int appldata_interval_handler(ctl_table *ctl, int write,
+static int appldata_interval_handler(struct ctl_table *ctl, int write,
 				     void __user *buffer,
 				     size_t *lenp, loff_t *ppos);
@@ -201,10 +201,10 @@ static void __appldata_vtimer_setup(int cmd)
  * Start/Stop timer, show status of timer (0 = not active, 1 = active)
  */
 static int
-appldata_timer_handler(ctl_table *ctl, int write,
+appldata_timer_handler(struct ctl_table *ctl, int write,
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int len;
+	unsigned int len;
 	char buf[2];
 
 	if (!*lenp || *ppos) {
@@ -243,10 +243,11 @@ appldata_timer_handler(ctl_table *ctl, int write,
  * current timer interval.
  */
 static int
-appldata_interval_handler(ctl_table *ctl, int write,
+appldata_interval_handler(struct ctl_table *ctl, int write,
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int len, interval;
+	unsigned int len;
+	int interval;
 	char buf[16];
 
 	if (!*lenp || *ppos) {
@@ -286,11 +287,12 @@ appldata_interval_handler(ctl_table *ctl, int write,
  * monitoring (0 = not in process, 1 = in process)
  */
 static int
-appldata_generic_handler(ctl_table *ctl, int write,
+appldata_generic_handler(struct ctl_table *ctl, int write,
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct appldata_ops *ops = NULL, *tmp_ops;
-	int rc, len, found;
+	unsigned int len;
+	int rc, found;
 	char buf[2];
 	struct list_head *lh;
...
# CONFIG_SWAP is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_MARCH_Z9_109=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
# CONFIG_HOTPLUG_CPU is not set
CONFIG_HZ_100=y
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
# CONFIG_CHECK_STACK is not set
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
CONFIG_CRASH_DUMP=y
CONFIG_ZFCPDUMP=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SECCOMP is not set
# CONFIG_IUCV is not set
CONFIG_ATM=y
CONFIG_ATM_LANE=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV_XPRAM is not set
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
CONFIG_ENCLOSURE_SERVICES=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_ENCLOSURE=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SRP_ATTRS=y
CONFIG_ZFCP=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_HVC_IUCV is not set
CONFIG_RAW_DRIVER=y
# CONFIG_SCLP_ASYNC is not set
# CONFIG_HMC_DRV is not set
# CONFIG_S390_TAPE is not set
# CONFIG_VMCP is not set
# CONFIG_MONWRITER is not set
# CONFIG_S390_VMUR is not set
# CONFIG_HID is not set
CONFIG_MEMSTICK=y
CONFIG_MEMSTICK_DEBUG=y
CONFIG_MEMSTICK_UNSAFE_RESUME=y
CONFIG_MSPRO_BLOCK=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
# CONFIG_INOTIFY_USER is not set
CONFIG_CONFIGFS_FS=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
# CONFIG_STRICT_DEVMEM is not set
CONFIG_XZ_DEC_X86=y
CONFIG_XZ_DEC_POWERPC=y
CONFIG_XZ_DEC_IA64=y
CONFIG_XZ_DEC_ARM=y
CONFIG_XZ_DEC_ARMTHUMB=y
CONFIG_XZ_DEC_SPARC=y
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS_FS is not set
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
@@ -725,6 +725,8 @@ static struct crypto_alg xts_aes_alg = {
 	}
 };
 
+static int xts_aes_alg_reg;
+
 static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 			   unsigned int key_len)
 {
@@ -846,6 +848,8 @@ static struct crypto_alg ctr_aes_alg = {
 	}
 };
 
+static int ctr_aes_alg_reg;
+
 static int __init aes_s390_init(void)
 {
 	int ret;
@@ -884,6 +888,7 @@ static int __init aes_s390_init(void)
 		ret = crypto_register_alg(&xts_aes_alg);
 		if (ret)
 			goto xts_aes_err;
+		xts_aes_alg_reg = 1;
 	}
 
 	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
@@ -902,6 +907,7 @@ static int __init aes_s390_init(void)
 			free_page((unsigned long) ctrblk);
 			goto ctr_aes_err;
 		}
+		ctr_aes_alg_reg = 1;
 	}
 out:
@@ -921,9 +927,12 @@ static int __init aes_s390_init(void)
 
 static void __exit aes_s390_fini(void)
 {
-	crypto_unregister_alg(&ctr_aes_alg);
-	free_page((unsigned long) ctrblk);
-	crypto_unregister_alg(&xts_aes_alg);
+	if (ctr_aes_alg_reg) {
+		crypto_unregister_alg(&ctr_aes_alg);
+		free_page((unsigned long) ctrblk);
+	}
+	if (xts_aes_alg_reg)
+		crypto_unregister_alg(&xts_aes_alg);
 	crypto_unregister_alg(&cbc_aes_alg);
 	crypto_unregister_alg(&ecb_aes_alg);
 	crypto_unregister_alg(&aes_alg);
...
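
Reduced to a generic sketch (all names invented), the aes_s390 fix above is
the usual "only undo what init actually did" pattern for conditionally
registered module resources:

static int feature_registered;	/* set only after successful registration */

static int __init demo_init(void)
{
	if (hw_has_feature()) {		/* cf. crypt_s390_func_available() */
		int ret = register_feature();

		if (ret)
			return ret;
		feature_registered = 1;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	/* the old code unregistered unconditionally and could therefore
	 * unregister algorithms that were never registered */
	if (feature_registered)
		unregister_feature();
}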
@@ -38,13 +38,14 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
-# CONFIG_EFI_PARTITION is not set
 CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z196=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -152,6 +153,7 @@ CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
...
@@ -19,21 +19,50 @@
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __CS_LOOP(ptr, op_val, op_string) ({ \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR	"lao"
+#define __ATOMIC_AND	"lan"
+#define __ATOMIC_ADD	"laa"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+({ \
+	int old_val; \
+ \
+	typecheck(atomic_t *, ptr); \
+	asm volatile( \
+		op_string "	%0,%2,%1\n" \
+		: "=d" (old_val), "+Q" ((ptr)->counter) \
+		: "d" (op_val) \
+		: "cc", "memory"); \
+	old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR	"or"
+#define __ATOMIC_AND	"nr"
+#define __ATOMIC_ADD	"ar"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+({ \
 	int old_val, new_val; \
+ \
+	typecheck(atomic_t *, ptr); \
 	asm volatile( \
 		"	l	%0,%2\n" \
 		"0:	lr	%1,%0\n" \
 		op_string "	%1,%3\n" \
 		"	cs	%0,%1,%2\n" \
 		"	jl	0b" \
-		: "=&d" (old_val), "=&d" (new_val), \
-		  "=Q" (((atomic_t *)(ptr))->counter) \
-		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+		: "d" (op_val) \
 		: "cc", "memory"); \
-	new_val; \
+	old_val; \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;
@@ -53,32 +82,45 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __CS_LOOP(v, i, "ar");
+	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
 }
-#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
-#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v)			atomic_add_return(1, _v)
-#define atomic_inc_return(_v)		atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-	return __CS_LOOP(v, i, "sr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+		asm volatile(
+			"asi	%0,%1\n"
+			: "+Q" (v->counter)
+			: "i" (i)
+			: "cc", "memory");
+	} else {
+		atomic_add_return(i, v);
+	}
+#else
+	atomic_add_return(i, v);
+#endif
 }
-#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
+
+#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)			atomic_add(1, _v)
+#define atomic_inc_return(_v)		atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
+#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
 #define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v)			atomic_sub_return(1, _v)
+#define atomic_dec(_v)			atomic_sub(1, _v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	__CS_LOOP(v, ~mask, "nr");
+	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
 }
 
-static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	__CS_LOOP(v, mask, "or");
+	__ATOMIC_LOOP(v, mask, __ATOMIC_OR);
 }
 
 #define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
@@ -87,8 +129,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	asm volatile(
 		"	cs	%0,%2,%1"
-		: "+d" (old), "=Q" (v->counter)
-		: "d" (new), "Q" (v->counter)
+		: "+d" (old), "+Q" (v->counter)
+		: "d" (new)
 		: "cc", "memory");
 	return old;
 }
@@ -109,27 +151,56 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
-#undef __CS_LOOP
+#undef __ATOMIC_LOOP
 
 #define ATOMIC64_INIT(i)  { (i) }
 
 #ifdef CONFIG_64BIT
 
-#define __CSG_LOOP(ptr, op_val, op_string) ({ \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR	"laog"
+#define __ATOMIC64_AND	"lang"
+#define __ATOMIC64_ADD	"laag"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+({ \
+	long long old_val; \
+ \
+	typecheck(atomic64_t *, ptr); \
+	asm volatile( \
+		op_string "	%0,%2,%1\n" \
+		: "=d" (old_val), "+Q" ((ptr)->counter) \
+		: "d" (op_val) \
+		: "cc", "memory"); \
+	old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR	"ogr"
+#define __ATOMIC64_AND	"ngr"
+#define __ATOMIC64_ADD	"agr"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+({ \
 	long long old_val, new_val; \
+ \
+	typecheck(atomic64_t *, ptr); \
 	asm volatile( \
 		"	lg	%0,%2\n" \
 		"0:	lgr	%1,%0\n" \
 		op_string "	%1,%3\n" \
 		"	csg	%0,%1,%2\n" \
 		"	jl	0b" \
-		: "=&d" (old_val), "=&d" (new_val), \
-		  "=Q" (((atomic_t *)(ptr))->counter) \
-		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+		: "d" (op_val) \
 		: "cc", "memory"); \
-	new_val; \
+	old_val; \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline long long atomic64_read(const atomic64_t *v)
 {
 	long long c;
@@ -149,22 +220,17 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	return __CSG_LOOP(v, i, "agr");
-}
-
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-	return __CSG_LOOP(v, i, "sgr");
+	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
 }
 
 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
-	__CSG_LOOP(v, ~mask, "ngr");
+	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
 }
 
 static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
-	__CSG_LOOP(v, mask, "ogr");
+	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
 }
 
 #define atomic64_xchg(v, new)		(xchg(&((v)->counter), new))
@@ -174,13 +240,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 {
 	asm volatile(
 		"	csg	%0,%2,%1"
-		: "+d" (old), "=Q" (v->counter)
-		: "d" (new), "Q" (v->counter)
+		: "+d" (old), "+Q" (v->counter)
+		: "d" (new)
 		: "cc", "memory");
 	return old;
 }
 
-#undef __CSG_LOOP
+#undef __ATOMIC64_LOOP
 
 #else /* CONFIG_64BIT */
 
@@ -216,8 +282,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
 		"	lm	%0,%N0,%1\n"
 		"0:	cds	%0,%2,%1\n"
 		"	jl	0b\n"
-		: "=&d" (rp_old), "=Q" (v->counter)
-		: "d" (rp_new), "Q" (v->counter)
+		: "=&d" (rp_old), "+Q" (v->counter)
+		: "d" (rp_new)
 		: "cc");
 	return rp_old.pair;
 }
@@ -230,8 +296,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 	asm volatile(
 		"	cds	%0,%2,%1"
-		: "+&d" (rp_old), "=Q" (v->counter)
-		: "d" (rp_new), "Q" (v->counter)
+		: "+&d" (rp_old), "+Q" (v->counter)
+		: "d" (rp_new)
 		: "cc");
 	return rp_old.pair;
 }
@@ -248,17 +314,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	return new;
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-	long long old, new;
-
-	do {
-		old = atomic64_read(v);
-		new = old - i;
-	} while (atomic64_cmpxchg(v, old, new) != old);
-	return new;
-}
-
 static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
 {
 	long long old, new;
@@ -281,7 +336,24 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
 
 #endif /* CONFIG_64BIT */
 
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+		asm volatile(
+			"agsi	%0,%1\n"
+			: "+Q" (v->counter)
+			: "i" (i)
+			: "cc", "memory");
+	} else {
+		atomic64_add_return(i, v);
+	}
+#else
+	atomic64_add_return(i, v);
+#endif
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 {
 	long long c, old;
 
@@ -289,7 +361,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 	for (;;) {
 		if (unlikely(c == u))
 			break;
-		old = atomic64_cmpxchg(v, c, c + a);
+		old = atomic64_cmpxchg(v, c, c + i);
 		if (likely(old == c))
 			break;
 		c = old;
@@ -314,14 +386,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	return dec;
 }
 
-#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
 #define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc(_v)		atomic64_add(1, _v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
-#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec(_v)		atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
 #define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
...
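
How the reworked atomics behave on a CONFIG_HAVE_MARCH_Z196_FEATURES build,
as a small kernel-style sketch (on older machines every call below falls back
to the compare-and-swap loop):

#include <linux/atomic.h>

static atomic_t demo_ref = ATOMIC_INIT(0);

static void demo(void)
{
	atomic_add(1, &demo_ref);	/* constant in [-128, 127]: one "asi" */
	atomic_add(4096, &demo_ref);	/* out of range: "laa" via add_return */
	atomic_sub(1, &demo_ref);	/* now just atomic_add(-1, ...) */
	WARN_ON(atomic_read(&demo_ref) != 4096);
}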
@@ -22,6 +22,7 @@
 #define PSW32_MASK_ASC		0x0000C000UL
 #define PSW32_MASK_CC		0x00003000UL
 #define PSW32_MASK_PM		0x00000f00UL
+#define PSW32_MASK_RI		0x00000080UL
 
 #define PSW32_MASK_USER		0x0000FF00UL
 
@@ -35,7 +36,9 @@
 #define PSW32_ASC_SECONDARY	0x00008000UL
 #define PSW32_ASC_HOME		0x0000C000UL
 
-extern u32 psw32_user_bits;
+#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
			 PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
			 PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
 
 #define COMPAT_USER_HZ		100
 #define COMPAT_UTS_MACHINE	"s390\0\0\0\0"
...
@@ -8,69 +8,59 @@
 #define __ASM_CTL_REG_H
 
 #ifdef CONFIG_64BIT
-
-#define __ctl_load(array, low, high) ({ \
-	typedef struct { char _[sizeof(array)]; } addrtype; \
-	asm volatile( \
-		"	lctlg	%1,%2,%0\n" \
-		: : "Q" (*(addrtype *)(&array)), \
-		    "i" (low), "i" (high)); \
-	})
-
-#define __ctl_store(array, low, high) ({ \
-	typedef struct { char _[sizeof(array)]; } addrtype; \
-	asm volatile( \
-		"	stctg	%1,%2,%0\n" \
-		: "=Q" (*(addrtype *)(&array)) \
-		: "i" (low), "i" (high)); \
-	})
-
-#else /* CONFIG_64BIT */
-
-#define __ctl_load(array, low, high) ({ \
-	typedef struct { char _[sizeof(array)]; } addrtype; \
-	asm volatile( \
-		"	lctl	%1,%2,%0\n" \
-		: : "Q" (*(addrtype *)(&array)), \
-		    "i" (low), "i" (high)); \
-	})
-
-#define __ctl_store(array, low, high) ({ \
-	typedef struct { char _[sizeof(array)]; } addrtype; \
-	asm volatile( \
-		"	stctl	%1,%2,%0\n" \
-		: "=Q" (*(addrtype *)(&array)) \
-		: "i" (low), "i" (high)); \
-	})
-
-#endif /* CONFIG_64BIT */
-
-#define __ctl_set_bit(cr, bit) ({ \
-	unsigned long __dummy; \
-	__ctl_store(__dummy, cr, cr); \
-	__dummy |= 1UL << (bit); \
-	__ctl_load(__dummy, cr, cr); \
-})
-
-#define __ctl_clear_bit(cr, bit) ({ \
-	unsigned long __dummy; \
-	__ctl_store(__dummy, cr, cr); \
-	__dummy &= ~(1UL << (bit)); \
-	__ctl_load(__dummy, cr, cr); \
-})
+# define __CTL_LOAD	"lctlg"
+# define __CTL_STORE	"stctg"
+#else
+# define __CTL_LOAD	"lctl"
+# define __CTL_STORE	"stctl"
+#endif
+
+#define __ctl_load(array, low, high) { \
+	typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+	asm volatile( \
+		__CTL_LOAD " %1,%2,%0\n" \
+		: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+}
+
+#define __ctl_store(array, low, high) { \
+	typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+	asm volatile( \
+		__CTL_STORE " %1,%2,%0\n" \
+		: "=Q" (*(addrtype *)(&array)) \
+		: "i" (low), "i" (high)); \
+}
+
+static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+	unsigned long reg;
+
+	__ctl_store(reg, cr, cr);
+	reg |= 1UL << bit;
+	__ctl_load(reg, cr, cr);
+}
+
+static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+	unsigned long reg;
+
+	__ctl_store(reg, cr, cr);
+	reg &= ~(1UL << bit);
+	__ctl_load(reg, cr, cr);
+}
+
+void smp_ctl_set_bit(int cr, int bit);
+void smp_ctl_clear_bit(int cr, int bit);
 
 #ifdef CONFIG_SMP
-
-extern void smp_ctl_set_bit(int cr, int bit);
-extern void smp_ctl_clear_bit(int cr, int bit);
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
+# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
 #else
-
-#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif /* CONFIG_SMP */
+# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+#endif
 
 #endif /* __ASM_CTL_REG_H */
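
The new BUILD_BUG_ON in __ctl_load/__ctl_store turns a mismatch between the
array size and the control register range into a compile error instead of a
silent out-of-bounds access. A sketch (the variables are invented):

unsigned long cr0_1[2];
unsigned long cr7;

__ctl_store(cr0_1, 0, 1);	/* ok: two registers into two longs */
__ctl_load(cr7, 7, 7);		/* ok: one register from one long */
/* __ctl_store(cr7, 0, 1); would no longer compile, because
 * sizeof(cr7) != (1 - 0 + 1) * sizeof(long) */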
@@ -107,6 +107,11 @@ void debug_set_level(debug_info_t* id, int new_level);
 void debug_set_critical(void);
 void debug_stop_all(void);
 
+static inline bool debug_level_enabled(debug_info_t* id, int level)
+{
+	return level <= id->level;
+}
+
 static inline debug_entry_t*
 debug_event(debug_info_t* id, int level, void* data, int length)
 {
...
/*
* Disassemble s390 instructions.
*
* Copyright IBM Corp. 2007
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
#ifndef __ASM_S390_DIS_H__
#define __ASM_S390_DIS_H__
/* Type of operand */
#define OPERAND_GPR 0x1 /* Operand printed as %rx */
#define OPERAND_FPR 0x2 /* Operand printed as %fx */
#define OPERAND_AR 0x4 /* Operand printed as %ax */
#define OPERAND_CR 0x8 /* Operand printed as %cx */
#define OPERAND_DISP 0x10 /* Operand printed as displacement */
#define OPERAND_BASE 0x20 /* Operand printed as base register */
#define OPERAND_INDEX 0x40 /* Operand printed as index register */
#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
struct s390_operand {
int bits; /* The number of bits in the operand. */
int shift; /* The number of bits to shift. */
int flags; /* One bit syntax flags. */
};
struct s390_insn {
const char name[5];
unsigned char opfrag;
unsigned char format;
};
static inline int insn_length(unsigned char code)
{
return ((((int) code + 64) >> 7) + 1) << 1;
}
void show_code(struct pt_regs *regs);
void print_fn_code(unsigned char *code, unsigned long len);
int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
struct s390_insn *find_insn(unsigned char *code);
static inline int is_known_insn(unsigned char *code)
{
return !!find_insn(code);
}
#endif /* __ASM_S390_DIS_H__ */
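
The insn_length() helper above relies on the z/Architecture length code in
the two leftmost bits of the first opcode byte: 00 means a 2-byte, 01 and 10
a 4-byte, and 11 a 6-byte instruction. A stand-alone check of the arithmetic
(example opcodes chosen for illustration):

#include <assert.h>

static int insn_length(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
	assert(insn_length(0x07) == 2);	/* 00xxxxxx, e.g. bcr */
	assert(insn_length(0x47) == 4);	/* 01xxxxxx, e.g. bc */
	assert(insn_length(0xa7) == 4);	/* 10xxxxxx, e.g. lhi */
	assert(insn_length(0xc0) == 6);	/* 11xxxxxx, e.g. larl */
	return 0;
}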
@@ -12,9 +12,9 @@
 #define TCW_FORMAT_DEFAULT		0
 #define TCW_TIDAW_FORMAT_DEFAULT	0
-#define TCW_FLAGS_INPUT_TIDA		1 << (23 - 5)
-#define TCW_FLAGS_TCCB_TIDA		1 << (23 - 6)
-#define TCW_FLAGS_OUTPUT_TIDA		1 << (23 - 7)
+#define TCW_FLAGS_INPUT_TIDA		(1 << (23 - 5))
+#define TCW_FLAGS_TCCB_TIDA		(1 << (23 - 6))
+#define TCW_FLAGS_OUTPUT_TIDA		(1 << (23 - 7))
 #define TCW_FLAGS_TIDAW_FORMAT(x)	((x) & 3) << (23 - 9)
 #define TCW_FLAGS_GET_TIDAW_FORMAT(x)	(((x) >> (23 - 9)) & 3)
 
@@ -54,11 +54,11 @@ struct tcw {
 	u32 intrg;
 } __attribute__ ((packed, aligned(64)));
 
-#define TIDAW_FLAGS_LAST		1 << (7 - 0)
-#define TIDAW_FLAGS_SKIP		1 << (7 - 1)
-#define TIDAW_FLAGS_DATA_INT		1 << (7 - 2)
-#define TIDAW_FLAGS_TTIC		1 << (7 - 3)
-#define TIDAW_FLAGS_INSERT_CBC		1 << (7 - 4)
+#define TIDAW_FLAGS_LAST		(1 << (7 - 0))
+#define TIDAW_FLAGS_SKIP		(1 << (7 - 1))
+#define TIDAW_FLAGS_DATA_INT		(1 << (7 - 2))
+#define TIDAW_FLAGS_TTIC		(1 << (7 - 3))
+#define TIDAW_FLAGS_INSERT_CBC		(1 << (7 - 4))
 
 /**
  * struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
@@ -106,9 +106,9 @@ struct tsa_ddpc {
 	u8 sense[32];
 } __attribute__ ((packed));
 
-#define TSA_INTRG_FLAGS_CU_STATE_VALID		1 << (7 - 0)
-#define TSA_INTRG_FLAGS_DEV_STATE_VALID		1 << (7 - 1)
-#define TSA_INTRG_FLAGS_OP_STATE_VALID		1 << (7 - 2)
+#define TSA_INTRG_FLAGS_CU_STATE_VALID		(1 << (7 - 0))
+#define TSA_INTRG_FLAGS_DEV_STATE_VALID		(1 << (7 - 1))
+#define TSA_INTRG_FLAGS_OP_STATE_VALID		(1 << (7 - 2))
 
 /**
  * struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
@@ -140,10 +140,10 @@ struct tsa_intrg {
 #define TSB_FORMAT_DDPC		2
 #define TSB_FORMAT_INTRG	3
 
-#define TSB_FLAGS_DCW_OFFSET_VALID	1 << (7 - 0)
-#define TSB_FLAGS_COUNT_VALID		1 << (7 - 1)
-#define TSB_FLAGS_CACHE_MISS		1 << (7 - 2)
-#define TSB_FLAGS_TIME_VALID		1 << (7 - 3)
+#define TSB_FLAGS_DCW_OFFSET_VALID	(1 << (7 - 0))
+#define TSB_FLAGS_COUNT_VALID		(1 << (7 - 1))
+#define TSB_FLAGS_CACHE_MISS		(1 << (7 - 2))
+#define TSB_FLAGS_TIME_VALID		(1 << (7 - 3))
 #define TSB_FLAGS_FORMAT(x)		((x) & 7)
 #define TSB_FORMAT(t)			((t)->flags & 7)
 
@@ -179,9 +179,9 @@ struct tsb {
 #define DCW_INTRG_RCQ_PRIMARY		1
 #define DCW_INTRG_RCQ_SECONDARY		2
 
-#define DCW_INTRG_FLAGS_MPM		1 < (7 - 0)
-#define DCW_INTRG_FLAGS_PPR		1 < (7 - 1)
-#define DCW_INTRG_FLAGS_CRIT		1 < (7 - 2)
+#define DCW_INTRG_FLAGS_MPM		(1 << (7 - 0))
+#define DCW_INTRG_FLAGS_PPR		(1 << (7 - 1))
+#define DCW_INTRG_FLAGS_CRIT		(1 << (7 - 2))
 
 /**
  * struct dcw_intrg_data - Interrogate DCW data
@@ -216,7 +216,7 @@ struct dcw_intrg_data {
 	u8  prog_data[0];
 } __attribute__ ((packed));
 
-#define DCW_FLAGS_CC		1 << (7 - 1)
+#define DCW_FLAGS_CC		(1 << (7 - 1))
 
 #define DCW_CMD_WRITE		0x01
 #define DCW_CMD_READ		0x02
...
@@ -7,6 +7,7 @@
 #ifndef _ASM_S390_IPL_H
 #define _ASM_S390_IPL_H
 
+#include <asm/lowcore.h>
 #include <asm/types.h>
 #include <asm/cio.h>
 #include <asm/setup.h>
@@ -86,7 +87,14 @@ struct ipl_parameter_block {
  */
 extern u32 ipl_flags;
 extern u32 dump_prefix_page;
-extern unsigned int zfcpdump_prefix_array[];
+
+struct dump_save_areas {
+	struct save_area **areas;
+	int count;
+};
+
+extern struct dump_save_areas dump_save_areas;
+struct save_area *dump_save_area_create(int cpu);
 
 extern void do_reipl(void);
 extern void do_halt(void);
...
@@ -40,14 +40,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (s390_user_mode != HOME_SPACE_MODE) {
-		/* Load primary space page table origin. */
-		asm volatile(LCTL_OPCODE" 1,1,%0\n"
-			     : : "m" (S390_lowcore.user_asce) );
-	} else
-		/* Load home space page table origin. */
-		asm volatile(LCTL_OPCODE" 13,13,%0"
-			     : : "m" (S390_lowcore.user_asce) );
+	/* Load primary space page table origin. */
+	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
 	set_fs(current->thread.mm_segment);
 }
...
@@ -30,7 +30,12 @@
 #include <asm/setup.h>
 #ifndef __ASSEMBLY__
 
-void storage_key_init_range(unsigned long start, unsigned long end);
+static inline void storage_key_init_range(unsigned long start, unsigned long end)
+{
+#if PAGE_DEFAULT_KEY
+	__storage_key_init_range(start, end);
+#endif
+}
 
 static inline void clear_page(void *page)
 {
...
@@ -6,14 +6,9 @@
 extern debug_info_t *pci_debug_msg_id;
 extern debug_info_t *pci_debug_err_id;
 
-#ifdef CONFIG_PCI_DEBUG
 #define zpci_dbg(imp, fmt, args...) \
 	debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
-#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(imp, fmt, args...) do { } while (0)
-#endif
 
 #define zpci_err(text...) \
 	do { \
 		char debug_buffer[16]; \
...
@@ -54,11 +54,9 @@
 struct zpci_fib {
 	u32 fmt		:  8;	/* format */
 	u32		: 24;
-	u32 reserved1;
+	u32		: 32;
 	u8 fc;			/* function controls */
-	u8 reserved2;
-	u16 reserved3;
-	u32 reserved4;
+	u64		: 56;
 	u64 pba;		/* PCI base address */
 	u64 pal;		/* PCI address limit */
 	u64 iota;		/* I/O Translation Anchor */
@@ -70,14 +68,13 @@ struct zpci_fib {
 	u32 sum		:  1;	/* Adapter int summary bit enabled */
 	u32		:  1;
 	u32 aisbo	:  6;	/* Adapter int summary bit offset */
-	u32 reserved5;
+	u32		: 32;
 	u64 aibv;		/* Adapter int bit vector address */
 	u64 aisb;		/* Adapter int summary bit address */
 	u64 fmb_addr;		/* Function measurement block address and key */
-	u64 reserved6;
-	u64 reserved7;
-} __packed;
+	u32		: 32;
+	u32 gd;
+} __packed __aligned(8);
 
 int zpci_mod_fc(u64 req, struct zpci_fib *fib);
 int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
...
@@ -10,16 +10,22 @@
  */
 #define __my_cpu_offset S390_lowcore.percpu_offset
 
+#ifdef CONFIG_64BIT
+
 /*
  * For 64 bit module code, the module may be more than 4G above the
  * per cpu area, use weak definitions to force the compiler to
  * generate external references.
  */
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(MODULE)
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
-#define arch_this_cpu_to_op(pcp, val, op) \
+/*
+ * We use a compare-and-swap loop since that uses less cpu cycles than
+ * disabling and enabling interrupts like the generic variant would do.
+ */
+#define arch_this_cpu_to_op_simple(pcp, val, op) \
 ({ \
 	typedef typeof(pcp) pcp_op_T__; \
 	pcp_op_T__ old__, new__, prev__; \
@@ -30,42 +36,101 @@
 	do { \
 		old__ = prev__; \
 		new__ = old__ op (val); \
-		switch (sizeof(*ptr__)) { \
-		case 8: \
-			prev__ = cmpxchg64(ptr__, old__, new__); \
-			break; \
-		default: \
-			prev__ = cmpxchg(ptr__, old__, new__); \
-		} \
+		prev__ = cmpxchg(ptr__, old__, new__); \
 	} while (prev__ != old__); \
 	preempt_enable(); \
 	new__; \
 })
 
-#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-
-#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-
-#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-
-#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-
-#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_add_1(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_2(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_1(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_2(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_1(pcp, val)		arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_2(pcp, val)		arch_this_cpu_to_op_simple(pcp, val, |)
+
+#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define this_cpu_add_4(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_8(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_4(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_8(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_4(pcp, val)		arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_8(pcp, val)		arch_this_cpu_to_op_simple(pcp, val, |)
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
+{ \
+	typedef typeof(pcp) pcp_op_T__; \
+	pcp_op_T__ val__ = (val); \
+	pcp_op_T__ old__, *ptr__; \
+	preempt_disable(); \
+	ptr__ = __this_cpu_ptr(&(pcp)); \
+	if (__builtin_constant_p(val__) && \
+	    ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
+		asm volatile( \
+			op2 "	%[ptr__],%[val__]\n" \
+			: [ptr__] "+Q" (*ptr__) \
+			: [val__] "i" ((szcast)val__) \
+			: "cc"); \
+	} else { \
+		asm volatile( \
+			op1 "	%[old__],%[val__],%[ptr__]\n" \
+			: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+			: [val__] "d" (val__) \
+			: "cc"); \
+	} \
+	preempt_enable(); \
+}
+
+#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
+
+#define arch_this_cpu_add_return(pcp, val, op) \
+({ \
+	typedef typeof(pcp) pcp_op_T__; \
+	pcp_op_T__ val__ = (val); \
+	pcp_op_T__ old__, *ptr__; \
+	preempt_disable(); \
+	ptr__ = __this_cpu_ptr(&(pcp)); \
+	asm volatile( \
+		op " %[old__],%[val__],%[ptr__]\n" \
+		: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+		: [val__] "d" (val__) \
+		: "cc"); \
+	preempt_enable(); \
+	old__ + val__; \
+})
+
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
+
+#define arch_this_cpu_to_op(pcp, val, op) \
+{ \
+	typedef typeof(pcp) pcp_op_T__; \
+	pcp_op_T__ val__ = (val); \
+	pcp_op_T__ old__, *ptr__; \
+	preempt_disable(); \
+	ptr__ = __this_cpu_ptr(&(pcp)); \
+	asm volatile( \
+		op " %[old__],%[val__],%[ptr__]\n" \
+		: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
+		: [val__] "d" (val__) \
+		: "cc"); \
+	preempt_enable(); \
+}
+
+#define this_cpu_and_4(pcp, val)	arch_this_cpu_to_op(pcp, val, "lan")
+#define this_cpu_and_8(pcp, val)	arch_this_cpu_to_op(pcp, val, "lang")
+#define this_cpu_or_4(pcp, val)		arch_this_cpu_to_op(pcp, val, "lao")
+#define this_cpu_or_8(pcp, val)		arch_this_cpu_to_op(pcp, val, "laog")
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #define arch_this_cpu_cmpxchg(pcp, oval, nval) \
 ({ \
@@ -74,13 +139,7 @@
 	pcp_op_T__ *ptr__; \
 	preempt_disable(); \
 	ptr__ = __this_cpu_ptr(&(pcp)); \
-	switch (sizeof(*ptr__)) { \
-	case 8: \
-		ret__ = cmpxchg64(ptr__, oval, nval); \
-		break; \
-	default: \
-		ret__ = cmpxchg(ptr__, oval, nval); \
-	} \
+	ret__ = cmpxchg(ptr__, oval, nval); \
 	preempt_enable(); \
 	ret__; \
 })
@@ -104,9 +163,7 @@
 #define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#ifdef CONFIG_64BIT
 #define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#endif
 
 #define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
 ({ \
@@ -124,9 +181,9 @@
 })
 
 #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
-#ifdef CONFIG_64BIT
 #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif
+
+#endif /* CONFIG_64BIT */
 
 #include <asm-generic/percpu.h>
...
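
A minimal kernel-style usage sketch of the reworked percpu ops (the variable
name is invented; on pre-z196 machines both calls go through the
cmpxchg-based arch_this_cpu_to_op_simple() instead):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);

static void demo(void)
{
	this_cpu_add(demo_count, 1);	/* constant fits a signed byte:
					   "agsi" on z196 and newer */
	this_cpu_or(demo_count, 0x10);	/* "laog" for 8-byte values */
}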
@@ -134,19 +134,17 @@ struct stack_frame {
  * Do necessary setup to start up a new thread.
  */
 #define start_thread(regs, new_psw, new_stackp) do { \
-	regs->psw.mask	= psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \
+	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE; \
 	regs->gprs[15]	= new_stackp; \
 	execve_tail(); \
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do { \
-	regs->psw.mask	= psw_user_bits | PSW_MASK_BA; \
+	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_BA; \
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE; \
 	regs->gprs[15]	= new_stackp; \
-	__tlb_flush_mm(current->mm); \
 	crst_table_downgrade(current->mm, 1UL << 31); \
-	update_mm(current->mm, current); \
 	execve_tail(); \
 } while (0)
@@ -169,17 +167,15 @@ extern void release_thread(struct task_struct *);
  */
 extern unsigned long thread_saved_pc(struct task_struct *t);
 
-extern void show_code(struct pt_regs *regs);
-extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
-			    unsigned int len);
-
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
         (task_stack_page(tsk) + THREAD_SIZE) - 1)
 #define KSTK_EIP(tsk)	(task_pt_regs(tsk)->psw.addr)
 #define KSTK_ESP(tsk)	(task_pt_regs(tsk)->gprs[15])
 
+/* Has task runtime instrumentation enabled ? */
+#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
+
 static inline unsigned short stap(void)
 {
 	unsigned short cpu_address;
@@ -348,9 +344,9 @@ __set_psw_mask(unsigned long mask)
 }
 
 #define local_mcck_enable() \
-	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+	__set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
 #define local_mcck_disable() \
-	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
+	__set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
 
 /*
  * Basic Machine Check/Program Check Handler.
...
@@ -10,8 +10,11 @@
 
 #ifndef __ASSEMBLY__
 
-extern long psw_kernel_bits;
-extern long psw_user_bits;
+#define PSW_KERNEL_BITS	(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
			 PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_USER_BITS	(PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
			 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
			 PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
 
 /*
  * The pt_regs struct defines the way the registers are stored on
...
@@ -48,13 +48,6 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
 void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 		     unsigned long size);
 
-#define PRIMARY_SPACE_MODE	0
-#define ACCESS_REGISTER_MODE	1
-#define SECONDARY_SPACE_MODE	2
-#define HOME_SPACE_MODE		3
-
-extern unsigned int s390_user_mode;
-
 /*
  * Machine features detected in head.S
  */
...
@@ -14,7 +14,6 @@
 #define raw_smp_processor_id()	(S390_lowcore.cpu_nr)
 
 extern struct mutex smp_cpu_state_mutex;
-extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
 
 extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
...
...@@ -13,58 +13,94 @@ ...@@ -13,58 +13,94 @@
extern struct task_struct *__switch_to(void *, void *); extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task); extern void update_cr_regs(struct task_struct *task);
static inline void save_fp_regs(s390_fp_regs *fpregs) static inline int test_fp_ctl(u32 fpc)
{ {
u32 orig_fpc;
int rc;
if (!MACHINE_HAS_IEEE)
return 0;
asm volatile( asm volatile(
" std 0,%O0+8(%R0)\n" " efpc %1\n"
" std 2,%O0+24(%R0)\n" " sfpc %2\n"
" std 4,%O0+40(%R0)\n" "0: sfpc %1\n"
" std 6,%O0+56(%R0)" " la %0,0\n"
: "=Q" (*fpregs) : "Q" (*fpregs)); "1:\n"
EX_TABLE(0b,1b)
: "=d" (rc), "=d" (orig_fpc)
: "d" (fpc), "0" (-EINVAL));
return rc;
}
static inline void save_fp_ctl(u32 *fpc)
{
if (!MACHINE_HAS_IEEE) if (!MACHINE_HAS_IEEE)
return; return;
asm volatile( asm volatile(
" stfpc %0\n" " stfpc %0\n"
" std 1,%O0+16(%R0)\n" : "+Q" (*fpc));
" std 3,%O0+32(%R0)\n"
" std 5,%O0+48(%R0)\n"
" std 7,%O0+64(%R0)\n"
" std 8,%O0+72(%R0)\n"
" std 9,%O0+80(%R0)\n"
" std 10,%O0+88(%R0)\n"
" std 11,%O0+96(%R0)\n"
" std 12,%O0+104(%R0)\n"
" std 13,%O0+112(%R0)\n"
" std 14,%O0+120(%R0)\n"
" std 15,%O0+128(%R0)\n"
: "=Q" (*fpregs) : "Q" (*fpregs));
} }
static inline void restore_fp_regs(s390_fp_regs *fpregs) static inline int restore_fp_ctl(u32 *fpc)
{ {
int rc;
if (!MACHINE_HAS_IEEE)
return 0;
asm volatile( asm volatile(
" ld 0,%O0+8(%R0)\n" "0: lfpc %1\n"
" ld 2,%O0+24(%R0)\n" " la %0,0\n"
" ld 4,%O0+40(%R0)\n" "1:\n"
" ld 6,%O0+56(%R0)" EX_TABLE(0b,1b)
: : "Q" (*fpregs)); : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
return rc;
}
static inline void save_fp_regs(freg_t *fprs)
{
asm volatile("std 0,%0" : "=Q" (fprs[0]));
asm volatile("std 2,%0" : "=Q" (fprs[2]));
asm volatile("std 4,%0" : "=Q" (fprs[4]));
asm volatile("std 6,%0" : "=Q" (fprs[6]));
if (!MACHINE_HAS_IEEE) if (!MACHINE_HAS_IEEE)
return; return;
asm volatile( asm volatile("std 1,%0" : "=Q" (fprs[1]));
" lfpc %0\n" asm volatile("std 3,%0" : "=Q" (fprs[3]));
" ld 1,%O0+16(%R0)\n" asm volatile("std 5,%0" : "=Q" (fprs[5]));
" ld 3,%O0+32(%R0)\n" asm volatile("std 7,%0" : "=Q" (fprs[7]));
" ld 5,%O0+48(%R0)\n" asm volatile("std 8,%0" : "=Q" (fprs[8]));
" ld 7,%O0+64(%R0)\n" asm volatile("std 9,%0" : "=Q" (fprs[9]));
" ld 8,%O0+72(%R0)\n" asm volatile("std 10,%0" : "=Q" (fprs[10]));
" ld 9,%O0+80(%R0)\n" asm volatile("std 11,%0" : "=Q" (fprs[11]));
" ld 10,%O0+88(%R0)\n" asm volatile("std 12,%0" : "=Q" (fprs[12]));
" ld 11,%O0+96(%R0)\n" asm volatile("std 13,%0" : "=Q" (fprs[13]));
" ld 12,%O0+104(%R0)\n" asm volatile("std 14,%0" : "=Q" (fprs[14]));
" ld 13,%O0+112(%R0)\n" asm volatile("std 15,%0" : "=Q" (fprs[15]));
" ld 14,%O0+120(%R0)\n" }
" ld 15,%O0+128(%R0)\n"
: : "Q" (*fpregs)); static inline void restore_fp_regs(freg_t *fprs)
{
asm volatile("ld 0,%0" : : "Q" (fprs[0]));
asm volatile("ld 2,%0" : : "Q" (fprs[2]));
asm volatile("ld 4,%0" : : "Q" (fprs[4]));
asm volatile("ld 6,%0" : : "Q" (fprs[6]));
if (!MACHINE_HAS_IEEE)
return;
asm volatile("ld 1,%0" : : "Q" (fprs[1]));
asm volatile("ld 3,%0" : : "Q" (fprs[3]));
asm volatile("ld 5,%0" : : "Q" (fprs[5]));
asm volatile("ld 7,%0" : : "Q" (fprs[7]));
asm volatile("ld 8,%0" : : "Q" (fprs[8]));
asm volatile("ld 9,%0" : : "Q" (fprs[9]));
asm volatile("ld 10,%0" : : "Q" (fprs[10]));
asm volatile("ld 11,%0" : : "Q" (fprs[11]));
asm volatile("ld 12,%0" : : "Q" (fprs[12]));
asm volatile("ld 13,%0" : : "Q" (fprs[13]));
asm volatile("ld 14,%0" : : "Q" (fprs[14]));
asm volatile("ld 15,%0" : : "Q" (fprs[15]));
} }
 static inline void save_access_regs(unsigned int *acrs)
@@ -83,12 +119,14 @@ static inline void restore_access_regs(unsigned int *acrs)
 
 #define switch_to(prev,next,last) do {					\
 	if (prev->mm) {							\
-		save_fp_regs(&prev->thread.fp_regs);			\
+		save_fp_ctl(&prev->thread.fp_regs.fpc);			\
+		save_fp_regs(prev->thread.fp_regs.fprs);		\
 		save_access_regs(&prev->thread.acrs[0]);		\
 		save_ri_cb(prev->thread.ri_cb);				\
 	}								\
 	if (next->mm) {							\
-		restore_fp_regs(&next->thread.fp_regs);			\
+		restore_fp_ctl(&next->thread.fp_regs.fpc);		\
+		restore_fp_regs(next->thread.fp_regs.fprs);		\
 		restore_access_regs(&next->thread.acrs[0]);		\
 		restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
 		update_cr_regs(next);					\
...
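The floating-point state is now saved and restored in two steps: the floating-point-control word, whose load can fault on invalid contents, and the sixteen data registers, which cannot. A minimal sketch of how callers are expected to combine the split helpers, mirroring the signal-restore path further down (kernel context assumed; restore_thread_fp_state is a hypothetical wrapper, not code from this merge):

	/* Sketch only: load the control word first, since lfpc traps on a
	 * bad value and restore_fp_ctl() turns that trap into -EINVAL via
	 * the exception table; restoring the data registers cannot fail. */
	static int restore_thread_fp_state(struct thread_struct *thread)
	{
		if (restore_fp_ctl(&thread->fp_regs.fpc))
			return -EINVAL;
		restore_fp_regs(thread->fp_regs.fprs);
		return 0;
	}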
@@ -71,9 +71,11 @@ static inline void local_tick_enable(unsigned long long comp)
 
 typedef unsigned long long cycles_t;
 
-static inline void get_tod_clock_ext(char *clk)
+static inline void get_tod_clock_ext(char clk[16])
 {
-	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
+	typedef struct { char _[sizeof(clk)]; } addrtype;
+
+	asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
 }
 
 static inline unsigned long long get_tod_clock(void)
...
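The addrtype cast is the usual way to size an inline-asm memory operand: "=Q" (*clk) told the compiler only one byte is written, while casting through a struct that spans the buffer declares the full 16-byte store done by stcke. An x86-64 analog of the same idiom (illustrative only, not the s390 code; the struct size is spelled out here because sizeof on an array parameter decays to the size of a pointer):

	#include <stdio.h>
	#include <string.h>

	typedef struct { char _[16]; } addrtype;	/* spans the whole buffer */

	static void zero16(char *buf)
	{
		/* the sized memory operand tells GCC all 16 bytes change */
		asm volatile("pxor %%xmm0, %%xmm0\n\t"
			     "movdqu %%xmm0, %0"
			     : "=m" (*(addrtype *) buf) : : "xmm0");
	}

	int main(void)
	{
		char buf[16];

		memset(buf, 0xff, sizeof(buf));
		zero16(buf);
		printf("%d\n", buf[0] | buf[15]);	/* prints 0 */
		return 0;
	}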
@@ -94,9 +94,7 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 struct uaccess_ops {
 	size_t (*copy_from_user)(size_t, const void __user *, void *);
-	size_t (*copy_from_user_small)(size_t, const void __user *, void *);
 	size_t (*copy_to_user)(size_t, void __user *, const void *);
-	size_t (*copy_to_user_small)(size_t, void __user *, const void *);
 	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
 	size_t (*clear_user)(size_t, void __user *);
 	size_t (*strnlen_user)(size_t, const char __user *);
@@ -106,22 +104,20 @@ struct uaccess_ops {
 };
 
 extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_std;
 extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_mvcos_switch;
 extern struct uaccess_ops uaccess_pt;
 
 extern int __handle_fault(unsigned long, unsigned long, int);
 
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {
-	size = uaccess.copy_to_user_small(size, ptr, x);
+	size = uaccess.copy_to_user(size, ptr, x);
 	return size ? -EFAULT : size;
 }
 
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
-	size = uaccess.copy_from_user_small(size, ptr, x);
+	size = uaccess.copy_from_user(size, ptr, x);
 	return size ? -EFAULT : size;
 }
 
@@ -226,10 +222,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (__builtin_constant_p(n) && (n <= 256))
-		return uaccess.copy_to_user_small(n, to, from);
-	else
-		return uaccess.copy_to_user(n, to, from);
+	return uaccess.copy_to_user(n, to, from);
 }
 
 #define __copy_to_user_inatomic __copy_to_user
@@ -275,10 +268,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (__builtin_constant_p(n) && (n <= 256))
-		return uaccess.copy_from_user_small(n, from, to);
-	else
-		return uaccess.copy_from_user(n, from, to);
+	return uaccess.copy_from_user(n, from, to);
 }
 
 extern void copy_from_user_overflow(void)
...
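With the *_small entry points gone, every user copy, constant-sized or not, dispatches through the single ops table selected once at startup (uaccess_mvcos or uaccess_pt above). A user-space sketch of this dispatch pattern, with hypothetical names:

	#include <stdio.h>
	#include <string.h>

	struct uaccess_ops {
		size_t (*copy_to_user)(size_t n, void *to, const void *from);
	};

	/* one of several possible implementations, picked at startup */
	static size_t copy_plain(size_t n, void *to, const void *from)
	{
		memcpy(to, from, n);
		return 0;			/* bytes left uncopied */
	}

	static struct uaccess_ops uaccess = { .copy_to_user = copy_plain };

	static int put_user_fn(size_t size, void *ptr, void *x)
	{
		size = uaccess.copy_to_user(size, ptr, x);	/* one path for all sizes */
		return size ? -14 : 0;		/* -EFAULT on a short copy */
	}

	int main(void)
	{
		int src = 42, dst = 0;

		printf("%d %d\n", put_user_fn(sizeof(int), &dst, &src), dst);
		return 0;
	}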
@@ -199,6 +199,7 @@ typedef union
 typedef struct
 {
 	__u32	fpc;
+	__u32	pad;
 	freg_t	fprs[NUM_FPRS];
 } s390_fp_regs;
 
@@ -206,7 +207,6 @@ typedef struct
 #define FPC_FLAGS_MASK		0x00F80000
 #define FPC_DXC_MASK		0x0000FF00
 #define FPC_RM_MASK		0x00000003
-#define FPC_VALID_MASK		0xF8F8FF03
 
 /* this typedef defines how a Program Status Word looks like */
 typedef struct
@@ -263,7 +263,7 @@ typedef struct
 #define PSW_MASK_EA		0x0000000100000000UL
 #define PSW_MASK_BA		0x0000000080000000UL
 
-#define PSW_MASK_USER		0x0000FF8180000000UL
+#define PSW_MASK_USER		0x0000FF0180000000UL
 
 #define PSW_ADDR_AMODE		0x0000000000000000UL
 #define PSW_ADDR_INSN		0xFFFFFFFFFFFFFFFFUL
...
@@ -49,6 +49,7 @@ typedef struct
 typedef struct
 {
 	unsigned int fpc;
+	unsigned int pad;
 	double	fprs[__NUM_FPRS];
 } _s390_fp_regs;
...
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 
-obj-y	:= bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y	:= traps.o time.o process.o base.o early.o setup.o vtime.o
 obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y	+= debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
 obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
...
/*
* Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
* See include/asm/{bitops.h|posix_types.h} for details
*
* Copyright IBM Corp. 1999, 2009
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
*/
#include <linux/bitops.h>
#include <linux/module.h>
const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
EXPORT_SYMBOL(_oi_bitmap);
const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
EXPORT_SYMBOL(_ni_bitmap);
const char _zb_findmap[] = {
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
EXPORT_SYMBOL(_zb_findmap);
const char _sb_findmap[] = {
8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
EXPORT_SYMBOL(_sb_findmap);
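The dropped file implemented find-first-bit with 256-entry byte tables: _sb_findmap[b] is the index of the lowest set bit in byte b (8 when none are set), _zb_findmap[b] the lowest zero bit; with GENERIC_FIND_FIRST_BIT now selected, the generic helpers take over. An illustrative user-space reimplementation of the table-based scan (little-endian bit numbering; names hypothetical):

	#include <stdio.h>

	static char sb_findmap[256];

	/* regenerate the same table: lowest set bit per byte, 8 if none */
	static void init_map(void)
	{
		for (int v = 0; v < 256; v++) {
			sb_findmap[v] = 8;
			for (int i = 0; i < 8; i++)
				if (v & (1 << i)) {
					sb_findmap[v] = i;
					break;
				}
		}
	}

	/* scan a word byte by byte, one table lookup per byte */
	static unsigned int find_first_bit_word(unsigned long word)
	{
		for (unsigned int off = 0; off < 8 * sizeof(word); off += 8) {
			int b = sb_findmap[(word >> off) & 0xff];
			if (b < 8)
				return off + b;
		}
		return 8 * sizeof(word);	/* no bit set */
	}

	int main(void)
	{
		init_map();
		printf("%u\n", find_first_bit_word(0x40UL));	/* prints 6 */
		return 0;
	}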
@@ -146,15 +146,14 @@ static void __init cache_build_info(void)
 	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
 	for (level = 0; level < CACHE_MAX_LEVEL; level++) {
 		switch (ct.ci[level].scope) {
-		case CACHE_SCOPE_NOTEXISTS:
-		case CACHE_SCOPE_RESERVED:
-			return;
 		case CACHE_SCOPE_SHARED:
 			private = 0;
 			break;
 		case CACHE_SCOPE_PRIVATE:
 			private = 1;
 			break;
+		default:
+			return;
 		}
 		if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
 			rc = cache_add(level, private, CACHE_TYPE_DATA);
...
@@ -58,10 +58,6 @@
 
 #include "compat_linux.h"
 
-u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
-		      PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
-		      PSW32_MASK_PSTATE | PSW32_ASC_HOME;
-
 /* For this source file, we want overflow handling. */
 
 #undef high2lowuid
...
@@ -27,6 +27,7 @@ typedef union
 typedef struct
 {
 	unsigned int fpc;
+	unsigned int pad;
 	freg_t32 fprs[__NUM_FPRS];
 } _s390_fp_regs32;
...
@@ -153,57 +153,66 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 
 static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 {
-	_s390_regs_common32 regs32;
-	int err, i;
+	_sigregs32 user_sregs;
+	int i;
 
-	regs32.psw.mask = psw32_user_bits |
-		((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER);
-	regs32.psw.addr = (__u32) regs->psw.addr |
+	user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
+	user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
+	user_sregs.regs.psw.mask |= PSW32_USER_BITS;
+	user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
 		(__u32)(regs->psw.mask & PSW_MASK_BA);
 	for (i = 0; i < NUM_GPRS; i++)
-		regs32.gprs[i] = (__u32) regs->gprs[i];
+		user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
 	save_access_regs(current->thread.acrs);
-	memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs));
-	err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32));
-	if (err)
-		return err;
-	save_fp_regs(&current->thread.fp_regs);
-	/* s390_fp_regs and _s390_fp_regs32 are the same ! */
-	return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
-			      sizeof(_s390_fp_regs32));
+	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
+	       sizeof(user_sregs.regs.acrs));
+	save_fp_ctl(&current->thread.fp_regs.fpc);
+	save_fp_regs(current->thread.fp_regs.fprs);
+	memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
+	       sizeof(user_sregs.fpregs));
+	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
+		return -EFAULT;
+	return 0;
 }
 
 static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 {
-	_s390_regs_common32 regs32;
-	int err, i;
+	_sigregs32 user_sregs;
+	int i;
 
 	/* Alwys make any pending restarted system call return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-	err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
-	if (err)
-		return err;
+	if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
+		return -EFAULT;
+
+	if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
+		return -EINVAL;
+
+	/* Loading the floating-point-control word can fail. Do that first. */
+	if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+		return -EINVAL;
+
+	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
 	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
-		(__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
-		(__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
+		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
+		(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
 	/* Check for invalid user address space control. */
-	if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
-		regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+		regs->psw.mask = PSW_ASC_PRIMARY |
 			(regs->psw.mask & ~PSW_MASK_ASC);
-	regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
+	regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
 	for (i = 0; i < NUM_GPRS; i++)
-		regs->gprs[i] = (__u64) regs32.gprs[i];
-	memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs));
+		regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
+	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
+	       sizeof(current->thread.acrs));
 	restore_access_regs(current->thread.acrs);
-	err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
-			       sizeof(_s390_fp_regs32));
-	current->thread.fp_regs.fpc &= FPC_VALID_MASK;
-	if (err)
-		return err;
-	restore_fp_regs(&current->thread.fp_regs);
+	memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
+	       sizeof(current->thread.fp_regs));
+	restore_fp_regs(current->thread.fp_regs.fprs);
 	clear_thread_flag(TIF_SYSCALL);	/* No longer in a system call */
 	return 0;
 }
 
@@ -215,18 +224,18 @@ static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
 	for (i = 0; i < NUM_GPRS; i++)
 		gprs_high[i] = regs->gprs[i] >> 32;
-
-	return __copy_to_user(uregs, &gprs_high, sizeof(gprs_high));
+	if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high)))
+		return -EFAULT;
+	return 0;
 }
 
 static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
 {
 	__u32 gprs_high[NUM_GPRS];
-	int err, i;
+	int i;
 
-	err = __copy_from_user(&gprs_high, uregs, sizeof(gprs_high));
-	if (err)
-		return err;
+	if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high)))
+		return -EFAULT;
 	for (i = 0; i < NUM_GPRS; i++)
 		*(__u32 *)&regs->gprs[i] = gprs_high[i];
 	return 0;
@@ -348,7 +357,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 	regs->gprs[15] = (__force __u64) frame;
 	/* Force 31 bit amode and default user address space control. */
 	regs->psw.mask = PSW_MASK_BA |
-			 (psw_user_bits & PSW_MASK_ASC) |
+			 (PSW_USER_BITS & PSW_MASK_ASC) |
			 (regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (__force __u64) ka->sa.sa_handler;
 
@@ -415,7 +424,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 	regs->gprs[15] = (__force __u64) frame;
 	/* Force 31 bit amode and default user address space control. */
 	regs->psw.mask = PSW_MASK_BA |
-			 (psw_user_bits & PSW_MASK_ASC) |
+			 (PSW_USER_BITS & PSW_MASK_ASC) |
			 (regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
...
@@ -22,6 +22,32 @@
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+struct dump_save_areas dump_save_areas;
+
+/*
+ * Allocate and add a save area for a CPU
+ */
+struct save_area *dump_save_area_create(int cpu)
+{
+	struct save_area **save_areas, *save_area;
+
+	save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
+	if (!save_area)
+		return NULL;
+	if (cpu + 1 > dump_save_areas.count) {
+		dump_save_areas.count = cpu + 1;
+		save_areas = krealloc(dump_save_areas.areas,
+				      dump_save_areas.count * sizeof(void *),
+				      GFP_KERNEL | __GFP_ZERO);
+		if (!save_areas) {
+			kfree(save_area);
+			return NULL;
+		}
+		dump_save_areas.areas = save_areas;
+	}
+	dump_save_areas.areas[cpu] = save_area;
+	return save_area;
+}
+
 /*
  * Return physical address for virtual address
@@ -45,7 +71,6 @@ static inline void *load_real_addr(void *addr)
 static int copy_from_realmem(void *dest, void *src, size_t count)
 {
 	unsigned long size;
-	int rc;
 
 	if (!count)
 		return 0;
@@ -451,8 +476,8 @@ static int get_cpu_cnt(void)
 {
 	int i, cpus = 0;
 
-	for (i = 0; zfcpdump_save_areas[i]; i++) {
-		if (zfcpdump_save_areas[i]->pref_reg == 0)
+	for (i = 0; i < dump_save_areas.count; i++) {
+		if (dump_save_areas.areas[i]->pref_reg == 0)
 			continue;
 		cpus++;
 	}
@@ -523,8 +548,8 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 	ptr = nt_prpsinfo(ptr);
 
-	for (i = 0; zfcpdump_save_areas[i]; i++) {
-		sa = zfcpdump_save_areas[i];
+	for (i = 0; i < dump_save_areas.count; i++) {
+		sa = dump_save_areas.areas[i];
 		if (sa->pref_reg == 0)
 			continue;
 		ptr = fill_cpu_elf_notes(ptr, sa);
...
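The new dump_save_area_create() grows the per-CPU pointer array on demand, zero-filling the added slots via __GFP_ZERO so untouched CPU indices stay NULL. A user-space analog of the same pattern (hypothetical names; libc realloc() does not zero, so the tail is cleared by hand):

	#include <stdlib.h>
	#include <string.h>

	struct areas {
		void **v;
		int count;
	};

	static void *area_create(struct areas *a, int cpu)
	{
		void *sa = malloc(64);		/* stand-in for a save area */

		if (!sa)
			return NULL;
		if (cpu + 1 > a->count) {
			void **nv = realloc(a->v, (cpu + 1) * sizeof(void *));

			if (!nv) {
				free(sa);	/* roll back on failure */
				return NULL;
			}
			/* zero the freshly added slots, as __GFP_ZERO would */
			memset(nv + a->count, 0,
			       (cpu + 1 - a->count) * sizeof(void *));
			a->v = nv;
			a->count = cpu + 1;
		}
		a->v[cpu] = sa;
		return sa;
	}

	int main(void)
	{
		struct areas a = { NULL, 0 };

		return area_create(&a, 3) ? 0 : 1;
	}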
@@ -889,7 +889,7 @@ static int debug_active=1;
  * if debug_active is already off
  */
 static int
-s390dbf_procactive(ctl_table *table, int write,
+s390dbf_procactive(struct ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	if (!write || debug_stoppable || !debug_active)
...
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <asm/processor.h>
 #include <asm/debug.h>
+#include <asm/dis.h>
 #include <asm/ipl.h>
 
 #ifndef CONFIG_64BIT
...
@@ -206,6 +206,7 @@ static noinline __init void clear_bss_section(void)
  */
 static noinline __init void init_kernel_storage_key(void)
 {
+#if PAGE_DEFAULT_KEY
 	unsigned long end_pfn, init_pfn;
 
 	end_pfn = PFN_UP(__pa(&_end));
@@ -213,6 +214,7 @@ static noinline __init void init_kernel_storage_key(void)
 	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
 		page_set_storage_key(init_pfn << PAGE_SHIFT,
 				     PAGE_DEFAULT_KEY, 0);
+#endif
 }
 
 static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
...
@@ -23,7 +23,6 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
 
 void do_protection_exception(struct pt_regs *regs);
 void do_dat_exception(struct pt_regs *regs);
-void do_asce_exception(struct pt_regs *regs);
 
 void addressing_exception(struct pt_regs *regs);
 void data_exception(struct pt_regs *regs);
...
@@ -151,14 +151,13 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		goto out;
 	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
-	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
-		goto out;
 	trace.func = ip;
+	trace.depth = current->curr_ret_stack + 1;
 	/* Only trace if the calling function expects to. */
-	if (!ftrace_graph_entry(&trace)) {
-		current->curr_ret_stack--;
+	if (!ftrace_graph_entry(&trace))
+		goto out;
+	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
 		goto out;
-	}
 	parent = (unsigned long) return_to_handler;
 out:
 	return parent;
...
@@ -437,7 +437,7 @@ ENTRY(startup_kdump)
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-	.long 3, 0xc100efe3, 0xf46ce000, 0x00400000
+	.long 3, 0xc100efe3, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
 	.long 2, 0xc100efe3, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
...
@@ -2051,12 +2051,12 @@ void s390_reset_system(void (*func)(void *), void *data)
 	__ctl_clear_bit(0,28);
 
 	/* Set new machine check handler */
-	S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+	S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
 	S390_lowcore.mcck_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
 
 	/* Set new program check handler */
-	S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+	S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
 	S390_lowcore.program_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
...
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT			/* 34 */
 PGM_CHECK_DEFAULT			/* 35 */
 PGM_CHECK_DEFAULT			/* 36 */
 PGM_CHECK_DEFAULT			/* 37 */
-PGM_CHECK_64BIT(do_asce_exception)	/* 38 */
+PGM_CHECK_DEFAULT			/* 38 */
 PGM_CHECK_64BIT(do_dat_exception)	/* 39 */
 PGM_CHECK_64BIT(do_dat_exception)	/* 3a */
 PGM_CHECK_64BIT(do_dat_exception)	/* 3b */
...
@@ -40,8 +40,6 @@ static void disable_runtime_instr(void)
 static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
 {
 	cb->buf_limit = 0xfff;
-	if (s390_user_mode == HOME_SPACE_MODE)
-		cb->home_space = 1;
 	cb->int_requested = 1;
 	cb->pstate = 1;
 	cb->pstate_set_buf = 1;
...
@@ -161,7 +161,7 @@ void __kprobes vtime_stop_cpu(void)
 	trace_hardirqs_on();
 
 	/* Wait for external, I/O or machine check interrupt. */
-	psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
+	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
 		   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	idle->nohz_delay = 0;
 
@@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu)
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
 		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
-	} while ((sequence & 1) || (idle->sequence != sequence));
+	} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
 	return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
 }
...
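The idle-time fix is a one-word change with real teeth: without ACCESS_ONCE() on the final comparison, the compiler may reuse the sequence value loaded at the top of the loop, so a writer racing between the two reads would go unnoticed. A user-space analog of this seqcount-style retry loop, using C11 atomics (type and field names are hypothetical; a sketch, not a complete seqlock):

	#include <stdatomic.h>

	struct idle_snap {
		_Atomic unsigned int seq;	/* odd while a writer is active */
		unsigned long long enter;
		unsigned long long exit;
	};

	static void read_snap(struct idle_snap *s,
			      unsigned long long *enter,
			      unsigned long long *exit)
	{
		unsigned int seq;

		do {
			seq = atomic_load(&s->seq);
			*enter = s->enter;
			*exit = s->exit;
			/* the closing load must be fresh, or the compiler
			 * may reuse the first and miss a concurrent update */
		} while ((seq & 1) || atomic_load(&s->seq) != seq);
	}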