Commit 1db2a5c1 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (85 commits)
  [S390] provide documentation for hvc_iucv kernel parameter.
  [S390] convert ctcm printks to dev_xxx and pr_xxx macros.
  [S390] convert zfcp printks to pr_xxx macros.
  [S390] convert vmlogrdr printks to pr_xxx macros.
  [S390] convert zfcp dumper printks to pr_xxx macros.
  [S390] convert cpu related printks to pr_xxx macros.
  [S390] convert qeth printks to dev_xxx and pr_xxx macros.
  [S390] convert sclp printks to pr_xxx macros.
  [S390] convert iucv printks to dev_xxx and pr_xxx macros.
  [S390] convert ap_bus printks to pr_xxx macros.
  [S390] convert dcssblk and extmem printks messages to pr_xxx macros.
  [S390] convert monwriter printks to pr_xxx macros.
  [S390] convert s390 debug feature printks to pr_xxx macros.
  [S390] convert monreader printks to pr_xxx macros.
  [S390] convert appldata printks to pr_xxx macros.
  [S390] convert setup printks to pr_xxx macros.
  [S390] convert hypfs printks to pr_xxx macros.
  [S390] convert time printks to pr_xxx macros.
  [S390] convert cpacf printks to pr_xxx macros.
  [S390] convert cio printks to pr_xxx macros.
  ...
parents a39b8633 cef7125d
...@@ -823,6 +823,9 @@ and is between 256 and 4096 characters. It is defined in the file ...@@ -823,6 +823,9 @@ and is between 256 and 4096 characters. It is defined in the file
hlt [BUGS=ARM,SH] hlt [BUGS=ARM,SH]
hvc_iucv= [S390] Number of z/VM IUCV Hypervisor console (HVC)
back-ends. Valid parameters: 0..8
i8042.debug [HW] Toggle i8042 debug mode i8042.debug [HW] Toggle i8042 debug mode
i8042.direct [HW] Put keyboard port into non-translated mode i8042.direct [HW] Put keyboard port into non-translated mode
i8042.dumbkbd [HW] Pretend that controller can only read data from i8042.dumbkbd [HW] Pretend that controller can only read data from
...@@ -2292,6 +2295,14 @@ and is between 256 and 4096 characters. It is defined in the file ...@@ -2292,6 +2295,14 @@ and is between 256 and 4096 characters. It is defined in the file
See comment before function dc390_setup() in See comment before function dc390_setup() in
drivers/scsi/tmscsim.c. drivers/scsi/tmscsim.c.
topology= [S390]
Format: {off | on}
Specify if the kernel should make use of the cpu
topology informations if the hardware supports these.
The scheduler will make use of these informations and
e.g. base its process migration decisions on it.
Default is off.
tp720= [HW,PS2] tp720= [HW,PS2]
trix= [HW,OSS] MediaTrix AudioTrix Pro trix= [HW,OSS] MediaTrix AudioTrix Pro
......
...@@ -267,7 +267,7 @@ extern int ucache_bsize; ...@@ -267,7 +267,7 @@ extern int ucache_bsize;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm; struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm, extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int executable_stack); int uses_interp);
#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) ...@@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
* This is called from binfmt_elf, we create the special vma for the * This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree * vDSO and insert it into the mm struct tree
*/ */
int arch_setup_additional_pages(struct linux_binprm *bprm, int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int executable_stack)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct page **vdso_pagelist; struct page **vdso_pagelist;
......
...@@ -43,6 +43,9 @@ config GENERIC_HWEIGHT ...@@ -43,6 +43,9 @@ config GENERIC_HWEIGHT
config GENERIC_TIME config GENERIC_TIME
def_bool y def_bool y
config GENERIC_TIME_VSYSCALL
def_bool y
config GENERIC_CLOCKEVENTS config GENERIC_CLOCKEVENTS
def_bool y def_bool y
...@@ -66,10 +69,15 @@ config PGSTE ...@@ -66,10 +69,15 @@ config PGSTE
bool bool
default y if KVM default y if KVM
config VIRT_CPU_ACCOUNTING
def_bool y
mainmenu "Linux Kernel Configuration" mainmenu "Linux Kernel Configuration"
config S390 config S390
def_bool y def_bool y
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_FUNCTION_TRACER
select HAVE_OPROFILE select HAVE_OPROFILE
select HAVE_KPROBES select HAVE_KPROBES
select HAVE_KRETPROBES select HAVE_KRETPROBES
...@@ -225,6 +233,14 @@ config MARCH_Z9_109 ...@@ -225,6 +233,14 @@ config MARCH_Z9_109
Class (z9 BC). The kernel will be slightly faster but will not Class (z9 BC). The kernel will be slightly faster but will not
work on older machines such as the z990, z890, z900, and z800. work on older machines such as the z990, z890, z900, and z800.
config MARCH_Z10
bool "IBM System z10"
help
Select this to enable optimizations for IBM System z10. The
kernel will be slightly faster but will not work on older
machines such as the z990, z890, z900, z800, z9-109, z9-ec
and z9-bc.
endchoice endchoice
config PACK_STACK config PACK_STACK
...@@ -343,16 +359,6 @@ config QDIO ...@@ -343,16 +359,6 @@ config QDIO
If unsure, say Y. If unsure, say Y.
config QDIO_DEBUG
bool "Extended debugging information"
depends on QDIO
help
Say Y here to get extended debugging output in
/sys/kernel/debug/s390dbf/qdio...
Warning: this option reduces the performance of the QDIO module.
If unsure, say N.
config CHSC_SCH config CHSC_SCH
tristate "Support for CHSC subchannels" tristate "Support for CHSC subchannels"
help help
...@@ -466,22 +472,9 @@ config PAGE_STATES ...@@ -466,22 +472,9 @@ config PAGE_STATES
hypervisor. The ESSA instruction is used to do the states hypervisor. The ESSA instruction is used to do the states
changes between a page that has content and the unused state. changes between a page that has content and the unused state.
config VIRT_TIMER
bool "Virtual CPU timer support"
help
This provides a kernel interface for virtual CPU timers.
Default is disabled.
config VIRT_CPU_ACCOUNTING
bool "Base user process accounting on virtual cpu timer"
depends on VIRT_TIMER
help
Select this option to use CPU timer deltas to do user
process accounting.
config APPLDATA_BASE config APPLDATA_BASE
bool "Linux - VM Monitor Stream, base infrastructure" bool "Linux - VM Monitor Stream, base infrastructure"
depends on PROC_FS && VIRT_TIMER=y depends on PROC_FS
help help
This provides a kernel interface for creating and updating z/VM APPLDATA This provides a kernel interface for creating and updating z/VM APPLDATA
monitor records. The monitor records are updated at certain time monitor records. The monitor records are updated at certain time
......
...@@ -34,6 +34,7 @@ cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) ...@@ -34,6 +34,7 @@ cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109) cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
#KBUILD_IMAGE is necessary for make rpm #KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image KBUILD_IMAGE :=arch/s390/boot/image
......
...@@ -26,10 +26,6 @@ ...@@ -26,10 +26,6 @@
#define CTL_APPLDATA_NET_SUM 2125 #define CTL_APPLDATA_NET_SUM 2125
#define CTL_APPLDATA_PROC 2126 #define CTL_APPLDATA_PROC 2126
#define P_INFO(x...) printk(KERN_INFO MY_PRINT_NAME " info: " x)
#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x)
#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x)
struct appldata_ops { struct appldata_ops {
struct list_head list; struct list_head list;
struct ctl_table_header *sysctl_header; struct ctl_table_header *sysctl_header;
......
...@@ -10,6 +10,9 @@ ...@@ -10,6 +10,9 @@
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/ */
#define KMSG_COMPONENT "appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -32,7 +35,6 @@ ...@@ -32,7 +35,6 @@
#include "appldata.h" #include "appldata.h"
#define MY_PRINT_NAME "appldata" /* for debug messages, etc. */
#define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for
sampling interval in sampling interval in
milliseconds */ milliseconds */
...@@ -390,8 +392,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, ...@@ -390,8 +392,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
(unsigned long) ops->data, ops->size, (unsigned long) ops->data, ops->size,
ops->mod_lvl); ops->mod_lvl);
if (rc != 0) { if (rc != 0) {
P_ERROR("START DIAG 0xDC for %s failed, " pr_err("Starting the data collection for %s "
"return code: %d\n", ops->name, rc); "failed with rc=%d\n", ops->name, rc);
module_put(ops->owner); module_put(ops->owner);
} else } else
ops->active = 1; ops->active = 1;
...@@ -401,8 +403,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, ...@@ -401,8 +403,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
(unsigned long) ops->data, ops->size, (unsigned long) ops->data, ops->size,
ops->mod_lvl); ops->mod_lvl);
if (rc != 0) if (rc != 0)
P_ERROR("STOP DIAG 0xDC for %s failed, " pr_err("Stopping the data collection for %s "
"return code: %d\n", ops->name, rc); "failed with rc=%d\n", ops->name, rc);
module_put(ops->owner); module_put(ops->owner);
} }
spin_unlock(&appldata_ops_lock); spin_unlock(&appldata_ops_lock);
......
...@@ -9,6 +9,9 @@ ...@@ -9,6 +9,9 @@
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/ */
#define KMSG_COMPONENT "appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -22,7 +25,6 @@ ...@@ -22,7 +25,6 @@
#include "appldata.h" #include "appldata.h"
#define MY_PRINT_NAME "appldata_os" /* for debug messages, etc. */
#define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
...@@ -143,21 +145,16 @@ static void appldata_get_os_data(void *data) ...@@ -143,21 +145,16 @@ static void appldata_get_os_data(void *data)
(unsigned long) ops.data, new_size, (unsigned long) ops.data, new_size,
ops.mod_lvl); ops.mod_lvl);
if (rc != 0) if (rc != 0)
P_ERROR("os: START NEW DIAG 0xDC failed, " pr_err("Starting a new OS data collection "
"return code: %d, new size = %i\n", rc, "failed with rc=%d\n", rc);
new_size);
rc = appldata_diag(APPLDATA_RECORD_OS_ID, rc = appldata_diag(APPLDATA_RECORD_OS_ID,
APPLDATA_STOP_REC, APPLDATA_STOP_REC,
(unsigned long) ops.data, ops.size, (unsigned long) ops.data, ops.size,
ops.mod_lvl); ops.mod_lvl);
if (rc != 0) if (rc != 0)
P_ERROR("os: STOP OLD DIAG 0xDC failed, " pr_err("Stopping a faulty OS data "
"return code: %d, old size = %i\n", rc, "collection failed with rc=%d\n", rc);
ops.size);
else
P_INFO("os: old record size = %i stopped\n",
ops.size);
} }
ops.size = new_size; ops.size = new_size;
} }
...@@ -178,8 +175,8 @@ static int __init appldata_os_init(void) ...@@ -178,8 +175,8 @@ static int __init appldata_os_init(void)
max_size = sizeof(struct appldata_os_data) + max_size = sizeof(struct appldata_os_data) +
(NR_CPUS * sizeof(struct appldata_os_per_cpu)); (NR_CPUS * sizeof(struct appldata_os_per_cpu));
if (max_size > APPLDATA_MAX_REC_SIZE) { if (max_size > APPLDATA_MAX_REC_SIZE) {
P_ERROR("Max. size of OS record = %i, bigger than maximum " pr_err("Maximum OS record size %i exceeds the maximum "
"record size (%i)\n", max_size, APPLDATA_MAX_REC_SIZE); "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
rc = -ENOMEM; rc = -ENOMEM;
goto out; goto out;
} }
......
...@@ -17,6 +17,9 @@ ...@@ -17,6 +17,9 @@
* *
*/ */
#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <crypto/aes.h> #include <crypto/aes.h>
#include <crypto/algapi.h> #include <crypto/algapi.h>
#include <linux/err.h> #include <linux/err.h>
...@@ -169,7 +172,8 @@ static int fallback_init_cip(struct crypto_tfm *tfm) ...@@ -169,7 +172,8 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.cip)) { if (IS_ERR(sctx->fallback.cip)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name); pr_err("Allocating AES fallback algorithm %s failed\n",
name);
return PTR_ERR(sctx->fallback.blk); return PTR_ERR(sctx->fallback.blk);
} }
...@@ -349,7 +353,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm) ...@@ -349,7 +353,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.blk)) { if (IS_ERR(sctx->fallback.blk)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name); pr_err("Allocating AES fallback algorithm %s failed\n",
name);
return PTR_ERR(sctx->fallback.blk); return PTR_ERR(sctx->fallback.blk);
} }
...@@ -515,9 +520,8 @@ static int __init aes_s390_init(void) ...@@ -515,9 +520,8 @@ static int __init aes_s390_init(void)
/* z9 109 and z9 BC/EC only support 128 bit key length */ /* z9 109 and z9 BC/EC only support 128 bit key length */
if (keylen_flag == AES_KEYLEN_128) if (keylen_flag == AES_KEYLEN_128)
printk(KERN_INFO pr_info("AES hardware acceleration is only available for"
"aes_s390: hardware acceleration only available for " " 128-bit keys\n");
"128 bit keys\n");
ret = crypto_register_alg(&aes_alg); ret = crypto_register_alg(&aes_alg);
if (ret) if (ret)
......
...@@ -3,10 +3,13 @@ ...@@ -3,10 +3,13 @@
* Hypervisor filesystem for Linux on s390. Diag 204 and 224 * Hypervisor filesystem for Linux on s390. Diag 204 and 224
* implementation. * implementation.
* *
* Copyright (C) IBM Corp. 2006 * Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com> * Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/ */
#define KMSG_COMPONENT "hypfs"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h> #include <linux/types.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/string.h> #include <linux/string.h>
...@@ -527,13 +530,14 @@ __init int hypfs_diag_init(void) ...@@ -527,13 +530,14 @@ __init int hypfs_diag_init(void)
int rc; int rc;
if (diag204_probe()) { if (diag204_probe()) {
printk(KERN_ERR "hypfs: diag 204 not working."); pr_err("The hardware system does not support hypfs\n");
return -ENODATA; return -ENODATA;
} }
rc = diag224_get_name_table(); rc = diag224_get_name_table();
if (rc) { if (rc) {
diag204_free_buffer(); diag204_free_buffer();
printk(KERN_ERR "hypfs: could not get name table.\n"); pr_err("The hardware system does not provide all "
"functions required by hypfs\n");
} }
return rc; return rc;
} }
......
...@@ -2,10 +2,13 @@ ...@@ -2,10 +2,13 @@
* arch/s390/hypfs/inode.c * arch/s390/hypfs/inode.c
* Hypervisor filesystem for Linux on s390. * Hypervisor filesystem for Linux on s390.
* *
* Copyright (C) IBM Corp. 2006 * Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com> * Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/ */
#define KMSG_COMPONENT "hypfs"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h> #include <linux/types.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/fs.h> #include <linux/fs.h>
...@@ -200,7 +203,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, ...@@ -200,7 +203,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
else else
rc = hypfs_diag_create_files(sb, sb->s_root); rc = hypfs_diag_create_files(sb, sb->s_root);
if (rc) { if (rc) {
printk(KERN_ERR "hypfs: Update failed\n"); pr_err("Updating the hypfs tree failed\n");
hypfs_delete_tree(sb->s_root); hypfs_delete_tree(sb->s_root);
goto out; goto out;
} }
...@@ -252,8 +255,7 @@ static int hypfs_parse_options(char *options, struct super_block *sb) ...@@ -252,8 +255,7 @@ static int hypfs_parse_options(char *options, struct super_block *sb)
break; break;
case opt_err: case opt_err:
default: default:
printk(KERN_ERR "hypfs: Unrecognized mount option " pr_err("%s is not a valid mount option\n", str);
"\"%s\" or missing value\n", str);
return -EINVAL; return -EINVAL;
} }
} }
...@@ -317,7 +319,7 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -317,7 +319,7 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
} }
hypfs_update_update(sb); hypfs_update_update(sb);
sb->s_root = root_dentry; sb->s_root = root_dentry;
printk(KERN_INFO "hypfs: Hypervisor filesystem mounted\n"); pr_info("Hypervisor filesystem mounted\n");
return 0; return 0;
err_tree: err_tree:
...@@ -513,7 +515,7 @@ static int __init hypfs_init(void) ...@@ -513,7 +515,7 @@ static int __init hypfs_init(void)
if (!MACHINE_IS_VM) if (!MACHINE_IS_VM)
hypfs_diag_exit(); hypfs_diag_exit();
fail_diag: fail_diag:
printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc; return rc;
} }
......
#ifndef __ASMS390_AUXVEC_H #ifndef __ASMS390_AUXVEC_H
#define __ASMS390_AUXVEC_H #define __ASMS390_AUXVEC_H
#define AT_SYSINFO_EHDR 33
#endif #endif
...@@ -47,7 +47,10 @@ ...@@ -47,7 +47,10 @@
#endif /* CONFIG_DEBUG_BUGVERBOSE */ #endif /* CONFIG_DEBUG_BUGVERBOSE */
#define BUG() __EMIT_BUG(0) #define BUG() do { \
__EMIT_BUG(0); \
for (;;); \
} while (0)
#define WARN_ON(x) ({ \ #define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \ int __ret_warn_on = !!(x); \
......
...@@ -11,32 +11,39 @@ ...@@ -11,32 +11,39 @@
#include <asm/types.h> #include <asm/types.h>
#ifdef __GNUC__ #define __BIG_ENDIAN
#ifndef __s390x__
# define __SWAB_64_THRU_32__
#endif
#ifdef __s390x__ #ifdef __s390x__
static inline __u64 ___arch__swab64p(const __u64 *x) static inline __u64 __arch_swab64p(const __u64 *x)
{ {
__u64 result; __u64 result;
asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x)); asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
return result; return result;
} }
#define __arch_swab64p __arch_swab64p
static inline __u64 ___arch__swab64(__u64 x) static inline __u64 __arch_swab64(__u64 x)
{ {
__u64 result; __u64 result;
asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x)); asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
return result; return result;
} }
#define __arch_swab64 __arch_swab64
static inline void ___arch__swab64s(__u64 *x) static inline void __arch_swab64s(__u64 *x)
{ {
*x = ___arch__swab64p(x); *x = __arch_swab64p(x);
} }
#define __arch_swab64s __arch_swab64s
#endif /* __s390x__ */ #endif /* __s390x__ */
static inline __u32 ___arch__swab32p(const __u32 *x) static inline __u32 __arch_swab32p(const __u32 *x)
{ {
__u32 result; __u32 result;
...@@ -53,25 +60,20 @@ static inline __u32 ___arch__swab32p(const __u32 *x) ...@@ -53,25 +60,20 @@ static inline __u32 ___arch__swab32p(const __u32 *x)
#endif /* __s390x__ */ #endif /* __s390x__ */
return result; return result;
} }
#define __arch_swab32p __arch_swab32p
static inline __u32 ___arch__swab32(__u32 x) #ifdef __s390x__
static inline __u32 __arch_swab32(__u32 x)
{ {
#ifndef __s390x__
return ___arch__swab32p(&x);
#else /* __s390x__ */
__u32 result; __u32 result;
asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x)); asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
return result; return result;
#endif /* __s390x__ */
}
static __inline__ void ___arch__swab32s(__u32 *x)
{
*x = ___arch__swab32p(x);
} }
#define __arch_swab32 __arch_swab32
#endif /* __s390x__ */
static __inline__ __u16 ___arch__swab16p(const __u16 *x) static inline __u16 __arch_swab16p(const __u16 *x)
{ {
__u16 result; __u16 result;
...@@ -86,40 +88,8 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x) ...@@ -86,40 +88,8 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x)
#endif /* __s390x__ */ #endif /* __s390x__ */
return result; return result;
} }
#define __arch_swab16p __arch_swab16p
static __inline__ __u16 ___arch__swab16(__u16 x) #include <linux/byteorder.h>
{
return ___arch__swab16p(&x);
}
static __inline__ void ___arch__swab16s(__u16 *x)
{
*x = ___arch__swab16p(x);
}
#ifdef __s390x__
#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab64p(x) ___arch__swab64p(x)
#define __arch__swab64s(x) ___arch__swab64s(x)
#endif /* __s390x__ */
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)
#define __arch__swab32p(x) ___arch__swab32p(x)
#define __arch__swab16p(x) ___arch__swab16p(x)
#define __arch__swab32s(x) ___arch__swab32s(x)
#define __arch__swab16s(x) ___arch__swab16s(x)
#ifndef __s390x__
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif
#else /* __s390x__ */
#define __BYTEORDER_HAS_U64__
#endif /* __s390x__ */
#endif /* __GNUC__ */
#include <linux/byteorder/big_endian.h>
#endif /* _S390_BYTEORDER_H */ #endif /* _S390_BYTEORDER_H */
...@@ -120,6 +120,10 @@ typedef s390_compat_regs compat_elf_gregset_t; ...@@ -120,6 +120,10 @@ typedef s390_compat_regs compat_elf_gregset_t;
#include <asm/system.h> /* for save_access_regs */ #include <asm/system.h> /* for save_access_regs */
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/vdso.h>
extern unsigned int vdso_enabled;
/* /*
* This is used to ensure we don't load something for the wrong architecture. * This is used to ensure we don't load something for the wrong architecture.
*/ */
...@@ -191,4 +195,16 @@ do { \ ...@@ -191,4 +195,16 @@ do { \
current->mm->context.noexec == 0; \ current->mm->context.noexec == 0; \
}) })
#define ARCH_DLINFO \
do { \
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long)current->mm->context.vdso_base); \
} while (0)
struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
int arch_setup_additional_pages(struct linux_binprm *, int);
#endif #endif
...@@ -248,8 +248,8 @@ struct dcw { ...@@ -248,8 +248,8 @@ struct dcw {
#define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \ #define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \
TCCB_MAX_DCW * sizeof(struct dcw) + \ TCCB_MAX_DCW * sizeof(struct dcw) + \
sizeof(struct tccb_tcat)) sizeof(struct tccb_tcat))
#define TCCB_SAC_DEFAULT 0xf901 #define TCCB_SAC_DEFAULT 0x1ffe
#define TCCB_SAC_INTRG 0xf902 #define TCCB_SAC_INTRG 0x1fff
/** /**
* struct tccb_tcah - Transport-Command-Area Header (TCAH) * struct tccb_tcah - Transport-Command-Area Header (TCAH)
......
#ifndef _ASM_S390_FTRACE_H
#define _ASM_S390_FTRACE_H
#ifndef __ASSEMBLY__
extern void _mcount(void);
#endif
#endif /* _ASM_S390_FTRACE_H */
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#define CHSC_SCH_ISC 7 /* CHSC subchannels */ #define CHSC_SCH_ISC 7 /* CHSC subchannels */
/* Adapter interrupts. */ /* Adapter interrupts. */
#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */ #define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
#define AP_ISC 6 /* adjunct processor (crypto) devices */
/* Functions for registration of I/O interruption subclasses */ /* Functions for registration of I/O interruption subclasses */
void isc_register(unsigned int isc); void isc_register(unsigned int isc);
......
...@@ -6,6 +6,7 @@ typedef struct { ...@@ -6,6 +6,7 @@ typedef struct {
struct list_head pgtable_list; struct list_head pgtable_list;
unsigned long asce_bits; unsigned long asce_bits;
unsigned long asce_limit; unsigned long asce_limit;
unsigned long vdso_base;
int noexec; int noexec;
int has_pgste; /* The mmu context has extended page tables */ int has_pgste; /* The mmu context has extended page tables */
int alloc_pgste; /* cloned contexts will have extended page tables */ int alloc_pgste; /* cloned contexts will have extended page tables */
......
...@@ -152,4 +152,6 @@ void arch_alloc_page(struct page *page, int order); ...@@ -152,4 +152,6 @@ void arch_alloc_page(struct page *page, int order);
#include <asm-generic/memory_model.h> #include <asm-generic/memory_model.h>
#include <asm-generic/page.h> #include <asm-generic/page.h>
#define __HAVE_ARCH_GATE_AREA 1
#endif /* _S390_PAGE_H */ #endif /* _S390_PAGE_H */
...@@ -28,6 +28,8 @@ void disable_noexec(struct mm_struct *, struct task_struct *); ...@@ -28,6 +28,8 @@ void disable_noexec(struct mm_struct *, struct task_struct *);
static inline void clear_table(unsigned long *s, unsigned long val, size_t n) static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{ {
typedef struct { char _[n]; } addrtype;
*s = val; *s = val;
n = (n / 256) - 1; n = (n / 256) - 1;
asm volatile( asm volatile(
...@@ -39,7 +41,8 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n) ...@@ -39,7 +41,8 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
"0: mvc 256(256,%0),0(%0)\n" "0: mvc 256(256,%0),0(%0)\n"
" la %0,256(%0)\n" " la %0,256(%0)\n"
" brct %1,0b\n" " brct %1,0b\n"
: "+a" (s), "+d" (n)); : "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
: "m" (*(addrtype *) s));
} }
static inline void crst_table_init(unsigned long *crst, unsigned long entry) static inline void crst_table_init(unsigned long *crst, unsigned long entry)
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#ifndef __ASM_S390_PROCESSOR_H #ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H #define __ASM_S390_PROCESSOR_H
#include <linux/linkage.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#ifdef __KERNEL__ #ifdef __KERNEL__
...@@ -258,7 +259,7 @@ static inline void enabled_wait(void) ...@@ -258,7 +259,7 @@ static inline void enabled_wait(void)
* Function to drop a processor into disabled wait state * Function to drop a processor into disabled wait state
*/ */
static inline void disabled_wait(unsigned long code) static inline void ATTRIB_NORET disabled_wait(unsigned long code)
{ {
unsigned long ctl_buf; unsigned long ctl_buf;
psw_t dw_psw; psw_t dw_psw;
...@@ -322,6 +323,7 @@ static inline void disabled_wait(unsigned long code) ...@@ -322,6 +323,7 @@ static inline void disabled_wait(unsigned long code)
: "=m" (ctl_buf) : "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0"); : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0");
#endif /* __s390x__ */ #endif /* __s390x__ */
while (1);
} }
/* /*
......
...@@ -373,16 +373,16 @@ struct qdio_initialize { ...@@ -373,16 +373,16 @@ struct qdio_initialize {
#define QDIO_FLAG_SYNC_OUTPUT 0x02 #define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_PCI_OUT 0x10 #define QDIO_FLAG_PCI_OUT 0x10
extern int qdio_initialize(struct qdio_initialize *init_data); extern int qdio_initialize(struct qdio_initialize *);
extern int qdio_allocate(struct qdio_initialize *init_data); extern int qdio_allocate(struct qdio_initialize *);
extern int qdio_establish(struct qdio_initialize *init_data); extern int qdio_establish(struct qdio_initialize *);
extern int qdio_activate(struct ccw_device *); extern int qdio_activate(struct ccw_device *);
extern int do_QDIO(struct ccw_device*, unsigned int flags, extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
int q_nr, int qidx, int count); int q_nr, int bufnr, int count);
extern int qdio_cleanup(struct ccw_device*, int how); extern int qdio_cleanup(struct ccw_device*, int);
extern int qdio_shutdown(struct ccw_device*, int how); extern int qdio_shutdown(struct ccw_device*, int);
extern int qdio_free(struct ccw_device *); extern int qdio_free(struct ccw_device *);
extern struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev); extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*);
#endif /* __QDIO_H__ */ #endif /* __QDIO_H__ */
...@@ -61,6 +61,7 @@ typedef enum ...@@ -61,6 +61,7 @@ typedef enum
{ {
ec_schedule=0, ec_schedule=0,
ec_call_function, ec_call_function,
ec_call_function_single,
ec_bit_last ec_bit_last
} ec_bit_sig; } ec_bit_sig;
......
...@@ -91,8 +91,9 @@ extern int __cpu_up (unsigned int cpu); ...@@ -91,8 +91,9 @@ extern int __cpu_up (unsigned int cpu);
extern struct mutex smp_cpu_state_mutex; extern struct mutex smp_cpu_state_mutex;
extern int smp_cpu_polarization[]; extern int smp_cpu_polarization[];
extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), extern void arch_send_call_function_single_ipi(int cpu);
void *info, int wait); extern void arch_send_call_function_ipi(cpumask_t mask);
#endif #endif
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
......
...@@ -118,4 +118,15 @@ static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) ...@@ -118,4 +118,15 @@ static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
return r0; return r0;
} }
/*
* Service level reporting interface.
*/
struct service_level {
struct list_head list;
void (*seq_print)(struct seq_file *, struct service_level *);
};
int register_service_level(struct service_level *);
int unregister_service_level(struct service_level *);
#endif /* __ASM_S390_SYSINFO_H */ #endif /* __ASM_S390_SYSINFO_H */
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#define __ASM_SYSTEM_H #define __ASM_SYSTEM_H
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h> #include <asm/types.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/setup.h> #include <asm/setup.h>
...@@ -98,13 +99,9 @@ static inline void restore_access_regs(unsigned int *acrs) ...@@ -98,13 +99,9 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \ prev = __switch_to(prev,next); \
} while (0) } while (0)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_vtime(struct task_struct *); extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *); extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *); extern void account_system_vtime(struct task_struct *);
#else
#define account_vtime(x) do { /* empty */ } while (0)
#endif
#ifdef CONFIG_PFAULT #ifdef CONFIG_PFAULT
extern void pfault_irq_init(void); extern void pfault_irq_init(void);
...@@ -413,8 +410,6 @@ __set_psw_mask(unsigned long mask) ...@@ -413,8 +410,6 @@ __set_psw_mask(unsigned long mask)
#define local_mcck_enable() __set_psw_mask(psw_kernel_bits) #define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
int stfle(unsigned long long *list, int doublewords);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
extern void smp_ctl_set_bit(int cr, int bit); extern void smp_ctl_set_bit(int cr, int bit);
...@@ -438,6 +433,23 @@ static inline unsigned int stfl(void) ...@@ -438,6 +433,23 @@ static inline unsigned int stfl(void)
return S390_lowcore.stfl_fac_list; return S390_lowcore.stfl_fac_list;
} }
static inline int __stfle(unsigned long long *list, int doublewords)
{
typedef struct { unsigned long long _[doublewords]; } addrtype;
register unsigned long __nr asm("0") = doublewords - 1;
asm volatile(".insn s,0xb2b00000,%0" /* stfle */
: "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
return __nr + 1;
}
static inline int stfle(unsigned long long *list, int doublewords)
{
if (!(stfl() & (1UL << 24)))
return -EOPNOTSUPP;
return __stfle(list, doublewords);
}
static inline unsigned short stap(void) static inline unsigned short stap(void)
{ {
unsigned short cpu_address; unsigned short cpu_address;
......
...@@ -48,18 +48,9 @@ extern int del_virt_timer(struct vtimer_list *timer); ...@@ -48,18 +48,9 @@ extern int del_virt_timer(struct vtimer_list *timer);
extern void init_cpu_vtimer(void); extern void init_cpu_vtimer(void);
extern void vtime_init(void); extern void vtime_init(void);
#ifdef CONFIG_VIRT_TIMER
extern void vtime_start_cpu_timer(void); extern void vtime_start_cpu_timer(void);
extern void vtime_stop_cpu_timer(void); extern void vtime_stop_cpu_timer(void);
#else
static inline void vtime_start_cpu_timer(void) { }
static inline void vtime_stop_cpu_timer(void) { }
#endif /* CONFIG_VIRT_TIMER */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_S390_TIMER_H */ #endif /* _ASM_S390_TIMER_H */
#ifndef __S390_VDSO_H__
#define __S390_VDSO_H__
#ifdef __KERNEL__
/* Default link addresses for the vDSOs */
#define VDSO32_LBASE 0
#define VDSO64_LBASE 0
#define VDSO_VERSION_STRING LINUX_2.6.26
#ifndef __ASSEMBLY__
/*
* Note about this structure:
*
* NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this
* structure is supposed to be known only to the function in the vdso
* itself and may change without notice.
*/
/*
 * Kernel/vdso shared time data. The hex values in the trailing comments
 * are the byte offsets of each field; they are exported to assembler via
 * asm-offsets (__VDSO_* constants) and read by the vdso fast paths, so
 * the field order and sizes must not change without updating both sides.
 */
struct vdso_data {
__u64 tb_update_count; /* Timebase atomicity ctr 0x00 */
__u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */
__u64 xtime_clock_sec; /* Kernel time 0x10 */
__u64 xtime_clock_nsec; /* 0x18 */
__u64 wtom_clock_sec; /* Wall to monotonic clock 0x20 */
__u64 wtom_clock_nsec; /* 0x28 */
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */
};
extern struct vdso_data *vdso_data;
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __S390_VDSO_H__ */
...@@ -2,6 +2,11 @@ ...@@ -2,6 +2,11 @@
# Makefile for the linux kernel. # Makefile for the linux kernel.
# #
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_early.o = -pg
endif
# #
# Passing null pointers is ok for smp code, since we access the lowcore here. # Passing null pointers is ok for smp code, since we access the lowcore here.
# #
...@@ -12,9 +17,10 @@ CFLAGS_smp.o := -Wno-nonnull ...@@ -12,9 +17,10 @@ CFLAGS_smp.o := -Wno-nonnull
# #
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y := bitmap.o traps.o time.o process.o base.o early.o \ obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
vdso.o vtime.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
...@@ -30,12 +36,16 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ ...@@ -30,12 +36,16 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
compat_wrapper.o compat_exec_domain.o \ compat_wrapper.o compat_exec_domain.o \
$(compat-obj-y) $(compat-obj-y)
obj-$(CONFIG_VIRT_TIMER) += vtime.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
# Kexec part # Kexec part
S390_KEXEC_OBJS := machine_kexec.o crash.o S390_KEXEC_OBJS := machine_kexec.o crash.o
S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
# vdso
obj-$(CONFIG_64BIT) += vdso64/
obj-$(CONFIG_32BIT) += vdso32/
obj-$(CONFIG_COMPAT) += vdso32/
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/kbuild.h> #include <linux/kbuild.h>
#include <asm/vdso.h>
int main(void) int main(void)
{ {
...@@ -38,5 +39,19 @@ int main(void) ...@@ -38,5 +39,19 @@ int main(void)
DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs));
DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1));
BLANK();
/* timeval/timezone offsets for use by vdso */
DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count));
DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
/* constants used by the vdso */
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
return 0; return 0;
} }
...@@ -7,6 +7,9 @@ ...@@ -7,6 +7,9 @@
* Christian Borntraeger (cborntra@de.ibm.com), * Christian Borntraeger (cborntra@de.ibm.com),
*/ */
#define KMSG_COMPONENT "cpcmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -104,8 +107,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) ...@@ -104,8 +107,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
(((unsigned long)response + rlen) >> 31)) { (((unsigned long)response + rlen) >> 31)) {
lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
if (!lowbuf) { if (!lowbuf) {
printk(KERN_WARNING pr_warning("The cpcmd kernel function failed to "
"cpcmd: could not allocate response buffer\n"); "allocate a response buffer\n");
return -ENOMEM; return -ENOMEM;
} }
spin_lock_irqsave(&cpcmd_lock, flags); spin_lock_irqsave(&cpcmd_lock, flags);
......
...@@ -10,6 +10,9 @@ ...@@ -10,6 +10,9 @@
* Bugreports to: <Linux390@de.ibm.com> * Bugreports to: <Linux390@de.ibm.com>
*/ */
#define KMSG_COMPONENT "s390dbf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h> #include <linux/errno.h>
...@@ -388,7 +391,7 @@ debug_info_copy(debug_info_t* in, int mode) ...@@ -388,7 +391,7 @@ debug_info_copy(debug_info_t* in, int mode)
debug_info_free(rc); debug_info_free(rc);
} while (1); } while (1);
if(!rc || (mode == NO_AREAS)) if (mode == NO_AREAS)
goto out; goto out;
for(i = 0; i < in->nr_areas; i++){ for(i = 0; i < in->nr_areas; i++){
...@@ -693,8 +696,8 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, ...@@ -693,8 +696,8 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
/* Since debugfs currently does not support uid/gid other than root, */ /* Since debugfs currently does not support uid/gid other than root, */
/* we do not allow gid/uid != 0 until we get support for that. */ /* we do not allow gid/uid != 0 until we get support for that. */
if ((uid != 0) || (gid != 0)) if ((uid != 0) || (gid != 0))
printk(KERN_WARNING "debug: Warning - Currently only uid/gid " pr_warning("Root becomes the owner of all s390dbf files "
"= 0 are supported. Using root as owner now!"); "in sysfs\n");
if (!initialized) if (!initialized)
BUG(); BUG();
mutex_lock(&debug_mutex); mutex_lock(&debug_mutex);
...@@ -709,7 +712,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, ...@@ -709,7 +712,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
debug_register_view(rc, &debug_pages_view); debug_register_view(rc, &debug_pages_view);
out: out:
if (!rc){ if (!rc){
printk(KERN_ERR "debug: debug_register failed for %s\n",name); pr_err("Registering debug feature %s failed\n", name);
} }
mutex_unlock(&debug_mutex); mutex_unlock(&debug_mutex);
return rc; return rc;
...@@ -763,8 +766,8 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area) ...@@ -763,8 +766,8 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
if(pages_per_area > 0){ if(pages_per_area > 0){
new_areas = debug_areas_alloc(pages_per_area, nr_areas); new_areas = debug_areas_alloc(pages_per_area, nr_areas);
if(!new_areas) { if(!new_areas) {
printk(KERN_WARNING "debug: could not allocate memory "\ pr_info("Allocating memory for %i pages failed\n",
"for pagenumber: %i\n",pages_per_area); pages_per_area);
rc = -ENOMEM; rc = -ENOMEM;
goto out; goto out;
} }
...@@ -780,8 +783,7 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area) ...@@ -780,8 +783,7 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
memset(id->active_entries,0,sizeof(int)*id->nr_areas); memset(id->active_entries,0,sizeof(int)*id->nr_areas);
memset(id->active_pages, 0, sizeof(int)*id->nr_areas); memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
spin_unlock_irqrestore(&id->lock,flags); spin_unlock_irqrestore(&id->lock,flags);
printk(KERN_INFO "debug: %s: set new size (%i pages)\n"\ pr_info("%s: set new size (%i pages)\n" ,id->name, pages_per_area);
,id->name, pages_per_area);
out: out:
return rc; return rc;
} }
...@@ -800,10 +802,9 @@ debug_set_level(debug_info_t* id, int new_level) ...@@ -800,10 +802,9 @@ debug_set_level(debug_info_t* id, int new_level)
spin_lock_irqsave(&id->lock,flags); spin_lock_irqsave(&id->lock,flags);
if(new_level == DEBUG_OFF_LEVEL){ if(new_level == DEBUG_OFF_LEVEL){
id->level = DEBUG_OFF_LEVEL; id->level = DEBUG_OFF_LEVEL;
printk(KERN_INFO "debug: %s: switched off\n",id->name); pr_info("%s: switched off\n",id->name);
} else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) { } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
printk(KERN_INFO pr_info("%s: level %i is out of range (%i - %i)\n",
"debug: %s: level %i is out of range (%i - %i)\n",
id->name, new_level, 0, DEBUG_MAX_LEVEL); id->name, new_level, 0, DEBUG_MAX_LEVEL);
} else { } else {
id->level = new_level; id->level = new_level;
...@@ -1108,8 +1109,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view) ...@@ -1108,8 +1109,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
id , &debug_file_ops); id , &debug_file_ops);
if (!pde){ if (!pde){
printk(KERN_WARNING "debug: debugfs_create_file() failed!"\ pr_err("Registering view %s/%s failed due to out of "
" Cannot register view %s/%s\n", id->name,view->name); "memory\n", id->name,view->name);
rc = -1; rc = -1;
goto out; goto out;
} }
...@@ -1119,10 +1120,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view) ...@@ -1119,10 +1120,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
break; break;
} }
if (i == DEBUG_MAX_VIEWS) { if (i == DEBUG_MAX_VIEWS) {
printk(KERN_WARNING "debug: cannot register view %s/%s\n", pr_err("Registering view %s/%s would exceed the maximum "
id->name,view->name); "number of views %i\n", id->name, view->name, i);
printk(KERN_WARNING
"debug: maximum number of views reached (%i)!\n", i);
debugfs_remove(pde); debugfs_remove(pde);
rc = -1; rc = -1;
} else { } else {
...@@ -1303,7 +1302,8 @@ debug_input_level_fn(debug_info_t * id, struct debug_view *view, ...@@ -1303,7 +1302,8 @@ debug_input_level_fn(debug_info_t * id, struct debug_view *view,
new_level = debug_get_uint(str); new_level = debug_get_uint(str);
} }
if(new_level < 0) { if(new_level < 0) {
printk(KERN_INFO "debug: level `%s` is not valid\n", str); pr_warning("%s is not a valid level for a debug "
"feature\n", str);
rc = -EINVAL; rc = -EINVAL;
} else { } else {
debug_set_level(id, new_level); debug_set_level(id, new_level);
...@@ -1380,7 +1380,8 @@ debug_input_flush_fn(debug_info_t * id, struct debug_view *view, ...@@ -1380,7 +1380,8 @@ debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
goto out; goto out;
} }
printk(KERN_INFO "debug: area `%c` is not valid\n", input_buf[0]); pr_info("Flushing debug data failed because %c is not a valid "
"area\n", input_buf[0]);
out: out:
*offset += user_len; *offset += user_len;
......
...@@ -109,13 +109,6 @@ STACK_SIZE = 1 << STACK_SHIFT ...@@ -109,13 +109,6 @@ STACK_SIZE = 1 << STACK_SHIFT
* R15 - kernel stack pointer * R15 - kernel stack pointer
*/ */
.macro STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
stpt \lc_offset
#endif
.endm
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.macro UPDATE_VTIME lc_from,lc_to,lc_sum .macro UPDATE_VTIME lc_from,lc_to,lc_sum
lm %r10,%r11,\lc_from lm %r10,%r11,\lc_from
sl %r10,\lc_to sl %r10,\lc_to
...@@ -128,7 +121,6 @@ STACK_SIZE = 1 << STACK_SHIFT ...@@ -128,7 +121,6 @@ STACK_SIZE = 1 << STACK_SHIFT
al %r10,BASED(.Lc_1) al %r10,BASED(.Lc_1)
1: stm %r10,%r11,\lc_sum 1: stm %r10,%r11,\lc_sum
.endm .endm
#endif
.macro SAVE_ALL_BASE savearea .macro SAVE_ALL_BASE savearea
stm %r12,%r15,\savearea stm %r12,%r15,\savearea
...@@ -198,7 +190,7 @@ STACK_SIZE = 1 << STACK_SHIFT ...@@ -198,7 +190,7 @@ STACK_SIZE = 1 << STACK_SHIFT
ni \psworg+1,0xfd # clear wait state bit ni \psworg+1,0xfd # clear wait state bit
.endif .endif
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
STORE_TIMER __LC_EXIT_TIMER stpt __LC_EXIT_TIMER
lpsw \psworg # back to caller lpsw \psworg # back to caller
.endm .endm
...@@ -247,20 +239,18 @@ __critical_start: ...@@ -247,20 +239,18 @@ __critical_start:
.globl system_call .globl system_call
system_call: system_call:
STORE_TIMER __LC_SYNC_ENTER_TIMER stpt __LC_SYNC_ENTER_TIMER
sysc_saveall: sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
lh %r7,0x8a # get svc number from lowcore lh %r7,0x8a # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime: sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime: sysc_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update: sysc_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
sysc_do_svc: sysc_do_svc:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
ltr %r7,%r7 # test for svc 0 ltr %r7,%r7 # test for svc 0
...@@ -436,7 +426,7 @@ ret_from_fork: ...@@ -436,7 +426,7 @@ ret_from_fork:
basr %r14,%r1 basr %r14,%r1
TRACE_IRQS_ON TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
b BASED(sysc_return) b BASED(sysc_tracenogo)
# #
# kernel_execve function needs to deal with pt_regs that is not # kernel_execve function needs to deal with pt_regs that is not
...@@ -490,20 +480,18 @@ pgm_check_handler: ...@@ -490,20 +480,18 @@ pgm_check_handler:
* we just ignore the PER event (FIXME: is there anything we have to do * we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?). * for LPSW?).
*/ */
STORE_TIMER __LC_SYNC_ENTER_TIMER stpt __LC_SYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
bnz BASED(pgm_per) # got per exception -> special case bnz BASED(pgm_per) # got per exception -> special case
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(pgm_no_vtime) bz BASED(pgm_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime: pgm_no_vtime:
#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF TRACE_IRQS_OFF
l %r3,__LC_PGM_ILC # load program interruption code l %r3,__LC_PGM_ILC # load program interruption code
...@@ -536,14 +524,12 @@ pgm_per: ...@@ -536,14 +524,12 @@ pgm_per:
pgm_per_std: pgm_per_std:
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(pgm_no_vtime2) bz BASED(pgm_no_vtime2)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2: pgm_no_vtime2:
#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF TRACE_IRQS_OFF
l %r1,__TI_task(%r9) l %r1,__TI_task(%r9)
...@@ -565,11 +551,9 @@ pgm_no_vtime2: ...@@ -565,11 +551,9 @@ pgm_no_vtime2:
pgm_svcper: pgm_svcper:
SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
lh %r7,0x8a # get svc number from lowcore lh %r7,0x8a # get svc number from lowcore
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF TRACE_IRQS_OFF
...@@ -599,19 +583,17 @@ kernel_per: ...@@ -599,19 +583,17 @@ kernel_per:
.globl io_int_handler .globl io_int_handler
io_int_handler: io_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+16 SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(io_no_vtime) bz BASED(io_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime: io_no_vtime:
#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF TRACE_IRQS_OFF
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
...@@ -741,19 +723,17 @@ io_notify_resume: ...@@ -741,19 +723,17 @@ io_notify_resume:
.globl ext_int_handler .globl ext_int_handler
ext_int_handler: ext_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+16 SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(ext_no_vtime) bz BASED(ext_no_vtime)
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime: ext_no_vtime:
#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area la %r2,SP_PTREGS(%r15) # address of register-save area
...@@ -776,7 +756,6 @@ mcck_int_handler: ...@@ -776,7 +756,6 @@ mcck_int_handler:
la %r12,__LC_MCK_OLD_PSW la %r12,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage? tm __LC_MCCK_CODE,0x80 # system damage?
bo BASED(mcck_int_main) # yes -> rest of mcck code invalid bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
...@@ -793,9 +772,7 @@ mcck_int_handler: ...@@ -793,9 +772,7 @@ mcck_int_handler:
la %r14,__LC_LAST_UPDATE_TIMER la %r14,__LC_LAST_UPDATE_TIMER
0: spt 0(%r14) 0: spt 0(%r14)
mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
1: 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
#endif
tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
bno BASED(mcck_int_main) # no -> skip cleanup critical bno BASED(mcck_int_main) # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
bnz BASED(mcck_int_main) # from user -> load async stack bnz BASED(mcck_int_main) # from user -> load async stack
...@@ -812,7 +789,6 @@ mcck_int_main: ...@@ -812,7 +789,6 @@ mcck_int_main:
be BASED(0f) be BASED(0f)
l %r15,__LC_PANIC_STACK # load panic stack l %r15,__LC_PANIC_STACK # load panic stack
0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
bno BASED(mcck_no_vtime) # no -> skip cleanup critical bno BASED(mcck_no_vtime) # no -> skip cleanup critical
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
...@@ -821,7 +797,6 @@ mcck_int_main: ...@@ -821,7 +797,6 @@ mcck_int_main:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime: mcck_no_vtime:
#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # load pt_regs la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ls390_mcck) l %r1,BASED(.Ls390_mcck)
...@@ -843,16 +818,13 @@ mcck_no_vtime: ...@@ -843,16 +818,13 @@ mcck_no_vtime:
mcck_return: mcck_return:
mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
bno BASED(0f) bno BASED(0f)
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
stpt __LC_EXIT_TIMER stpt __LC_EXIT_TIMER
lpsw __LC_RETURN_MCCK_PSW # back to caller lpsw __LC_RETURN_MCCK_PSW # back to caller
0: 0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
#endif
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
lpsw __LC_RETURN_MCCK_PSW # back to caller lpsw __LC_RETURN_MCCK_PSW # back to caller
RESTORE_ALL __LC_RETURN_MCCK_PSW,0 RESTORE_ALL __LC_RETURN_MCCK_PSW,0
...@@ -976,13 +948,11 @@ cleanup_system_call: ...@@ -976,13 +948,11 @@ cleanup_system_call:
b BASED(1f) b BASED(1f)
0: la %r12,__LC_SAVE_AREA+32 0: la %r12,__LC_SAVE_AREA+32
1: 1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
bh BASED(0f) bh BASED(0f)
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
bhe BASED(cleanup_vtime) bhe BASED(cleanup_vtime)
#endif
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
bh BASED(0f) bh BASED(0f)
mvc __LC_SAVE_AREA(16),0(%r12) mvc __LC_SAVE_AREA(16),0(%r12)
...@@ -993,7 +963,6 @@ cleanup_system_call: ...@@ -993,7 +963,6 @@ cleanup_system_call:
l %r12,__LC_SAVE_AREA+48 # argh l %r12,__LC_SAVE_AREA+48 # argh
st %r15,12(%r12) st %r15,12(%r12)
lh %r7,0x8a lh %r7,0x8a
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime: cleanup_vtime:
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
bhe BASED(cleanup_stime) bhe BASED(cleanup_stime)
...@@ -1004,18 +973,15 @@ cleanup_stime: ...@@ -1004,18 +973,15 @@ cleanup_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update: cleanup_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
la %r12,__LC_RETURN_PSW la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_system_call_insn: cleanup_system_call_insn:
.long sysc_saveall + 0x80000000 .long sysc_saveall + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.long system_call + 0x80000000 .long system_call + 0x80000000
.long sysc_vtime + 0x80000000 .long sysc_vtime + 0x80000000
.long sysc_stime + 0x80000000 .long sysc_stime + 0x80000000
.long sysc_update + 0x80000000 .long sysc_update + 0x80000000
#endif
cleanup_sysc_return: cleanup_sysc_return:
mvc __LC_RETURN_PSW(4),0(%r12) mvc __LC_RETURN_PSW(4),0(%r12)
...@@ -1026,11 +992,9 @@ cleanup_sysc_return: ...@@ -1026,11 +992,9 @@ cleanup_sysc_return:
cleanup_sysc_leave: cleanup_sysc_leave:
clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
be BASED(2f) be BASED(2f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
be BASED(2f) be BASED(2f)
#endif
mvc __LC_RETURN_PSW(8),SP_PSW(%r15) mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
c %r12,BASED(.Lmck_old_psw) c %r12,BASED(.Lmck_old_psw)
bne BASED(0f) bne BASED(0f)
...@@ -1043,9 +1007,7 @@ cleanup_sysc_leave: ...@@ -1043,9 +1007,7 @@ cleanup_sysc_leave:
br %r14 br %r14
cleanup_sysc_leave_insn: cleanup_sysc_leave_insn:
.long sysc_done - 4 + 0x80000000 .long sysc_done - 4 + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.long sysc_done - 8 + 0x80000000 .long sysc_done - 8 + 0x80000000
#endif
cleanup_io_return: cleanup_io_return:
mvc __LC_RETURN_PSW(4),0(%r12) mvc __LC_RETURN_PSW(4),0(%r12)
...@@ -1056,11 +1018,9 @@ cleanup_io_return: ...@@ -1056,11 +1018,9 @@ cleanup_io_return:
cleanup_io_leave: cleanup_io_leave:
clc 4(4,%r12),BASED(cleanup_io_leave_insn) clc 4(4,%r12),BASED(cleanup_io_leave_insn)
be BASED(2f) be BASED(2f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) clc 4(4,%r12),BASED(cleanup_io_leave_insn+4)
be BASED(2f) be BASED(2f)
#endif
mvc __LC_RETURN_PSW(8),SP_PSW(%r15) mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
c %r12,BASED(.Lmck_old_psw) c %r12,BASED(.Lmck_old_psw)
bne BASED(0f) bne BASED(0f)
...@@ -1073,9 +1033,7 @@ cleanup_io_leave: ...@@ -1073,9 +1033,7 @@ cleanup_io_leave:
br %r14 br %r14
cleanup_io_leave_insn: cleanup_io_leave_insn:
.long io_done - 4 + 0x80000000 .long io_done - 4 + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.long io_done - 8 + 0x80000000 .long io_done - 8 + 0x80000000
#endif
/* /*
* Integer constants * Integer constants
......
...@@ -96,20 +96,12 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ ...@@ -96,20 +96,12 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
#define LOCKDEP_SYS_EXIT #define LOCKDEP_SYS_EXIT
#endif #endif
.macro STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
stpt \lc_offset
#endif
.endm
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.macro UPDATE_VTIME lc_from,lc_to,lc_sum .macro UPDATE_VTIME lc_from,lc_to,lc_sum
lg %r10,\lc_from lg %r10,\lc_from
slg %r10,\lc_to slg %r10,\lc_to
alg %r10,\lc_sum alg %r10,\lc_sum
stg %r10,\lc_sum stg %r10,\lc_sum
.endm .endm
#endif
/* /*
* Register usage in interrupt handlers: * Register usage in interrupt handlers:
...@@ -186,7 +178,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ ...@@ -186,7 +178,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
ni \psworg+1,0xfd # clear wait state bit ni \psworg+1,0xfd # clear wait state bit
.endif .endif
lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
STORE_TIMER __LC_EXIT_TIMER stpt __LC_EXIT_TIMER
lpswe \psworg # back to caller lpswe \psworg # back to caller
.endm .endm
...@@ -233,20 +225,18 @@ __critical_start: ...@@ -233,20 +225,18 @@ __critical_start:
.globl system_call .globl system_call
system_call: system_call:
STORE_TIMER __LC_SYNC_ENTER_TIMER stpt __LC_SYNC_ENTER_TIMER
sysc_saveall: sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime: sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime: sysc_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update: sysc_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
sysc_do_svc: sysc_do_svc:
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
ltgr %r7,%r7 # test for svc 0 ltgr %r7,%r7 # test for svc 0
...@@ -417,7 +407,7 @@ ret_from_fork: ...@@ -417,7 +407,7 @@ ret_from_fork:
0: brasl %r14,schedule_tail 0: brasl %r14,schedule_tail
TRACE_IRQS_ON TRACE_IRQS_ON
stosm 24(%r15),0x03 # reenable interrupts stosm 24(%r15),0x03 # reenable interrupts
j sysc_return j sysc_tracenogo
# #
# kernel_execve function needs to deal with pt_regs that is not # kernel_execve function needs to deal with pt_regs that is not
...@@ -469,20 +459,18 @@ pgm_check_handler: ...@@ -469,20 +459,18 @@ pgm_check_handler:
* we just ignore the PER event (FIXME: is there anything we have to do * we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?). * for LPSW?).
*/ */
STORE_TIMER __LC_SYNC_ENTER_TIMER stpt __LC_SYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
jnz pgm_per # got per exception -> special case jnz pgm_per # got per exception -> special case
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime jz pgm_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime: pgm_no_vtime:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
mvc SP_ARGS(8,%r15),__LC_LAST_BREAK mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
TRACE_IRQS_OFF TRACE_IRQS_OFF
...@@ -516,14 +504,12 @@ pgm_per: ...@@ -516,14 +504,12 @@ pgm_per:
pgm_per_std: pgm_per_std:
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime2 jz pgm_no_vtime2
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2: pgm_no_vtime2:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF TRACE_IRQS_OFF
lg %r1,__TI_task(%r9) lg %r1,__TI_task(%r9)
...@@ -545,11 +531,9 @@ pgm_no_vtime2: ...@@ -545,11 +531,9 @@ pgm_no_vtime2:
pgm_svcper: pgm_svcper:
SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lg %r1,__TI_task(%r9) lg %r1,__TI_task(%r9)
...@@ -575,19 +559,17 @@ kernel_per: ...@@ -575,19 +559,17 @@ kernel_per:
*/ */
.globl io_int_handler .globl io_int_handler
io_int_handler: io_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+32 SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz io_no_vtime jz io_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime: io_no_vtime:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area la %r2,SP_PTREGS(%r15) # address of register-save area
...@@ -739,19 +721,17 @@ io_notify_resume: ...@@ -739,19 +721,17 @@ io_notify_resume:
*/ */
.globl ext_int_handler .globl ext_int_handler
ext_int_handler: ext_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK stck __LC_INT_CLOCK
SAVE_ALL_BASE __LC_SAVE_AREA+32 SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz ext_no_vtime jz ext_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime: ext_no_vtime:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area la %r2,SP_PTREGS(%r15) # address of register-save area
...@@ -773,7 +753,6 @@ mcck_int_handler: ...@@ -773,7 +753,6 @@ mcck_int_handler:
la %r12,__LC_MCK_OLD_PSW la %r12,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage? tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_int_main # yes -> rest of mcck code invalid jo mcck_int_main # yes -> rest of mcck code invalid
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
la %r14,4095 la %r14,4095
mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
...@@ -791,9 +770,7 @@ mcck_int_handler: ...@@ -791,9 +770,7 @@ mcck_int_handler:
la %r14,__LC_LAST_UPDATE_TIMER la %r14,__LC_LAST_UPDATE_TIMER
0: spt 0(%r14) 0: spt 0(%r14)
mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
1: 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
#endif
tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno mcck_int_main # no -> skip cleanup critical jno mcck_int_main # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
jnz mcck_int_main # from user -> load kernel stack jnz mcck_int_main # from user -> load kernel stack
...@@ -809,7 +786,6 @@ mcck_int_main: ...@@ -809,7 +786,6 @@ mcck_int_main:
jz 0f jz 0f
lg %r15,__LC_PANIC_STACK # load panic stack lg %r15,__LC_PANIC_STACK # load panic stack
0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
jno mcck_no_vtime # no -> no timer update jno mcck_no_vtime # no -> no timer update
tm SP_PSW+1(%r15),0x01 # interrupting from user ? tm SP_PSW+1(%r15),0x01 # interrupting from user ?
...@@ -818,7 +794,6 @@ mcck_int_main: ...@@ -818,7 +794,6 @@ mcck_int_main:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime: mcck_no_vtime:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # load pt_regs la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,s390_do_machine_check brasl %r14,s390_do_machine_check
...@@ -839,14 +814,11 @@ mcck_return: ...@@ -839,14 +814,11 @@ mcck_return:
mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f jno 0f
stpt __LC_EXIT_TIMER stpt __LC_EXIT_TIMER
0: 0: lpswe __LC_RETURN_MCCK_PSW # back to caller
#endif
lpswe __LC_RETURN_MCCK_PSW # back to caller
/* /*
* Restart interruption handler, kick starter for additional CPUs * Restart interruption handler, kick starter for additional CPUs
...@@ -964,13 +936,11 @@ cleanup_system_call: ...@@ -964,13 +936,11 @@ cleanup_system_call:
j 1f j 1f
0: la %r12,__LC_SAVE_AREA+64 0: la %r12,__LC_SAVE_AREA+64
1: 1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
jh 0f jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
jhe cleanup_vtime jhe cleanup_vtime
#endif
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
jh 0f jh 0f
mvc __LC_SAVE_AREA(32),0(%r12) mvc __LC_SAVE_AREA(32),0(%r12)
...@@ -981,7 +951,6 @@ cleanup_system_call: ...@@ -981,7 +951,6 @@ cleanup_system_call:
lg %r12,__LC_SAVE_AREA+96 # argh lg %r12,__LC_SAVE_AREA+96 # argh
stg %r15,24(%r12) stg %r15,24(%r12)
llgh %r7,__LC_SVC_INT_CODE llgh %r7,__LC_SVC_INT_CODE
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime: cleanup_vtime:
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
jhe cleanup_stime jhe cleanup_stime
...@@ -992,18 +961,15 @@ cleanup_stime: ...@@ -992,18 +961,15 @@ cleanup_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update: cleanup_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
la %r12,__LC_RETURN_PSW la %r12,__LC_RETURN_PSW
br %r14 br %r14
cleanup_system_call_insn: cleanup_system_call_insn:
.quad sysc_saveall .quad sysc_saveall
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.quad system_call .quad system_call
.quad sysc_vtime .quad sysc_vtime
.quad sysc_stime .quad sysc_stime
.quad sysc_update .quad sysc_update
#endif
cleanup_sysc_return: cleanup_sysc_return:
mvc __LC_RETURN_PSW(8),0(%r12) mvc __LC_RETURN_PSW(8),0(%r12)
...@@ -1014,11 +980,9 @@ cleanup_sysc_return: ...@@ -1014,11 +980,9 @@ cleanup_sysc_return:
cleanup_sysc_leave: cleanup_sysc_leave:
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
je 2f je 2f
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
je 2f je 2f
#endif
mvc __LC_RETURN_PSW(16),SP_PSW(%r15) mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW cghi %r12,__LC_MCK_OLD_PSW
jne 0f jne 0f
...@@ -1031,9 +995,7 @@ cleanup_sysc_leave: ...@@ -1031,9 +995,7 @@ cleanup_sysc_leave:
br %r14 br %r14
cleanup_sysc_leave_insn: cleanup_sysc_leave_insn:
.quad sysc_done - 4 .quad sysc_done - 4
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.quad sysc_done - 8 .quad sysc_done - 8
#endif
cleanup_io_return: cleanup_io_return:
mvc __LC_RETURN_PSW(8),0(%r12) mvc __LC_RETURN_PSW(8),0(%r12)
...@@ -1044,11 +1006,9 @@ cleanup_io_return: ...@@ -1044,11 +1006,9 @@ cleanup_io_return:
cleanup_io_leave: cleanup_io_leave:
clc 8(8,%r12),BASED(cleanup_io_leave_insn) clc 8(8,%r12),BASED(cleanup_io_leave_insn)
je 2f je 2f
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) clc 8(8,%r12),BASED(cleanup_io_leave_insn+8)
je 2f je 2f
#endif
mvc __LC_RETURN_PSW(16),SP_PSW(%r15) mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW cghi %r12,__LC_MCK_OLD_PSW
jne 0f jne 0f
...@@ -1061,9 +1021,7 @@ cleanup_io_leave: ...@@ -1061,9 +1021,7 @@ cleanup_io_leave:
br %r14 br %r14
cleanup_io_leave_insn: cleanup_io_leave_insn:
.quad io_done - 4 .quad io_done - 4
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.quad io_done - 8 .quad io_done - 8
#endif
/* /*
* Integer constants * Integer constants
......
...@@ -461,6 +461,55 @@ start: ...@@ -461,6 +461,55 @@ start:
.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
#
# startup-code at 0x10000, running in absolute addressing mode
# this is called either by the ipl loader or directly by PSW restart
# or linload or SALIPL
#
	.org 0x10000
startup:basr %r13,0 # get base
.LPG0:
#ifndef CONFIG_MARCH_G5
# check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10}
# The table at 2: lists every machine type that is OLDER than the
# configured march level. If the running machine appears in the table
# the kernel cannot execute here, so we load a disabled-wait PSW.
	stidp __LC_CPUID # store cpuid
	lhi %r0,(3f-2f) / 2 # %r0 = number of 2-byte entries in table at 2:
	la %r1,2f-.LPG0(%r13) # %r1 -> first table entry
0: clc __LC_CPUID+4(2),0(%r1) # machine type equals a too-old model?
	jne 3f # no -> try next table entry
	lpsw 1f-.LPG0(13) # machine type not good enough, crash
	.align 16
1: .long 0x000a0000,0x00000000 # disabled-wait PSW used above
2:
#if defined(CONFIG_MARCH_Z10)
	.short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096
#elif defined(CONFIG_MARCH_Z9_109)
	.short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086
#elif defined(CONFIG_MARCH_Z990)
	.short 0x9672, 0x2064, 0x2066
#elif defined(CONFIG_MARCH_Z900)
	.short 0x9672
#endif
3: la %r1,2(%r1) # advance to next table entry
	brct %r0,0b # loop until all entries checked
#endif
	l %r13,0f-.LPG0(%r13) # absolute address of startup_continue
	b 0(%r13) # branch to 31/64-bit specific startup_continue
0: .long startup_continue
#
# params at 10400 (setup.h)
#
	.org PARMAREA
	.long 0,0 # IPL_DEVICE
	.long 0,0 # INITRD_START
	.long 0,0 # INITRD_SIZE
	.org COMMAND_LINE
	.byte "root=/dev/ram0 ro"
	.byte 0
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#include "head64.S" #include "head64.S"
#else #else
......
...@@ -10,34 +10,13 @@ ...@@ -10,34 +10,13 @@
* *
*/ */
#
# startup-code at 0x10000, running in absolute addressing mode
# this is called either by the ipl loader or directly by PSW restart
# or linload or SALIPL
#
.org 0x10000
startup:basr %r13,0 # get base
.LPG0: l %r13,0f-.LPG0(%r13)
b 0(%r13)
0: .long startup_continue
#
# params at 10400 (setup.h)
#
.org PARMAREA
.long 0,0 # IPL_DEVICE
.long 0,0 # INITRD_START
.long 0,0 # INITRD_SIZE
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
.byte 0
.org 0x11000 .org 0x11000
startup_continue: startup_continue:
basr %r13,0 # get base basr %r13,0 # get base
.LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) .LPG1:
mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore # move IPL device to lowcore
...@@ -50,7 +29,6 @@ startup_continue: ...@@ -50,7 +29,6 @@ startup_continue:
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
st %r15,__LC_KERNEL_STACK # set end of kernel stack st %r15,__LC_KERNEL_STACK # set end of kernel stack
ahi %r15,-96 ahi %r15,-96
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
# #
# Save ipl parameters, clear bss memory, initialize storage key for kernel pages, # Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
# and create a kernel NSS if the SAVESYS= parm is defined # and create a kernel NSS if the SAVESYS= parm is defined
......
...@@ -10,29 +10,6 @@ ...@@ -10,29 +10,6 @@
* *
*/ */
#
# startup-code at 0x10000, running in absolute addressing mode
# this is called either by the ipl loader or directly by PSW restart
# or linload or SALIPL
#
.org 0x10000
startup:basr %r13,0 # get base
.LPG0: l %r13,0f-.LPG0(%r13)
b 0(%r13)
0: .long startup_continue
#
# params at 10400 (setup.h)
#
.org PARMAREA
.quad 0 # IPL_DEVICE
.quad 0 # INITRD_START
.quad 0 # INITRD_SIZE
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
.byte 0
.org 0x11000 .org 0x11000
startup_continue: startup_continue:
...@@ -119,7 +96,6 @@ startup_continue: ...@@ -119,7 +96,6 @@ startup_continue:
aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
stg %r15,__LC_KERNEL_STACK # set end of kernel stack stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160 aghi %r15,-160
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
# #
# Save ipl parameters, clear bss memory, initialize storage key for kernel pages, # Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
# and create a kernel NSS if the SAVESYS= parm is defined # and create a kernel NSS if the SAVESYS= parm is defined
......
/*
 * Copyright IBM Corp. 2008
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */
#
# _mcount: ftrace entry hook. The compiler emits a call to _mcount at
# the start of every instrumented function. We save the call-clobbered
# registers, fetch the current tracer from the ftrace_trace_function
# pointer and call it with %r2 = address inside the instrumented
# function (our return address) and %r3 = the parent return address
# taken from the stack, then restore everything and return.
# ftrace_stub is the default no-op tracer.
#
#ifndef CONFIG_64BIT
	.globl _mcount
_mcount:
	stm %r0,%r5,8(%r15) # save call-clobbered gprs %r0-%r5
	st %r14,56(%r15) # save return address into instrumented function
	lr %r1,%r15 # remember old stack pointer for the back chain
	ahi %r15,-96 # allocate a standard 31-bit stack frame
	l %r3,100(%r15) # parent return address from the old frame
	                # NOTE(review): offset assumed per gcc -p frame
	                # layout -- confirm against the 31-bit ABI
	la %r2,0(%r14) # %r2 = instrumented function ip, high bit cleared
	st %r1,0(%r15) # set back chain in the new frame
	la %r3,0(%r3) # clear high bit of parent ip (31-bit addressing)
	bras %r14,0f # %r14 -> literal pool entry below
	.long ftrace_trace_function
0: l %r14,0(%r14) # %r14 = &ftrace_trace_function
	l %r14,0(%r14) # %r14 = current tracer function
	basr %r14,%r14 # tracer(%r2 = ip, %r3 = parent ip)
	ahi %r15,96 # release the stack frame
	lm %r0,%r5,8(%r15) # restore gprs %r0-%r5
	l %r14,56(%r15) # restore return address
	br %r14
	.globl ftrace_stub
ftrace_stub:
	br %r14 # default tracer: do nothing
#else /* CONFIG_64BIT */
	.globl _mcount
_mcount:
	stmg %r0,%r5,16(%r15) # save call-clobbered gprs %r0-%r5
	stg %r14,112(%r15) # save return address into instrumented function
	lgr %r1,%r15 # remember old stack pointer for the back chain
	aghi %r15,-160 # allocate a standard 64-bit stack frame
	stg %r1,0(%r15) # set back chain in the new frame
	lgr %r2,%r14 # %r2 = instrumented function ip
	lg %r3,168(%r15) # parent return address from the old frame
	                 # NOTE(review): offset assumed per gcc -p frame
	                 # layout -- confirm against the 64-bit ABI
	larl %r14,ftrace_trace_function
	lg %r14,0(%r14) # %r14 = current tracer function
	basr %r14,%r14 # tracer(%r2 = ip, %r3 = parent ip)
	aghi %r15,160 # release the stack frame
	lmg %r0,%r5,16(%r15) # restore gprs %r0-%r5
	lg %r14,112(%r15) # restore return address
	br %r14
	.globl ftrace_stub
ftrace_stub:
	br %r14 # default tracer: do nothing
#endif /* CONFIG_64BIT */
/*
* arch/s390/kernel/processor.c
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
/*
 * Print a one-line "Processor ... started" message for a CPU that has
 * come online. __cpuinit: may be discarded after boot when CPU hotplug
 * is disabled.
 */
void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
	pr_info("Processor %d started, address %d, identification %06X\n",
		cpuinfo->cpu_nr, cpuinfo->cpu_addr, cpuinfo->cpu_id.ident);
}
/*
* show_cpuinfo - Get information on one CPU for use by procfs.
*/
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 *
 * Iterator position 0 additionally emits the machine-wide header
 * (vendor, processor count, bogomips, hwcap feature names); every
 * position n emits the identification line for cpu n-1 if that cpu is
 * online. Always returns 0 (seq_file success).
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	/* Names of the ELF hwcap bits, indexed by bit number. */
	static const char *hwcap_str[] = {
		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
		"edat"
	};
	struct cpuinfo_S390 *cpuinfo;
	unsigned long n = (unsigned long) v - 1; /* v encodes cpu_nr + 1 */
	unsigned long i;

	s390_adjust_jiffies();
	/* Keep the cpu from going away while we look at its data. */
	preempt_disable();
	if (!n) {
		seq_printf(m, "vendor_id : IBM/S390\n"
			   "# processors : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
		seq_puts(m, "features\t: ");
		/* Derive the bound from the table itself so adding a new
		 * capability name cannot leave the loop out of sync. */
		for (i = 0; i < sizeof(hwcap_str) / sizeof(hwcap_str[0]); i++)
			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
				seq_printf(m, "%s ", hwcap_str[i]);
		seq_puts(m, "\n");
	}
	if (cpu_online(n)) {
#ifdef CONFIG_SMP
		if (smp_processor_id() == n)
			cpuinfo = &S390_lowcore.cpu_data;
		else
			cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
		cpuinfo = &S390_lowcore.cpu_data;
#endif
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, cpuinfo->cpu_id.version,
			   cpuinfo->cpu_id.ident,
			   cpuinfo->cpu_id.machine);
	}
	preempt_enable();
	return 0;
}
/*
 * seq_file iterator start: map position *pos onto an opaque non-NULL
 * token (cpu number + 1), or end the sequence once past NR_CPUS.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= NR_CPUS)
		return NULL;
	return (void *)((unsigned long) *pos + 1);
}
/*
 * seq_file iterator next: advance the position and re-validate it
 * through c_start().
 */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}
/* seq_file iterator stop: nothing was acquired, nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
...@@ -204,7 +204,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) ...@@ -204,7 +204,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
static int static int
peek_user(struct task_struct *child, addr_t addr, addr_t data) peek_user(struct task_struct *child, addr_t addr, addr_t data)
{ {
struct user *dummy = NULL;
addr_t tmp, mask; addr_t tmp, mask;
/* /*
...@@ -213,8 +212,8 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) ...@@ -213,8 +212,8 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
*/ */
mask = __ADDR_MASK; mask = __ADDR_MASK;
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
if (addr >= (addr_t) &dummy->regs.acrs && if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &dummy->regs.orig_gpr2) addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3; mask = 3;
#endif #endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
...@@ -312,7 +311,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) ...@@ -312,7 +311,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
static int static int
poke_user(struct task_struct *child, addr_t addr, addr_t data) poke_user(struct task_struct *child, addr_t addr, addr_t data)
{ {
struct user *dummy = NULL;
addr_t mask; addr_t mask;
/* /*
...@@ -321,8 +319,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) ...@@ -321,8 +319,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
*/ */
mask = __ADDR_MASK; mask = __ADDR_MASK;
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
if (addr >= (addr_t) &dummy->regs.acrs && if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &dummy->regs.orig_gpr2) addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3; mask = 3;
#endif #endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/ftrace.h>
#ifdef CONFIG_IP_MULTICAST #ifdef CONFIG_IP_MULTICAST
#include <net/arp.h> #include <net/arp.h>
#endif #endif
...@@ -43,3 +44,7 @@ EXPORT_SYMBOL(csum_fold); ...@@ -43,3 +44,7 @@ EXPORT_SYMBOL(csum_fold);
EXPORT_SYMBOL(console_mode); EXPORT_SYMBOL(console_mode);
EXPORT_SYMBOL(console_devno); EXPORT_SYMBOL(console_devno);
EXPORT_SYMBOL(console_irq); EXPORT_SYMBOL(console_irq);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
...@@ -14,6 +14,9 @@ ...@@ -14,6 +14,9 @@
* This file handles the architecture-dependent parts of initialization * This file handles the architecture-dependent parts of initialization
*/ */
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -32,7 +35,6 @@ ...@@ -32,7 +35,6 @@
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/root_dev.h> #include <linux/root_dev.h>
#include <linux/console.h> #include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/notifier.h> #include <linux/notifier.h>
...@@ -291,8 +293,8 @@ unsigned int switch_amode = 0; ...@@ -291,8 +293,8 @@ unsigned int switch_amode = 0;
#endif #endif
EXPORT_SYMBOL_GPL(switch_amode); EXPORT_SYMBOL_GPL(switch_amode);
static void set_amode_and_uaccess(unsigned long user_amode, static int set_amode_and_uaccess(unsigned long user_amode,
unsigned long user32_amode) unsigned long user32_amode)
{ {
psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
...@@ -309,11 +311,11 @@ static void set_amode_and_uaccess(unsigned long user_amode, ...@@ -309,11 +311,11 @@ static void set_amode_and_uaccess(unsigned long user_amode,
PSW_MASK_MCHECK | PSW_DEFAULT_KEY; PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
if (MACHINE_HAS_MVCOS) { if (MACHINE_HAS_MVCOS) {
printk("mvcos available.\n");
memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
return 1;
} else { } else {
printk("mvcos not available.\n");
memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
return 0;
} }
} }
...@@ -328,9 +330,10 @@ static int __init early_parse_switch_amode(char *p) ...@@ -328,9 +330,10 @@ static int __init early_parse_switch_amode(char *p)
early_param("switch_amode", early_parse_switch_amode); early_param("switch_amode", early_parse_switch_amode);
#else /* CONFIG_S390_SWITCH_AMODE */ #else /* CONFIG_S390_SWITCH_AMODE */
static inline void set_amode_and_uaccess(unsigned long user_amode, static inline int set_amode_and_uaccess(unsigned long user_amode,
unsigned long user32_amode) unsigned long user32_amode)
{ {
return 0;
} }
#endif /* CONFIG_S390_SWITCH_AMODE */ #endif /* CONFIG_S390_SWITCH_AMODE */
...@@ -355,11 +358,20 @@ early_param("noexec", early_parse_noexec); ...@@ -355,11 +358,20 @@ early_param("noexec", early_parse_noexec);
static void setup_addressing_mode(void) static void setup_addressing_mode(void)
{ {
if (s390_noexec) { if (s390_noexec) {
printk("S390 execute protection active, "); if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY); PSW32_ASC_SECONDARY))
pr_info("Execute protection active, "
"mvcos available\n");
else
pr_info("Execute protection active, "
"mvcos not available\n");
} else if (switch_amode) { } else if (switch_amode) {
printk("S390 address spaces switched, "); if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY); pr_info("Address spaces switched, "
"mvcos available\n");
else
pr_info("Address spaces switched, "
"mvcos not available\n");
} }
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
...@@ -572,15 +584,15 @@ setup_memory(void) ...@@ -572,15 +584,15 @@ setup_memory(void)
start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
if (start + INITRD_SIZE > memory_end) { if (start + INITRD_SIZE > memory_end) {
printk("initrd extends beyond end of memory " pr_err("initrd extends beyond end of "
"(0x%08lx > 0x%08lx)\n" "memory (0x%08lx > 0x%08lx) "
"disabling initrd\n", "disabling initrd\n",
start + INITRD_SIZE, memory_end); start + INITRD_SIZE, memory_end);
INITRD_START = INITRD_SIZE = 0; INITRD_START = INITRD_SIZE = 0;
} else { } else {
printk("Moving initrd (0x%08lx -> 0x%08lx, " pr_info("Moving initrd (0x%08lx -> "
"size: %ld)\n", "0x%08lx, size: %ld)\n",
INITRD_START, start, INITRD_SIZE); INITRD_START, start, INITRD_SIZE);
memmove((void *) start, (void *) INITRD_START, memmove((void *) start, (void *) INITRD_START,
INITRD_SIZE); INITRD_SIZE);
INITRD_START = start; INITRD_START = start;
...@@ -642,8 +654,9 @@ setup_memory(void) ...@@ -642,8 +654,9 @@ setup_memory(void)
initrd_start = INITRD_START; initrd_start = INITRD_START;
initrd_end = initrd_start + INITRD_SIZE; initrd_end = initrd_start + INITRD_SIZE;
} else { } else {
printk("initrd extends beyond end of memory " pr_err("initrd extends beyond end of "
"(0x%08lx > 0x%08lx)\ndisabling initrd\n", "memory (0x%08lx > 0x%08lx) "
"disabling initrd\n",
initrd_start + INITRD_SIZE, memory_end); initrd_start + INITRD_SIZE, memory_end);
initrd_start = initrd_end = 0; initrd_start = initrd_end = 0;
} }
...@@ -651,23 +664,6 @@ setup_memory(void) ...@@ -651,23 +664,6 @@ setup_memory(void)
#endif #endif
} }
static int __init __stfle(unsigned long long *list, int doublewords)
{
typedef struct { unsigned long long _[doublewords]; } addrtype;
register unsigned long __nr asm("0") = doublewords - 1;
asm volatile(".insn s,0xb2b00000,%0" /* stfle */
: "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
return __nr + 1;
}
int __init stfle(unsigned long long *list, int doublewords)
{
if (!(stfl() & (1UL << 24)))
return -EOPNOTSUPP;
return __stfle(list, doublewords);
}
/* /*
* Setup hardware capabilities. * Setup hardware capabilities.
*/ */
...@@ -739,8 +735,13 @@ static void __init setup_hwcaps(void) ...@@ -739,8 +735,13 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z990"); strcpy(elf_platform, "z990");
break; break;
case 0x2094: case 0x2094:
case 0x2096:
strcpy(elf_platform, "z9-109"); strcpy(elf_platform, "z9-109");
break; break;
case 0x2097:
case 0x2098:
strcpy(elf_platform, "z10");
break;
} }
} }
...@@ -752,25 +753,34 @@ static void __init setup_hwcaps(void) ...@@ -752,25 +753,34 @@ static void __init setup_hwcaps(void)
void __init void __init
setup_arch(char **cmdline_p) setup_arch(char **cmdline_p)
{ {
/* set up preferred console */
add_preferred_console("ttyS", 0, NULL);
/* /*
* print what head.S has found out about the machine * print what head.S has found out about the machine
*/ */
#ifndef CONFIG_64BIT #ifndef CONFIG_64BIT
printk((MACHINE_IS_VM) ? if (MACHINE_IS_VM)
"We are running under VM (31 bit mode)\n" : pr_info("Linux is running as a z/VM "
"We are running native (31 bit mode)\n"); "guest operating system in 31-bit mode\n");
printk((MACHINE_HAS_IEEE) ? else
"This machine has an IEEE fpu\n" : pr_info("Linux is running natively in 31-bit mode\n");
"This machine has no IEEE fpu\n"); if (MACHINE_HAS_IEEE)
pr_info("The hardware system has IEEE compatible "
"floating point units\n");
else
pr_info("The hardware system has no IEEE compatible "
"floating point units\n");
#else /* CONFIG_64BIT */ #else /* CONFIG_64BIT */
if (MACHINE_IS_VM) if (MACHINE_IS_VM)
printk("We are running under VM (64 bit mode)\n"); pr_info("Linux is running as a z/VM "
"guest operating system in 64-bit mode\n");
else if (MACHINE_IS_KVM) { else if (MACHINE_IS_KVM) {
printk("We are running under KVM (64 bit mode)\n"); pr_info("Linux is running under KVM in 64-bit mode\n");
add_preferred_console("hvc", 0, NULL); add_preferred_console("hvc", 0, NULL);
s390_virtio_console_init(); s390_virtio_console_init();
} else } else
printk("We are running native (64 bit mode)\n"); pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
/* Have one command line that is parsed and saved in /proc/cmdline */ /* Have one command line that is parsed and saved in /proc/cmdline */
...@@ -818,90 +828,3 @@ setup_arch(char **cmdline_p) ...@@ -818,90 +828,3 @@ setup_arch(char **cmdline_p)
/* Setup zfcpdump support */ /* Setup zfcpdump support */
setup_zfcpdump(console_devno); setup_zfcpdump(console_devno);
} }
/*
 * print_cpu_info - log the identification data of one cpu.
 * @cpuinfo: per-cpu identification block to print
 *
 * Emits cpu number, version/ident/machine from the cpu id and, on SMP
 * kernels only, the physical cpu address (the format string itself is
 * assembled differently under CONFIG_SMP, hence the #ifdef inside the
 * argument list).
 */
void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
	printk(KERN_INFO "cpu %d "
#ifdef CONFIG_SMP
	       "phys_idx=%d "
#endif
	       "vers=%02X ident=%06X machine=%04X unused=%04X\n",
	       cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
	       cpuinfo->cpu_addr,
#endif
	       cpuinfo->cpu_id.version,
	       cpuinfo->cpu_id.ident,
	       cpuinfo->cpu_id.machine,
	       cpuinfo->cpu_id.unused);
}
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 *
 * Called once per /proc/cpuinfo record: v == 1 is the global header
 * (vendor, cpu count, bogomips, hwcap feature names), v == n+1 is the
 * per-cpu line for cpu n (only printed while that cpu is online).
 * Preemption is disabled so smp_processor_id() and the lowcore
 * pointers stay stable while we read them.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	/* Names for the low eight elf_hwcap bits, in bit order. */
	static const char *hwcap_str[8] = {
		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
		"edat"
	};
	struct cpuinfo_S390 *cpuinfo;
	/* c_start() handed us cpu index + 1; undo the offset. */
	unsigned long n = (unsigned long) v - 1;
	int i;

	s390_adjust_jiffies();
	preempt_disable();
	if (!n) {
		seq_printf(m, "vendor_id : IBM/S390\n"
			   "# processors : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
		seq_puts(m, "features\t: ");
		for (i = 0; i < 8; i++)
			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
				seq_printf(m, "%s ", hwcap_str[i]);
		seq_puts(m, "\n");
	}

	if (cpu_online(n)) {
#ifdef CONFIG_SMP
		/* Remote cpus are read through their lowcore pointer. */
		if (smp_processor_id() == n)
			cpuinfo = &S390_lowcore.cpu_data;
		else
			cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
		cpuinfo = &S390_lowcore.cpu_data;
#endif
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, cpuinfo->cpu_id.version,
			   cpuinfo->cpu_id.ident,
			   cpuinfo->cpu_id.machine);
	}
	preempt_enable();
	return 0;
}
/*
 * seq_file iterator start: map position N onto the opaque cookie N+1
 * so that slot 0 does not collide with the NULL end-of-sequence value.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= NR_CPUS)
		return NULL;
	return (void *)((unsigned long) *pos + 1);
}
/* seq_file iterator step: advance and re-validate through c_start(). */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos += 1;
	return c_start(m, pos);
}
/* seq_file iterator stop: nothing to release, intentionally empty. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* seq_file operations backing /proc/cpuinfo on s390. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
...@@ -20,6 +20,9 @@ ...@@ -20,6 +20,9 @@
* cpu_number_map in other architectures. * cpu_number_map in other architectures.
*/ */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -77,159 +80,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); ...@@ -77,159 +80,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
static void smp_ext_bitcall(int, ec_bit_sig); static void smp_ext_bitcall(int, ec_bit_sig);
/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);	/* serializes use of call_data */

/* Descriptor of the one in-flight cross-cpu function call. */
struct call_data_struct {
	void (*func) (void *info);	/* function to run on each target */
	void *info;			/* opaque argument for func */
	cpumask_t started;		/* cpus that picked up the call */
	cpumask_t finished;		/* cpus done (only used if wait) */
	int wait;			/* initiator waits for completion? */
};

/* Active descriptor; lives on the initiator's stack, guarded by call_lock. */
static struct call_data_struct *call_data;
/*
 * 'Call function' interrupt callback: run the pending cross-cpu call.
 *
 * Executes the function registered in call_data on this cpu and reports
 * progress through the started/finished cpumasks so that the initiator
 * in __smp_call_function_map() can spin until completion.
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/* Tell the initiator this cpu has picked up the call. */
	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
/*
 * __smp_call_function_map - run @func on every cpu in @map.
 * @func: function to execute remotely; must be fast and non-blocking
 * @info: opaque argument passed to @func
 * @wait: spin until all targets have finished executing @func
 * @map:  candidate cpus (masked against cpu_online_map)
 *
 * Remote cpus are signalled via smp_ext_bitcall(); if the calling cpu
 * is in @map it runs @func last, locally, with interrupts disabled --
 * that ordering matches on_each_cpu() and is relied upon by
 * machine_restart_smp() (per the comment below). Caller must hold
 * call_lock so that the single call_data descriptor is not reused
 * concurrently.
 */
static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	/* Only signal cpus that are actually online. */
	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
out:
	if (local) {
		/* Run locally last, mirroring on_each_cpu() semantics. */
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}
/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs (the calling cpu is excluded).
 * Always returns 0.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int wait)
{
	cpumask_t map;

	/* call_lock serializes use of the shared call_data descriptor. */
	spin_lock(&call_lock);
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, wait, map);
	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
/*
 * smp_call_function_single:
 * @cpu: the CPU where func should run
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @wait: if true, wait (atomically) until function has completed on @cpu
 *
 * Run a function on one processor. Always returns 0.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	spin_lock(&call_lock);
	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  The current cpu is silently
 *        removed from @mask before dispatch.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
			   int wait)
{
	spin_lock(&call_lock);
	cpu_clear(smp_processor_id(), mask);
	__smp_call_function_map(func, info, wait, mask);
	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
void smp_send_stop(void) void smp_send_stop(void)
{ {
int cpu, rc; int cpu, rc;
...@@ -271,7 +121,10 @@ static void do_ext_call_interrupt(__u16 code) ...@@ -271,7 +121,10 @@ static void do_ext_call_interrupt(__u16 code)
bits = xchg(&S390_lowcore.ext_call_fast, 0); bits = xchg(&S390_lowcore.ext_call_fast, 0);
if (test_bit(ec_call_function, &bits)) if (test_bit(ec_call_function, &bits))
do_call_function(); generic_smp_call_function_interrupt();
if (test_bit(ec_call_function_single, &bits))
generic_smp_call_function_single_interrupt();
} }
/* /*
...@@ -288,6 +141,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) ...@@ -288,6 +141,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
udelay(10); udelay(10);
} }
/*
 * arch_send_call_function_ipi - signal "call function" to a set of cpus.
 * @mask: target cpus
 *
 * Raises the ec_call_function external-interrupt bit on each target via
 * smp_ext_bitcall(); do_ext_call_interrupt() handles it on the receiver.
 */
void arch_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}
/* Signal "call function single" to one cpu via an external interrupt. */
void arch_send_call_function_single_ipi(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function_single);
}
#ifndef CONFIG_64BIT #ifndef CONFIG_64BIT
/* /*
* this function sends a 'purge tlb' signal to another CPU. * this function sends a 'purge tlb' signal to another CPU.
...@@ -388,8 +254,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) ...@@ -388,8 +254,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
if (ipl_info.type != IPL_TYPE_FCP_DUMP) if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return; return;
if (cpu >= NR_CPUS) { if (cpu >= NR_CPUS) {
printk(KERN_WARNING "Registers for cpu %i not saved since dump " pr_warning("CPU %i exceeds the maximum %i and is excluded from "
"kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); "the dump\n", cpu, NR_CPUS - 1);
return; return;
} }
zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
...@@ -562,7 +428,7 @@ static void __init smp_detect_cpus(void) ...@@ -562,7 +428,7 @@ static void __init smp_detect_cpus(void)
} }
out: out:
kfree(info); kfree(info);
printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus); pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
get_online_cpus(); get_online_cpus();
__smp_rescan_cpus(); __smp_rescan_cpus();
put_online_cpus(); put_online_cpus();
...@@ -578,19 +444,17 @@ int __cpuinit start_secondary(void *cpuvoid) ...@@ -578,19 +444,17 @@ int __cpuinit start_secondary(void *cpuvoid)
preempt_disable(); preempt_disable();
/* Enable TOD clock interrupts on the secondary cpu. */ /* Enable TOD clock interrupts on the secondary cpu. */
init_cpu_timer(); init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
/* Enable cpu timer interrupts on the secondary cpu. */ /* Enable cpu timer interrupts on the secondary cpu. */
init_cpu_vtimer(); init_cpu_vtimer();
#endif
/* Enable pfault pseudo page faults on this cpu. */ /* Enable pfault pseudo page faults on this cpu. */
pfault_init(); pfault_init();
/* call cpu notifiers */ /* call cpu notifiers */
notify_cpu_starting(smp_processor_id()); notify_cpu_starting(smp_processor_id());
/* Mark this cpu as online */ /* Mark this cpu as online */
spin_lock(&call_lock); ipi_call_lock();
cpu_set(smp_processor_id(), cpu_online_map); cpu_set(smp_processor_id(), cpu_online_map);
spin_unlock(&call_lock); ipi_call_unlock();
/* Switch on interrupts */ /* Switch on interrupts */
local_irq_enable(); local_irq_enable();
/* Print info about this processor */ /* Print info about this processor */
...@@ -639,18 +503,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu) ...@@ -639,18 +503,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
save_area = get_zeroed_page(GFP_KERNEL); save_area = get_zeroed_page(GFP_KERNEL);
if (!save_area) if (!save_area)
goto out_save_area; goto out;
lowcore->extended_save_area_addr = (u32) save_area; lowcore->extended_save_area_addr = (u32) save_area;
} }
#endif #endif
lowcore_ptr[cpu] = lowcore; lowcore_ptr[cpu] = lowcore;
return 0; return 0;
#ifndef CONFIG_64BIT
out_save_area:
free_page(panic_stack);
#endif
out: out:
free_page(panic_stack);
free_pages(async_stack, ASYNC_ORDER); free_pages(async_stack, ASYNC_ORDER);
free_pages((unsigned long) lowcore, lc_order); free_pages((unsigned long) lowcore, lc_order);
return -ENOMEM; return -ENOMEM;
...@@ -690,12 +551,8 @@ int __cpuinit __cpu_up(unsigned int cpu) ...@@ -690,12 +551,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
cpu, sigp_set_prefix); cpu, sigp_set_prefix);
if (ccode) { if (ccode)
printk("sigp_set_prefix failed for cpu %d "
"with condition code %d\n",
(int) cpu, (int) ccode);
return -EIO; return -EIO;
}
idle = current_set[cpu]; idle = current_set[cpu];
cpu_lowcore = lowcore_ptr[cpu]; cpu_lowcore = lowcore_ptr[cpu];
...@@ -778,7 +635,7 @@ void __cpu_die(unsigned int cpu) ...@@ -778,7 +635,7 @@ void __cpu_die(unsigned int cpu)
while (!smp_cpu_not_running(cpu)) while (!smp_cpu_not_running(cpu))
cpu_relax(); cpu_relax();
smp_free_lowcore(cpu); smp_free_lowcore(cpu);
printk(KERN_INFO "Processor %d spun down\n", cpu); pr_info("Processor %d stopped\n", cpu);
} }
void cpu_die(void) void cpu_die(void)
......
This diff is collapsed.
...@@ -3,6 +3,9 @@ ...@@ -3,6 +3,9 @@
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/ */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -12,6 +15,7 @@ ...@@ -12,6 +15,7 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/s390_ext.h> #include <asm/s390_ext.h>
#include <asm/sysinfo.h> #include <asm/sysinfo.h>
...@@ -57,11 +61,11 @@ struct core_info { ...@@ -57,11 +61,11 @@ struct core_info {
cpumask_t mask; cpumask_t mask;
}; };
static int topology_enabled;
static void topology_work_fn(struct work_struct *work); static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info; static struct tl_info *tl_info;
static struct core_info core_info; static struct core_info core_info;
static int machine_has_topology; static int machine_has_topology;
static int machine_has_topology_irq;
static struct timer_list topology_timer; static struct timer_list topology_timer;
static void set_topology_timer(void); static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn); static DECLARE_WORK(topology_work, topology_work_fn);
...@@ -77,8 +81,8 @@ cpumask_t cpu_coregroup_map(unsigned int cpu) ...@@ -77,8 +81,8 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
cpumask_t mask; cpumask_t mask;
cpus_clear(mask); cpus_clear(mask);
if (!machine_has_topology) if (!topology_enabled || !machine_has_topology)
return cpu_present_map; return cpu_possible_map;
spin_lock_irqsave(&topology_lock, flags); spin_lock_irqsave(&topology_lock, flags);
while (core) { while (core) {
if (cpu_isset(cpu, core->mask)) { if (cpu_isset(cpu, core->mask)) {
...@@ -168,7 +172,7 @@ static void topology_update_polarization_simple(void) ...@@ -168,7 +172,7 @@ static void topology_update_polarization_simple(void)
int cpu; int cpu;
mutex_lock(&smp_cpu_state_mutex); mutex_lock(&smp_cpu_state_mutex);
for_each_present_cpu(cpu) for_each_possible_cpu(cpu)
smp_cpu_polarization[cpu] = POLARIZATION_HRZ; smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
mutex_unlock(&smp_cpu_state_mutex); mutex_unlock(&smp_cpu_state_mutex);
} }
...@@ -199,7 +203,7 @@ int topology_set_cpu_management(int fc) ...@@ -199,7 +203,7 @@ int topology_set_cpu_management(int fc)
rc = ptf(PTF_HORIZONTAL); rc = ptf(PTF_HORIZONTAL);
if (rc) if (rc)
return -EBUSY; return -EBUSY;
for_each_present_cpu(cpu) for_each_possible_cpu(cpu)
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
return rc; return rc;
} }
...@@ -208,7 +212,7 @@ static void update_cpu_core_map(void) ...@@ -208,7 +212,7 @@ static void update_cpu_core_map(void)
{ {
int cpu; int cpu;
for_each_present_cpu(cpu) for_each_possible_cpu(cpu)
cpu_core_map[cpu] = cpu_coregroup_map(cpu); cpu_core_map[cpu] = cpu_coregroup_map(cpu);
} }
...@@ -235,7 +239,7 @@ int arch_update_cpu_topology(void) ...@@ -235,7 +239,7 @@ int arch_update_cpu_topology(void)
static void topology_work_fn(struct work_struct *work) static void topology_work_fn(struct work_struct *work)
{ {
arch_reinit_sched_domains(); rebuild_sched_domains();
} }
void topology_schedule_update(void) void topology_schedule_update(void)
...@@ -258,10 +262,14 @@ static void set_topology_timer(void) ...@@ -258,10 +262,14 @@ static void set_topology_timer(void)
add_timer(&topology_timer); add_timer(&topology_timer);
} }
static void topology_interrupt(__u16 code) static int __init early_parse_topology(char *p)
{ {
schedule_work(&topology_work); if (strncmp(p, "on", 2))
return 0;
topology_enabled = 1;
return 0;
} }
early_param("topology", early_parse_topology);
static int __init init_topology_update(void) static int __init init_topology_update(void)
{ {
...@@ -273,14 +281,7 @@ static int __init init_topology_update(void) ...@@ -273,14 +281,7 @@ static int __init init_topology_update(void)
goto out; goto out;
} }
init_timer_deferrable(&topology_timer); init_timer_deferrable(&topology_timer);
if (machine_has_topology_irq) { set_topology_timer();
rc = register_external_interrupt(0x2005, topology_interrupt);
if (rc)
goto out;
ctl_set_bit(0, 8);
}
else
set_topology_timer();
out: out:
update_cpu_core_map(); update_cpu_core_map();
return rc; return rc;
...@@ -301,9 +302,6 @@ void __init s390_init_cpu_topology(void) ...@@ -301,9 +302,6 @@ void __init s390_init_cpu_topology(void)
return; return;
machine_has_topology = 1; machine_has_topology = 1;
if (facility_bits & (1ULL << 51))
machine_has_topology_irq = 1;
tl_info = alloc_bootmem_pages(PAGE_SIZE); tl_info = alloc_bootmem_pages(PAGE_SIZE);
info = tl_info; info = tl_info;
stsi(info, 15, 1, 2); stsi(info, 15, 1, 2);
...@@ -312,7 +310,7 @@ void __init s390_init_cpu_topology(void) ...@@ -312,7 +310,7 @@ void __init s390_init_cpu_topology(void)
for (i = 0; i < info->mnest - 2; i++) for (i = 0; i < info->mnest - 2; i++)
nr_cores *= info->mag[NR_MAG - 3 - i]; nr_cores *= info->mag[NR_MAG - 3 - i];
printk(KERN_INFO "CPU topology:"); pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < NR_MAG; i++) for (i = 0; i < NR_MAG; i++)
printk(" %d", info->mag[i]); printk(" %d", info->mag[i]);
printk(" / %d\n", info->mnest); printk(" / %d\n", info->mnest);
...@@ -327,5 +325,4 @@ void __init s390_init_cpu_topology(void) ...@@ -327,5 +325,4 @@ void __init s390_init_cpu_topology(void)
return; return;
error: error:
machine_has_topology = 0; machine_has_topology = 0;
machine_has_topology_irq = 0;
} }
/*
* vdso setup for s390
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
/* Max supported size for symbol names */
#define MAX_SYMNAME	64

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
/* 31-bit vDSO image, linked into the kernel between these two symbols. */
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
/* Image pages plus one trailing data page; filled in by vdso_init(). */
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;	/* NULL-terminated page array */
#endif

#ifdef CONFIG_64BIT
/* 64-bit vDSO image, linked into the kernel between these two symbols. */
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;	/* NULL-terminated page array */
#endif /* CONFIG_64BIT */
/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?  Enabled by default; "vdso=0"
 * on the command line turns it off.
 */
unsigned int __read_mostly vdso_enabled = 1;

/* Parse the "vdso=" early command line option. */
static int __init vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
/*
 * The vdso data page: a single page-aligned page shared read-only with
 * userspace.  The union pads struct vdso_data out to exactly PAGE_SIZE.
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __attribute__((__section__(".data.page_aligned")));
/* Kernel-side handle on the shared data page. */
struct vdso_data *vdso_data = &vdso_data_store.data;
/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 *
 * Chooses the 31- or 64-bit page list depending on the thread's
 * TIF_31BIT flag, finds a free address range near mm->mmap_base and
 * installs the pages as a special mapping.  Returns 0 on success
 * (including the "vdso disabled" cases) or a negative errno.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_base = mm->mmap_base;
#ifdef CONFIG_64BIT
	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	/* 31-bit compat task on a 64-bit kernel gets the 32-bit image. */
	if (test_thread_flag(TIF_31BIT)) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
#else
	vdso_pagelist = vdso32_pagelist;
	vdso_pages = vdso32_pages;
#endif

	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW
	 * on those pages but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though
	 *
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				     VM_ALWAYSDUMP,
				     vdso_pagelist);
	if (rc)
		goto out_up;

	/* Put vDSO base into mm struct */
	current->mm->context.vdso_base = vdso_base;
	up_write(&mm->mmap_sem);
	return 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
return "[vdso]";
return NULL;
}
/*
 * vdso_init - build the page lists that back the vDSO mappings.
 *
 * For each built vDSO flavour: round the linked-in image up to whole
 * pages plus one extra slot for the shared data page, allocate a
 * NULL-terminated struct page * array, and pin every page with
 * get_page().  BUG()s on allocation failure (boot-time only).
 */
static int __init vdso_init(void)
{
	int i;

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	/* Last mapped page is the shared data page; array is NULL-ended. */
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	/* Last mapped page is the shared data page; array is NULL-ended. */
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
#endif /* CONFIG_64BIT */

	get_page(virt_to_page(vdso_data));

	/* NOTE(review): presumably publishes the page-list setup before
	 * later readers observe it -- confirm the pairing reader side. */
	smp_wmb();

	return 0;
}
arch_initcall(vdso_init);
/* s390 has no gate area; stub keeps generic mm code happy. */
int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}
/* s390 has no gate area; no address is ever inside one. */
int in_gate_area(struct task_struct *task, unsigned long addr)
{
	return 0;
}
/* s390 has no gate area; there is no gate vma to return. */
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
	return NULL;
}
# Kbuild rules for the 31-bit s390 vDSO (vdso32.so).

# List of files in the vdso, has to be asm only for now
obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o

# Build rules

targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))

# Build in 31-bit mode: drop the inherited -m64 and force -m31, plus
# the flags a standalone shared object needs (PIC, no stdlib/builtins).
KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_31 += -m31 -s

KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
			$(call ld-option, -Wl$(comma)--hash-style=sysv)

$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31)

obj-y += vdso32_wrapper.o
extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)

# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so

# link rule for the .so file, .lds has to be first
$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
	$(call if_changed,vdso32ld)

# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)

# assembly rules for the .S files
$(obj-vdso32): %.o: %.S
	$(call if_changed_dep,vdso32as)

# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
      cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
quiet_cmd_vdso32as = VDSO32A $@
      cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<

# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@

vdso32.so: $(obj)/vdso32.so.dbg
	@mkdir -p $(MODLIB)/vdso
	$(call cmd,vdso_install)

vdso_install: vdso32.so
/*
* Userland implementation of clock_getres() for 32 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
	.text
/*
 * __kernel_clock_getres: vDSO fast path for clock_getres().
 *
 * %r2 = clockid, %r3 = pointer to struct timespec (may be NULL).
 * CLOCK_REALTIME and CLOCK_MONOTONIC are answered in userspace with
 * the constant resolution CLOCK_REALTIME_RES; any other clockid falls
 * back to the clock_getres system call.
 */
	.align 4
	.globl __kernel_clock_getres
	.type  __kernel_clock_getres,@function
__kernel_clock_getres:
	.cfi_startproc
	chi	%r2,CLOCK_REALTIME
	je	0f
	chi	%r2,CLOCK_MONOTONIC
	jne	3f			/* unsupported clock -> syscall */
0:	ltr	%r3,%r3
	jz	2f			/* res == NULL */
	basr	%r1,0			/* pc-relative addressing base */
1:	l	%r0,4f-1b(%r1)
	xc	0(4,%r3),0(%r3)		/* set tp->tv_sec to zero */
	st	%r0,4(%r3)		/* store tp->tv_usec */
2:	lhi	%r2,0			/* return 0 */
	br	%r14
3:	lhi	%r1,__NR_clock_getres	/* fallback to svc */
	svc	0
	br	%r14
4:	.long	CLOCK_REALTIME_RES
	.cfi_endproc
	.size	__kernel_clock_getres,.-__kernel_clock_getres
/*
* Userland implementation of clock_gettime() for 32 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
	.text
/*
 * __kernel_clock_gettime: vDSO fast path for clock_gettime().
 *
 * %r2 = clockid, %r3 = pointer to struct timespec.
 * CLOCK_REALTIME and CLOCK_MONOTONIC are computed in userspace from
 * the TOD clock (stck) and the kernel-published data page
 * (_vdso_data); any other clockid falls back to the clock_gettime
 * system call.  The update counter in the data page is sampled before
 * and after the reads (seqcount style): while its low bit signals a
 * pending update, or when it changed, the whole read is retried.
 */
	.align 4
	.globl __kernel_clock_gettime
	.type  __kernel_clock_gettime,@function
__kernel_clock_gettime:
	.cfi_startproc
	basr	%r5,0
0:	al	%r5,21f-0b(%r5)			/* get &_vdso_data */
	chi	%r2,CLOCK_REALTIME
	je	10f
	chi	%r2,CLOCK_MONOTONIC
	jne	19f

	/* CLOCK_MONOTONIC */
	ltr	%r3,%r3
	jz	9f				/* tp == NULL */
1:	l	%r4,__VDSO_UPD_COUNT+4(%r5)	/* load update counter */
	tml	%r4,0x0001			/* pending update ? loop */
	jnz	1b
	stck	24(%r15)			/* Store TOD clock */
	lm	%r0,%r1,24(%r15)
	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
	brc	3,2f				/* borrow from low word? */
	ahi	%r0,-1
2:	mhi	%r0,1000			/* cyc2ns(clock,cycle_delta) */
	lr	%r2,%r0
	lhi	%r0,1000
	ltr	%r1,%r1
	mr	%r0,%r0
	jnm	3f
	ahi	%r0,1000
3:	alr	%r0,%r2
	srdl	%r0,12
	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
	brc	12,4f				/* carry into high word? */
	ahi	%r0,1
4:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
	al	%r0,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic */
	al	%r1,__VDSO_WTOM_NSEC+4(%r5)
	brc	12,5f
	ahi	%r0,1
5:	al	%r2,__VDSO_WTOM_SEC+4(%r5)
	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
	jne	1b				/* changed -> retry read */
	basr	%r5,0
	/* Normalize: move whole seconds from the nsec value into r2. */
6:	ltr	%r0,%r0
	jnz	7f
	cl	%r1,20f-6b(%r5)
	jl	8f
7:	ahi	%r2,1
	sl	%r1,20f-6b(%r5)
	brc	3,6b
	ahi	%r0,-1
	j	6b
8:	st	%r2,0(%r3)			/* store tp->tv_sec */
	st	%r1,4(%r3)			/* store tp->tv_nsec */
9:	lhi	%r2,0				/* return 0 */
	br	%r14

	/* CLOCK_REALTIME */
10:	ltr	%r3,%r3				/* tp == NULL */
	jz	18f
11:	l	%r4,__VDSO_UPD_COUNT+4(%r5)	/* load update counter */
	tml	%r4,0x0001			/* pending update ? loop */
	jnz	11b
	stck	24(%r15)			/* Store TOD clock */
	lm	%r0,%r1,24(%r15)
	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
	brc	3,12f
	ahi	%r0,-1
12:	mhi	%r0,1000			/* cyc2ns(clock,cycle_delta) */
	lr	%r2,%r0
	lhi	%r0,1000
	ltr	%r1,%r1
	mr	%r0,%r0
	jnm	13f
	ahi	%r0,1000
13:	alr	%r0,%r2
	srdl	%r0,12
	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
	brc	12,14f
	ahi	%r0,1
14:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
	jne	11b				/* changed -> retry read */
	basr	%r5,0
	/* Normalize: move whole seconds from the nsec value into r2. */
15:	ltr	%r0,%r0
	jnz	16f
	cl	%r1,20f-15b(%r5)
	jl	17f
16:	ahi	%r2,1
	sl	%r1,20f-15b(%r5)
	brc	3,15b
	ahi	%r0,-1
	j	15b
17:	st	%r2,0(%r3)			/* store tp->tv_sec */
	st	%r1,4(%r3)			/* store tp->tv_nsec */
18:	lhi	%r2,0				/* return 0 */
	br	%r14

	/* Fallback to system call */
19:	lhi	%r1,__NR_clock_gettime
	svc	0
	br	%r14

20:	.long	1000000000			/* NSEC_PER_SEC */
21:	.long	_vdso_data - 0b
	.cfi_endproc
	.size	__kernel_clock_gettime,.-__kernel_clock_gettime
/*
* Userland implementation of gettimeofday() for 32 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
	.text
/*
 * __kernel_gettimeofday: vDSO fast path for gettimeofday().
 *
 * %r2 = pointer to struct timeval (may be NULL),
 * %r3 = pointer to struct timezone (may be NULL).
 * The timezone is copied straight out of the kernel data page; the
 * time is derived from the TOD clock and the data page, rereading
 * (seqcount style) while the update counter signals or shows a
 * concurrent update.  Always returns 0 -- no syscall fallback needed.
 */
	.align 4
	.globl __kernel_gettimeofday
	.type  __kernel_gettimeofday,@function
__kernel_gettimeofday:
	.cfi_startproc
	basr	%r5,0
0:	al	%r5,13f-0b(%r5)			/* get &_vdso_data */
1:	ltr	%r3,%r3				/* check if tz is NULL */
	je	2f
	mvc	0(8,%r3),__VDSO_TIMEZONE(%r5)
2:	ltr	%r2,%r2				/* check if tv is NULL */
	je	10f
	l	%r4,__VDSO_UPD_COUNT+4(%r5)	/* load update counter */
	tml	%r4,0x0001			/* pending update ? loop */
	jnz	1b
	stck	24(%r15)			/* Store TOD clock */
	lm	%r0,%r1,24(%r15)
	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
	brc	3,3f				/* borrow from low word? */
	ahi	%r0,-1
3:	mhi	%r0,1000			/* cyc2ns(clock,cycle_delta) */
	st	%r0,24(%r15)
	lhi	%r0,1000
	ltr	%r1,%r1
	mr	%r0,%r0
	jnm	4f
	ahi	%r0,1000
4:	al	%r0,24(%r15)
	srdl	%r0,12
	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
	brc	12,5f				/* carry into high word? */
	ahi	%r0,1
5:	mvc	24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
	jne	1b				/* changed -> retry read */
	l	%r4,24(%r15)			/* get tv_sec from stack */
	basr	%r5,0
	/* Normalize: move whole seconds from the nsec value into r4. */
6:	ltr	%r0,%r0
	jnz	7f
	cl	%r1,11f-6b(%r5)
	jl	8f
7:	ahi	%r4,1
	sl	%r1,11f-6b(%r5)
	brc	3,6b
	ahi	%r0,-1
	j	6b
8:	st	%r4,0(%r2)			/* store tv->tv_sec */
	ltr	%r1,%r1
	/* ns -> usec via multiply by 12f and shift -- NOTE(review):
	 * 274877907 looks like a reciprocal-divide constant for /1000
	 * combined with the srl 6 below; confirm against asm-offsets. */
	m	%r0,12f-6b(%r5)
	jnm	9f
	al	%r0,12f-6b(%r5)
9:	srl	%r0,6
	st	%r0,4(%r2)			/* store tv->tv_usec */
10:	slr	%r2,%r2				/* return 0 */
	br	%r14
11:	.long	1000000000			/* NSEC_PER_SEC */
12:	.long	274877907
13:	.long	_vdso_data - 0b
	.cfi_endproc
	.size	__kernel_gettimeofday,.-__kernel_gettimeofday
/*
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
* Here we can supply some information useful to userland.
*/
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
/* "Linux" note, type 0: records the kernel version this vDSO was built with */
ELFNOTE_START(Linux, 0, "a")
.long LINUX_VERSION_CODE
ELFNOTE_END
/*
* This is the infamous ld script for the 32 bits vdso
* library
*/
#include <asm/vdso.h>
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
OUTPUT_ARCH(s390:31-bit)
ENTRY(_start)
SECTIONS
{
. = VDSO32_LBASE + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.note : { *(.note.*) } :text :note
. = ALIGN(16);
.text : {
*(.text .stub .text.* .gnu.linkonce.t.*)
} :text
PROVIDE(__etext = .);
PROVIDE(_etext = .);
PROVIDE(etext = .);
/*
* Other stuff is appended to the text segment:
*/
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.dynamic : { *(.dynamic) } :text :dynamic
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
.rela.dyn ALIGN(8) : { *(.rela.dyn) }
.got ALIGN(8) : { *(.got .toc) }
_end = .;
PROVIDE(end = .);
/*
* Stabs debugging sections are here too.
*/
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
/*
* DWARF debug sections.
* Symbols in the DWARF debugging sections are relative to the
* beginning of the section so we begin them at 0.
*/
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* DWARF 3 */
.debug_pubtypes 0 : { *(.debug_pubtypes) }
.debug_ranges 0 : { *(.debug_ranges) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
/* page-aligned _vdso_data: the data page the vDSO code reads time from */
. = ALIGN(4096);
PROVIDE(_vdso_data = .);
/* the vDSO must stay read-only/executable: drop all writable data */
/DISCARD/ : {
*(.note.GNU-stack)
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
}
}
/*
* Very old versions of ld do not recognize this name token; use the constant.
*/
#define PT_GNU_EH_FRAME 0x6474e550
/*
* We must supply the ELF program headers explicitly to get just one
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
*/
PHDRS
{
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
* This controls what symbols we export from the DSO.
*/
VERSION
{
VDSO_VERSION_STRING {
global:
/*
* Has to be there for the kernel to find
*/
__kernel_gettimeofday;
__kernel_clock_gettime;
__kernel_clock_getres;
local: *;
};
}
#include <linux/init.h>
#include <asm/page.h>
/*
 * Embed the stripped vdso32.so image into the kernel, page aligned,
 * and export vdso32_start/vdso32_end so the mapping code can find it.
 */
.section ".data.page_aligned"
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
vdso32_start:
.incbin "arch/s390/kernel/vdso32/vdso32.so"
.balign PAGE_SIZE
vdso32_end:
.previous
# Kbuild rules for the 64-bit s390 vDSO: assemble the .S files with 64-bit
# flags, link them into vdso64.so.dbg via the custom lds, strip to vdso64.so.
# List of files in the vdso, has to be asm only for now
obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o
# Build rules
targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
# force -m64 regardless of what the top-level flags carried
KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_64 += -m64 -s
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
 $(call ld-option, -Wl$(comma)--hash-style=sysv)
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first
$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
$(call if_changed,vdso64ld)
# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# assembly rules for the .S files
$(obj-vdso64): %.o: %.S
$(call if_changed_dep,vdso64as)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
vdso64.so: $(obj)/vdso64.so.dbg
@mkdir -p $(MODLIB)/vdso
$(call cmd,vdso_install)
vdso_install: vdso64.so
/*
* Userland implementation of clock_getres() for 64 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
/*
 * __kernel_clock_getres (64-bit vDSO fast path).
 * Entry: %r2 = clockid, %r3 = struct timespec * (may be NULL).
 * CLOCK_REALTIME / CLOCK_MONOTONIC return the constant resolution
 * CLOCK_REALTIME_RES; any other clock falls back to the system call.
 */
.text
.align 4
.globl __kernel_clock_getres
.type __kernel_clock_getres,@function
__kernel_clock_getres:
.cfi_startproc
cghi %r2,CLOCK_REALTIME
je 0f
cghi %r2,CLOCK_MONOTONIC
jne 2f
0: ltgr %r3,%r3
jz 1f /* res == NULL */
larl %r1,3f
lg %r0,0(%r1)
xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
stg %r0,8(%r3) /* store tp->tv_nsec */
1: lghi %r2,0 /* return 0 */
br %r14
2: lghi %r1,__NR_clock_getres /* fallback to svc */
svc 0
br %r14
3: .quad CLOCK_REALTIME_RES
.cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
/*
* Userland implementation of clock_gettime() for 64 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
/*
 * __kernel_clock_gettime (64-bit vDSO fast path).
 * Entry: %r2 = clockid, %r3 = struct timespec * (may be NULL).
 * Handles CLOCK_REALTIME and CLOCK_MONOTONIC from the shared
 * _vdso_data page (seqlock-style retry on the update counter);
 * everything else falls back to the system call.  Returns 0 in %r2
 * on the fast paths.
 */
.text
.align 4
.globl __kernel_clock_gettime
.type __kernel_clock_gettime,@function
__kernel_clock_gettime:
.cfi_startproc
larl %r5,_vdso_data
cghi %r2,CLOCK_REALTIME
je 4f
cghi %r2,CLOCK_MONOTONIC
jne 9f
/* CLOCK_MONOTONIC */
ltgr %r3,%r3
jz 3f /* tp == NULL */
0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
mghi %r1,1000
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
lg %r0,__VDSO_XTIME_SEC(%r5)
alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
alg %r0,__VDSO_WTOM_SEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
/* reduce nanoseconds mod 1e9, bumping tv_sec each round */
larl %r5,10f
1: clg %r1,0(%r5)
jl 2f
slg %r1,0(%r5)
aghi %r0,1
j 1b
2: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
3: lghi %r2,0 /* return 0 */
br %r14
/* CLOCK_REALTIME */
4: ltr %r3,%r3 /* tp == NULL */
jz 8f
5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
mghi %r1,1000
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
lg %r0,__VDSO_XTIME_SEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 5b
/* reduce nanoseconds mod 1e9, bumping tv_sec each round */
larl %r5,10f
6: clg %r1,0(%r5)
jl 7f
slg %r1,0(%r5)
aghi %r0,1
j 6b
7: stg %r0,0(%r3) /* store tp->tv_sec */
stg %r1,8(%r3) /* store tp->tv_nsec */
8: lghi %r2,0 /* return 0 */
br %r14
/* Fallback to system call */
9: lghi %r1,__NR_clock_gettime
svc 0
br %r14
10: .quad 1000000000 /* NSEC_PER_SEC */
.cfi_endproc
.size __kernel_clock_gettime,.-__kernel_clock_gettime
/*
* Userland implementation of gettimeofday() for 64 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
/*
 * __kernel_gettimeofday (64-bit vDSO fast path).
 * Entry: %r2 = struct timeval * (may be NULL), %r3 = struct timezone *
 * (may be NULL).  Returns 0 in %r2.  Uses 48(%r15) as scratch and
 * retries the read while the _vdso_data page has an update pending or
 * the update counter changed (seqlock-style).
 */
.text
.align 4
.globl __kernel_gettimeofday
.type __kernel_gettimeofday,@function
__kernel_gettimeofday:
.cfi_startproc
larl %r5,_vdso_data
0: ltgr %r3,%r3 /* check if tz is NULL */
je 1f
mvc 0(8,%r3),__VDSO_TIMEZONE(%r5)
1: ltgr %r2,%r2 /* check if tv is NULL */
je 4f
lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
mghi %r1,1000
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
/* reduce nanoseconds mod 1e9, bumping tv_sec each round */
larl %r5,5f
2: clg %r1,0(%r5)
jl 3f
slg %r1,0(%r5)
aghi %r0,1
j 2b
3: stg %r0,0(%r2) /* store tv->tv_sec */
slgr %r0,%r0 /* tv_nsec -> tv_usec */
ml %r0,8(%r5) /* usec = (nsec * 2^38/1000) >> 38 */
srlg %r0,%r0,6 /* high word already >> 32; 6 more = >> 38 */
stg %r0,8(%r2) /* store tv->tv_usec */
4: lghi %r2,0 /* return 0 */
br %r14
5: .quad 1000000000 /* NSEC_PER_SEC */
.long 274877907 /* 2^38 / 1000, rounded up */
.cfi_endproc
.size __kernel_gettimeofday,.-__kernel_gettimeofday
/*
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
* Here we can supply some information useful to userland.
*/
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
/* "Linux" note, type 0: records the kernel version this vDSO was built with */
ELFNOTE_START(Linux, 0, "a")
.long LINUX_VERSION_CODE
ELFNOTE_END
/*
* This is the infamous ld script for the 64 bits vdso
* library
*/
#include <asm/vdso.h>
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
ENTRY(_start)
SECTIONS
{
. = VDSO64_LBASE + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.note : { *(.note.*) } :text :note
. = ALIGN(16);
.text : {
*(.text .stub .text.* .gnu.linkonce.t.*)
} :text
PROVIDE(__etext = .);
PROVIDE(_etext = .);
PROVIDE(etext = .);
/*
* Other stuff is appended to the text segment:
*/
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.dynamic : { *(.dynamic) } :text :dynamic
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
.rela.dyn ALIGN(8) : { *(.rela.dyn) }
.got ALIGN(8) : { *(.got .toc) }
_end = .;
PROVIDE(end = .);
/*
* Stabs debugging sections are here too.
*/
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
/*
* DWARF debug sections.
* Symbols in the DWARF debugging sections are relative to the
* beginning of the section so we begin them at 0.
*/
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* DWARF 3 */
.debug_pubtypes 0 : { *(.debug_pubtypes) }
.debug_ranges 0 : { *(.debug_ranges) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
/* page-aligned _vdso_data: the data page the vDSO code reads time from */
. = ALIGN(4096);
PROVIDE(_vdso_data = .);
/* the vDSO must stay read-only/executable: drop all writable data */
/DISCARD/ : {
*(.note.GNU-stack)
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
}
}
/*
* Very old versions of ld do not recognize this name token; use the constant.
*/
#define PT_GNU_EH_FRAME 0x6474e550
/*
* We must supply the ELF program headers explicitly to get just one
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
*/
PHDRS
{
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
* This controls what symbols we export from the DSO.
*/
VERSION
{
VDSO_VERSION_STRING {
global:
/*
* Has to be there for the kernel to find
*/
__kernel_gettimeofday;
__kernel_clock_gettime;
__kernel_clock_getres;
local: *;
};
}
#include <linux/init.h>
#include <asm/page.h>
/*
 * Embed the stripped vdso64.so image into the kernel, page aligned,
 * and export vdso64_start/vdso64_end so the mapping code can find it.
 */
.section ".data.page_aligned"
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
vdso64_start:
.incbin "arch/s390/kernel/vdso64/vdso64.so"
.balign PAGE_SIZE
vdso64_end:
.previous
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
static ext_int_info_t ext_int_info_timer; static ext_int_info_t ext_int_info_timer;
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* /*
* Update process times based on virtual cpu times stored by entry.S * Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock. * to the lowcore fields user_timer, system_timer & steal_clock.
...@@ -125,16 +124,6 @@ static inline void set_vtimer(__u64 expires) ...@@ -125,16 +124,6 @@ static inline void set_vtimer(__u64 expires)
/* store expire time for this CPU timer */ /* store expire time for this CPU timer */
__get_cpu_var(virt_cpu_timer).to_expire = expires; __get_cpu_var(virt_cpu_timer).to_expire = expires;
} }
#else
static inline void set_vtimer(__u64 expires)
{
S390_lowcore.last_update_timer = expires;
asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
/* store expire time for this CPU timer */
__get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#endif
void vtime_start_cpu_timer(void) void vtime_start_cpu_timer(void)
{ {
......
...@@ -7,6 +7,9 @@ ...@@ -7,6 +7,9 @@
* (C) IBM Corporation 2002-2004 * (C) IBM Corporation 2002-2004
*/ */
#define KMSG_COMPONENT "extmem"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
...@@ -24,19 +27,6 @@ ...@@ -24,19 +27,6 @@
#include <asm/cpcmd.h> #include <asm/cpcmd.h>
#include <asm/setup.h> #include <asm/setup.h>
#define DCSS_DEBUG /* Debug messages on/off */
#define DCSS_NAME "extmem"
#ifdef DCSS_DEBUG
#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x)
#else
#define PRINT_DEBUG(x...) do {} while (0)
#endif
#define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x)
#define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x)
#define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x)
#define DCSS_LOADSHR 0x00 #define DCSS_LOADSHR 0x00
#define DCSS_LOADNSR 0x04 #define DCSS_LOADNSR 0x04
#define DCSS_PURGESEG 0x08 #define DCSS_PURGESEG 0x08
...@@ -286,7 +276,7 @@ query_segment_type (struct dcss_segment *seg) ...@@ -286,7 +276,7 @@ query_segment_type (struct dcss_segment *seg)
goto out_free; goto out_free;
} }
if (diag_cc > 1) { if (diag_cc > 1) {
PRINT_WARN ("segment_type: diag returned error %ld\n", vmrc); pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc);
rc = dcss_diag_translate_rc (vmrc); rc = dcss_diag_translate_rc (vmrc);
goto out_free; goto out_free;
} }
...@@ -368,7 +358,6 @@ query_segment_type (struct dcss_segment *seg) ...@@ -368,7 +358,6 @@ query_segment_type (struct dcss_segment *seg)
* -EIO : could not perform query diagnose * -EIO : could not perform query diagnose
* -ENOENT : no such segment * -ENOENT : no such segment
* -ENOTSUPP: multi-part segment cannot be used with linux * -ENOTSUPP: multi-part segment cannot be used with linux
* -ENOSPC : segment cannot be used (overlaps with storage)
* -ENOMEM : out of memory * -ENOMEM : out of memory
* 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h
*/ */
...@@ -480,9 +469,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long ...@@ -480,9 +469,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
goto out_resource; goto out_resource;
} }
if (diag_cc > 1) { if (diag_cc > 1) {
PRINT_WARN ("segment_load: could not load segment %s - " pr_warning("Loading DCSS %s failed with rc=%ld\n", name,
"diag returned error (%ld)\n", end_addr);
name, end_addr);
rc = dcss_diag_translate_rc(end_addr); rc = dcss_diag_translate_rc(end_addr);
dcss_diag(&purgeseg_scode, seg->dcss_name, dcss_diag(&purgeseg_scode, seg->dcss_name,
&dummy, &dummy); &dummy, &dummy);
...@@ -496,15 +484,13 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long ...@@ -496,15 +484,13 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
*addr = seg->start_addr; *addr = seg->start_addr;
*end = seg->end; *end = seg->end;
if (do_nonshared) if (do_nonshared)
PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " pr_info("DCSS %s of range %p to %p and type %s loaded as "
"type %s in non-shared mode\n", name, "exclusive-writable\n", name, (void*) seg->start_addr,
(void*)seg->start_addr, (void*)seg->end, (void*) seg->end, segtype_string[seg->vm_segtype]);
segtype_string[seg->vm_segtype]);
else { else {
PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " pr_info("DCSS %s of range %p to %p and type %s loaded in "
"type %s in shared mode\n", name, "shared access mode\n", name, (void*) seg->start_addr,
(void*)seg->start_addr, (void*)seg->end, (void*) seg->end, segtype_string[seg->vm_segtype]);
segtype_string[seg->vm_segtype]);
} }
goto out; goto out;
out_resource: out_resource:
...@@ -593,14 +579,14 @@ segment_modify_shared (char *name, int do_nonshared) ...@@ -593,14 +579,14 @@ segment_modify_shared (char *name, int do_nonshared)
goto out_unlock; goto out_unlock;
} }
if (do_nonshared == seg->do_nonshared) { if (do_nonshared == seg->do_nonshared) {
PRINT_INFO ("segment_modify_shared: not reloading segment %s" pr_info("DCSS %s is already in the requested access "
" - already in requested mode\n",name); "mode\n", name);
rc = 0; rc = 0;
goto out_unlock; goto out_unlock;
} }
if (atomic_read (&seg->ref_count) != 1) { if (atomic_read (&seg->ref_count) != 1) {
PRINT_WARN ("segment_modify_shared: not reloading segment %s - " pr_warning("DCSS %s is in use and cannot be reloaded\n",
"segment is in use by other driver(s)\n",name); name);
rc = -EAGAIN; rc = -EAGAIN;
goto out_unlock; goto out_unlock;
} }
...@@ -613,8 +599,8 @@ segment_modify_shared (char *name, int do_nonshared) ...@@ -613,8 +599,8 @@ segment_modify_shared (char *name, int do_nonshared)
seg->res->flags |= IORESOURCE_READONLY; seg->res->flags |= IORESOURCE_READONLY;
if (request_resource(&iomem_resource, seg->res)) { if (request_resource(&iomem_resource, seg->res)) {
PRINT_WARN("segment_modify_shared: could not reload segment %s" pr_warning("DCSS %s overlaps with used memory resources "
" - overlapping resources\n", name); "and cannot be reloaded\n", name);
rc = -EBUSY; rc = -EBUSY;
kfree(seg->res); kfree(seg->res);
goto out_del_mem; goto out_del_mem;
...@@ -632,9 +618,8 @@ segment_modify_shared (char *name, int do_nonshared) ...@@ -632,9 +618,8 @@ segment_modify_shared (char *name, int do_nonshared)
goto out_del_res; goto out_del_res;
} }
if (diag_cc > 1) { if (diag_cc > 1) {
PRINT_WARN ("segment_modify_shared: could not reload segment %s" pr_warning("Reloading DCSS %s failed with rc=%ld\n", name,
" - diag returned error (%ld)\n", end_addr);
name, end_addr);
rc = dcss_diag_translate_rc(end_addr); rc = dcss_diag_translate_rc(end_addr);
goto out_del_res; goto out_del_res;
} }
...@@ -673,8 +658,7 @@ segment_unload(char *name) ...@@ -673,8 +658,7 @@ segment_unload(char *name)
mutex_lock(&dcss_lock); mutex_lock(&dcss_lock);
seg = segment_by_name (name); seg = segment_by_name (name);
if (seg == NULL) { if (seg == NULL) {
PRINT_ERR ("could not find segment %s in segment_unload, " pr_err("Unloading unknown DCSS %s failed\n", name);
"please report to linux390@de.ibm.com\n",name);
goto out_unlock; goto out_unlock;
} }
if (atomic_dec_return(&seg->ref_count) != 0) if (atomic_dec_return(&seg->ref_count) != 0)
...@@ -709,8 +693,7 @@ segment_save(char *name) ...@@ -709,8 +693,7 @@ segment_save(char *name)
seg = segment_by_name (name); seg = segment_by_name (name);
if (seg == NULL) { if (seg == NULL) {
PRINT_ERR("could not find segment %s in segment_save, please " pr_err("Saving unknown DCSS %s failed\n", name);
"report to linux390@de.ibm.com\n", name);
goto out; goto out;
} }
...@@ -727,14 +710,14 @@ segment_save(char *name) ...@@ -727,14 +710,14 @@ segment_save(char *name)
response = 0; response = 0;
cpcmd(cmd1, NULL, 0, &response); cpcmd(cmd1, NULL, 0, &response);
if (response) { if (response) {
PRINT_ERR("segment_save: DEFSEG failed with response code %i\n", pr_err("Saving a DCSS failed with DEFSEG response code "
response); "%i\n", response);
goto out; goto out;
} }
cpcmd(cmd2, NULL, 0, &response); cpcmd(cmd2, NULL, 0, &response);
if (response) { if (response) {
PRINT_ERR("segment_save: SAVESEG failed with response code %i\n", pr_err("Saving a DCSS failed with SAVESEG response code "
response); "%i\n", response);
goto out; goto out;
} }
out: out:
...@@ -749,44 +732,41 @@ void segment_warning(int rc, char *seg_name) ...@@ -749,44 +732,41 @@ void segment_warning(int rc, char *seg_name)
{ {
switch (rc) { switch (rc) {
case -ENOENT: case -ENOENT:
PRINT_WARN("cannot load/query segment %s, " pr_err("DCSS %s cannot be loaded or queried\n", seg_name);
"does not exist\n", seg_name);
break; break;
case -ENOSYS: case -ENOSYS:
PRINT_WARN("cannot load/query segment %s, " pr_err("DCSS %s cannot be loaded or queried without "
"not running on VM\n", seg_name); "z/VM\n", seg_name);
break; break;
case -EIO: case -EIO:
PRINT_WARN("cannot load/query segment %s, " pr_err("Loading or querying DCSS %s resulted in a "
"hardware error\n", seg_name); "hardware error\n", seg_name);
break; break;
case -ENOTSUPP: case -ENOTSUPP:
PRINT_WARN("cannot load/query segment %s, " pr_err("DCSS %s has multiple page ranges and cannot be "
"is a multi-part segment\n", seg_name); "loaded or queried\n", seg_name);
break; break;
case -ENOSPC: case -ENOSPC:
PRINT_WARN("cannot load/query segment %s, " pr_err("DCSS %s overlaps with used storage and cannot "
"overlaps with storage\n", seg_name); "be loaded\n", seg_name);
break; break;
case -EBUSY: case -EBUSY:
PRINT_WARN("cannot load/query segment %s, " pr_err("%s needs used memory resources and cannot be "
"overlaps with already loaded dcss\n", seg_name); "loaded or queried\n", seg_name);
break; break;
case -EPERM: case -EPERM:
PRINT_WARN("cannot load/query segment %s, " pr_err("DCSS %s is already loaded in a different access "
"already loaded in incompatible mode\n", seg_name); "mode\n", seg_name);
break; break;
case -ENOMEM: case -ENOMEM:
PRINT_WARN("cannot load/query segment %s, " pr_err("There is not enough memory to load or query "
"out of memory\n", seg_name); "DCSS %s\n", seg_name);
break; break;
case -ERANGE: case -ERANGE:
PRINT_WARN("cannot load/query segment %s, " pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
"exceeds kernel mapping range\n", seg_name); "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
break; break;
default: default:
PRINT_WARN("cannot load/query segment %s, "
"return value %i\n", seg_name, rc);
break; break;
} }
} }
......
...@@ -202,7 +202,7 @@ do { \ ...@@ -202,7 +202,7 @@ do { \
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm; struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm, extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int executable_stack); int uses_interp);
extern unsigned int vdso_enabled; extern unsigned int vdso_enabled;
extern void __kernel_vsyscall; extern void __kernel_vsyscall;
......
...@@ -59,8 +59,7 @@ int __init vsyscall_init(void) ...@@ -59,8 +59,7 @@ int __init vsyscall_init(void)
} }
/* Setup a VMA at program startup for the vsyscall page */ /* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
int executable_stack)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long addr; unsigned long addr;
......
...@@ -325,7 +325,7 @@ struct linux_binprm; ...@@ -325,7 +325,7 @@ struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm, extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int executable_stack); int uses_interp);
extern int syscall32_setup_pages(struct linux_binprm *, int exstack); extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
#define compat_arch_setup_additional_pages syscall32_setup_pages #define compat_arch_setup_additional_pages syscall32_setup_pages
......
...@@ -310,7 +310,7 @@ int __init sysenter_setup(void) ...@@ -310,7 +310,7 @@ int __init sysenter_setup(void)
} }
/* Setup a VMA at program startup for the vsyscall page */ /* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long addr; unsigned long addr;
......
...@@ -98,7 +98,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) ...@@ -98,7 +98,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
/* Setup a VMA at program startup for the vsyscall page. /* Setup a VMA at program startup for the vsyscall page.
Not called for compat tasks */ Not called for compat tasks */
int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long addr; unsigned long addr;
......
...@@ -622,6 +622,16 @@ config HVC_BEAT ...@@ -622,6 +622,16 @@ config HVC_BEAT
help help
Toshiba's Cell Reference Set Beat Console device driver Toshiba's Cell Reference Set Beat Console device driver
config HVC_IUCV
bool "z/VM IUCV Hypervisor console support (VM only)"
depends on S390
select HVC_DRIVER
select IUCV
default y
help
This driver provides a Hypervisor console (HVC) back-end to access
a Linux (console) terminal via a z/VM IUCV communication path.
config HVC_XEN config HVC_XEN
bool "Xen Hypervisor Console support" bool "Xen Hypervisor Console support"
depends on XEN depends on XEN
......
...@@ -50,6 +50,7 @@ obj-$(CONFIG_HVC_BEAT) += hvc_beat.o ...@@ -50,6 +50,7 @@ obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_HVC_IRQ) += hvc_irq.o obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
obj-$(CONFIG_HVC_XEN) += hvc_xen.o obj-$(CONFIG_HVC_XEN) += hvc_xen.o
obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
......
This diff is collapsed.
...@@ -1898,15 +1898,19 @@ static int dasd_flush_block_queue(struct dasd_block *block) ...@@ -1898,15 +1898,19 @@ static int dasd_flush_block_queue(struct dasd_block *block)
wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
/* Process finished ERP request. */ /* Process finished ERP request. */
if (cqr->refers) { if (cqr->refers) {
spin_lock_bh(&block->queue_lock);
__dasd_block_process_erp(block, cqr); __dasd_block_process_erp(block, cqr);
spin_unlock_bh(&block->queue_lock);
/* restart list_for_xx loop since dasd_process_erp /* restart list_for_xx loop since dasd_process_erp
* might remove multiple elements */ * might remove multiple elements */
goto restart_cb; goto restart_cb;
} }
/* call the callback function */ /* call the callback function */
spin_lock_irq(&block->request_queue_lock);
cqr->endclk = get_clock(); cqr->endclk = get_clock();
list_del_init(&cqr->blocklist); list_del_init(&cqr->blocklist);
__dasd_cleanup_cqr(cqr); __dasd_cleanup_cqr(cqr);
spin_unlock_irq(&block->request_queue_lock);
} }
return rc; return rc;
} }
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
/* This is ugly... */ /* This is ugly... */
#define PRINTK_HEADER "dasd_devmap:" #define PRINTK_HEADER "dasd_devmap:"
#define DASD_BUS_ID_SIZE 20
#include "dasd_int.h" #include "dasd_int.h"
...@@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dasd_page_cache); ...@@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dasd_page_cache);
*/ */
struct dasd_devmap { struct dasd_devmap {
struct list_head list; struct list_head list;
char bus_id[BUS_ID_SIZE]; char bus_id[DASD_BUS_ID_SIZE];
unsigned int devindex; unsigned int devindex;
unsigned short features; unsigned short features;
struct dasd_device *device; struct dasd_device *device;
...@@ -94,7 +95,7 @@ dasd_hash_busid(const char *bus_id) ...@@ -94,7 +95,7 @@ dasd_hash_busid(const char *bus_id)
int hash, i; int hash, i;
hash = 0; hash = 0;
for (i = 0; (i < BUS_ID_SIZE) && *bus_id; i++, bus_id++) for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++)
hash += *bus_id; hash += *bus_id;
return hash & 0xff; return hash & 0xff;
} }
...@@ -301,7 +302,7 @@ dasd_parse_range( char *parsestring ) { ...@@ -301,7 +302,7 @@ dasd_parse_range( char *parsestring ) {
int from, from_id0, from_id1; int from, from_id0, from_id1;
int to, to_id0, to_id1; int to, to_id0, to_id1;
int features, rc; int features, rc;
char bus_id[BUS_ID_SIZE+1], *str; char bus_id[DASD_BUS_ID_SIZE+1], *str;
str = parsestring; str = parsestring;
rc = dasd_busid(&str, &from_id0, &from_id1, &from); rc = dasd_busid(&str, &from_id0, &from_id1, &from);
...@@ -407,14 +408,14 @@ dasd_add_busid(const char *bus_id, int features) ...@@ -407,14 +408,14 @@ dasd_add_busid(const char *bus_id, int features)
devmap = NULL; devmap = NULL;
hash = dasd_hash_busid(bus_id); hash = dasd_hash_busid(bus_id);
list_for_each_entry(tmp, &dasd_hashlists[hash], list) list_for_each_entry(tmp, &dasd_hashlists[hash], list)
if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
devmap = tmp; devmap = tmp;
break; break;
} }
if (!devmap) { if (!devmap) {
/* This bus_id is new. */ /* This bus_id is new. */
new->devindex = dasd_max_devindex++; new->devindex = dasd_max_devindex++;
strncpy(new->bus_id, bus_id, BUS_ID_SIZE); strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
new->features = features; new->features = features;
new->device = NULL; new->device = NULL;
list_add(&new->list, &dasd_hashlists[hash]); list_add(&new->list, &dasd_hashlists[hash]);
...@@ -439,7 +440,7 @@ dasd_find_busid(const char *bus_id) ...@@ -439,7 +440,7 @@ dasd_find_busid(const char *bus_id)
devmap = ERR_PTR(-ENODEV); devmap = ERR_PTR(-ENODEV);
hash = dasd_hash_busid(bus_id); hash = dasd_hash_busid(bus_id);
list_for_each_entry(tmp, &dasd_hashlists[hash], list) { list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
devmap = tmp; devmap = tmp;
break; break;
} }
...@@ -561,7 +562,7 @@ dasd_create_device(struct ccw_device *cdev) ...@@ -561,7 +562,7 @@ dasd_create_device(struct ccw_device *cdev)
} }
spin_lock_irqsave(get_ccwdev_lock(cdev), flags); spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
cdev->dev.driver_data = device; dev_set_drvdata(&cdev->dev, device);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return device; return device;
...@@ -597,7 +598,7 @@ dasd_delete_device(struct dasd_device *device) ...@@ -597,7 +598,7 @@ dasd_delete_device(struct dasd_device *device)
/* Disconnect dasd_device structure from ccw_device structure. */ /* Disconnect dasd_device structure from ccw_device structure. */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
device->cdev->dev.driver_data = NULL; dev_set_drvdata(&device->cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* /*
...@@ -638,7 +639,7 @@ dasd_put_device_wake(struct dasd_device *device) ...@@ -638,7 +639,7 @@ dasd_put_device_wake(struct dasd_device *device)
struct dasd_device * struct dasd_device *
dasd_device_from_cdev_locked(struct ccw_device *cdev) dasd_device_from_cdev_locked(struct ccw_device *cdev)
{ {
struct dasd_device *device = cdev->dev.driver_data; struct dasd_device *device = dev_get_drvdata(&cdev->dev);
if (!device) if (!device)
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
......
...@@ -1496,7 +1496,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, ...@@ -1496,7 +1496,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
/* service information message SIM */ /* service information message SIM */
if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) && if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) &&
((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
dasd_3990_erp_handle_sim(device, irb->ecw); dasd_3990_erp_handle_sim(device, irb->ecw);
dasd_schedule_device_bh(device); dasd_schedule_device_bh(device);
......
...@@ -180,12 +180,12 @@ dasd_calc_metrics(char *page, char **start, off_t off, ...@@ -180,12 +180,12 @@ dasd_calc_metrics(char *page, char **start, off_t off,
#ifdef CONFIG_DASD_PROFILE #ifdef CONFIG_DASD_PROFILE
static char * static char *
dasd_statistics_array(char *str, unsigned int *array, int shift) dasd_statistics_array(char *str, unsigned int *array, int factor)
{ {
int i; int i;
for (i = 0; i < 32; i++) { for (i = 0; i < 32; i++) {
str += sprintf(str, "%7d ", array[i] >> shift); str += sprintf(str, "%7d ", array[i] / factor);
if (i == 15) if (i == 15)
str += sprintf(str, "\n"); str += sprintf(str, "\n");
} }
...@@ -202,7 +202,7 @@ dasd_statistics_read(char *page, char **start, off_t off, ...@@ -202,7 +202,7 @@ dasd_statistics_read(char *page, char **start, off_t off,
#ifdef CONFIG_DASD_PROFILE #ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info_t *prof; struct dasd_profile_info_t *prof;
char *str; char *str;
int shift; int factor;
/* check for active profiling */ /* check for active profiling */
if (dasd_profile_level == DASD_PROFILE_OFF) { if (dasd_profile_level == DASD_PROFILE_OFF) {
...@@ -214,12 +214,14 @@ dasd_statistics_read(char *page, char **start, off_t off, ...@@ -214,12 +214,14 @@ dasd_statistics_read(char *page, char **start, off_t off,
prof = &dasd_global_profile; prof = &dasd_global_profile;
/* prevent couter 'overflow' on output */ /* prevent couter 'overflow' on output */
for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++); for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
factor *= 10);
str = page; str = page;
str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs);
str += sprintf(str, "with %d sectors(512B each)\n", str += sprintf(str, "with %u sectors(512B each)\n",
prof->dasd_io_sects); prof->dasd_io_sects);
str += sprintf(str, "Scale Factor is %d\n", factor);
str += sprintf(str, str += sprintf(str,
" __<4 ___8 __16 __32 __64 _128 " " __<4 ___8 __16 __32 __64 _128 "
" _256 _512 __1k __2k __4k __8k " " _256 _512 __1k __2k __4k __8k "
...@@ -230,22 +232,22 @@ dasd_statistics_read(char *page, char **start, off_t off, ...@@ -230,22 +232,22 @@ dasd_statistics_read(char *page, char **start, off_t off,
" __1G __2G __4G " " _>4G\n"); " __1G __2G __4G " " _>4G\n");
str += sprintf(str, "Histogram of sizes (512B secs)\n"); str += sprintf(str, "Histogram of sizes (512B secs)\n");
str = dasd_statistics_array(str, prof->dasd_io_secs, shift); str = dasd_statistics_array(str, prof->dasd_io_secs, factor);
str += sprintf(str, "Histogram of I/O times (microseconds)\n"); str += sprintf(str, "Histogram of I/O times (microseconds)\n");
str = dasd_statistics_array(str, prof->dasd_io_times, shift); str = dasd_statistics_array(str, prof->dasd_io_times, factor);
str += sprintf(str, "Histogram of I/O times per sector\n"); str += sprintf(str, "Histogram of I/O times per sector\n");
str = dasd_statistics_array(str, prof->dasd_io_timps, shift); str = dasd_statistics_array(str, prof->dasd_io_timps, factor);
str += sprintf(str, "Histogram of I/O time till ssch\n"); str += sprintf(str, "Histogram of I/O time till ssch\n");
str = dasd_statistics_array(str, prof->dasd_io_time1, shift); str = dasd_statistics_array(str, prof->dasd_io_time1, factor);
str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); str += sprintf(str, "Histogram of I/O time between ssch and irq\n");
str = dasd_statistics_array(str, prof->dasd_io_time2, shift); str = dasd_statistics_array(str, prof->dasd_io_time2, factor);
str += sprintf(str, "Histogram of I/O time between ssch " str += sprintf(str, "Histogram of I/O time between ssch "
"and irq per sector\n"); "and irq per sector\n");
str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift); str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor);
str += sprintf(str, "Histogram of I/O time between irq and end\n"); str += sprintf(str, "Histogram of I/O time between irq and end\n");
str = dasd_statistics_array(str, prof->dasd_io_time3, shift); str = dasd_statistics_array(str, prof->dasd_io_time3, factor);
str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n");
str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift); str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor);
len = str - page; len = str - page;
#else #else
len = sprintf(page, "Statistics are not activated in this kernel\n"); len = sprintf(page, "Statistics are not activated in this kernel\n");
......
...@@ -4,6 +4,9 @@ ...@@ -4,6 +4,9 @@
* Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
*/ */
#define KMSG_COMPONENT "dcssblk"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h> #include <linux/module.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/ctype.h> #include <linux/ctype.h>
...@@ -17,19 +20,10 @@ ...@@ -17,19 +20,10 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <asm/s390_rdev.h> #include <asm/s390_rdev.h>
//#define DCSSBLK_DEBUG /* Debug messages on/off */
#define DCSSBLK_NAME "dcssblk" #define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1 #define DCSSBLK_MINORS_PER_DISK 1
#define DCSSBLK_PARM_LEN 400 #define DCSSBLK_PARM_LEN 400
#define DCSS_BUS_ID_SIZE 20
#ifdef DCSSBLK_DEBUG
#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
#else
#define PRINT_DEBUG(x...) do {} while (0)
#endif
#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
static int dcssblk_open(struct block_device *bdev, fmode_t mode); static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static int dcssblk_release(struct gendisk *disk, fmode_t mode); static int dcssblk_release(struct gendisk *disk, fmode_t mode);
...@@ -50,7 +44,7 @@ static struct block_device_operations dcssblk_devops = { ...@@ -50,7 +44,7 @@ static struct block_device_operations dcssblk_devops = {
struct dcssblk_dev_info { struct dcssblk_dev_info {
struct list_head lh; struct list_head lh;
struct device dev; struct device dev;
char segment_name[BUS_ID_SIZE]; char segment_name[DCSS_BUS_ID_SIZE];
atomic_t use_count; atomic_t use_count;
struct gendisk *gd; struct gendisk *gd;
unsigned long start; unsigned long start;
...@@ -65,7 +59,7 @@ struct dcssblk_dev_info { ...@@ -65,7 +59,7 @@ struct dcssblk_dev_info {
struct segment_info { struct segment_info {
struct list_head lh; struct list_head lh;
char segment_name[BUS_ID_SIZE]; char segment_name[DCSS_BUS_ID_SIZE];
unsigned long start; unsigned long start;
unsigned long end; unsigned long end;
int segment_type; int segment_type;
...@@ -261,10 +255,9 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) ...@@ -261,10 +255,9 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
/* check continuity */ /* check continuity */
for (i = 0; i < dev_info->num_of_segments - 1; i++) { for (i = 0; i < dev_info->num_of_segments - 1; i++) {
if ((sort_list[i].end + 1) != sort_list[i+1].start) { if ((sort_list[i].end + 1) != sort_list[i+1].start) {
PRINT_ERR("Segment %s is not contiguous with " pr_err("Adjacent DCSSs %s and %s are not "
"segment %s\n", "contiguous\n", sort_list[i].segment_name,
sort_list[i].segment_name, sort_list[i+1].segment_name);
sort_list[i+1].segment_name);
rc = -EINVAL; rc = -EINVAL;
goto out; goto out;
} }
...@@ -275,10 +268,10 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) ...@@ -275,10 +268,10 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
!(sort_list[i+1].segment_type & !(sort_list[i+1].segment_type &
SEGMENT_EXCLUSIVE) || SEGMENT_EXCLUSIVE) ||
(sort_list[i+1].segment_type == SEG_TYPE_ER)) { (sort_list[i+1].segment_type == SEG_TYPE_ER)) {
PRINT_ERR("Segment %s has different type from " pr_err("DCSS %s and DCSS %s have "
"segment %s\n", "incompatible types\n",
sort_list[i].segment_name, sort_list[i].segment_name,
sort_list[i+1].segment_name); sort_list[i+1].segment_name);
rc = -EINVAL; rc = -EINVAL;
goto out; goto out;
} }
...@@ -380,8 +373,9 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch ...@@ -380,8 +373,9 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
} else if (inbuf[0] == '0') { } else if (inbuf[0] == '0') {
/* reload segments in exclusive mode */ /* reload segments in exclusive mode */
if (dev_info->segment_type == SEG_TYPE_SC) { if (dev_info->segment_type == SEG_TYPE_SC) {
PRINT_ERR("Segment type SC (%s) cannot be loaded in " pr_err("DCSS %s is of type SC and cannot be "
"non-shared mode\n", dev_info->segment_name); "loaded as exclusive-writable\n",
dev_info->segment_name);
rc = -EINVAL; rc = -EINVAL;
goto out; goto out;
} }
...@@ -404,9 +398,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch ...@@ -404,9 +398,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
goto out; goto out;
removeseg: removeseg:
PRINT_ERR("Could not reload segment(s) of the device %s, removing " pr_err("DCSS device %s is removed after a failed access mode "
"segment(s) now!\n", "change\n", dev_info->segment_name);
dev_info->segment_name);
temp = entry; temp = entry;
list_for_each_entry(entry, &dev_info->seg_list, lh) { list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (entry != temp) if (entry != temp)
...@@ -454,17 +447,17 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char ...@@ -454,17 +447,17 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
if (inbuf[0] == '1') { if (inbuf[0] == '1') {
if (atomic_read(&dev_info->use_count) == 0) { if (atomic_read(&dev_info->use_count) == 0) {
// device is idle => we save immediately // device is idle => we save immediately
PRINT_INFO("Saving segment(s) of the device %s\n", pr_info("All DCSSs that map to device %s are "
dev_info->segment_name); "saved\n", dev_info->segment_name);
list_for_each_entry(entry, &dev_info->seg_list, lh) { list_for_each_entry(entry, &dev_info->seg_list, lh) {
segment_save(entry->segment_name); segment_save(entry->segment_name);
} }
} else { } else {
// device is busy => we save it when it becomes // device is busy => we save it when it becomes
// idle in dcssblk_release // idle in dcssblk_release
PRINT_INFO("Device %s is currently busy, segment(s) " pr_info("Device %s is in use, its DCSSs will be "
"will be saved when it becomes idle...\n", "saved when it becomes idle\n",
dev_info->segment_name); dev_info->segment_name);
dev_info->save_pending = 1; dev_info->save_pending = 1;
} }
} else if (inbuf[0] == '0') { } else if (inbuf[0] == '0') {
...@@ -472,9 +465,9 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char ...@@ -472,9 +465,9 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
// device is busy & the user wants to undo his save // device is busy & the user wants to undo his save
// request // request
dev_info->save_pending = 0; dev_info->save_pending = 0;
PRINT_INFO("Pending save for segment(s) of the device " pr_info("A pending save request for device %s "
"%s deactivated\n", "has been canceled\n",
dev_info->segment_name); dev_info->segment_name);
} }
} else { } else {
up_write(&dcssblk_devices_sem); up_write(&dcssblk_devices_sem);
...@@ -614,9 +607,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char ...@@ -614,9 +607,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
seg_byte_size = (dev_info->end - dev_info->start + 1); seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, " pr_info("Loaded %s with total size %lu bytes and capacity %lu "
"capacity = %lu (512 Byte) sectors\n", local_buf, "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
seg_byte_size, seg_byte_size >> 9);
dev_info->save_pending = 0; dev_info->save_pending = 0;
dev_info->is_shared = 1; dev_info->is_shared = 1;
...@@ -744,13 +736,15 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch ...@@ -744,13 +736,15 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
dev_info = dcssblk_get_device_by_name(local_buf); dev_info = dcssblk_get_device_by_name(local_buf);
if (dev_info == NULL) { if (dev_info == NULL) {
up_write(&dcssblk_devices_sem); up_write(&dcssblk_devices_sem);
PRINT_WARN("Device %s is not loaded!\n", local_buf); pr_warning("Device %s cannot be removed because it is not a "
"known device\n", local_buf);
rc = -ENODEV; rc = -ENODEV;
goto out_buf; goto out_buf;
} }
if (atomic_read(&dev_info->use_count) != 0) { if (atomic_read(&dev_info->use_count) != 0) {
up_write(&dcssblk_devices_sem); up_write(&dcssblk_devices_sem);
PRINT_WARN("Device %s is in use!\n", local_buf); pr_warning("Device %s cannot be removed while it is in "
"use\n", local_buf);
rc = -EBUSY; rc = -EBUSY;
goto out_buf; goto out_buf;
} }
...@@ -807,8 +801,8 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) ...@@ -807,8 +801,8 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
down_write(&dcssblk_devices_sem); down_write(&dcssblk_devices_sem);
if (atomic_dec_and_test(&dev_info->use_count) if (atomic_dec_and_test(&dev_info->use_count)
&& (dev_info->save_pending)) { && (dev_info->save_pending)) {
PRINT_INFO("Device %s became idle and is being saved now\n", pr_info("Device %s has become idle and is being saved "
dev_info->segment_name); "now\n", dev_info->segment_name);
list_for_each_entry(entry, &dev_info->seg_list, lh) { list_for_each_entry(entry, &dev_info->seg_list, lh) {
segment_save(entry->segment_name); segment_save(entry->segment_name);
} }
...@@ -851,7 +845,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) ...@@ -851,7 +845,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
case SEG_TYPE_SC: case SEG_TYPE_SC:
/* cannot write to these segments */ /* cannot write to these segments */
if (bio_data_dir(bio) == WRITE) { if (bio_data_dir(bio) == WRITE) {
PRINT_WARN("rejecting write to ro device %s\n", pr_warning("Writing to %s failed because it "
"is a read-only device\n",
dev_name(&dev_info->dev)); dev_name(&dev_info->dev));
goto fail; goto fail;
} }
......
...@@ -25,6 +25,9 @@ ...@@ -25,6 +25,9 @@
* generic hard disk support to replace ad-hoc partitioning * generic hard disk support to replace ad-hoc partitioning
*/ */
#define KMSG_COMPONENT "xpram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h> #include <linux/module.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/ctype.h> /* isdigit, isxdigit */ #include <linux/ctype.h> /* isdigit, isxdigit */
...@@ -42,12 +45,6 @@ ...@@ -42,12 +45,6 @@
#define XPRAM_DEVS 1 /* one partition */ #define XPRAM_DEVS 1 /* one partition */
#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ #define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
#define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x)
#define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x)
#define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x)
#define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x)
typedef struct { typedef struct {
unsigned int size; /* size of xpram segment in pages */ unsigned int size; /* size of xpram segment in pages */
unsigned int offset; /* start page of xpram segment */ unsigned int offset; /* start page of xpram segment */
...@@ -264,7 +261,7 @@ static int __init xpram_setup_sizes(unsigned long pages) ...@@ -264,7 +261,7 @@ static int __init xpram_setup_sizes(unsigned long pages)
/* Check number of devices. */ /* Check number of devices. */
if (devs <= 0 || devs > XPRAM_MAX_DEVS) { if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
PRINT_ERR("invalid number %d of devices\n",devs); pr_err("%d is not a valid number of XPRAM devices\n",devs);
return -EINVAL; return -EINVAL;
} }
xpram_devs = devs; xpram_devs = devs;
...@@ -295,22 +292,22 @@ static int __init xpram_setup_sizes(unsigned long pages) ...@@ -295,22 +292,22 @@ static int __init xpram_setup_sizes(unsigned long pages)
mem_auto_no++; mem_auto_no++;
} }
PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs); pr_info(" number of devices (partitions): %d \n", xpram_devs);
for (i = 0; i < xpram_devs; i++) { for (i = 0; i < xpram_devs; i++) {
if (xpram_sizes[i]) if (xpram_sizes[i])
PRINT_INFO(" size of partition %d: %u kB\n", pr_info(" size of partition %d: %u kB\n",
i, xpram_sizes[i]); i, xpram_sizes[i]);
else else
PRINT_INFO(" size of partition %d to be set " pr_info(" size of partition %d to be set "
"automatically\n",i); "automatically\n",i);
} }
PRINT_DEBUG(" memory needed (for sized partitions): %lu kB\n", pr_info(" memory needed (for sized partitions): %lu kB\n",
mem_needed); mem_needed);
PRINT_DEBUG(" partitions to be sized automatically: %d\n", pr_info(" partitions to be sized automatically: %d\n",
mem_auto_no); mem_auto_no);
if (mem_needed > pages * 4) { if (mem_needed > pages * 4) {
PRINT_ERR("Not enough expanded memory available\n"); pr_err("Not enough expanded memory available\n");
return -EINVAL; return -EINVAL;
} }
...@@ -322,8 +319,8 @@ static int __init xpram_setup_sizes(unsigned long pages) ...@@ -322,8 +319,8 @@ static int __init xpram_setup_sizes(unsigned long pages)
*/ */
if (mem_auto_no) { if (mem_auto_no) {
mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
PRINT_INFO(" automatically determined " pr_info(" automatically determined "
"partition size: %lu kB\n", mem_auto); "partition size: %lu kB\n", mem_auto);
for (i = 0; i < xpram_devs; i++) for (i = 0; i < xpram_devs; i++)
if (xpram_sizes[i] == 0) if (xpram_sizes[i] == 0)
xpram_sizes[i] = mem_auto; xpram_sizes[i] = mem_auto;
...@@ -405,12 +402,12 @@ static int __init xpram_init(void) ...@@ -405,12 +402,12 @@ static int __init xpram_init(void)
/* Find out size of expanded memory. */ /* Find out size of expanded memory. */
if (xpram_present() != 0) { if (xpram_present() != 0) {
PRINT_WARN("No expanded memory available\n"); pr_err("No expanded memory available\n");
return -ENODEV; return -ENODEV;
} }
xpram_pages = xpram_highest_page_index() + 1; xpram_pages = xpram_highest_page_index() + 1;
PRINT_INFO(" %u pages expanded memory found (%lu KB).\n", pr_info(" %u pages expanded memory found (%lu KB).\n",
xpram_pages, (unsigned long) xpram_pages*4); xpram_pages, (unsigned long) xpram_pages*4);
rc = xpram_setup_sizes(xpram_pages); rc = xpram_setup_sizes(xpram_pages);
if (rc) if (rc)
return rc; return rc;
......
...@@ -7,6 +7,9 @@ ...@@ -7,6 +7,9 @@
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/ */
#define KMSG_COMPONENT "monreader"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h> #include <linux/module.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -24,19 +27,6 @@ ...@@ -24,19 +27,6 @@
#include <asm/ebcdic.h> #include <asm/ebcdic.h>
#include <asm/extmem.h> #include <asm/extmem.h>
//#define MON_DEBUG /* Debug messages on/off */
#define MON_NAME "monreader"
#define P_INFO(x...) printk(KERN_INFO MON_NAME " info: " x)
#define P_ERROR(x...) printk(KERN_ERR MON_NAME " error: " x)
#define P_WARNING(x...) printk(KERN_WARNING MON_NAME " warning: " x)
#ifdef MON_DEBUG
#define P_DEBUG(x...) printk(KERN_DEBUG MON_NAME " debug: " x)
#else
#define P_DEBUG(x...) do {} while (0)
#endif
#define MON_COLLECT_SAMPLE 0x80 #define MON_COLLECT_SAMPLE 0x80
#define MON_COLLECT_EVENT 0x40 #define MON_COLLECT_EVENT 0x40
...@@ -172,7 +162,7 @@ static int mon_send_reply(struct mon_msg *monmsg, ...@@ -172,7 +162,7 @@ static int mon_send_reply(struct mon_msg *monmsg,
} else } else
monmsg->replied_msglim = 1; monmsg->replied_msglim = 1;
if (rc) { if (rc) {
P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc); pr_err("Reading monitor data failed with rc=%i\n", rc);
return -EIO; return -EIO;
} }
return 0; return 0;
...@@ -251,7 +241,8 @@ static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) ...@@ -251,7 +241,8 @@ static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{ {
struct mon_private *monpriv = path->private; struct mon_private *monpriv = path->private;
P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]); pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
ipuser[0]);
iucv_path_sever(path, NULL); iucv_path_sever(path, NULL);
atomic_set(&monpriv->iucv_severed, 1); atomic_set(&monpriv->iucv_severed, 1);
wake_up(&mon_conn_wait_queue); wake_up(&mon_conn_wait_queue);
...@@ -266,8 +257,7 @@ static void mon_iucv_message_pending(struct iucv_path *path, ...@@ -266,8 +257,7 @@ static void mon_iucv_message_pending(struct iucv_path *path,
memcpy(&monpriv->msg_array[monpriv->write_index]->msg, memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
msg, sizeof(*msg)); msg, sizeof(*msg));
if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
P_WARNING("IUCV message pending, message limit (%i) reached\n", pr_warning("The read queue for monitor data is full\n");
MON_MSGLIM);
monpriv->msg_array[monpriv->write_index]->msglim_reached = 1; monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
} }
monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM; monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
...@@ -311,8 +301,8 @@ static int mon_open(struct inode *inode, struct file *filp) ...@@ -311,8 +301,8 @@ static int mon_open(struct inode *inode, struct file *filp)
rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
MON_SERVICE, NULL, user_data_connect, monpriv); MON_SERVICE, NULL, user_data_connect, monpriv);
if (rc) { if (rc) {
P_ERROR("iucv connection to *MONITOR failed with " pr_err("Connecting to the z/VM *MONITOR system service "
"IPUSER SEVER code = %i\n", rc); "failed with rc=%i\n", rc);
rc = -EIO; rc = -EIO;
goto out_path; goto out_path;
} }
...@@ -353,7 +343,8 @@ static int mon_close(struct inode *inode, struct file *filp) ...@@ -353,7 +343,8 @@ static int mon_close(struct inode *inode, struct file *filp)
*/ */
rc = iucv_path_sever(monpriv->path, user_data_sever); rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc) if (rc)
P_ERROR("close, iucv_sever failed with rc = %i\n", rc); pr_warning("Disconnecting the z/VM *MONITOR system service "
"failed with rc=%i\n", rc);
atomic_set(&monpriv->iucv_severed, 0); atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0); atomic_set(&monpriv->iucv_connected, 0);
...@@ -469,7 +460,8 @@ static int __init mon_init(void) ...@@ -469,7 +460,8 @@ static int __init mon_init(void)
int rc; int rc;
if (!MACHINE_IS_VM) { if (!MACHINE_IS_VM) {
P_ERROR("not running under z/VM, driver not loaded\n"); pr_err("The z/VM *MONITOR record device driver cannot be "
"loaded without z/VM\n");
return -ENODEV; return -ENODEV;
} }
...@@ -478,7 +470,8 @@ static int __init mon_init(void) ...@@ -478,7 +470,8 @@ static int __init mon_init(void)
*/ */
rc = iucv_register(&monreader_iucv_handler, 1); rc = iucv_register(&monreader_iucv_handler, 1);
if (rc) { if (rc) {
P_ERROR("failed to register with iucv driver\n"); pr_err("The z/VM *MONITOR record device driver failed to "
"register with IUCV\n");
return rc; return rc;
} }
...@@ -488,8 +481,8 @@ static int __init mon_init(void) ...@@ -488,8 +481,8 @@ static int __init mon_init(void)
goto out_iucv; goto out_iucv;
} }
if (rc != SEG_TYPE_SC) { if (rc != SEG_TYPE_SC) {
P_ERROR("segment %s has unsupported type, should be SC\n", pr_err("The specified *MONITOR DCSS %s does not have the "
mon_dcss_name); "required type SC\n", mon_dcss_name);
rc = -EINVAL; rc = -EINVAL;
goto out_iucv; goto out_iucv;
} }
......
...@@ -8,6 +8,9 @@ ...@@ -8,6 +8,9 @@
* Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
*/ */
#define KMSG_COMPONENT "monwriter"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h> #include <linux/module.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -64,9 +67,9 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) ...@@ -64,9 +67,9 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
if (rc <= 0) if (rc <= 0)
return rc; return rc;
pr_err("Writing monitor data failed with rc=%i\n", rc);
if (rc == 5) if (rc == 5)
return -EPERM; return -EPERM;
printk("DIAG X'DC' error with return code: %i\n", rc);
return -EINVAL; return -EINVAL;
} }
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -9,6 +9,9 @@ ...@@ -9,6 +9,9 @@
* Arnd Bergmann (arndb@de.ibm.com) * Arnd Bergmann (arndb@de.ibm.com)
*/ */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h> #include <linux/init.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -50,9 +53,10 @@ static int blacklist_range(range_action action, unsigned int from_ssid, ...@@ -50,9 +53,10 @@ static int blacklist_range(range_action action, unsigned int from_ssid,
{ {
if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
if (msgtrigger) if (msgtrigger)
printk(KERN_WARNING "cio: Invalid cio_ignore range " pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
"0.%x.%04x-0.%x.%04x\n", from_ssid, from, "range for cio_ignore\n", from_ssid, from,
to_ssid, to); to_ssid, to);
return 1; return 1;
} }
...@@ -140,8 +144,8 @@ static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, ...@@ -140,8 +144,8 @@ static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
rc = 0; rc = 0;
out: out:
if (rc && msgtrigger) if (rc && msgtrigger)
printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n", pr_warning("%s is not a valid device for the cio_ignore "
str); "kernel parameter\n", str);
return rc; return rc;
} }
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment