Commit fb5131e1 authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (65 commits)
  [S390] prevent unneccesary loops_per_jiffy recalculation
  [S390] cpuinfo: use get_online_cpus() instead of preempt_disable()
  [S390] smp: remove cpu hotplug messages
  [S390] mutex: enable spinning mutex on s390
  [S390] mutex: Introduce arch_mutex_cpu_relax()
  [S390] cio: fix ccwgroup unregistration race condition
  [S390] perf: add DWARF register lookup for s390
  [S390] cleanup ftrace backend functions
  [S390] ptrace cleanup
  [S390] smp/idle: call init_idle() before starting a new cpu
  [S390] smp: delay idle task creation
  [S390] dasd: Correct retry counter for terminated I/O.
  [S390] dasd: Add support for raw ECKD access.
  [S390] dasd: Prevent deadlock during suspend/resume.
  [S390] dasd: Improve handling of stolen DASD reservation
  [S390] dasd: do path verification for paths added at runtime
  [S390] dasd: add High Performance FICON multitrack support
  [S390] cio: reduce memory consumption of itcw structures
  [S390] nmi: enable machine checks early
  [S390] qeth: buffer count imbalance
  ...
parents d074b104 8e102301
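Two of the mutex entries above ("mutex: Introduce arch_mutex_cpu_relax()" and "mutex: enable spinning mutex on s390") hinge on one small hook: the generic mutex spin loop relaxes with cpu_relax() unless an architecture supplies its own primitive. The s390 side of that is roughly the sketch below (a hedged reconstruction, not taken from the hunks shown in this merge view); cpu_relax() on s390 yields the CPU to the hypervisor, which is far too expensive to do on every spin iteration, so a plain compiler barrier is used instead.

/* sketch, assumed to live in arch/s390/include/asm/mutex.h */
#include <asm-generic/mutex-dec.h>

/* spinning mutex waiters only need a compiler barrier, not a hypervisor yield */
#define arch_mutex_cpu_relax()	barrier()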
-config SCHED_MC
-	def_bool y
-	depends on SMP
 config MMU
 	def_bool y
 config ZONE_DMA
-	def_bool y
-	depends on 64BIT
+	def_bool y if 64BIT
 config LOCKDEP_SUPPORT
 	def_bool y
@@ -25,12 +20,10 @@ config RWSEM_XCHGADD_ALGORITHM
 	def_bool y
 config ARCH_HAS_ILOG2_U32
-	bool
-	default n
+	def_bool n
 config ARCH_HAS_ILOG2_U64
-	bool
-	default n
+	def_bool n
 config GENERIC_HWEIGHT
 	def_bool y
@@ -42,9 +35,7 @@ config GENERIC_CLOCKEVENTS
 	def_bool y
 config GENERIC_BUG
-	bool
-	depends on BUG
-	default y
+	def_bool y if BUG
 config GENERIC_BUG_RELATIVE_POINTERS
 	def_bool y
@@ -59,13 +50,10 @@ config ARCH_DMA_ADDR_T_64BIT
 	def_bool 64BIT
 config GENERIC_LOCKBREAK
-	bool
-	default y
-	depends on SMP && PREEMPT
+	def_bool y if SMP && PREEMPT
 config PGSTE
-	bool
-	default y if KVM
+	def_bool y if KVM
 config VIRT_CPU_ACCOUNTING
 	def_bool y
@@ -85,7 +73,6 @@ config S390
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_DEFAULT_NO_SPIN_MUTEXES
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
@@ -130,8 +117,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 config SCHED_OMIT_FRAME_POINTER
-	bool
-	default y
+	def_bool y
 source "init/Kconfig"
@@ -144,20 +130,21 @@ comment "Processor type and features"
 source "kernel/time/Kconfig"
 config 64BIT
-	bool "64 bit kernel"
+	def_bool y
+	prompt "64 bit kernel"
 	help
 	  Select this option if you have an IBM z/Architecture machine
 	  and want to use the 64 bit addressing mode.
 config 32BIT
-	bool
-	default y if !64BIT
+	def_bool y if !64BIT
 config KTIME_SCALAR
 	def_bool 32BIT
 config SMP
-	bool "Symmetric multi-processing support"
+	def_bool y
+	prompt "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
@@ -189,10 +176,10 @@ config NR_CPUS
 	  approximately sixteen kilobytes to the kernel image.
 config HOTPLUG_CPU
-	bool "Support for hot-pluggable CPUs"
+	def_bool y
+	prompt "Support for hot-pluggable CPUs"
 	depends on SMP
 	select HOTPLUG
-	default n
 	help
 	  Say Y here to be able to turn CPUs off and on. CPUs
 	  can be controlled through /sys/devices/system/cpu/cpu#.
@@ -208,14 +195,16 @@ config SCHED_MC
 	  increased overhead in some places.
 config SCHED_BOOK
-	bool "Book scheduler support"
+	def_bool y
+	prompt "Book scheduler support"
 	depends on SMP && SCHED_MC
 	help
 	  Book scheduler support improves the CPU scheduler's decision making
 	  when dealing with machines that have several books.
 config MATHEMU
-	bool "IEEE FPU emulation"
+	def_bool y
+	prompt "IEEE FPU emulation"
 	depends on MARCH_G5
 	help
 	  This option is required for IEEE compliant floating point arithmetic
@@ -223,7 +212,8 @@ config MATHEMU
 	  need this.
 config COMPAT
-	bool "Kernel support for 31 bit emulation"
+	def_bool y
+	prompt "Kernel support for 31 bit emulation"
 	depends on 64BIT
 	select COMPAT_BINFMT_ELF
 	help
@@ -233,16 +223,14 @@ config COMPAT
 	  executing 31 bit applications.  It is safe to say "Y".
 config SYSVIPC_COMPAT
-	bool
-	depends on COMPAT && SYSVIPC
-	default y
+	def_bool y if COMPAT && SYSVIPC
 config AUDIT_ARCH
-	bool
-	default y
+	def_bool y
 config S390_EXEC_PROTECT
-	bool "Data execute protection"
+	def_bool y
+	prompt "Data execute protection"
 	help
 	  This option allows to enable a buffer overflow protection for user
 	  space programs and it also selects the addressing mode option above.
@@ -302,7 +290,8 @@ config MARCH_Z196
 endchoice
 config PACK_STACK
-	bool "Pack kernel stack"
+	def_bool y
+	prompt "Pack kernel stack"
 	help
 	  This option enables the compiler option -mkernel-backchain if it
 	  is available. If the option is available the compiler supports
@@ -315,7 +304,8 @@ config PACK_STACK
 	  Say Y if you are unsure.
 config SMALL_STACK
-	bool "Use 8kb for kernel stack instead of 16kb"
+	def_bool n
+	prompt "Use 8kb for kernel stack instead of 16kb"
 	depends on PACK_STACK && 64BIT && !LOCKDEP
 	help
 	  If you say Y here and the compiler supports the -mkernel-backchain
@@ -327,7 +317,8 @@ config SMALL_STACK
 	  Say N if you are unsure.
 config CHECK_STACK
-	bool "Detect kernel stack overflow"
+	def_bool y
+	prompt "Detect kernel stack overflow"
 	help
 	  This option enables the compiler option -mstack-guard and
 	  -mstack-size if they are available. If the compiler supports them
@@ -351,7 +342,8 @@ config STACK_GUARD
 	  512 for 64 bit.
 config WARN_STACK
-	bool "Emit compiler warnings for function with broken stack usage"
+	def_bool n
+	prompt "Emit compiler warnings for function with broken stack usage"
 	help
 	  This option enables the compiler options -mwarn-framesize and
 	  -mwarn-dynamicstack. If the compiler supports these options it
@@ -386,24 +378,24 @@ config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 config ARCH_ENABLE_MEMORY_HOTPLUG
-	def_bool y
-	depends on SPARSEMEM
+	def_bool y if SPARSEMEM
 config ARCH_ENABLE_MEMORY_HOTREMOVE
 	def_bool y
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y if 64BIT
 source "mm/Kconfig"
 comment "I/O subsystem configuration"
 config QDIO
-	tristate "QDIO support"
+	def_tristate y
+	prompt "QDIO support"
 	---help---
 	  This driver provides the Queued Direct I/O base support for
 	  IBM System z.
@@ -414,7 +406,8 @@ config QDIO
 	  If unsure, say Y.
 config CHSC_SCH
-	tristate "Support for CHSC subchannels"
+	def_tristate y
+	prompt "Support for CHSC subchannels"
 	help
 	  This driver allows usage of CHSC subchannels. A CHSC subchannel
 	  is usually present on LPAR only.
@@ -432,7 +425,8 @@ config CHSC_SCH
 comment "Misc"
 config IPL
-	bool "Builtin IPL record support"
+	def_bool y
+	prompt "Builtin IPL record support"
 	help
 	  If you want to use the produced kernel to IPL directly from a
 	  device, you have to merge a bootsector specific to the device
@@ -464,7 +458,8 @@ config FORCE_MAX_ZONEORDER
 	default "9"
 config PFAULT
-	bool "Pseudo page fault support"
+	def_bool y
+	prompt "Pseudo page fault support"
 	help
 	  Select this option, if you want to use PFAULT pseudo page fault
 	  handling under VM. If running native or in LPAR, this option
@@ -476,7 +471,8 @@ config PFAULT
 	  this option.
 config SHARED_KERNEL
-	bool "VM shared kernel support"
+	def_bool y
+	prompt "VM shared kernel support"
 	help
 	  Select this option, if you want to share the text segment of the
 	  Linux kernel between different VM guests. This reduces memory
@@ -487,7 +483,8 @@ config SHARED_KERNEL
 	  doing and want to exploit this feature.
 config CMM
-	tristate "Cooperative memory management"
+	def_tristate n
+	prompt "Cooperative memory management"
 	help
 	  Select this option, if you want to enable the kernel interface
 	  to reduce the memory size of the system. This is accomplished
@@ -499,14 +496,16 @@ config CMM
 	  option.
 config CMM_IUCV
-	bool "IUCV special message interface to cooperative memory management"
+	def_bool y
+	prompt "IUCV special message interface to cooperative memory management"
 	depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV)
 	help
 	  Select this option to enable the special message interface to
 	  the cooperative memory management.
 config APPLDATA_BASE
-	bool "Linux - VM Monitor Stream, base infrastructure"
+	def_bool n
+	prompt "Linux - VM Monitor Stream, base infrastructure"
 	depends on PROC_FS
 	help
 	  This provides a kernel interface for creating and updating z/VM APPLDATA
@@ -521,7 +520,8 @@ config APPLDATA_BASE
 	  The /proc entries can also be read from, showing the current settings.
 config APPLDATA_MEM
-	tristate "Monitor memory management statistics"
+	def_tristate m
+	prompt "Monitor memory management statistics"
 	depends on APPLDATA_BASE && VM_EVENT_COUNTERS
 	help
 	  This provides memory management related data to the Linux - VM Monitor
@@ -537,7 +537,8 @@ config APPLDATA_MEM
 	  appldata_mem.o.
 config APPLDATA_OS
-	tristate "Monitor OS statistics"
+	def_tristate m
+	prompt "Monitor OS statistics"
 	depends on APPLDATA_BASE
 	help
 	  This provides OS related data to the Linux - VM Monitor Stream, like
@@ -551,7 +552,8 @@ config APPLDATA_OS
 	  appldata_os.o.
 config APPLDATA_NET_SUM
-	tristate "Monitor overall network statistics"
+	def_tristate m
+	prompt "Monitor overall network statistics"
 	depends on APPLDATA_BASE && NET
 	help
 	  This provides network related data to the Linux - VM Monitor Stream,
@@ -568,30 +570,32 @@ config APPLDATA_NET_SUM
 source kernel/Kconfig.hz
 config S390_HYPFS_FS
-	bool "s390 hypervisor file system support"
+	def_bool y
+	prompt "s390 hypervisor file system support"
 	select SYS_HYPERVISOR
-	default y
 	help
 	  This is a virtual file system intended to provide accounting
 	  information in an s390 hypervisor environment.
 config KEXEC
-	bool "kexec system call"
+	def_bool n
+	prompt "kexec system call"
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
 	  but is independent of hardware/microcode support.
 config ZFCPDUMP
-	bool "zfcpdump support"
+	def_bool n
+	prompt "zfcpdump support"
 	select SMP
-	default n
 	help
 	  Select this option if you want to build an zfcpdump enabled kernel.
 	  Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
 config S390_GUEST
-	bool "s390 guest support for KVM (EXPERIMENTAL)"
+	def_bool y
+	prompt "s390 guest support for KVM (EXPERIMENTAL)"
 	depends on 64BIT && EXPERIMENTAL
 	select VIRTIO
 	select VIRTIO_RING
@@ -603,9 +607,9 @@ bool "s390 guest support for KVM (EXPERIMENTAL)"
 	  the default console.
 config SECCOMP
-	bool "Enable seccomp to safely compute untrusted bytecode"
+	def_bool y
+	prompt "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
-	default y
 	help
 	  This kernel feature is useful for number crunching applications
 	  that may need to compute untrusted bytecode during their
......
 menu "Kernel hacking"
 config TRACE_IRQFLAGS_SUPPORT
-	bool
-	default y
+	def_bool y
 source "lib/Kconfig.debug"
@@ -19,7 +18,8 @@ config STRICT_DEVMEM
 	  If you are unsure, say Y.
 config DEBUG_STRICT_USER_COPY_CHECKS
-	bool "Strict user copy size checks"
+	def_bool n
+	prompt "Strict user copy size checks"
 	---help---
 	  Enabling this option turns a certain set of sanity checks for user
 	  copy operations into compile time warnings.
......
@@ -2,16 +2,12 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y CONFIG_AUDIT=y
CONFIG_RCU_TRACE=y
CONFIG_IKCONFIG=y CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_NS=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_COMPAT_BRK is not set CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y CONFIG_SLAB=y
CONFIG_KPROBES=y CONFIG_KPROBES=y
CONFIG_MODULES=y CONFIG_MODULES=y
@@ -20,24 +16,12 @@ CONFIG_MODVERSIONS=y
CONFIG_DEFAULT_DEADLINE=y CONFIG_DEFAULT_DEADLINE=y
CONFIG_NO_HZ=y CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y CONFIG_HIGH_RES_TIMERS=y
CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=32
CONFIG_COMPAT=y
CONFIG_S390_EXEC_PROTECT=y
CONFIG_PACK_STACK=y
CONFIG_CHECK_STACK=y
CONFIG_PREEMPT=y CONFIG_PREEMPT=y
CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y CONFIG_MEMORY_HOTREMOVE=y
CONFIG_QDIO=y
CONFIG_CHSC_SCH=m
CONFIG_IPL=y
CONFIG_BINFMT_MISC=m CONFIG_BINFMT_MISC=m
CONFIG_PFAULT=y
CONFIG_HZ_100=y CONFIG_HZ_100=y
CONFIG_KEXEC=y CONFIG_KEXEC=y
CONFIG_S390_GUEST=y
CONFIG_PM=y CONFIG_PM=y
CONFIG_HIBERNATION=y CONFIG_HIBERNATION=y
CONFIG_PACKET=y CONFIG_PACKET=y
@@ -46,16 +30,15 @@ CONFIG_NET_KEY=y
CONFIG_AFIUCV=m CONFIG_AFIUCV=m
CONFIG_INET=y CONFIG_INET=y
CONFIG_IP_MULTICAST=y CONFIG_IP_MULTICAST=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6=y CONFIG_IPV6=y
CONFIG_NETFILTER=y CONFIG_NET_SCTPPROBE=m
CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_L2TP=m
CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_L2TP_DEBUGFS=m
CONFIG_NF_CONNTRACK=m CONFIG_VLAN_8021Q=y
# CONFIG_NF_CT_PROTO_SCTP is not set
CONFIG_NET_SCHED=y CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_PRIO=m CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_MULTIQ=y
CONFIG_NET_SCH_RED=m CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TEQL=m
@@ -69,28 +52,14 @@ CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_MARK=y CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_ACT=y CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y CONFIG_NET_ACT_POLICE=y
CONFIG_NET_ACT_NAT=m
CONFIG_CAN=m
CONFIG_CAN_RAW=m
CONFIG_CAN_BCM=m
CONFIG_CAN_VCAN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_XIP=y CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_XPRAM=m
CONFIG_DASD=y
CONFIG_DASD_PROFILE=y
CONFIG_DASD_ECKD=y
CONFIG_DASD_FBA=y
CONFIG_DASD_DIAG=y
CONFIG_DASD_EER=y
CONFIG_VIRTIO_BLK=m
CONFIG_SCSI=y CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y CONFIG_CHR_DEV_ST=y
@@ -102,101 +71,92 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_ZFCP=y CONFIG_ZFCP=y
CONFIG_SCSI_DH=m CONFIG_ZFCP_DIF=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_SCSI_OSD_INITIATOR=m
CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_MIRROR=y
CONFIG_DM_ZERO=y
CONFIG_DM_MULTIPATH=m
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_DUMMY=m CONFIG_DUMMY=m
CONFIG_BONDING=m CONFIG_BONDING=m
CONFIG_EQUALIZER=m CONFIG_EQUALIZER=m
CONFIG_TUN=m CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_NET_ETHERNET=y CONFIG_NET_ETHERNET=y
CONFIG_LCS=m CONFIG_VIRTIO_NET=y
CONFIG_CTCM=m
CONFIG_QETH=y
CONFIG_QETH_L2=y
CONFIG_QETH_L3=y
CONFIG_VIRTIO_NET=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m CONFIG_RAW_DRIVER=m
CONFIG_TN3270=y
CONFIG_TN3270_TTY=y
CONFIG_TN3270_FS=m
CONFIG_TN3270_CONSOLE=y
CONFIG_TN3215=y
CONFIG_TN3215_CONSOLE=y
CONFIG_SCLP_TTY=y
CONFIG_SCLP_CONSOLE=y
CONFIG_SCLP_VT220_TTY=y
CONFIG_SCLP_VT220_CONSOLE=y
CONFIG_SCLP_CPI=m
CONFIG_SCLP_ASYNC=m
CONFIG_S390_TAPE=m
CONFIG_S390_TAPE_BLOCK=y
CONFIG_S390_TAPE_34XX=m
CONFIG_ACCESSIBILITY=y
CONFIG_EXT2_FS=y CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_PROC_KCORE=y CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NFS_V3=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_PARTITION_ADVANCED=y CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y CONFIG_IBM_PARTITION=y
CONFIG_DLM=m CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set CONFIG_TIMER_STATS=y
CONFIG_DEBUG_SPINLOCK=y CONFIG_PROVE_LOCKING=y
CONFIG_DEBUG_MUTEXES=y CONFIG_PROVE_RCU=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y CONFIG_DEBUG_SPINLOCK_SLEEP=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_NOTIFIERS=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y CONFIG_KPROBES_SANITY_TEST=y
CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_SAMPLES=y CONFIG_DEBUG_PAGEALLOC=y
CONFIG_CRYPTO_FIPS=y # CONFIG_FTRACE is not set
# CONFIG_STRICT_DEVMEM is not set
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_ZLIB=m CONFIG_CRYPTO_ZLIB=m
CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_LZO=m
CONFIG_ZCRYPT=m CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRC_T10DIF=y CONFIG_CRYPTO_DES_S390=m
CONFIG_CRC32=m CONFIG_CRYPTO_AES_S390=m
CONFIG_CRC7=m CONFIG_CRC7=m
CONFIG_KVM=m CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_BALLOON=m
@@ -4,4 +4,4 @@
 obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
-s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o
+s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o
@@ -12,6 +12,8 @@
 #include <linux/fs.h>
 #include <linux/types.h>
 #include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/kref.h>
 #define REG_FILE_MODE    0440
 #define UPDATE_FILE_MODE 0220
@@ -38,6 +40,33 @@ extern int hypfs_vm_init(void);
 extern void hypfs_vm_exit(void);
 extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
-/* Directory for debugfs files */
-extern struct dentry *hypfs_dbfs_dir;
+/* debugfs interface */
+struct hypfs_dbfs_file;
+
+struct hypfs_dbfs_data {
+	void			*buf;
+	void			*buf_free_ptr;
+	size_t			size;
+	struct hypfs_dbfs_file	*dbfs_file;
+	struct kref		kref;
+};
+
+struct hypfs_dbfs_file {
+	const char	*name;
+	int		(*data_create)(void **data, void **data_free_ptr,
+				       size_t *size);
+	void		(*data_free)(const void *buf_free_ptr);
+
+	/* Private data for hypfs_dbfs.c */
+	struct hypfs_dbfs_data	*data;
+	struct delayed_work	data_free_work;
+	struct mutex		lock;
+	struct dentry		*dentry;
+};
+
+extern int hypfs_dbfs_init(void);
+extern void hypfs_dbfs_exit(void);
+
+extern int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
+extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
 #endif /* _HYPFS_H_ */
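The two dbfs_file_* users further down (hypfs_diag.c and hypfs_vm.c) show the intended usage pattern of this interface; a hypothetical minimal client would look like the sketch below. The buffer handed back through data_create() is what dbfs_read() copies to user space, and data_free() is later called with the free pointer once the cached copy expires. The file name and the kasprintf() payload are illustrative only.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "hypfs.h"

static int dbfs_example_create(void **data, void **data_free_ptr, size_t *size)
{
	char *buf;

	buf = kasprintf(GFP_KERNEL, "example payload\n");
	if (!buf)
		return -ENOMEM;
	*data = buf;		/* start of the data handed to readers */
	*data_free_ptr = buf;	/* pointer later passed to .data_free */
	*size = strlen(buf);
	return 0;
}

static void dbfs_example_free(const void *buf)
{
	kfree(buf);
}

static struct hypfs_dbfs_file dbfs_file_example = {
	.name		= "diag_example",	/* shows up under the s390_hypfs debugfs dir */
	.data_create	= dbfs_example_create,
	.data_free	= dbfs_example_free,
};

/* registration/removal, typically from an init/exit path:
 *	rc = hypfs_dbfs_create_file(&dbfs_file_example);
 *	...
 *	hypfs_dbfs_remove_file(&dbfs_file_example);
 */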
/*
* Hypervisor filesystem for Linux on s390 - debugfs interface
*
* Copyright (C) IBM Corp. 2010
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
#include <linux/slab.h>
#include "hypfs.h"
static struct dentry *dbfs_dir;
static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
{
struct hypfs_dbfs_data *data;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
kref_init(&data->kref);
data->dbfs_file = f;
return data;
}
static void hypfs_dbfs_data_free(struct kref *kref)
{
struct hypfs_dbfs_data *data;
data = container_of(kref, struct hypfs_dbfs_data, kref);
data->dbfs_file->data_free(data->buf_free_ptr);
kfree(data);
}
static void data_free_delayed(struct work_struct *work)
{
struct hypfs_dbfs_data *data;
struct hypfs_dbfs_file *df;
df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
mutex_lock(&df->lock);
data = df->data;
df->data = NULL;
mutex_unlock(&df->lock);
kref_put(&data->kref, hypfs_dbfs_data_free);
}
static ssize_t dbfs_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
struct hypfs_dbfs_data *data;
struct hypfs_dbfs_file *df;
ssize_t rc;
if (*ppos != 0)
return 0;
df = file->f_path.dentry->d_inode->i_private;
mutex_lock(&df->lock);
if (!df->data) {
data = hypfs_dbfs_data_alloc(df);
if (!data) {
mutex_unlock(&df->lock);
return -ENOMEM;
}
rc = df->data_create(&data->buf, &data->buf_free_ptr,
&data->size);
if (rc) {
mutex_unlock(&df->lock);
kfree(data);
return rc;
}
df->data = data;
schedule_delayed_work(&df->data_free_work, HZ);
}
data = df->data;
kref_get(&data->kref);
mutex_unlock(&df->lock);
rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
kref_put(&data->kref, hypfs_dbfs_data_free);
return rc;
}
static const struct file_operations dbfs_ops = {
.read = dbfs_read,
.llseek = no_llseek,
};
int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
{
df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
&dbfs_ops);
if (IS_ERR(df->dentry))
return PTR_ERR(df->dentry);
mutex_init(&df->lock);
INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
return 0;
}
void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
{
debugfs_remove(df->dentry);
}
int hypfs_dbfs_init(void)
{
dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
if (IS_ERR(dbfs_dir))
return PTR_ERR(dbfs_dir);
return 0;
}
void hypfs_dbfs_exit(void)
{
debugfs_remove(dbfs_dir);
}
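For reference, a hypothetical user-space consumer of one of the files created by this code, assuming debugfs is mounted at the usual /sys/kernel/debug: each read returns a snapshot that is either freshly generated by data_create() or, within roughly one second, the cached copy kept alive by the kref/delayed-work logic above. The buffer size chosen here is arbitrary.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char *buf;
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/s390_hypfs/diag_204", O_RDONLY);
	if (fd < 0)
		return 1;
	buf = malloc(1024 * 1024);		/* arbitrary upper bound */
	if (!buf) {
		close(fd);
		return 1;
	}
	n = read(fd, buf, 1024 * 1024);		/* single shot; llseek is not supported */
	if (n > 0)
		printf("read %zd bytes of diag 204 data\n", n);
	free(buf);
	close(fd);
	return 0;
}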
@@ -555,81 +555,38 @@ struct dbfs_d204 {
 	char	buf[];			/* d204 buffer */
 } __attribute__ ((packed));
-struct dbfs_d204_private {
-	struct dbfs_d204 *d204;	/* Aligned d204 data with header */
-	void *base;		/* Base pointer (needed for vfree) */
-};
-static int dbfs_d204_open(struct inode *inode, struct file *file)
+static int dbfs_d204_create(void **data, void **data_free_ptr, size_t *size)
 {
-	struct dbfs_d204_private *data;
 	struct dbfs_d204 *d204;
 	int rc, buf_size;
+	void *base;
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
 	buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
-	data->base = vmalloc(buf_size);
-	if (!data->base) {
-		rc = -ENOMEM;
-		goto fail_kfree_data;
-	}
-	memset(data->base, 0, buf_size);
-	d204 = page_align_ptr(data->base + sizeof(d204->hdr))
-		- sizeof(d204->hdr);
-	rc = diag204_do_store(&d204->buf, diag204_buf_pages);
-	if (rc)
-		goto fail_vfree_base;
+	base = vmalloc(buf_size);
+	if (!base)
+		return -ENOMEM;
+	memset(base, 0, buf_size);
+	d204 = page_align_ptr(base + sizeof(d204->hdr)) - sizeof(d204->hdr);
+	rc = diag204_do_store(d204->buf, diag204_buf_pages);
+	if (rc) {
+		vfree(base);
+		return rc;
+	}
 	d204->hdr.version = DBFS_D204_HDR_VERSION;
 	d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
 	d204->hdr.sc = diag204_store_sc;
-	data->d204 = d204;
-	file->private_data = data;
-	return nonseekable_open(inode, file);
-fail_vfree_base:
-	vfree(data->base);
-fail_kfree_data:
-	kfree(data);
-	return rc;
-}
-static int dbfs_d204_release(struct inode *inode, struct file *file)
-{
-	struct dbfs_d204_private *data = file->private_data;
-	vfree(data->base);
-	kfree(data);
+	*data = d204;
+	*data_free_ptr = base;
+	*size = d204->hdr.len + sizeof(struct dbfs_d204_hdr);
 	return 0;
 }
-static ssize_t dbfs_d204_read(struct file *file, char __user *buf,
-			      size_t size, loff_t *ppos)
-{
-	struct dbfs_d204_private *data = file->private_data;
-	return simple_read_from_buffer(buf, size, ppos, data->d204,
-				       data->d204->hdr.len +
-				       sizeof(data->d204->hdr));
-}
-static const struct file_operations dbfs_d204_ops = {
-	.open		= dbfs_d204_open,
-	.read		= dbfs_d204_read,
-	.release	= dbfs_d204_release,
-	.llseek		= no_llseek,
+static struct hypfs_dbfs_file dbfs_file_d204 = {
+	.name		= "diag_204",
+	.data_create	= dbfs_d204_create,
+	.data_free	= vfree,
 };
-static int hypfs_dbfs_init(void)
-{
-	dbfs_d204_file = debugfs_create_file("diag_204", 0400, hypfs_dbfs_dir,
-					     NULL, &dbfs_d204_ops);
-	if (IS_ERR(dbfs_d204_file))
-		return PTR_ERR(dbfs_d204_file);
-	return 0;
-}
 __init int hypfs_diag_init(void)
 {
 	int rc;
@@ -639,7 +596,7 @@ __init int hypfs_diag_init(void)
 		return -ENODATA;
 	}
 	if (diag204_info_type == INFO_EXT) {
-		rc = hypfs_dbfs_init();
+		rc = hypfs_dbfs_create_file(&dbfs_file_d204);
 		if (rc)
 			return rc;
 	}
@@ -660,6 +617,7 @@ void hypfs_diag_exit(void)
-	debugfs_remove(dbfs_d204_file);
 	diag224_delete_name_table();
 	diag204_free_buffer();
+	hypfs_dbfs_remove_file(&dbfs_file_d204);
 }
 /*
......
@@ -20,8 +20,6 @@ static char local_guest[] = " ";
 static char all_guests[] = "* ";
 static char *guest_query;
-static struct dentry *dbfs_d2fc_file;
 struct diag2fc_data {
 	__u32 version;
 	__u32 flags;
@@ -104,7 +102,7 @@ static void *diag2fc_store(char *query, unsigned int *count, int offset)
 	return data;
 }
-static void diag2fc_free(void *data)
+static void diag2fc_free(const void *data)
 {
 	vfree(data);
 }
@@ -239,43 +237,29 @@ struct dbfs_d2fc {
 	char	buf[];			/* d2fc buffer */
 } __attribute__ ((packed));
-static int dbfs_d2fc_open(struct inode *inode, struct file *file)
+static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
 {
-	struct dbfs_d2fc *data;
+	struct dbfs_d2fc *d2fc;
 	unsigned int count;
-	data = diag2fc_store(guest_query, &count, sizeof(data->hdr));
-	if (IS_ERR(data))
-		return PTR_ERR(data);
-	get_clock_ext(data->hdr.tod_ext);
-	data->hdr.len = count * sizeof(struct diag2fc_data);
-	data->hdr.version = DBFS_D2FC_HDR_VERSION;
-	data->hdr.count = count;
-	memset(&data->hdr.reserved, 0, sizeof(data->hdr.reserved));
-	file->private_data = data;
-	return nonseekable_open(inode, file);
-}
-static int dbfs_d2fc_release(struct inode *inode, struct file *file)
-{
-	diag2fc_free(file->private_data);
+	d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
+	if (IS_ERR(d2fc))
+		return PTR_ERR(d2fc);
+	get_clock_ext(d2fc->hdr.tod_ext);
+	d2fc->hdr.len = count * sizeof(struct diag2fc_data);
+	d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
+	d2fc->hdr.count = count;
+	memset(&d2fc->hdr.reserved, 0, sizeof(d2fc->hdr.reserved));
+	*data = d2fc;
+	*data_free_ptr = d2fc;
+	*size = d2fc->hdr.len + sizeof(struct dbfs_d2fc_hdr);
 	return 0;
 }
-static ssize_t dbfs_d2fc_read(struct file *file, char __user *buf,
-			      size_t size, loff_t *ppos)
-{
-	struct dbfs_d2fc *data = file->private_data;
-	return simple_read_from_buffer(buf, size, ppos, data, data->hdr.len +
-				       sizeof(struct dbfs_d2fc_hdr));
-}
-static const struct file_operations dbfs_d2fc_ops = {
-	.open		= dbfs_d2fc_open,
-	.read		= dbfs_d2fc_read,
-	.release	= dbfs_d2fc_release,
-	.llseek		= no_llseek,
+static struct hypfs_dbfs_file dbfs_file_2fc = {
+	.name		= "diag_2fc",
+	.data_create	= dbfs_diag2fc_create,
+	.data_free	= diag2fc_free,
 };
 int hypfs_vm_init(void)
@@ -288,18 +272,12 @@ int hypfs_vm_init(void)
 		guest_query = local_guest;
 	else
 		return -EACCES;
-	dbfs_d2fc_file = debugfs_create_file("diag_2fc", 0400, hypfs_dbfs_dir,
-					     NULL, &dbfs_d2fc_ops);
-	if (IS_ERR(dbfs_d2fc_file))
-		return PTR_ERR(dbfs_d2fc_file);
-	return 0;
+	return hypfs_dbfs_create_file(&dbfs_file_2fc);
 }
 void hypfs_vm_exit(void)
 {
 	if (!MACHINE_IS_VM)
 		return;
-	debugfs_remove(dbfs_d2fc_file);
+	hypfs_dbfs_remove_file(&dbfs_file_2fc);
 }
@@ -46,8 +46,6 @@ static const struct super_operations hypfs_s_ops;
 /* start of list of all dentries, which have to be deleted on update */
 static struct dentry *hypfs_last_dentry;
-struct dentry *hypfs_dbfs_dir;
 static void hypfs_update_update(struct super_block *sb)
 {
 	struct hypfs_sb_info *sb_info = sb->s_fs_info;
@@ -471,13 +469,12 @@ static int __init hypfs_init(void)
 {
 	int rc;
-	hypfs_dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
-	if (IS_ERR(hypfs_dbfs_dir))
-		return PTR_ERR(hypfs_dbfs_dir);
+	rc = hypfs_dbfs_init();
+	if (rc)
+		return rc;
 	if (hypfs_diag_init()) {
 		rc = -ENODATA;
-		goto fail_debugfs_remove;
+		goto fail_dbfs_exit;
 	}
 	if (hypfs_vm_init()) {
 		rc = -ENODATA;
@@ -499,9 +496,8 @@ static int __init hypfs_init(void)
 	hypfs_vm_exit();
 fail_hypfs_diag_exit:
 	hypfs_diag_exit();
-fail_debugfs_remove:
-	debugfs_remove(hypfs_dbfs_dir);
+fail_dbfs_exit:
+	hypfs_dbfs_exit();
 	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
@@ -510,7 +506,7 @@ static void __exit hypfs_exit(void)
 {
 	hypfs_diag_exit();
 	hypfs_vm_exit();
-	debugfs_remove(hypfs_dbfs_dir);
+	hypfs_dbfs_exit();
 	unregister_filesystem(&hypfs_type);
 	kobject_put(s390_kobj);
 }
......
@@ -204,6 +204,8 @@ int ccw_device_tm_start_timeout(struct ccw_device *, struct tcw *,
 				unsigned long, u8, int);
 int ccw_device_tm_intrg(struct ccw_device *cdev);
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask);
+
 extern int ccw_device_set_online(struct ccw_device *cdev);
 extern int ccw_device_set_offline(struct ccw_device *cdev);
......
@@ -202,7 +202,7 @@ static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
 static inline int s390_nohz_delay(int cpu)
 {
-	return per_cpu(s390_idle, cpu).nohz_delay != 0;
+	return __get_cpu_var(s390_idle).nohz_delay != 0;
 }
 #define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
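The change above swaps an indexed per-CPU lookup for the current-CPU accessor. A hedged illustration of the difference (the declaration is assumed here, not part of the patch): per_cpu() addresses the instance belonging to any given CPU, while __get_cpu_var() resolves to the instance of the CPU the code is running on, which is all s390_nohz_delay() needs since it is only called for the local CPU.

#include <linux/percpu.h>

DECLARE_PER_CPU(struct s390_idle_data, s390_idle);

static void example_accessors(int cpu)
{
	/* explicit index: may touch another CPU's copy */
	struct s390_idle_data *remote = &per_cpu(s390_idle, cpu);
	/* this CPU's copy, no index needed (2.6.38-era accessor) */
	struct s390_idle_data *local = &__get_cpu_var(s390_idle);

	(void)remote;
	(void)local;
}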
......
@@ -73,6 +73,7 @@ typedef struct dasd_information2_t {
  * 0x02: use diag discipline (diag)
  * 0x04: set the device initially online (internal use only)
  * 0x08: enable ERP related logging
+ * 0x20: give access to raw eckd data
  */
 #define DASD_FEATURE_DEFAULT	0x00
 #define DASD_FEATURE_READONLY	0x01
@@ -80,6 +81,8 @@ typedef struct dasd_information2_t {
 #define DASD_FEATURE_INITIAL_ONLINE	0x04
 #define DASD_FEATURE_ERPLOG		0x08
 #define DASD_FEATURE_FAILFAST		0x10
+#define DASD_FEATURE_FAILONSLCK		0x20
+#define DASD_FEATURE_USERAW		0x40
 #define DASD_PARTN_BITS 2
......
@@ -4,20 +4,17 @@
 #ifndef __ASSEMBLY__
 extern void _mcount(void);
-extern unsigned long ftrace_dyn_func;
 struct dyn_arch_ftrace { };
 #define MCOUNT_ADDR ((long)_mcount)
 #ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 18
-#define MCOUNT_INSN_SIZE  24
-#define MCOUNT_OFFSET	  14
+#define MCOUNT_INSN_SIZE  12
+#define MCOUNT_OFFSET	   8
 #else
-#define MCOUNT_OFFSET_RET 26
-#define MCOUNT_INSN_SIZE  30
-#define MCOUNT_OFFSET	   8
+#define MCOUNT_INSN_SIZE  20
+#define MCOUNT_OFFSET	   4
 #endif
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
......
@@ -21,20 +21,4 @@
 #define HARDIRQ_BITS	8
-void clock_comparator_work(void);
-
-static inline unsigned long long local_tick_disable(void)
-{
-	unsigned long long old;
-
-	old = S390_lowcore.clock_comparator;
-	S390_lowcore.clock_comparator = -1ULL;
-	return old;
-}
-
-static inline void local_tick_enable(unsigned long long comp)
-{
-	S390_lowcore.clock_comparator = comp;
-}
 #endif /* __ASM_HARDIRQ_H */
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
-#ifdef __KERNEL__
 #include <linux/hardirq.h>
-/*
- * the definition of irqs has changed in 2.5.46:
- * NR_IRQS is no longer the number of i/o
- * interrupts (65536), but rather the number
- * of interrupt classes (2).
- * Only external and i/o interrupts make much sense here (CH).
- */
 enum interruption_class {
 	EXTERNAL_INTERRUPT,
 	IO_INTERRUPT,
+	EXTINT_CLK,
+	EXTINT_IPI,
+	EXTINT_TMR,
+	EXTINT_TLA,
+	EXTINT_PFL,
+	EXTINT_DSD,
+	EXTINT_VRT,
+	EXTINT_SCP,
+	EXTINT_IUC,
+	IOINT_QAI,
+	IOINT_QDI,
+	IOINT_DAS,
+	IOINT_C15,
+	IOINT_C70,
+	IOINT_TAP,
+	IOINT_VMR,
+	IOINT_LCS,
+	IOINT_CLW,
+	IOINT_CTC,
+	IOINT_APB,
+	NMI_NMI,
 	NR_IRQS,
 };
-#endif /* __KERNEL__ */
-
-#endif
+#endif /* _ASM_IRQ_H */
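The new enum entries give each external and I/O interrupt source its own statistics slot. As a hedged sketch only (the handlers doing this are not part of the excerpt, and the accounting helper is assumed from the 2.6.38-era non-generic-hardirq code): a handler bumps the per-CPU counter for its class, which is what the per-line output of /proc/interrupts is then built from.

#include <linux/kernel_stat.h>
#include <linux/smp.h>
#include <asm/irq.h>

/* illustrative only: account one clock-comparator external interrupt
 * against the EXTINT_CLK class on the local CPU */
static void example_account_extint_clk(void)
{
	kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
}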
@@ -31,7 +31,6 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
-#define  __ARCH_WANT_KPROBES_INSN_SLOT
 struct pt_regs;
 struct kprobe;
@@ -58,23 +57,12 @@ typedef u16 kprobe_opcode_t;
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
-	kprobe_opcode_t *insn;
-	int fixup;
-	int ilen;
-	int reg;
+	kprobe_opcode_t insn[MAX_INSN_SIZE];
 };
-struct ins_replace_args {
-	kprobe_opcode_t *ptr;
-	kprobe_opcode_t old;
-	kprobe_opcode_t new;
-};
 struct prev_kprobe {
 	struct kprobe *kp;
 	unsigned long status;
-	unsigned long saved_psw;
-	unsigned long kprobe_saved_imask;
-	unsigned long kprobe_saved_ctl[3];
 };
 /* per-cpu kprobe control block */
@@ -82,17 +70,13 @@ struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	unsigned long kprobe_saved_imask;
 	unsigned long kprobe_saved_ctl[3];
-	struct pt_regs jprobe_saved_regs;
-	unsigned long jprobe_saved_r14;
-	unsigned long jprobe_saved_r15;
 	struct prev_kprobe prev_kprobe;
+	struct pt_regs jprobe_saved_regs;
 	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
 };
 void arch_remove_kprobe(struct kprobe *p);
 void kretprobe_trampoline(void);
-int  is_prohibited_opcode(kprobe_opcode_t *instruction);
-void get_instruction_type(struct arch_specific_insn *ainsn);
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
......
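The kprobes rework above mainly changes where the copied instruction lives (an insn[MAX_INSN_SIZE] array embedded in arch_specific_insn instead of a separately allocated slot) without touching the registration API. A hypothetical minimal user, for context; the probed symbol is an arbitrary example:

#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* let the single-stepped copy in ainsn.insn[] run */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* arbitrary example symbol */
	.pre_handler	= example_pre_handler,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");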
@@ -32,7 +32,6 @@ static inline void get_cpu_id(struct cpuid *ptr)
 }
 extern void s390_adjust_jiffies(void);
-extern void print_cpu_info(void);
 extern int get_cpu_capability(unsigned int *);
 /*
@@ -81,7 +80,8 @@ struct thread_struct {
 	mm_segment_t mm_segment;
 	unsigned long prot_addr;	/* address of protection-excep. */
 	unsigned int trap_no;
-	per_struct per_info;
+	struct per_regs per_user;	/* User specified PER registers */
+	struct per_event per_event;	/* Cause of the last PER trap */
 	/* pfault_wait is used to block the process on a pfault event */
 	unsigned long pfault_wait;
 };
......
@@ -331,10 +331,60 @@ struct pt_regs
 	unsigned short ilc;
 	unsigned short svcnr;
 };
+
+/*
+ * Program event recording (PER) register set.
+ */
+struct per_regs {
+	unsigned long control;		/* PER control bits */
+	unsigned long start;		/* PER starting address */
+	unsigned long end;		/* PER ending address */
+};
+
+/*
+ * PER event contains information about the cause of the last PER exception.
+ */
+struct per_event {
+	unsigned short cause;		/* PER code, ATMID and AI */
+	unsigned long address;		/* PER address */
+	unsigned char paid;		/* PER access identification */
+};
+
+/*
+ * Simplified per_info structure used to decode the ptrace user space ABI.
+ */
+struct per_struct_kernel {
+	unsigned long cr9;		/* PER control bits */
+	unsigned long cr10;		/* PER starting address */
+	unsigned long cr11;		/* PER ending address */
+	unsigned long bits;		/* Obsolete software bits */
+	unsigned long starting_addr;	/* User specified start address */
+	unsigned long ending_addr;	/* User specified end address */
+	unsigned short perc_atmid;	/* PER trap ATMID */
+	unsigned long address;		/* PER trap instruction address */
+	unsigned char access_id;	/* PER trap access identification */
+};
+
+#define PER_EVENT_MASK			0xE9000000UL
+#define PER_EVENT_BRANCH		0x80000000UL
+#define PER_EVENT_IFETCH		0x40000000UL
+#define PER_EVENT_STORE			0x20000000UL
+#define PER_EVENT_STORE_REAL		0x08000000UL
+#define PER_EVENT_NULLIFICATION		0x01000000UL
+
+#define PER_CONTROL_MASK		0x00a00000UL
+#define PER_CONTROL_BRANCH_ADDRESS	0x00800000UL
+#define PER_CONTROL_ALTERATION		0x00200000UL
+
 #endif
 /*
- * Now for the program event recording (trace) definitions.
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface, don't
+ * touch or even look at it if you don't want to modify the user-space
+ * ptrace interface. In particular stay away from it for in-kernel PER.
  */
 typedef struct
 {
......
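struct per_regs above mirrors the three control registers that drive the PER facility (CR9 control bits, CR10 start address, CR11 end address). A hedged sketch of how such a set would be activated, modeled on what the update_per_regs() helper referenced later in this diff presumably does; __ctl_store/__ctl_load are the usual s390 control-register helpers of that era, and the comparison just avoids redundant loads:

#include <linux/string.h>
#include <asm/ptrace.h>
#include <asm/system.h>		/* assumed location of __ctl_store/__ctl_load in 2.6.38 */

static void example_load_per_regs(struct per_regs new)
{
	struct per_regs old;

	__ctl_store(old, 9, 11);		/* current CR9-CR11 contents */
	if (memcmp(&new, &old, sizeof(new)) != 0)
		__ctl_load(new, 9, 11);		/* arm/re-arm PER events */
}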
@@ -361,6 +361,7 @@ struct qdio_initialize {
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
 	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+	int scan_threshold;
 	unsigned long int_parm;
 	void **input_sbal_addr_array;
 	void **output_sbal_addr_array;
......
-#ifndef _S390_EXTINT_H
-#define _S390_EXTINT_H
 /*
- * include/asm-s390/s390_ext.h
- *
- * S390 version
- * Copyright IBM Corp. 1999,2007
- * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
- *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp. 1999,2010
+ * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
  */
+#ifndef _S390_EXTINT_H
+#define _S390_EXTINT_H
 #include <linux/types.h>
 typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
-typedef struct ext_int_info_t {
-	struct ext_int_info_t *next;
-	ext_int_handler_t handler;
-	__u16 code;
-} ext_int_info_t;
-extern ext_int_info_t *ext_int_hash[];
 int register_external_interrupt(__u16 code, ext_int_handler_t handler);
-int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-				      ext_int_info_t *info);
 int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
-int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-					ext_int_info_t *info);
-#endif
+#endif /* _S390_EXTINT_H */
@@ -20,7 +20,6 @@ extern void machine_power_off_smp(void);
 extern int __cpu_disable (void);
 extern void __cpu_die (unsigned int cpu);
-extern void cpu_die (void) __attribute__ ((noreturn));
 extern int __cpu_up (unsigned int cpu);
 extern struct mutex smp_cpu_state_mutex;
@@ -71,8 +70,10 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
+extern void __noreturn cpu_die(void);
 #else
 static inline int smp_rescan_cpus(void) { return 0; }
+static inline void cpu_die(void) { }
 #endif
 #endif /* __ASM_SMP_H */
@@ -20,6 +20,7 @@
 struct task_struct;
 extern struct task_struct *__switch_to(void *, void *);
+extern void update_per_regs(struct task_struct *task);
 static inline void save_fp_regs(s390_fp_regs *fpregs)
 {
@@ -93,6 +94,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 	if (next->mm) {						\
 		restore_fp_regs(&next->thread.fp_regs);		\
 		restore_access_regs(&next->thread.acrs[0]);	\
+		update_per_regs(next);				\
 	}							\
 	prev = __switch_to(prev,next);				\
 } while (0)
@@ -101,11 +103,9 @@ extern void account_vtime(struct task_struct *, struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
 #ifdef CONFIG_PFAULT
-extern void pfault_irq_init(void);
 extern int pfault_init(void);
 extern void pfault_fini(void);
 #else /* CONFIG_PFAULT */
-#define pfault_irq_init() do { } while (0)
 #define pfault_init()	({-1;})
 #define pfault_fini()	do { } while (0)
 #endif /* CONFIG_PFAULT */
......
@@ -74,7 +74,7 @@ struct thread_info {
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
-	return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE);
+	return (struct thread_info *) S390_lowcore.thread_info;
 }
 #define THREAD_SIZE_ORDER THREAD_ORDER
@@ -88,7 +88,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_RESTART_SVC		4	/* restart svc with new svc number */
-#define TIF_SINGLE_STEP		6	/* deliver sigtrap on return to user */
+#define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
@@ -99,14 +99,15 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_31BIT		17	/* 32bit process */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
-#define TIF_FREEZE		20	/* thread is freezing for suspend */
+#define TIF_SINGLE_STEP		20	/* This task is single stepped */
+#define TIF_FREEZE		21	/* thread is freezing for suspend */
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_RESTART_SVC	(1<<TIF_RESTART_SVC)
-#define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
+#define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
+#define _TIF_SINGLE_STEP	(1<<TIF_FREEZE)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 #endif /* __KERNEL__ */
......
@@ -11,6 +11,8 @@
 #ifndef _ASM_S390_TIMEX_H
 #define _ASM_S390_TIMEX_H
+#include <asm/lowcore.h>
+
 /* The value of the TOD clock for 1.1.1970. */
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -49,6 +51,24 @@ static inline void store_clock_comparator(__u64 *time)
 	asm volatile("stckc %0" : "=Q" (*time));
 }
+void clock_comparator_work(void);
+
+static inline unsigned long long local_tick_disable(void)
+{
+	unsigned long long old;
+
+	old = S390_lowcore.clock_comparator;
+	S390_lowcore.clock_comparator = -1ULL;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+	return old;
+}
+
+static inline void local_tick_enable(unsigned long long comp)
+{
+	S390_lowcore.clock_comparator = comp;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+}
+
 #define CLOCK_TICK_RATE	1193180 /* Underlying HZ */
 typedef unsigned long long cycles_t;
......
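local_tick_disable() parks the clock comparator at -1ULL (no wakeup) and returns the previous value; local_tick_enable() both restores S390_lowcore.clock_comparator and reprograms the hardware via set_clock_comparator(), which is why the separate set_clock_comparator() calls can be dropped from the __udelay helpers near the end of this diff. A rough sketch of a caller (the loop is illustrative; one microsecond corresponds to 4096 TOD clock units, hence the shift by 12):
#include <asm/timex.h>
#include <asm/processor.h>
static void example_busy_wait(unsigned long long usecs)
{
	unsigned long long clock_saved, end;

	clock_saved = local_tick_disable();	/* no timer ticks while spinning */
	end = get_clock() + (usecs << 12);	/* microseconds -> TOD units */
	while (get_clock() < end)
		cpu_relax();
	local_tick_enable(clock_saved);		/* restore and reprogram comparator */
}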
...@@ -23,14 +23,16 @@ int main(void) ...@@ -23,14 +23,16 @@ int main(void)
{ {
DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment)); DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
BLANK(); BLANK();
DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
BLANK(); BLANK();
DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid)); DEFINE(__THREAD_per_cause,
DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address)); offsetof(struct task_struct, thread.per_event.cause));
DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id)); DEFINE(__THREAD_per_address,
offsetof(struct task_struct, thread.per_event.address));
DEFINE(__THREAD_per_paid,
offsetof(struct task_struct, thread.per_event.paid));
BLANK(); BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task)); DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
...@@ -85,9 +87,9 @@ int main(void) ...@@ -85,9 +87,9 @@ int main(void)
DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid)); DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id)); DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
......
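asm-offsets.c never produces object code that gets linked in; it is compiled to assembly, and the DEFINE()/BLANK() marker lines are post-processed into plain #defines in the generated asm-offsets.h, which is how the entry code below can say "mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE". The macros come from include/linux/kbuild.h and look roughly like this (quoted from memory, so treat as a sketch):
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() asm volatile("\n->" : : )

/* DEFINE(__THREAD_per_cause, offsetof(struct task_struct,
 * thread.per_event.cause)) therefore ends up as
 *	#define __THREAD_per_cause <byte offset>
 * in asm-offsets.h. */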
...@@ -4,40 +4,19 @@ ...@@ -4,40 +4,19 @@
#include <asm/ptrace.h> /* needed for NUM_CR_WORDS */ #include <asm/ptrace.h> /* needed for NUM_CR_WORDS */
#include "compat_linux.h" /* needed for psw_compat_t */ #include "compat_linux.h" /* needed for psw_compat_t */
typedef struct { struct compat_per_struct_kernel {
__u32 cr[NUM_CR_WORDS]; __u32 cr9; /* PER control bits */
} per_cr_words32; __u32 cr10; /* PER starting address */
__u32 cr11; /* PER ending address */
typedef struct { __u32 bits; /* Obsolete software bits */
__u16 perc_atmid; /* 0x096 */ __u32 starting_addr; /* User specified start address */
__u32 address; /* 0x098 */ __u32 ending_addr; /* User specified end address */
__u8 access_id; /* 0x0a1 */ __u16 perc_atmid; /* PER trap ATMID */
} per_lowcore_words32; __u32 address; /* PER trap instruction address */
__u8 access_id; /* PER trap access identification */
typedef struct { };
union {
per_cr_words32 words;
} control_regs;
/*
* Use these flags instead of setting em_instruction_fetch
* directly they are used so that single stepping can be
* switched on & off while not affecting other tracing
*/
unsigned single_step : 1;
unsigned instruction_fetch : 1;
unsigned : 30;
/*
* These addresses are copied into cr10 & cr11 if single
* stepping is switched off
*/
__u32 starting_addr;
__u32 ending_addr;
union {
per_lowcore_words32 words;
} lowcore;
} per_struct32;
struct user_regs_struct32 struct compat_user_regs_struct
{ {
psw_compat_t psw; psw_compat_t psw;
u32 gprs[NUM_GPRS]; u32 gprs[NUM_GPRS];
...@@ -50,14 +29,14 @@ struct user_regs_struct32 ...@@ -50,14 +29,14 @@ struct user_regs_struct32
* itself as there is no "official" ptrace interface for hardware * itself as there is no "official" ptrace interface for hardware
* watchpoints. This is the way intel does it. * watchpoints. This is the way intel does it.
*/ */
per_struct32 per_info; struct compat_per_struct_kernel per_info;
u32 ieee_instruction_pointer; /* obsolete, always 0 */ u32 ieee_instruction_pointer; /* obsolete, always 0 */
}; };
struct user32 { struct compat_user {
/* We start with the registers, to mimic the way that "memory" /* We start with the registers, to mimic the way that "memory"
is returned from the ptrace(3,...) function. */ is returned from the ptrace(3,...) function. */
struct user_regs_struct32 regs; /* Where the registers are actually stored */ struct compat_user_regs_struct regs;
/* The rest of this junk is to help gdb figure out what goes where */ /* The rest of this junk is to help gdb figure out what goes where */
u32 u_tsize; /* Text segment size (pages). */ u32 u_tsize; /* Text segment size (pages). */
u32 u_dsize; /* Data segment size (pages). */ u32 u_dsize; /* Data segment size (pages). */
...@@ -79,6 +58,6 @@ typedef struct ...@@ -79,6 +58,6 @@ typedef struct
__u32 len; __u32 len;
__u32 kernel_addr; __u32 kernel_addr;
__u32 process_addr; __u32 process_addr;
} ptrace_area_emu31; } compat_ptrace_area;
#endif /* _PTRACE32_H */ #endif /* _PTRACE32_H */
...@@ -12,7 +12,7 @@ pgm_check_handler_t do_dat_exception; ...@@ -12,7 +12,7 @@ pgm_check_handler_t do_dat_exception;
extern int sysctl_userprocess_debug; extern int sysctl_userprocess_debug;
void do_single_step(struct pt_regs *regs); void do_per_trap(struct pt_regs *regs);
void syscall_trace(struct pt_regs *regs, int entryexit); void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs); void kernel_stack_overflow(struct pt_regs * regs);
void do_signal(struct pt_regs *regs); void do_signal(struct pt_regs *regs);
......
...@@ -51,7 +51,7 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER ...@@ -51,7 +51,7 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT STACK_SIZE = 1 << STACK_SHIFT
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING) _TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
...@@ -197,6 +197,8 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ ...@@ -197,6 +197,8 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
ssm __SF_EMPTY(%r15) ssm __SF_EMPTY(%r15)
.endm .endm
.section .kprobes.text, "ax"
/* /*
* Scheduler resume function, called by switch_to * Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev * gpr2 = (task_struct *) prev
...@@ -206,30 +208,21 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ ...@@ -206,30 +208,21 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
*/ */
.globl __switch_to .globl __switch_to
__switch_to: __switch_to:
tm __THREAD_per+4(%r3),0xe8 # is the new process using per ? lg %r4,__THREAD_info(%r2) # get thread_info of prev
jz __switch_to_noper # if not we're fine lg %r5,__THREAD_info(%r3) # get thread_info of next
stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
je __switch_to_noper # we got away without bashing TLB's
lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
__switch_to_noper:
lg %r4,__THREAD_info(%r2) # get thread_info of prev
tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
jz __switch_to_no_mcck jz 0f
ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
lg %r4,__THREAD_info(%r3) # get thread_info of next oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next
oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next 0: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
__switch_to_no_mcck: stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task stg %r3,__LC_CURRENT # store task struct of next
stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct stg %r5,__LC_THREAD_INFO # store thread info of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 aghi %r5,STACK_SIZE # end of kernel stack of next
lg %r3,__THREAD_info(%r3) # load thread_info from task struct stg %r5,__LC_KERNEL_STACK # store end of kernel stack
stg %r3,__LC_THREAD_INFO
aghi %r3,STACK_SIZE
stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
br %r14 br %r14
__critical_start: __critical_start:
...@@ -309,7 +302,7 @@ sysc_work_tif: ...@@ -309,7 +302,7 @@ sysc_work_tif:
jo sysc_notify_resume jo sysc_notify_resume
tm __TI_flags+7(%r12),_TIF_RESTART_SVC tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart jo sysc_restart
tm __TI_flags+7(%r12),_TIF_SINGLE_STEP tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep jo sysc_singlestep
j sysc_return # beware of critical section cleanup j sysc_return # beware of critical section cleanup
...@@ -331,12 +324,12 @@ sysc_mcck_pending: ...@@ -331,12 +324,12 @@ sysc_mcck_pending:
# _TIF_SIGPENDING is set, call do_signal # _TIF_SIGPENDING is set, call do_signal
# #
sysc_sigpending: sysc_sigpending:
ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # load pt_regs la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_signal # call do_signal brasl %r14,do_signal # call do_signal
tm __TI_flags+7(%r12),_TIF_RESTART_SVC tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart jo sysc_restart
tm __TI_flags+7(%r12),_TIF_SINGLE_STEP tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep jo sysc_singlestep
j sysc_return j sysc_return
...@@ -361,14 +354,14 @@ sysc_restart: ...@@ -361,14 +354,14 @@ sysc_restart:
j sysc_nr_ok # restart svc j sysc_nr_ok # restart svc
# #
# _TIF_SINGLE_STEP is set, call do_single_step # _TIF_PER_TRAP is set, call do_per_trap
# #
sysc_singlestep: sysc_singlestep:
ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area la %r2,SP_PTREGS(%r15) # address of register-save area
larl %r14,sysc_return # load adr. of system return larl %r14,sysc_return # load adr. of system return
jg do_single_step # branch to do_sigtrap jg do_per_trap
# #
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
...@@ -524,10 +517,10 @@ pgm_no_vtime2: ...@@ -524,10 +517,10 @@ pgm_no_vtime2:
lg %r1,__TI_task(%r12) lg %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ? tm SP_PSW+1(%r15),0x01 # kernel per event ?
jz kernel_per jz kernel_per
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
lgf %r3,__LC_PGM_ILC # load program interruption code lgf %r3,__LC_PGM_ILC # load program interruption code
lg %r4,__LC_TRANS_EXC_CODE lg %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS REENABLE_IRQS
...@@ -556,10 +549,10 @@ pgm_svcper: ...@@ -556,10 +549,10 @@ pgm_svcper:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK LAST_BREAK
lg %r8,__TI_task(%r12) lg %r8,__TI_task(%r12)
mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lmg %r2,%r6,SP_R2(%r15) # load svc arguments lmg %r2,%r6,SP_R2(%r15) # load svc arguments
j sysc_do_svc j sysc_do_svc
...@@ -571,7 +564,7 @@ kernel_per: ...@@ -571,7 +564,7 @@ kernel_per:
REENABLE_IRQS REENABLE_IRQS
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_single_step brasl %r14,do_per_trap
j pgm_exit j pgm_exit
/* /*
...@@ -868,6 +861,8 @@ restart_crash: ...@@ -868,6 +861,8 @@ restart_crash:
restart_go: restart_go:
#endif #endif
.section .kprobes.text, "ax"
#ifdef CONFIG_CHECK_STACK #ifdef CONFIG_CHECK_STACK
/* /*
* The synchronous or the asynchronous stack overflowed. We are dead. * The synchronous or the asynchronous stack overflowed. We are dead.
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* Copyright IBM Corp. 2009 * Copyright IBM Corp. 2009
* *
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* * Martin Schwidefsky <schwidefsky@de.ibm.com>
*/ */
#include <linux/hardirq.h> #include <linux/hardirq.h>
...@@ -12,176 +12,144 @@ ...@@ -12,176 +12,144 @@
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h> #include <trace/syscall.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#ifdef CONFIG_64BIT
#define MCOUNT_OFFSET_RET 12
#else
#define MCOUNT_OFFSET_RET 22
#endif
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_disable_code(void); void ftrace_disable_code(void);
void ftrace_disable_return(void); void ftrace_enable_insn(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);
#define FTRACE_INSN_SIZE 4
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
/*
* The 64-bit mcount code looks like this:
* stg %r14,8(%r15) # offset 0
* > larl %r1,<&counter> # offset 6
* > brasl %r14,_mcount # offset 12
* lg %r14,8(%r15) # offset 18
* Total length is 24 bytes. The middle two instructions of the mcount
* block get overwritten by ftrace_make_nop / ftrace_make_call.
* The 64-bit enabled ftrace code block looks like this:
* stg %r14,8(%r15) # offset 0
* > lg %r1,__LC_FTRACE_FUNC # offset 6
* > lgr %r0,%r0 # offset 12
* > basr %r14,%r1 # offset 16
* lg %r14,8(%15) # offset 18
* The return points of the mcount/ftrace function have the same offset 18.
* The 64-bit disable ftrace code block looks like this:
* stg %r14,8(%r15) # offset 0
* > jg .+18 # offset 6
* > lgr %r0,%r0 # offset 12
* > basr %r14,%r1 # offset 16
* lg %r14,8(%15) # offset 18
* The jg instruction branches to offset 24 to skip as many instructions
* as possible.
*/
asm( asm(
" .align 4\n" " .align 4\n"
"ftrace_disable_code:\n" "ftrace_disable_code:\n"
" j 0f\n" " jg 0f\n"
" .word 0x0024\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
" basr %r14,%r1\n"
"ftrace_disable_return:\n"
" lg %r14,8(15)\n"
" lgr %r0,%r0\n" " lgr %r0,%r0\n"
"0:\n"); " basr %r14,%r1\n"
"0:\n"
asm(
" .align 4\n" " .align 4\n"
"ftrace_nop_code:\n" "ftrace_enable_insn:\n"
" j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
asm( #define FTRACE_INSN_SIZE 6
" .align 4\n"
"ftrace_call_code:\n"
" stg %r14,8(%r15)\n");
#else /* CONFIG_64BIT */ #else /* CONFIG_64BIT */
/*
* The 31-bit mcount code looks like this:
* st %r14,4(%r15) # offset 0
* > bras %r1,0f # offset 4
* > .long _mcount # offset 8
* > .long <&counter> # offset 12
* > 0: l %r14,0(%r1) # offset 16
* > l %r1,4(%r1) # offset 20
* basr %r14,%r14 # offset 24
* l %r14,4(%r15) # offset 26
* Total length is 30 bytes. The twenty bytes starting from offset 4
* to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
* The 31-bit enabled ftrace code block looks like this:
* st %r14,4(%r15) # offset 0
* > l %r14,__LC_FTRACE_FUNC # offset 4
* > j 0f # offset 8
* > .fill 12,1,0x07 # offset 12
* 0: basr %r14,%r14 # offset 24
* l %r14,4(%r14) # offset 26
* The return points of the mcount/ftrace function have the same offset 26.
* The 31-bit disabled ftrace code block looks like this:
* st %r14,4(%r15) # offset 0
* > j .+26 # offset 4
* > j 0f # offset 8
* > .fill 12,1,0x07 # offset 12
* 0: basr %r14,%r14 # offset 24
* l %r14,4(%r14) # offset 26
* The j instruction branches to offset 30 to skip as many instructions
* as possible.
*/
asm( asm(
" .align 4\n" " .align 4\n"
"ftrace_disable_code:\n" "ftrace_disable_code:\n"
" j 1f\n"
" j 0f\n" " j 0f\n"
" l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" " .fill 12,1,0x07\n"
" basr %r14,%r1\n" "0: basr %r14,%r14\n"
"ftrace_disable_return:\n" "1:\n"
" l %r14,4(%r15)\n"
" j 0f\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
" bcr 0,%r7\n"
"0:\n");
asm(
" .align 4\n" " .align 4\n"
"ftrace_nop_code:\n" "ftrace_enable_insn:\n"
" j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); " l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
asm( #define FTRACE_INSN_SIZE 4
" .align 4\n"
"ftrace_call_code:\n"
" st %r14,4(%r15)\n");
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
static int ftrace_modify_code(unsigned long ip,
void *old_code, int old_size,
void *new_code, int new_size)
{
unsigned char replaced[MCOUNT_INSN_SIZE];
/*
* Note: Due to modules code can disappear and change.
* We need to protect against faulting as well as code
* changing. We do this by using the probe_kernel_*
* functions.
* This however is just a simple sanity check.
*/
if (probe_kernel_read(replaced, (void *)ip, old_size))
return -EFAULT;
if (memcmp(replaced, old_code, old_size) != 0)
return -EINVAL;
if (probe_kernel_write((void *)ip, new_code, new_size))
return -EPERM;
return 0;
}
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
return ftrace_modify_code(rec->ip,
ftrace_call_code, FTRACE_INSN_SIZE,
ftrace_disable_code, MCOUNT_INSN_SIZE);
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr) unsigned long addr)
{ {
if (addr == MCOUNT_ADDR) if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
return ftrace_make_initial_nop(mod, rec, addr); MCOUNT_INSN_SIZE))
return ftrace_modify_code(rec->ip, return -EPERM;
ftrace_call_code, FTRACE_INSN_SIZE, return 0;
ftrace_nop_code, FTRACE_INSN_SIZE);
} }
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{ {
return ftrace_modify_code(rec->ip, if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
ftrace_nop_code, FTRACE_INSN_SIZE, FTRACE_INSN_SIZE))
ftrace_call_code, FTRACE_INSN_SIZE); return -EPERM;
return 0;
} }
int ftrace_update_ftrace_func(ftrace_func_t func) int ftrace_update_ftrace_func(ftrace_func_t func)
{ {
ftrace_dyn_func = (unsigned long)func;
return 0; return 0;
} }
int __init ftrace_dyn_arch_init(void *data) int __init ftrace_dyn_arch_init(void *data)
{ {
*(unsigned long *)data = 0; *(unsigned long *) data = 0;
return 0; return 0;
} }
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
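ftrace_make_nop() and ftrace_make_call() now come down to a single probe_kernel_write() of a prebuilt instruction block: 6 bytes (the lg from the lowcore) on 64-bit, 4 bytes (the l) on 31-bit, as the two FTRACE_INSN_SIZE definitions above show. probe_kernel_write() copies with page faults disabled and returns non-zero instead of oopsing when the destination cannot be written; the sketch below restates that contract with placeholder arguments:
#include <linux/uaccess.h>
/* Minimal wrapper mirroring the error mapping used above. */
static int patch_text(void *dst, const void *src, size_t len)
{
	if (probe_kernel_write(dst, src, len))
		return -EPERM;		/* could not patch the call site */
	return 0;
}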
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
* Patch the kernel code at ftrace_graph_caller location:
* The instruction there is branch relative on condition. The condition mask
* is either all ones (always branch aka disable ftrace_graph_caller) or all
* zeroes (nop aka enable ftrace_graph_caller).
* Instruction format for brc is a7m4xxxx where m is the condition mask.
*/
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned short opcode = 0xa704;
return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}
int ftrace_disable_ftrace_graph_caller(void)
{
unsigned short opcode = 0xa7f4;
return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
return addr - (ftrace_disable_return - ftrace_disable_code);
}
#else /* CONFIG_DYNAMIC_FTRACE */
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
return addr - MCOUNT_OFFSET_RET;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
/* /*
* Hook the return address and push it in the stack of return addresses * Hook the return address and push it in the stack of return addresses
* in current thread info. * in current thread info.
*/ */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
unsigned long ip)
{ {
struct ftrace_graph_ent trace; struct ftrace_graph_ent trace;
...@@ -189,14 +157,42 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) ...@@ -189,14 +157,42 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
goto out; goto out;
if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
goto out; goto out;
trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
/* Only trace if the calling function expects to. */ /* Only trace if the calling function expects to. */
if (!ftrace_graph_entry(&trace)) { if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--; current->curr_ret_stack--;
goto out; goto out;
} }
parent = (unsigned long)return_to_handler; parent = (unsigned long) return_to_handler;
out: out:
return parent; return parent;
} }
#ifdef CONFIG_DYNAMIC_FTRACE
/*
* Patch the kernel code at ftrace_graph_caller location. The instruction
* there is branch relative and save to prepare_ftrace_return. To disable
* the call to prepare_ftrace_return we patch the bras offset to point
* directly after the instructions. To enable the call we calculate
* the original offset to prepare_ftrace_return and put it back.
*/
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned short offset;
offset = ((void *) prepare_ftrace_return -
(void *) ftrace_graph_caller) / 2;
return probe_kernel_write(ftrace_graph_caller + 2,
&offset, sizeof(offset));
}
int ftrace_disable_ftrace_graph_caller(void)
{
static unsigned short offset = 0x0002;
return probe_kernel_write(ftrace_graph_caller + 2,
&offset, sizeof(offset));
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
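The graph-caller patching works because bras is an RI-format instruction: its 16-bit immediate is a signed halfword count relative to the address of the bras itself. Writing 0x0002 makes the branch land on the instruction immediately following it, effectively skipping prepare_ftrace_return, and writing (prepare_ftrace_return - ftrace_graph_caller)/2 puts the real target back. A tiny user-space check of that arithmetic, with invented addresses:
#include <stdio.h>

int main(void)
{
	unsigned long bras_addr = 0x10000;	/* hypothetical ftrace_graph_caller */
	unsigned long target    = 0x10080;	/* hypothetical prepare_ftrace_return */
	short enabled  = (short)((target - bras_addr) / 2);
	short disabled = 0x0002;		/* next instruction, i.e. call disabled */

	printf("enabled offset:  %d halfwords\n", enabled);	/* prints 64 */
	printf("disabled offset: %d halfwords\n", disabled);	/* prints 2 */
	return 0;
}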
/* /*
* arch/s390/kernel/irq.c * Copyright IBM Corp. 2004,2010
*
* Copyright IBM Corp. 2004,2007
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Thomas Spatzier (tspat@de.ibm.com) * Thomas Spatzier (tspat@de.ibm.com)
* *
...@@ -17,12 +15,42 @@ ...@@ -17,12 +15,42 @@
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/profile.h> #include <linux/profile.h>
struct irq_class {
char *name;
char *desc;
};
static const struct irq_class intrclass_names[] = {
{.name = "EXT" },
{.name = "I/O" },
{.name = "CLK", .desc = "[EXT] Clock Comparator" },
{.name = "IPI", .desc = "[EXT] Signal Processor" },
{.name = "TMR", .desc = "[EXT] CPU Timer" },
{.name = "TAL", .desc = "[EXT] Timing Alert" },
{.name = "PFL", .desc = "[EXT] Pseudo Page Fault" },
{.name = "DSD", .desc = "[EXT] DASD Diag" },
{.name = "VRT", .desc = "[EXT] Virtio" },
{.name = "SCP", .desc = "[EXT] Service Call" },
{.name = "IUC", .desc = "[EXT] IUCV" },
{.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
{.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
{.name = "DAS", .desc = "[I/O] DASD" },
{.name = "C15", .desc = "[I/O] 3215" },
{.name = "C70", .desc = "[I/O] 3270" },
{.name = "TAP", .desc = "[I/O] Tape" },
{.name = "VMR", .desc = "[I/O] Unit Record Devices" },
{.name = "LCS", .desc = "[I/O] LCS" },
{.name = "CLW", .desc = "[I/O] CLAW" },
{.name = "CTC", .desc = "[I/O] CTC" },
{.name = "APB", .desc = "[I/O] AP Bus" },
{.name = "NMI", .desc = "[NMI] Machine Check" },
};
/* /*
* show_interrupts is needed by /proc/interrupts. * show_interrupts is needed by /proc/interrupts.
*/ */
int show_interrupts(struct seq_file *p, void *v) int show_interrupts(struct seq_file *p, void *v)
{ {
static const char *intrclass_names[] = { "EXT", "I/O", };
int i = *(loff_t *) v, j; int i = *(loff_t *) v, j;
get_online_cpus(); get_online_cpus();
...@@ -34,15 +62,16 @@ int show_interrupts(struct seq_file *p, void *v) ...@@ -34,15 +62,16 @@ int show_interrupts(struct seq_file *p, void *v)
} }
if (i < NR_IRQS) { if (i < NR_IRQS) {
seq_printf(p, "%s: ", intrclass_names[i]); seq_printf(p, "%s: ", intrclass_names[i].name);
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i)); seq_printf(p, "%10u ", kstat_irqs(i));
#else #else
for_each_online_cpu(j) for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif #endif
if (intrclass_names[i].desc)
seq_printf(p, " %s", intrclass_names[i].desc);
seq_putc(p, '\n'); seq_putc(p, '\n');
} }
put_online_cpus(); put_online_cpus();
return 0; return 0;
......
...@@ -7,6 +7,8 @@ ...@@ -7,6 +7,8 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
.section .kprobes.text, "ax"
.globl ftrace_stub .globl ftrace_stub
ftrace_stub: ftrace_stub:
br %r14 br %r14
...@@ -16,22 +18,12 @@ _mcount: ...@@ -16,22 +18,12 @@ _mcount:
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
br %r14 br %r14
.data
.globl ftrace_dyn_func
ftrace_dyn_func:
.long ftrace_stub
.previous
.globl ftrace_caller .globl ftrace_caller
ftrace_caller: ftrace_caller:
#endif #endif
stm %r2,%r5,16(%r15) stm %r2,%r5,16(%r15)
bras %r1,2f bras %r1,2f
#ifdef CONFIG_DYNAMIC_FTRACE
0: .long ftrace_dyn_func
#else
0: .long ftrace_trace_function 0: .long ftrace_trace_function
#endif
1: .long function_trace_stop 1: .long function_trace_stop
2: l %r2,1b-0b(%r1) 2: l %r2,1b-0b(%r1)
icm %r2,0xf,0(%r2) icm %r2,0xf,0(%r2)
...@@ -47,21 +39,15 @@ ftrace_caller: ...@@ -47,21 +39,15 @@ ftrace_caller:
l %r14,0(%r14) l %r14,0(%r14)
basr %r14,%r14 basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE l %r2,100(%r15)
l %r3,152(%r15)
.globl ftrace_graph_caller .globl ftrace_graph_caller
ftrace_graph_caller: ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if # The bras instruction gets runtime patched to call prepare_ftrace_return.
# you know what you are doing. See ftrace_enable_graph_caller(). # See ftrace_enable_ftrace_graph_caller. The patched instruction is:
j 1f # bras %r14,prepare_ftrace_return
#endif bras %r14,0f
bras %r1,0f 0: st %r2,100(%r15)
.long prepare_ftrace_return
0: l %r2,152(%r15)
l %r4,0(%r1)
l %r3,100(%r15)
basr %r14,%r4
st %r2,100(%r15)
1:
#endif #endif
ahi %r15,96 ahi %r15,96
l %r14,56(%r15) l %r14,56(%r15)
......
...@@ -7,6 +7,8 @@ ...@@ -7,6 +7,8 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
.section .kprobes.text, "ax"
.globl ftrace_stub .globl ftrace_stub
ftrace_stub: ftrace_stub:
br %r14 br %r14
...@@ -16,12 +18,6 @@ _mcount: ...@@ -16,12 +18,6 @@ _mcount:
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
br %r14 br %r14
.data
.globl ftrace_dyn_func
ftrace_dyn_func:
.quad ftrace_stub
.previous
.globl ftrace_caller .globl ftrace_caller
ftrace_caller: ftrace_caller:
#endif #endif
...@@ -35,26 +31,19 @@ ftrace_caller: ...@@ -35,26 +31,19 @@ ftrace_caller:
stg %r1,__SF_BACKCHAIN(%r15) stg %r1,__SF_BACKCHAIN(%r15)
lgr %r2,%r14 lgr %r2,%r14
lg %r3,168(%r15) lg %r3,168(%r15)
#ifdef CONFIG_DYNAMIC_FTRACE
larl %r14,ftrace_dyn_func
#else
larl %r14,ftrace_trace_function larl %r14,ftrace_trace_function
#endif
lg %r14,0(%r14) lg %r14,0(%r14)
basr %r14,%r14 basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE lg %r2,168(%r15)
lg %r3,272(%r15)
.globl ftrace_graph_caller .globl ftrace_graph_caller
ftrace_graph_caller: ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if # The bras instruction gets runtime patched to call prepare_ftrace_return.
# you know what you are doing. See ftrace_enable_graph_caller(). # See ftrace_enable_ftrace_graph_caller. The patched instruction is:
j 0f # bras %r14,prepare_ftrace_return
#endif bras %r14,0f
lg %r2,272(%r15) 0: stg %r2,168(%r15)
lg %r3,168(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,168(%r15)
0:
#endif #endif
aghi %r15,160 aghi %r15,160
lmg %r2,%r5,32(%r15) lmg %r2,%r5,32(%r15)
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>, * Heiko Carstens <heiko.carstens@de.ibm.com>,
*/ */
#include <linux/kernel_stat.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
...@@ -255,7 +256,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) ...@@ -255,7 +256,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
nmi_enter(); nmi_enter();
s390_idle_check(regs, S390_lowcore.mcck_clock, s390_idle_check(regs, S390_lowcore.mcck_clock,
S390_lowcore.mcck_enter_timer); S390_lowcore.mcck_enter_timer);
kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
mci = (struct mci *) &S390_lowcore.mcck_interruption_code; mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
mcck = &__get_cpu_var(cpu_mcck); mcck = &__get_cpu_var(cpu_mcck);
umode = user_mode(regs); umode = user_mode(regs);
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/syscalls.h> #include <linux/syscalls.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/kprobes.h>
#include <asm/compat.h> #include <asm/compat.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -41,6 +42,7 @@ ...@@ -41,6 +42,7 @@
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/timer.h> #include <asm/timer.h>
#include <asm/nmi.h> #include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h" #include "entry.h"
asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
...@@ -75,13 +77,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk) ...@@ -75,13 +77,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
*/ */
static void default_idle(void) static void default_idle(void)
{ {
/* CPU is going idle. */ if (cpu_is_offline(smp_processor_id()))
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id())) {
preempt_enable_no_resched();
cpu_die(); cpu_die();
}
#endif
local_irq_disable(); local_irq_disable();
if (need_resched()) { if (need_resched()) {
local_irq_enable(); local_irq_enable();
...@@ -116,15 +113,17 @@ void cpu_idle(void) ...@@ -116,15 +113,17 @@ void cpu_idle(void)
} }
} }
extern void kernel_thread_starter(void); extern void __kprobes kernel_thread_starter(void);
asm( asm(
".align 4\n" ".section .kprobes.text, \"ax\"\n"
".global kernel_thread_starter\n"
"kernel_thread_starter:\n" "kernel_thread_starter:\n"
" la 2,0(10)\n" " la 2,0(10)\n"
" basr 14,9\n" " basr 14,9\n"
" la 2,0\n" " la 2,0\n"
" br 11\n"); " br 11\n"
".previous\n");
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{ {
...@@ -214,8 +213,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, ...@@ -214,8 +213,10 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
/* start new process with ar4 pointing to the correct address space */ /* start new process with ar4 pointing to the correct address space */
p->thread.mm_segment = get_fs(); p->thread.mm_segment = get_fs();
/* Don't copy debug registers */ /* Don't copy debug registers */
memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP); clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
clear_tsk_thread_flag(p, TIF_PER_TRAP);
/* Initialize per thread user and system timer values */ /* Initialize per thread user and system timer values */
ti = task_thread_info(p); ti = task_thread_info(p);
ti->user_timer = 0; ti->user_timer = 0;
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/elf.h> #include <asm/elf.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/param.h> #include <asm/param.h>
...@@ -34,17 +34,6 @@ void __cpuinit cpu_init(void) ...@@ -34,17 +34,6 @@ void __cpuinit cpu_init(void)
enter_lazy_tlb(&init_mm, current); enter_lazy_tlb(&init_mm, current);
} }
/*
* print_cpu_info - print basic information about a cpu
*/
void __cpuinit print_cpu_info(void)
{
struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
pr_info("Processor %d started, address %d, identification %06X\n",
S390_lowcore.cpu_nr, stap(), id->ident);
}
/* /*
* show_cpuinfo - Get information on one CPU for use by procfs. * show_cpuinfo - Get information on one CPU for use by procfs.
*/ */
...@@ -57,9 +46,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -57,9 +46,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned long n = (unsigned long) v - 1; unsigned long n = (unsigned long) v - 1;
int i; int i;
s390_adjust_jiffies();
preempt_disable();
if (!n) { if (!n) {
s390_adjust_jiffies();
seq_printf(m, "vendor_id : IBM/S390\n" seq_printf(m, "vendor_id : IBM/S390\n"
"# processors : %i\n" "# processors : %i\n"
"bogomips per cpu: %lu.%02lu\n", "bogomips per cpu: %lu.%02lu\n",
...@@ -71,7 +59,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -71,7 +59,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "%s ", hwcap_str[i]); seq_printf(m, "%s ", hwcap_str[i]);
seq_puts(m, "\n"); seq_puts(m, "\n");
} }
get_online_cpus();
if (cpu_online(n)) { if (cpu_online(n)) {
struct cpuid *id = &per_cpu(cpu_id, n); struct cpuid *id = &per_cpu(cpu_id, n);
seq_printf(m, "processor %li: " seq_printf(m, "processor %li: "
...@@ -80,7 +68,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -80,7 +68,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"machine = %04X\n", "machine = %04X\n",
n, id->version, id->ident, id->machine); n, id->version, id->ident, id->machine);
} }
preempt_enable(); put_online_cpus();
return 0; return 0;
} }
......
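show_cpuinfo() now takes the CPU hotplug lock instead of disabling preemption: what the reader needs is a guarantee that cpu n stays online while its per-cpu id is printed, and get_online_cpus()/put_online_cpus() provide exactly that without keeping preemption off across the seq_file output. The shape of that bracket, as a sketch (read_cpu_data() is a stand-in for the seq_printf calls above):
#include <linux/cpu.h>
#include <linux/seq_file.h>
static void read_cpu_data(struct seq_file *m, unsigned long n)
{
	/* stand-in for the per_cpu(cpu_id, n) output above */
}

static void dump_one_cpu(struct seq_file *m, unsigned long n)
{
	get_online_cpus();		/* cpu n cannot go away under us */
	if (cpu_online(n))
		read_cpu_data(m, n);
	put_online_cpus();
}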
/* /*
* arch/s390/kernel/s390_ext.c * Copyright IBM Corp. 1999,2010
* * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
* S390 version * Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/ */
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/kernel_stat.h> #include <linux/slab.h>
#include <linux/interrupt.h>
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/s390_ext.h> #include <asm/s390_ext.h>
#include <asm/irq_regs.h> #include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/irq.h> #include <asm/irq.h>
#include "entry.h" #include "entry.h"
struct ext_int_info {
struct ext_int_info *next;
ext_int_handler_t handler;
__u16 code;
};
/* /*
* ext_int_hash[index] is the start of the list for all external interrupts * ext_int_hash[index] is the start of the list for all external interrupts
* that hash to this index. With the current set of external interrupts * that hash to this index. With the current set of external interrupts
* (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
* iucv and 0x2603 pfault) this is always the first element. * iucv and 0x2603 pfault) this is always the first element.
*/ */
ext_int_info_t *ext_int_hash[256] = { NULL, }; static struct ext_int_info *ext_int_hash[256];
static inline int ext_hash(__u16 code) static inline int ext_hash(__u16 code)
{ {
...@@ -36,90 +39,53 @@ static inline int ext_hash(__u16 code) ...@@ -36,90 +39,53 @@ static inline int ext_hash(__u16 code)
int register_external_interrupt(__u16 code, ext_int_handler_t handler) int register_external_interrupt(__u16 code, ext_int_handler_t handler)
{ {
ext_int_info_t *p; struct ext_int_info *p;
int index; int index;
p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
if (p == NULL)
return -ENOMEM;
p->code = code;
p->handler = handler;
index = ext_hash(code);
p->next = ext_int_hash[index];
ext_int_hash[index] = p;
return 0;
}
int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *p)
{
int index;
if (p == NULL) p = kmalloc(sizeof(*p), GFP_ATOMIC);
return -EINVAL; if (!p)
p->code = code; return -ENOMEM;
p->handler = handler; p->code = code;
p->handler = handler;
index = ext_hash(code); index = ext_hash(code);
p->next = ext_int_hash[index]; p->next = ext_int_hash[index];
ext_int_hash[index] = p; ext_int_hash[index] = p;
return 0; return 0;
} }
EXPORT_SYMBOL(register_external_interrupt);
int unregister_external_interrupt(__u16 code, ext_int_handler_t handler) int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
{ {
ext_int_info_t *p, *q; struct ext_int_info *p, *q;
int index;
index = ext_hash(code);
q = NULL;
p = ext_int_hash[index];
while (p != NULL) {
if (p->code == code && p->handler == handler)
break;
q = p;
p = p->next;
}
if (p == NULL)
return -ENOENT;
if (q != NULL)
q->next = p->next;
else
ext_int_hash[index] = p->next;
kfree(p);
return 0;
}
int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *p)
{
ext_int_info_t *q;
int index; int index;
if (p == NULL || p->code != code || p->handler != handler)
return -EINVAL;
index = ext_hash(code); index = ext_hash(code);
q = ext_int_hash[index]; q = NULL;
if (p != q) { p = ext_int_hash[index];
while (q != NULL) { while (p) {
if (q->next == p) if (p->code == code && p->handler == handler)
break; break;
q = q->next; q = p;
} p = p->next;
if (q == NULL) }
return -ENOENT; if (!p)
return -ENOENT;
if (q)
q->next = p->next; q->next = p->next;
} else else
ext_int_hash[index] = p->next; ext_int_hash[index] = p->next;
kfree(p);
return 0; return 0;
} }
EXPORT_SYMBOL(unregister_external_interrupt);
void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
unsigned int param32, unsigned long param64) unsigned int param32, unsigned long param64)
{ {
struct pt_regs *old_regs; struct pt_regs *old_regs;
unsigned short code; unsigned short code;
ext_int_info_t *p; struct ext_int_info *p;
int index; int index;
code = (unsigned short) ext_int_code; code = (unsigned short) ext_int_code;
old_regs = set_irq_regs(regs); old_regs = set_irq_regs(regs);
...@@ -132,7 +98,7 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, ...@@ -132,7 +98,7 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
if (code != 0x1004) if (code != 0x1004)
__get_cpu_var(s390_idle).nohz_delay = 1; __get_cpu_var(s390_idle).nohz_delay = 1;
index = ext_hash(code); index = ext_hash(code);
for (p = ext_int_hash[index]; p; p = p->next) { for (p = ext_int_hash[index]; p; p = p->next) {
if (likely(p->code == code)) if (likely(p->code == code))
p->handler(ext_int_code, param32, param64); p->handler(ext_int_code, param32, param64);
...@@ -140,6 +106,3 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, ...@@ -140,6 +106,3 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
irq_exit(); irq_exit();
set_irq_regs(old_regs); set_irq_regs(old_regs);
} }
EXPORT_SYMBOL(register_external_interrupt);
EXPORT_SYMBOL(unregister_external_interrupt);
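With the *_early_* variants gone, every user allocates its hash entry through the same two calls; the reworked pfault code at the end of this diff is one such caller. A minimal usage sketch, with an invented interrupt code and an empty handler:
#include <linux/init.h>
#include <asm/s390_ext.h>
static void my_ext_handler(unsigned int ext_int_code,
			   unsigned int param32, unsigned long param64)
{
	/* handle external interrupt 0x1234 (code invented for this sketch) */
}

static int __init my_ext_init(void)
{
	/* returns -ENOMEM if the hash entry cannot be allocated */
	return register_external_interrupt(0x1234, my_ext_handler);
}

static void my_ext_teardown(void)
{
	/* returns -ENOENT if the code/handler pair was never registered */
	unregister_external_interrupt(0x1234, my_ext_handler);
}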
...@@ -505,7 +505,7 @@ void do_signal(struct pt_regs *regs) ...@@ -505,7 +505,7 @@ void do_signal(struct pt_regs *regs)
* Let tracing know that we've done the handler setup. * Let tracing know that we've done the handler setup.
*/ */
tracehook_signal_handler(signr, &info, &ka, regs, tracehook_signal_handler(signr, &info, &ka, regs,
current->thread.per_info.single_step); test_thread_flag(TIF_SINGLE_STEP));
} }
return; return;
} }
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#define KMSG_COMPONENT "cpu" #define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -161,6 +162,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, ...@@ -161,6 +162,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
{ {
unsigned long bits; unsigned long bits;
kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
/* /*
* handle bit signal external calls * handle bit signal external calls
* *
...@@ -469,25 +471,25 @@ int __cpuinit start_secondary(void *cpuvoid) ...@@ -469,25 +471,25 @@ int __cpuinit start_secondary(void *cpuvoid)
ipi_call_unlock(); ipi_call_unlock();
/* Switch on interrupts */ /* Switch on interrupts */
local_irq_enable(); local_irq_enable();
/* Print info about this processor */
print_cpu_info();
/* cpu_idle will call schedule for us */ /* cpu_idle will call schedule for us */
cpu_idle(); cpu_idle();
return 0; return 0;
} }
static void __init smp_create_idle(unsigned int cpu) struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
static void __cpuinit smp_fork_idle(struct work_struct *work)
{ {
struct task_struct *p; struct create_idle *c_idle;
/* c_idle = container_of(work, struct create_idle, work);
* don't care about the psw and regs settings since we'll never c_idle->idle = fork_idle(c_idle->cpu);
* reschedule the forked task. complete(&c_idle->done);
*/
p = fork_idle(cpu);
if (IS_ERR(p))
panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
current_set[cpu] = p;
} }
static int __cpuinit smp_alloc_lowcore(int cpu) static int __cpuinit smp_alloc_lowcore(int cpu)
...@@ -551,6 +553,7 @@ static void smp_free_lowcore(int cpu) ...@@ -551,6 +553,7 @@ static void smp_free_lowcore(int cpu)
int __cpuinit __cpu_up(unsigned int cpu) int __cpuinit __cpu_up(unsigned int cpu)
{ {
struct _lowcore *cpu_lowcore; struct _lowcore *cpu_lowcore;
struct create_idle c_idle;
struct task_struct *idle; struct task_struct *idle;
struct stack_frame *sf; struct stack_frame *sf;
u32 lowcore; u32 lowcore;
...@@ -558,6 +561,19 @@ int __cpuinit __cpu_up(unsigned int cpu) ...@@ -558,6 +561,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
return -EIO; return -EIO;
idle = current_set[cpu];
if (!idle) {
c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
c_idle.cpu = cpu;
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
if (IS_ERR(c_idle.idle))
return PTR_ERR(c_idle.idle);
idle = c_idle.idle;
current_set[cpu] = c_idle.idle;
}
init_idle(idle, cpu);
if (smp_alloc_lowcore(cpu)) if (smp_alloc_lowcore(cpu))
return -ENOMEM; return -ENOMEM;
do { do {
...@@ -572,7 +588,6 @@ int __cpuinit __cpu_up(unsigned int cpu) ...@@ -572,7 +588,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
udelay(10); udelay(10);
idle = current_set[cpu];
cpu_lowcore = lowcore_ptr[cpu]; cpu_lowcore = lowcore_ptr[cpu];
cpu_lowcore->kernel_stack = (unsigned long) cpu_lowcore->kernel_stack = (unsigned long)
task_stack_page(idle) + THREAD_SIZE; task_stack_page(idle) + THREAD_SIZE;
...@@ -664,7 +679,6 @@ void __cpu_die(unsigned int cpu) ...@@ -664,7 +679,6 @@ void __cpu_die(unsigned int cpu)
udelay(10); udelay(10);
smp_free_lowcore(cpu); smp_free_lowcore(cpu);
atomic_dec(&init_mm.context.attach_count); atomic_dec(&init_mm.context.attach_count);
pr_info("Processor %d stopped\n", cpu);
} }
void cpu_die(void) void cpu_die(void)
...@@ -684,14 +698,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -684,14 +698,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#endif #endif
unsigned long async_stack, panic_stack; unsigned long async_stack, panic_stack;
struct _lowcore *lowcore; struct _lowcore *lowcore;
unsigned int cpu;
smp_detect_cpus(); smp_detect_cpus();
/* request the 0x1201 emergency signal external interrupt */ /* request the 0x1201 emergency signal external interrupt */
if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
panic("Couldn't request external interrupt 0x1201"); panic("Couldn't request external interrupt 0x1201");
print_cpu_info();
/* Reallocate current lowcore, but keep its contents. */ /* Reallocate current lowcore, but keep its contents. */
lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
...@@ -719,9 +731,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -719,9 +731,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
BUG(); BUG();
#endif #endif
for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
} }
void __init smp_prepare_boot_cpu(void) void __init smp_prepare_boot_cpu(void)
......
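__cpu_up() now forks the idle task lazily, and it does so through an on-stack work item so that fork_idle() runs from a kernel worker rather than from the task that called cpu_up(); the completion lets the caller block until the new task_struct exists. The same pattern in generic form (all names illustrative):
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/kernel.h>
struct my_request {
	struct work_struct work;
	struct completion done;
	int result;
};

static void my_worker(struct work_struct *work)
{
	struct my_request *req = container_of(work, struct my_request, work);

	req->result = 0;		/* do the real work here */
	complete(&req->done);
}

static int run_in_worker_and_wait(void)
{
	struct my_request req;

	req.done = COMPLETION_INITIALIZER_ONSTACK(req.done);
	INIT_WORK_ONSTACK(&req.work, my_worker);
	schedule_work(&req.work);
	wait_for_completion(&req.done);
	return req.result;
}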
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#define KMSG_COMPONENT "time" #define KMSG_COMPONENT "time"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -37,6 +38,7 @@ ...@@ -37,6 +38,7 @@
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/clockchips.h> #include <linux/clockchips.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/kprobes.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/s390_ext.h> #include <asm/s390_ext.h>
...@@ -60,7 +62,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); ...@@ -60,7 +62,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
/* /*
* Scheduler clock - returns current time in nanosec units. * Scheduler clock - returns current time in nanosec units.
*/ */
unsigned long long notrace sched_clock(void) unsigned long long notrace __kprobes sched_clock(void)
{ {
return (get_clock_monotonic() * 125) >> 9; return (get_clock_monotonic() * 125) >> 9;
} }
...@@ -159,6 +161,7 @@ static void clock_comparator_interrupt(unsigned int ext_int_code, ...@@ -159,6 +161,7 @@ static void clock_comparator_interrupt(unsigned int ext_int_code,
unsigned int param32, unsigned int param32,
unsigned long param64) unsigned long param64)
{ {
kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
if (S390_lowcore.clock_comparator == -1ULL) if (S390_lowcore.clock_comparator == -1ULL)
set_clock_comparator(S390_lowcore.clock_comparator); set_clock_comparator(S390_lowcore.clock_comparator);
} }
...@@ -169,6 +172,7 @@ static void stp_timing_alert(struct stp_irq_parm *); ...@@ -169,6 +172,7 @@ static void stp_timing_alert(struct stp_irq_parm *);
static void timing_alert_interrupt(unsigned int ext_int_code, static void timing_alert_interrupt(unsigned int ext_int_code,
unsigned int param32, unsigned long param64) unsigned int param32, unsigned long param64)
{ {
kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
if (param32 & 0x00c40000) if (param32 & 0x00c40000)
etr_timing_alert((struct etr_irq_parm *) &param32); etr_timing_alert((struct etr_irq_parm *) &param32);
if (param32 & 0x00038000) if (param32 & 0x00038000)
......
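Both timer-related handlers now charge themselves to a per-cpu counter before doing any work; show_interrupts() in the irq.c hunk above prints those counters next to the matching irq_class row ("CLK", "TAL", ...). A sketch of the shape every converted handler in this series takes (EXTINT_FOO stands for one of the EXTINT_* indices defined in the interrupt header elsewhere in this series):
static void foo_interrupt(unsigned int ext_int_code,
			  unsigned int param32, unsigned long param64)
{
	kstat_cpu(smp_processor_id()).irqs[EXTINT_FOO]++;	/* account first */
	/* ... actual handling ... */
}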
...@@ -365,12 +365,10 @@ static inline void __user *get_psw_address(struct pt_regs *regs, ...@@ -365,12 +365,10 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN); ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
} }
void __kprobes do_single_step(struct pt_regs *regs) void __kprobes do_per_trap(struct pt_regs *regs)
{ {
if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
SIGTRAP) == NOTIFY_STOP){
return; return;
}
if (tracehook_consider_fatal_signal(current, SIGTRAP)) if (tracehook_consider_fatal_signal(current, SIGTRAP))
force_sig(SIGTRAP, current); force_sig(SIGTRAP, current);
} }
...@@ -451,8 +449,8 @@ static inline void do_fp_trap(struct pt_regs *regs, void __user *location, ...@@ -451,8 +449,8 @@ static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
"floating point exception", regs, &si); "floating point exception", regs, &si);
} }
static void illegal_op(struct pt_regs *regs, long pgm_int_code, static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code) unsigned long trans_exc_code)
{ {
siginfo_t info; siginfo_t info;
__u8 opcode[6]; __u8 opcode[6];
...@@ -688,7 +686,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code, ...@@ -688,7 +686,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info); do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
} }
asmlinkage void kernel_stack_overflow(struct pt_regs * regs) asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{ {
bust_spinlocks(1); bust_spinlocks(1);
printk("Kernel stack overflow.\n"); printk("Kernel stack overflow.\n");
...@@ -733,5 +731,6 @@ void __init trap_init(void) ...@@ -733,5 +731,6 @@ void __init trap_init(void)
pgm_check_table[0x15] = &operand_exception; pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &space_switch_exception; pgm_check_table[0x1C] = &space_switch_exception;
pgm_check_table[0x1D] = &hfp_sqrt_exception; pgm_check_table[0x1D] = &hfp_sqrt_exception;
pfault_irq_init(); /* Enable machine checks early. */
local_mcck_enable();
} }
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/posix-timers.h> #include <linux/posix-timers.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/kprobes.h>
#include <asm/s390_ext.h> #include <asm/s390_ext.h>
#include <asm/timer.h> #include <asm/timer.h>
...@@ -122,7 +123,7 @@ void account_system_vtime(struct task_struct *tsk) ...@@ -122,7 +123,7 @@ void account_system_vtime(struct task_struct *tsk)
} }
EXPORT_SYMBOL_GPL(account_system_vtime); EXPORT_SYMBOL_GPL(account_system_vtime);
void vtime_start_cpu(__u64 int_clock, __u64 enter_timer) void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
{ {
struct s390_idle_data *idle = &__get_cpu_var(s390_idle); struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
...@@ -162,7 +163,7 @@ void vtime_start_cpu(__u64 int_clock, __u64 enter_timer) ...@@ -162,7 +163,7 @@ void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
idle->sequence++; idle->sequence++;
} }
void vtime_stop_cpu(void) void __kprobes vtime_stop_cpu(void)
{ {
struct s390_idle_data *idle = &__get_cpu_var(s390_idle); struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
...@@ -323,6 +324,7 @@ static void do_cpu_timer_interrupt(unsigned int ext_int_code, ...@@ -323,6 +324,7 @@ static void do_cpu_timer_interrupt(unsigned int ext_int_code,
struct list_head cb_list; /* the callback queue */ struct list_head cb_list; /* the callback queue */
__u64 elapsed, next; __u64 elapsed, next;
kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
INIT_LIST_HEAD(&cb_list); INIT_LIST_HEAD(&cb_list);
vq = &__get_cpu_var(virt_cpu_timer); vq = &__get_cpu_var(virt_cpu_timer);
......
...@@ -4,8 +4,8 @@ ...@@ -4,8 +4,8 @@
source "virt/kvm/Kconfig" source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION menuconfig VIRTUALIZATION
bool "Virtualization" def_bool y
default y prompt "Virtualization"
---help--- ---help---
Say Y here to get to see options for using your Linux host to run other Say Y here to get to see options for using your Linux host to run other
operating systems inside virtual machines (guests). operating systems inside virtual machines (guests).
...@@ -16,7 +16,8 @@ menuconfig VIRTUALIZATION ...@@ -16,7 +16,8 @@ menuconfig VIRTUALIZATION
if VIRTUALIZATION if VIRTUALIZATION
config KVM config KVM
tristate "Kernel-based Virtual Machine (KVM) support" def_tristate y
prompt "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM && EXPERIMENTAL depends on HAVE_KVM && EXPERIMENTAL
select PREEMPT_NOTIFIERS select PREEMPT_NOTIFIERS
select ANON_INODES select ANON_INODES
......
...@@ -47,7 +47,6 @@ static void __udelay_disabled(unsigned long long usecs) ...@@ -47,7 +47,6 @@ static void __udelay_disabled(unsigned long long usecs)
lockdep_on(); lockdep_on();
__ctl_load(cr0_saved, 0, 0); __ctl_load(cr0_saved, 0, 0);
local_tick_enable(clock_saved); local_tick_enable(clock_saved);
set_clock_comparator(S390_lowcore.clock_comparator);
} }
static void __udelay_enabled(unsigned long long usecs) static void __udelay_enabled(unsigned long long usecs)
...@@ -70,7 +69,6 @@ static void __udelay_enabled(unsigned long long usecs) ...@@ -70,7 +69,6 @@ static void __udelay_enabled(unsigned long long usecs)
if (clock_saved) if (clock_saved)
local_tick_enable(clock_saved); local_tick_enable(clock_saved);
} while (get_clock() < end); } while (get_clock() < end);
set_clock_comparator(S390_lowcore.clock_comparator);
} }
/* /*
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
* Copyright (C) 1995 Linus Torvalds * Copyright (C) 1995 Linus Torvalds
*/ */
#include <linux/kernel_stat.h>
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -234,13 +235,13 @@ static noinline int signal_return(struct pt_regs *regs, long int_code, ...@@ -234,13 +235,13 @@ static noinline int signal_return(struct pt_regs *regs, long int_code,
rc = __get_user(instruction, (u16 __user *) regs->psw.addr); rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
if (!rc && instruction == 0x0a77) { if (!rc && instruction == 0x0a77) {
clear_tsk_thread_flag(current, TIF_SINGLE_STEP); clear_tsk_thread_flag(current, TIF_PER_TRAP);
if (is_compat_task()) if (is_compat_task())
sys32_sigreturn(); sys32_sigreturn();
else else
sys_sigreturn(); sys_sigreturn();
} else if (!rc && instruction == 0x0aad) { } else if (!rc && instruction == 0x0aad) {
clear_tsk_thread_flag(current, TIF_SINGLE_STEP); clear_tsk_thread_flag(current, TIF_PER_TRAP);
if (is_compat_task()) if (is_compat_task())
sys32_rt_sigreturn(); sys32_rt_sigreturn();
else else
...@@ -378,7 +379,7 @@ static inline int do_exception(struct pt_regs *regs, int access, ...@@ -378,7 +379,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
* The instruction that caused the program check will * The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP. * be repeated. Don't signal single step via SIGTRAP.
*/ */
clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP); clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0; fault = 0;
out_up: out_up:
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
...@@ -480,8 +481,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) ...@@ -480,8 +481,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
/* /*
* 'pfault' pseudo page faults routines. * 'pfault' pseudo page faults routines.
*/ */
static ext_int_info_t ext_int_pfault; static int pfault_disable;
static int pfault_disable = 0;
static int __init nopfault(char *str) static int __init nopfault(char *str)
{ {
...@@ -543,6 +543,7 @@ static void pfault_interrupt(unsigned int ext_int_code, ...@@ -543,6 +543,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
struct task_struct *tsk; struct task_struct *tsk;
__u16 subcode; __u16 subcode;
kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
/* /*
* Get the external interruption subcode & pfault * Get the external interruption subcode & pfault
* initial/completion signal bit. VM stores this * initial/completion signal bit. VM stores this
...@@ -592,24 +593,28 @@ static void pfault_interrupt(unsigned int ext_int_code, ...@@ -592,24 +593,28 @@ static void pfault_interrupt(unsigned int ext_int_code,
} }
} }
void __init pfault_irq_init(void) static int __init pfault_irq_init(void)
{ {
if (!MACHINE_IS_VM) int rc;
return;
if (!MACHINE_IS_VM)
return 0;
/* /*
* Try to get pfault pseudo page faults going. * Try to get pfault pseudo page faults going.
*/ */
if (register_early_external_interrupt(0x2603, pfault_interrupt, rc = register_external_interrupt(0x2603, pfault_interrupt);
&ext_int_pfault) != 0) if (rc) {
panic("Couldn't request external interrupt 0x2603"); pfault_disable = 1;
return rc;
}
if (pfault_init() == 0) if (pfault_init() == 0)
return; return 0;
/* Tough luck, no pfault. */ /* Tough luck, no pfault. */
pfault_disable = 1; pfault_disable = 1;
unregister_early_external_interrupt(0x2603, pfault_interrupt, unregister_external_interrupt(0x2603, pfault_interrupt);
&ext_int_pfault); return 0;
} }
early_initcall(pfault_irq_init);
#endif #endif
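For readability, the reworked pfault setup from the hunk above, reassembled on the new side only (reconstructed from the diff; nothing beyond what the hunk shows is assumed): the static ext_int_info_t and the register_early_external_interrupt()/panic() pair are gone, registration goes through register_external_interrupt(), failures merely set pfault_disable, and the routine now runs as an early_initcall().

static int __init pfault_irq_init(void)
{
        int rc;

        if (!MACHINE_IS_VM)
                return 0;
        /* Try to get pfault pseudo page faults going. */
        rc = register_external_interrupt(0x2603, pfault_interrupt);
        if (rc) {
                pfault_disable = 1;
                return rc;
        }
        if (pfault_init() == 0)
                return 0;
        /* Tough luck, no pfault. */
        pfault_disable = 1;
        unregister_external_interrupt(0x2603, pfault_interrupt);
        return 0;
}
early_initcall(pfault_irq_init);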
...@@ -2,7 +2,8 @@ comment "S/390 block device drivers" ...@@ -2,7 +2,8 @@ comment "S/390 block device drivers"
depends on S390 && BLOCK depends on S390 && BLOCK
config BLK_DEV_XPRAM config BLK_DEV_XPRAM
tristate "XPRAM disk support" def_tristate m
prompt "XPRAM disk support"
depends on S390 && BLOCK depends on S390 && BLOCK
help help
Select this option if you want to use your expanded storage on S/390 Select this option if you want to use your expanded storage on S/390
...@@ -12,13 +13,15 @@ config BLK_DEV_XPRAM ...@@ -12,13 +13,15 @@ config BLK_DEV_XPRAM
xpram. If unsure, say "N". xpram. If unsure, say "N".
config DCSSBLK config DCSSBLK
tristate "DCSSBLK support" def_tristate m
prompt "DCSSBLK support"
depends on S390 && BLOCK depends on S390 && BLOCK
help help
Support for dcss block device Support for dcss block device
config DASD config DASD
tristate "Support for DASD devices" def_tristate y
prompt "Support for DASD devices"
depends on CCW && BLOCK depends on CCW && BLOCK
select IOSCHED_DEADLINE select IOSCHED_DEADLINE
help help
...@@ -27,28 +30,32 @@ config DASD ...@@ -27,28 +30,32 @@ config DASD
natively on a single image or an LPAR. natively on a single image or an LPAR.
config DASD_PROFILE config DASD_PROFILE
bool "Profiling support for dasd devices" def_bool y
prompt "Profiling support for dasd devices"
depends on DASD depends on DASD
help help
Enable this option if you want to see profiling information Enable this option if you want to see profiling information
in /proc/dasd/statistics. in /proc/dasd/statistics.
config DASD_ECKD config DASD_ECKD
tristate "Support for ECKD Disks" def_tristate y
prompt "Support for ECKD Disks"
depends on DASD depends on DASD
help help
ECKD devices are the most commonly used devices. You should enable ECKD devices are the most commonly used devices. You should enable
this option unless you are very sure to have no ECKD device. this option unless you are very sure to have no ECKD device.
config DASD_FBA config DASD_FBA
tristate "Support for FBA Disks" def_tristate y
prompt "Support for FBA Disks"
depends on DASD depends on DASD
help help
Select this option to be able to access FBA devices. It is safe to Select this option to be able to access FBA devices. It is safe to
say "Y". say "Y".
config DASD_DIAG config DASD_DIAG
tristate "Support for DIAG access to Disks" def_tristate y
prompt "Support for DIAG access to Disks"
depends on DASD depends on DASD
help help
Select this option if you want to use Diagnose250 command to access Select this option if you want to use Diagnose250 command to access
...@@ -56,7 +63,8 @@ config DASD_DIAG ...@@ -56,7 +63,8 @@ config DASD_DIAG
say "N". say "N".
config DASD_EER config DASD_EER
bool "Extended error reporting (EER)" def_bool y
prompt "Extended error reporting (EER)"
depends on DASD depends on DASD
help help
This driver provides a character device interface to the This driver provides a character device interface to the
......
...@@ -152,9 +152,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) ...@@ -152,9 +152,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
opm = ccw_device_get_path_mask(device->cdev); opm = ccw_device_get_path_mask(device->cdev);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
//FIXME: start with get_opm ?
if (erp->lpm == 0) if (erp->lpm == 0)
erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum); erp->lpm = device->path_data.opm &
~(erp->irb.esw.esw0.sublog.lpum);
else else
erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum); erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
...@@ -270,10 +270,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp) ...@@ -270,10 +270,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
{ {
erp->function = dasd_3990_erp_action_1; erp->function = dasd_3990_erp_action_1;
dasd_3990_erp_alternate_path(erp); dasd_3990_erp_alternate_path(erp);
if (erp->status == DASD_CQR_FAILED) { if (erp->status == DASD_CQR_FAILED &&
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
erp->status = DASD_CQR_FILLED; erp->status = DASD_CQR_FILLED;
erp->retries = 10; erp->retries = 10;
erp->lpm = LPM_ANYPATH; erp->lpm = erp->startdev->path_data.opm;
erp->function = dasd_3990_erp_action_1_sec; erp->function = dasd_3990_erp_action_1_sec;
} }
return erp; return erp;
...@@ -1907,15 +1908,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense) ...@@ -1907,15 +1908,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
static void static void
dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense) dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
{ {
if (sense[25] & DASD_SENSE_BIT_3) { if (sense[25] & DASD_SENSE_BIT_3) {
dasd_3990_erp_alternate_path(erp); dasd_3990_erp_alternate_path(erp);
if (erp->status == DASD_CQR_FAILED) { if (erp->status == DASD_CQR_FAILED &&
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
/* reset the lpm and the status to be able to /* reset the lpm and the status to be able to
* try further actions. */ * try further actions. */
erp->lpm = erp->startdev->path_data.opm;
erp->lpm = 0;
erp->status = DASD_CQR_NEED_ERP; erp->status = DASD_CQR_NEED_ERP;
} }
} }
......
...@@ -208,6 +208,8 @@ dasd_feature_list(char *str, char **endp) ...@@ -208,6 +208,8 @@ dasd_feature_list(char *str, char **endp)
features |= DASD_FEATURE_READONLY; features |= DASD_FEATURE_READONLY;
else if (len == 4 && !strncmp(str, "diag", 4)) else if (len == 4 && !strncmp(str, "diag", 4))
features |= DASD_FEATURE_USEDIAG; features |= DASD_FEATURE_USEDIAG;
else if (len == 3 && !strncmp(str, "raw", 3))
features |= DASD_FEATURE_USERAW;
else if (len == 6 && !strncmp(str, "erplog", 6)) else if (len == 6 && !strncmp(str, "erplog", 6))
features |= DASD_FEATURE_ERPLOG; features |= DASD_FEATURE_ERPLOG;
else if (len == 8 && !strncmp(str, "failfast", 8)) else if (len == 8 && !strncmp(str, "failfast", 8))
...@@ -639,6 +641,7 @@ dasd_put_device_wake(struct dasd_device *device) ...@@ -639,6 +641,7 @@ dasd_put_device_wake(struct dasd_device *device)
{ {
wake_up(&dasd_delete_wq); wake_up(&dasd_delete_wq);
} }
EXPORT_SYMBOL_GPL(dasd_put_device_wake);
/* /*
* Return dasd_device structure associated with cdev. * Return dasd_device structure associated with cdev.
...@@ -856,7 +859,7 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr, ...@@ -856,7 +859,7 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
spin_lock(&dasd_devmap_lock); spin_lock(&dasd_devmap_lock);
/* Changing diag discipline flag is only allowed in offline state. */ /* Changing diag discipline flag is only allowed in offline state. */
rc = count; rc = count;
if (!devmap->device) { if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
if (val) if (val)
devmap->features |= DASD_FEATURE_USEDIAG; devmap->features |= DASD_FEATURE_USEDIAG;
else else
...@@ -869,6 +872,56 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr, ...@@ -869,6 +872,56 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store); static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
/*
* use_raw controls whether the driver should give access to raw eckd data or
* operate in standard mode
*/
static ssize_t
dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
int use_raw;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
else
use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
return sprintf(buf, use_raw ? "1\n" : "0\n");
}
static ssize_t
dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
ssize_t rc;
unsigned long val;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
if ((strict_strtoul(buf, 10, &val) != 0) || val > 1)
return -EINVAL;
spin_lock(&dasd_devmap_lock);
/* Changing diag discipline flag is only allowed in offline state. */
rc = count;
if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
if (val)
devmap->features |= DASD_FEATURE_USERAW;
else
devmap->features &= ~DASD_FEATURE_USERAW;
} else
rc = -EPERM;
spin_unlock(&dasd_devmap_lock);
return rc;
}
static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
dasd_use_raw_store);
static ssize_t static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr, dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf) char *buf)
...@@ -1126,6 +1179,103 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr, ...@@ -1126,6 +1179,103 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store); static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
static ssize_t dasd_reservation_policy_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dasd_devmap *devmap;
int rc = 0;
devmap = dasd_find_busid(dev_name(dev));
if (IS_ERR(devmap)) {
rc = snprintf(buf, PAGE_SIZE, "ignore\n");
} else {
spin_lock(&dasd_devmap_lock);
if (devmap->features & DASD_FEATURE_FAILONSLCK)
rc = snprintf(buf, PAGE_SIZE, "fail\n");
else
rc = snprintf(buf, PAGE_SIZE, "ignore\n");
spin_unlock(&dasd_devmap_lock);
}
return rc;
}
static ssize_t dasd_reservation_policy_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
int rc;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
rc = 0;
spin_lock(&dasd_devmap_lock);
if (sysfs_streq("ignore", buf))
devmap->features &= ~DASD_FEATURE_FAILONSLCK;
else if (sysfs_streq("fail", buf))
devmap->features |= DASD_FEATURE_FAILONSLCK;
else
rc = -EINVAL;
if (devmap->device)
devmap->device->features = devmap->features;
spin_unlock(&dasd_devmap_lock);
if (rc)
return rc;
else
return count;
}
static DEVICE_ATTR(reservation_policy, 0644,
dasd_reservation_policy_show, dasd_reservation_policy_store);
static ssize_t dasd_reservation_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dasd_device *device;
int rc = 0;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return snprintf(buf, PAGE_SIZE, "none\n");
if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
rc = snprintf(buf, PAGE_SIZE, "reserved\n");
else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
rc = snprintf(buf, PAGE_SIZE, "lost\n");
else
rc = snprintf(buf, PAGE_SIZE, "none\n");
dasd_put_device(device);
return rc;
}
static ssize_t dasd_reservation_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
int rc = 0;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if (sysfs_streq("reset", buf))
clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
else
rc = -EINVAL;
dasd_put_device(device);
if (rc)
return rc;
else
return count;
}
static DEVICE_ATTR(last_known_reservation_state, 0644,
dasd_reservation_state_show, dasd_reservation_state_store);
static struct attribute * dasd_attrs[] = { static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr, &dev_attr_readonly.attr,
&dev_attr_discipline.attr, &dev_attr_discipline.attr,
...@@ -1134,10 +1284,13 @@ static struct attribute * dasd_attrs[] = { ...@@ -1134,10 +1284,13 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_vendor.attr, &dev_attr_vendor.attr,
&dev_attr_uid.attr, &dev_attr_uid.attr,
&dev_attr_use_diag.attr, &dev_attr_use_diag.attr,
&dev_attr_raw_track_access.attr,
&dev_attr_eer_enabled.attr, &dev_attr_eer_enabled.attr,
&dev_attr_erplog.attr, &dev_attr_erplog.attr,
&dev_attr_failfast.attr, &dev_attr_failfast.attr,
&dev_attr_expires.attr, &dev_attr_expires.attr,
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
NULL, NULL,
}; };
......
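The new raw_track_access, reservation_policy and last_known_reservation_state attributes above only toggle feature and flag bits; the code that acts on them sits in the collapsed DASD core and ECKD diffs. As a rough sketch of how such bits are typically consumed, assuming the driver-internal struct dasd_device fields already referenced in this hunk (the helper name and the exact error code are illustrative, not from this commit):

/* Illustrative helper, assumes the DASD driver's internal definitions
 * (dasd_int.h). With the "fail" reservation policy selected, I/O should
 * be refused once the device reservation has been stolen. */
static int example_check_reservation(struct dasd_device *device)
{
        if ((device->features & DASD_FEATURE_FAILONSLCK) &&
            test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
                return -EPERM;  /* fail the request instead of retrying */
        return 0;
}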
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#define KMSG_COMPONENT "dasd" #define KMSG_COMPONENT "dasd"
#include <linux/kernel_stat.h>
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -238,6 +239,7 @@ static void dasd_ext_handler(unsigned int ext_int_code, ...@@ -238,6 +239,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
addr_t ip; addr_t ip;
int rc; int rc;
kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
switch (ext_int_code >> 24) { switch (ext_int_code >> 24) {
case DASD_DIAG_CODE_31BIT: case DASD_DIAG_CODE_31BIT:
ip = (addr_t) param32; ip = (addr_t) param32;
...@@ -617,6 +619,7 @@ static struct dasd_discipline dasd_diag_discipline = { ...@@ -617,6 +619,7 @@ static struct dasd_discipline dasd_diag_discipline = {
.ebcname = "DIAG", .ebcname = "DIAG",
.max_blocks = DIAG_MAX_BLOCKS, .max_blocks = DIAG_MAX_BLOCKS,
.check_device = dasd_diag_check_device, .check_device = dasd_diag_check_device,
.verify_path = dasd_generic_verify_path,
.fill_geometry = dasd_diag_fill_geometry, .fill_geometry = dasd_diag_fill_geometry,
.start_IO = dasd_start_diag, .start_IO = dasd_start_diag,
.term_IO = dasd_diag_term_IO, .term_IO = dasd_diag_term_IO,
......
...@@ -37,14 +37,17 @@ ...@@ -37,14 +37,17 @@
#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d #define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
#define DASD_ECKD_CCW_READ_KD_MT 0x8e #define DASD_ECKD_CCW_READ_KD_MT 0x8e
#define DASD_ECKD_CCW_RELEASE 0x94 #define DASD_ECKD_CCW_RELEASE 0x94
#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
#define DASD_ECKD_CCW_READ_CKD_MT 0x9e #define DASD_ECKD_CCW_READ_CKD_MT 0x9e
#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d #define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5 #define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6 #define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
#define DASD_ECKD_CCW_RESERVE 0xB4 #define DASD_ECKD_CCW_RESERVE 0xB4
#define DASD_ECKD_CCW_READ_TRACK 0xDE
#define DASD_ECKD_CCW_PFX 0xE7 #define DASD_ECKD_CCW_PFX 0xE7
#define DASD_ECKD_CCW_PFX_READ 0xEA #define DASD_ECKD_CCW_PFX_READ 0xEA
#define DASD_ECKD_CCW_RSCK 0xF9 #define DASD_ECKD_CCW_RSCK 0xF9
#define DASD_ECKD_CCW_RCD 0xFA
/* /*
* Perform Subsystem Function / Sub-Orders * Perform Subsystem Function / Sub-Orders
...@@ -57,6 +60,11 @@ ...@@ -57,6 +60,11 @@
*/ */
#define LV_COMPAT_CYL 0xFFFE #define LV_COMPAT_CYL 0xFFFE
#define FCX_MAX_DATA_FACTOR 65536
#define DASD_ECKD_RCD_DATA_SIZE 256
/***************************************************************************** /*****************************************************************************
* SECTION: Type Definitions * SECTION: Type Definitions
****************************************************************************/ ****************************************************************************/
...@@ -331,12 +339,6 @@ struct dasd_gneq { ...@@ -331,12 +339,6 @@ struct dasd_gneq {
__u8 reserved2[22]; __u8 reserved2[22];
} __attribute__ ((packed)); } __attribute__ ((packed));
struct dasd_eckd_path {
__u8 opm;
__u8 ppm;
__u8 npm;
};
struct dasd_rssd_features { struct dasd_rssd_features {
char feature[256]; char feature[256];
} __attribute__((packed)); } __attribute__((packed));
...@@ -442,7 +444,6 @@ struct dasd_eckd_private { ...@@ -442,7 +444,6 @@ struct dasd_eckd_private {
struct vd_sneq *vdsneq; struct vd_sneq *vdsneq;
struct dasd_gneq *gneq; struct dasd_gneq *gneq;
struct dasd_eckd_path path_data;
struct eckd_count count_area[5]; struct eckd_count count_area[5];
int init_cqr_status; int init_cqr_status;
int uses_cdl; int uses_cdl;
...@@ -455,6 +456,8 @@ struct dasd_eckd_private { ...@@ -455,6 +456,8 @@ struct dasd_eckd_private {
struct alias_pav_group *pavgroup; struct alias_pav_group *pavgroup;
struct alias_lcu *lcu; struct alias_lcu *lcu;
int count; int count;
u32 fcx_max_data;
}; };
......
...@@ -473,6 +473,7 @@ int dasd_eer_enable(struct dasd_device *device) ...@@ -473,6 +473,7 @@ int dasd_eer_enable(struct dasd_device *device)
cqr->retries = 255; cqr->retries = 255;
cqr->expires = 10 * HZ; cqr->expires = 10 * HZ;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
ccw = cqr->cpaddr; ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SNSS; ccw->cmd_code = DASD_ECKD_CCW_SNSS;
......
...@@ -96,7 +96,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) ...@@ -96,7 +96,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
DBF_DEV_EVENT(DBF_DEBUG, device, DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP called (%i retries left)", "default ERP called (%i retries left)",
cqr->retries); cqr->retries);
cqr->lpm = LPM_ANYPATH; if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
cqr->lpm = device->path_data.opm;
cqr->status = DASD_CQR_FILLED; cqr->status = DASD_CQR_FILLED;
} else { } else {
pr_err("%s: default ERP has run out of retries and failed\n", pr_err("%s: default ERP has run out of retries and failed\n",
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
* Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
*/ */
#include <linux/kernel_stat.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/kdev_t.h> #include <linux/kdev_t.h>
...@@ -361,6 +362,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, ...@@ -361,6 +362,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
int cstat, dstat; int cstat, dstat;
int count; int count;
kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++;
raw = dev_get_drvdata(&cdev->dev); raw = dev_get_drvdata(&cdev->dev);
req = (struct raw3215_req *) intparm; req = (struct raw3215_req *) intparm;
cstat = irb->scsw.cmd.cstat; cstat = irb->scsw.cmd.cstat;
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
* Copyright IBM Corp. 2003, 2009 * Copyright IBM Corp. 2003, 2009
*/ */
#include <linux/kernel_stat.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -329,6 +330,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) ...@@ -329,6 +330,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct raw3270_request *rq; struct raw3270_request *rq;
int rc; int rc;
kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
rp = dev_get_drvdata(&cdev->dev); rp = dev_get_drvdata(&cdev->dev);
if (!rp) if (!rp)
return; return;
......
...@@ -33,6 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) ...@@ -33,6 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
int cpu; int cpu;
struct sys_device *sysdev; struct sys_device *sysdev;
s390_adjust_jiffies();
pr_warning("cpu capability changed.\n"); pr_warning("cpu capability changed.\n");
get_online_cpus(); get_online_cpus();
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
......