Commit e46cae44 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "The new features and main improvements in this merge for v4.9

   - Support for the UBSAN sanitizer

   - Set HAVE_EFFICIENT_UNALIGNED_ACCESS, which improves the code in
     some places

   - Improvements for the in-kernel fpu code; in particular, the overhead
     for multiple consecutive in-kernel fpu users is reduced

   - Add a SIMD implementation for the RAID6 gen and xor operations

   - Add RAID6 recovery based on the XC instruction

   - The PCI DMA flush logic has been improved to increase the speed of
     the map / unmap operations

   - The time synchronization code has seen some updates

  And bug fixes all over the place"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (48 commits)
  s390/con3270: fix insufficient space padding
  s390/con3270: fix use of uninitialised data
  MAINTAINERS: update DASD maintainer
  s390/cio: fix accidental interrupt enabling during resume
  s390/dasd: add missing \n to end of dev_err messages
  s390/config: Enable config options for Docker
  s390/dasd: make query host access interruptible
  s390/dasd: fix panic during offline processing
  s390/dasd: fix hanging offline processing
  s390/pci_dma: improve lazy flush for unmap
  s390/pci_dma: split dma_update_trans
  s390/pci_dma: improve map_sg
  s390/pci_dma: simplify dma address calculation
  s390/pci_dma: remove dma address range check
  iommu/s390: simplify registration of I/O address translation parameters
  s390: migrate exception table users off module.h and onto extable.h
  s390: export header for CLP ioctl
  s390/vmur: fix irq pointer dereference in int handler
  s390/dasd: add missing KOBJ_CHANGE event for unformatted devices
  s390: enable UBSAN
  ...
parents 02bafd96 6cd997db
@@ -10135,8 +10135,8 @@ S: Supported
 F:	drivers/s390/cio/
 
 S390 DASD DRIVER
-M:	Stefan Weinhuber <wein@de.ibm.com>
-M:	Stefan Haberland <stefan.haberland@de.ibm.com>
+M:	Stefan Haberland <sth@linux.vnet.ibm.com>
+M:	Jan Hoeppner <hoeppner@linux.vnet.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
...
@@ -73,6 +73,7 @@ config S390
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_SG_CHAIN
+	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_INLINE_READ_LOCK
 	select ARCH_INLINE_READ_LOCK_BH
@@ -109,6 +110,7 @@ config S390
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select ARCH_WANTS_PROT_NUMA_PROT_NONE
+	select ARCH_WANTS_UBSAN_NO_NULL
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS2
@@ -136,6 +138,7 @@ config S390
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
+	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_EXIT_THREAD
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
...
@@ -46,6 +46,8 @@ cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
 cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
 cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
 
+cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
+
 #KBUILD_IMAGE is necessary for make rpm
 KBUILD_IMAGE :=arch/s390/boot/image
...
@@ -542,7 +542,7 @@ static int __init appldata_init(void)
 		rc = PTR_ERR(appldata_pdev);
 		goto out_driver;
 	}
-	appldata_wq = create_singlethread_workqueue("appldata");
+	appldata_wq = alloc_ordered_workqueue("appldata", 0);
 	if (!appldata_wq) {
 		rc = -ENOMEM;
 		goto out_device;
...
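A side note on the workqueue change above: create_singlethread_workqueue() is the legacy interface, and alloc_ordered_workqueue(name, 0) is its modern replacement; both guarantee that at most one work item executes at a time, in queueing order. A minimal sketch of the pattern (the "example" names are hypothetical, not from this commit):

	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		/* ordered workqueue: one work item at a time, FIFO */
		example_wq = alloc_ordered_workqueue("example", 0);
		if (!example_wq)
			return -ENOMEM;
		return 0;
	}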
@@ -17,6 +17,7 @@ KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 
 GCOV_PROFILE := n
+UBSAN_SANITIZE := n
 
 OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o als.o)
 OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
...
@@ -260,7 +260,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_NAT_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -269,6 +268,8 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -281,7 +282,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NF_NAT_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
@@ -299,6 +299,8 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
@@ -359,6 +361,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
@@ -409,6 +412,7 @@ CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -428,6 +432,7 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -453,7 +458,6 @@ CONFIG_PPP_SYNC_TTY=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
 CONFIG_LEGACY_PTY_COUNT=0
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
@@ -495,6 +499,7 @@ CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
...
@@ -15,6 +15,8 @@ CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
@@ -255,7 +257,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_NAT_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -264,6 +265,8 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -276,7 +279,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NF_NAT_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
@@ -294,6 +296,8 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
@@ -353,6 +357,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
@@ -403,6 +408,7 @@ CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -422,6 +428,7 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -447,7 +454,6 @@ CONFIG_PPP_SYNC_TTY=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
 CONFIG_LEGACY_PTY_COUNT=0
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
@@ -487,6 +493,7 @@ CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
...
@@ -16,6 +16,8 @@ CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
@@ -255,7 +257,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_NAT_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -264,6 +265,8 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -276,7 +279,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NF_NAT_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
@@ -294,6 +296,8 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
@@ -353,6 +357,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
@@ -403,6 +408,7 @@ CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -422,6 +428,7 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -447,7 +454,6 @@ CONFIG_PPP_SYNC_TTY=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
 CONFIG_LEGACY_PTY_COUNT=0
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
@@ -488,6 +494,7 @@ CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
...
@@ -67,7 +67,7 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 								\
 	kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);		\
 	crc = ___crc32_vx(crc, data, aligned);			\
-	kernel_fpu_end(&vxstate);				\
+	kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);		\
 								\
 	if (remaining)						\
 		crc = ___crc32_sw(crc, data + aligned, remaining); \
...
@@ -58,7 +58,6 @@ static int ghash_update(struct shash_desc *desc,
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 	unsigned int n;
 	u8 *buf = dctx->buffer;
-	int ret;
 
 	if (dctx->bytes) {
 		u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -71,18 +70,14 @@ static int ghash_update(struct shash_desc *desc,
 		src += n;
 
 		if (!dctx->bytes) {
-			ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
-					 GHASH_BLOCK_SIZE);
-			if (ret != GHASH_BLOCK_SIZE)
-				return -EIO;
+			cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
+				   GHASH_BLOCK_SIZE);
 		}
 	}
 
 	n = srclen & ~(GHASH_BLOCK_SIZE - 1);
 	if (n) {
-		ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
-		if (ret != n)
-			return -EIO;
+		cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
 		src += n;
 		srclen -= n;
 	}
@@ -98,17 +93,12 @@ static int ghash_update(struct shash_desc *desc,
 static int ghash_flush(struct ghash_desc_ctx *dctx)
 {
 	u8 *buf = dctx->buffer;
-	int ret;
 
 	if (dctx->bytes) {
 		u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
 
 		memset(pos, 0, dctx->bytes);
-
-		ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
-		if (ret != GHASH_BLOCK_SIZE)
-			return -EIO;
-
+		cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
 		dctx->bytes = 0;
 	}
@@ -146,7 +136,7 @@ static struct shash_alg ghash_alg = {
 static int __init ghash_mod_init(void)
 {
-	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
+	if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_GHASH))
 		return -EOPNOTSUPP;
 	return crypto_register_shash(&ghash_alg);
...
@@ -135,12 +135,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
 		else
 			h = ebuf;
 		/* generate sha256 from this page */
-		if (cpacf_kimd(CPACF_KIMD_SHA_256, h,
-			       pg, PAGE_SIZE) != PAGE_SIZE) {
-			prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
-			ret = -EIO;
-			goto out;
-		}
+		cpacf_kimd(CPACF_KIMD_SHA_256, h, pg, PAGE_SIZE);
 		if (n < sizeof(hash))
 			memcpy(ebuf, hash, n);
 		ret += n;
@@ -148,7 +143,6 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
 		nbytes -= n;
 	}
 
-out:
 	free_page((unsigned long)pg);
 	return ret;
 }
@@ -160,13 +154,11 @@ static void prng_tdes_add_entropy(void)
 {
 	__u64 entropy[4];
 	unsigned int i;
-	int ret;
 
 	for (i = 0; i < 16; i++) {
-		ret = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
-				(char *)entropy, (char *)entropy,
-				sizeof(entropy));
-		BUG_ON(ret < 0 || ret != sizeof(entropy));
+		cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
+			  (char *) entropy, (char *) entropy,
+			  sizeof(entropy));
 		memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
 	}
 }
@@ -303,21 +295,14 @@ static int __init prng_sha512_selftest(void)
 		0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c,
 		0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 };
 
-	int ret = 0;
 	u8 buf[sizeof(random)];
 	struct ppno_ws_s ws;
 
 	memset(&ws, 0, sizeof(ws));
 
 	/* initial seed */
-	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED, &ws, NULL, 0,
-			 seed, sizeof(seed));
-	if (ret < 0) {
-		pr_err("The prng self test seed operation for the "
-		       "SHA-512 mode failed with rc=%d\n", ret);
-		prng_errorflag = PRNG_SELFTEST_FAILED;
-		return -EIO;
-	}
+	cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+		   &ws, NULL, 0, seed, sizeof(seed));
 
 	/* check working states V and C */
 	if (memcmp(ws.V, V0, sizeof(V0)) != 0
@@ -329,22 +314,10 @@ static int __init prng_sha512_selftest(void)
 	}
 
 	/* generate random bytes */
-	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
-			 &ws, buf, sizeof(buf), NULL, 0);
-	if (ret < 0) {
-		pr_err("The prng self test generate operation for "
-		       "the SHA-512 mode failed with rc=%d\n", ret);
-		prng_errorflag = PRNG_SELFTEST_FAILED;
-		return -EIO;
-	}
-	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
-			 &ws, buf, sizeof(buf), NULL, 0);
-	if (ret < 0) {
-		pr_err("The prng self test generate operation for "
-		       "the SHA-512 mode failed with rc=%d\n", ret);
-		prng_errorflag = PRNG_SELFTEST_FAILED;
-		return -EIO;
-	}
+	cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+		   &ws, buf, sizeof(buf), NULL, 0);
+	cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+		   &ws, buf, sizeof(buf), NULL, 0);
 
 	/* check against expected data */
 	if (memcmp(buf, random, sizeof(random)) != 0) {
@@ -392,26 +365,16 @@ static int __init prng_sha512_instantiate(void)
 	get_tod_clock_ext(seed + 48);
 
 	/* initial seed of the ppno drng */
-	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
-			 &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
-	if (ret < 0) {
-		prng_errorflag = PRNG_SEED_FAILED;
-		ret = -EIO;
-		goto outfree;
-	}
+	cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+		   &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
 
 	/* if fips mode is enabled, generate a first block of random
 	   bytes for the FIPS 140-2 Conditional Self Test */
 	if (fips_enabled) {
 		prng_data->prev = prng_data->buf + prng_chunk_size;
-		ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
-				 &prng_data->ppnows,
-				 prng_data->prev, prng_chunk_size, NULL, 0);
-		if (ret < 0 || ret != prng_chunk_size) {
-			prng_errorflag = PRNG_GEN_FAILED;
-			ret = -EIO;
-			goto outfree;
-		}
+		cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+			   &prng_data->ppnows,
+			   prng_data->prev, prng_chunk_size, NULL, 0);
 	}
 
 	return 0;
@@ -440,12 +403,8 @@ static int prng_sha512_reseed(void)
 		return ret;
 
 	/* do a reseed of the ppno drng with this bytestring */
-	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
-			 &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
-	if (ret) {
-		prng_errorflag = PRNG_RESEED_FAILED;
-		return -EIO;
-	}
+	cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+		   &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
 
 	return 0;
 }
@@ -463,12 +422,8 @@ static int prng_sha512_generate(u8 *buf, size_t nbytes)
 	}
 
 	/* PPNO generate */
-	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
-			 &prng_data->ppnows, buf, nbytes, NULL, 0);
-	if (ret < 0 || ret != nbytes) {
-		prng_errorflag = PRNG_GEN_FAILED;
-		return -EIO;
-	}
+	cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+		   &prng_data->ppnows, buf, nbytes, NULL, 0);
 
 	/* FIPS 140-2 Conditional Self Test */
 	if (fips_enabled) {
@@ -479,7 +434,7 @@ static int prng_sha512_generate(u8 *buf, size_t nbytes)
 		memcpy(prng_data->prev, buf, nbytes);
 	}
 
-	return ret;
+	return nbytes;
 }
@@ -494,7 +449,7 @@ static int prng_open(struct inode *inode, struct file *file)
 static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
 			      size_t nbytes, loff_t *ppos)
 {
-	int chunk, n, tmp, ret = 0;
+	int chunk, n, ret = 0;
 
 	/* lock prng_data struct */
 	if (mutex_lock_interruptible(&prng_data->mutex))
@@ -545,13 +500,9 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
 	 *
 	 * Note: you can still get strict X9.17 conformity by setting
 	 * prng_chunk_size to 8 bytes.
	 */
-	tmp = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
-			prng_data->buf, prng_data->buf, n);
-	if (tmp < 0 || tmp != n) {
-		ret = -EIO;
-		break;
-	}
+	cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
+		  prng_data->buf, prng_data->buf, n);
 	prng_data->prngws.byte_counter += n;
 	prng_data->prngws.reseed_counter += n;
@@ -806,13 +757,13 @@ static int __init prng_init(void)
 	int ret;
 
 	/* check if the CPU has a PRNG */
-	if (!cpacf_query(CPACF_KMC, CPACF_KMC_PRNG))
+	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
 		return -EOPNOTSUPP;
 
 	/* choose prng mode */
 	if (prng_mode != PRNG_MODE_TDES) {
 		/* check for MSA5 support for PPNO operations */
-		if (!cpacf_query(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
+		if (!cpacf_query_func(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
 			if (prng_mode == PRNG_MODE_SHA512) {
 				pr_err("The prng module cannot "
 				       "start in SHA-512 mode\n");
...
@@ -91,7 +91,7 @@ static struct shash_alg alg = {
 static int __init sha1_s390_init(void)
 {
-	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_1))
+	if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_1))
 		return -EOPNOTSUPP;
 	return crypto_register_shash(&alg);
 }
...
@@ -123,7 +123,7 @@ static int __init sha256_s390_init(void)
 {
 	int ret;
 
-	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
+	if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256))
 		return -EOPNOTSUPP;
 	ret = crypto_register_shash(&sha256_alg);
 	if (ret < 0)
...
@@ -133,7 +133,7 @@ static int __init init(void)
 {
 	int ret;
 
-	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_512))
+	if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_512))
 		return -EOPNOTSUPP;
 	if ((ret = crypto_register_shash(&sha512_alg)) < 0)
 		goto out;
...
@@ -22,8 +22,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 {
 	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
-	unsigned int index;
-	int ret;
+	unsigned int index, n;
 
 	/* how much is already in the buffer? */
 	index = ctx->count & (bsize - 1);
@@ -35,9 +34,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 	/* process one stored block */
 	if (index) {
 		memcpy(ctx->buf + index, data, bsize - index);
-		ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
-		if (ret != bsize)
-			return -EIO;
+		cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
 		data += bsize - index;
 		len -= bsize - index;
 		index = 0;
@@ -45,12 +42,10 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 	/* process as many blocks as possible */
 	if (len >= bsize) {
-		ret = cpacf_kimd(ctx->func, ctx->state, data,
-				 len & ~(bsize - 1));
-		if (ret != (len & ~(bsize - 1)))
-			return -EIO;
-		data += ret;
-		len -= ret;
+		n = len & ~(bsize - 1);
+		cpacf_kimd(ctx->func, ctx->state, data, n);
+		data += n;
+		len -= n;
 	}
 store:
 	if (len)
@@ -66,7 +61,6 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
 	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
 	u64 bits;
 	unsigned int index, end, plen;
-	int ret;
 
 	/* SHA-512 uses 128 bit padding length */
 	plen = (bsize > SHA256_BLOCK_SIZE) ? 16 : 8;
@@ -88,10 +82,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
 	 */
 	bits = ctx->count * 8;
 	memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
-
-	ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
-	if (ret != end)
-		return -EIO;
+	cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
 
 	/* copy digest to out */
 	memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
...
@@ -55,4 +55,28 @@ static struct facility_def facility_defs[] = {
 			-1 /* END */
 		}
 	},
+	{
+		.name = "FACILITIES_KVM",
+		.bits = (int[]){
+			0, /* N3 instructions */
+			1, /* z/Arch mode installed */
+			2, /* z/Arch mode active */
+			3, /* DAT-enhancement */
+			4, /* idte segment table */
+			5, /* idte region table */
+			6, /* ASN-and-LX reuse */
+			7, /* stfle */
+			8, /* enhanced-DAT 1 */
+			9, /* sense-running-status */
+			10, /* conditional sske */
+			13, /* ipte-range */
+			14, /* nonquiescing key-setting */
+			73, /* transactional execution */
+			75, /* access-exception-fetch/store indication */
+			76, /* msa extension 3 */
+			77, /* msa extension 4 */
+			78, /* enhanced-DAT 2 */
+			-1 /* END */
+		}
+	},
 };
@@ -64,18 +64,18 @@ static inline int test_fp_ctl(u32 fpc)
 	return rc;
 }
 
-#define KERNEL_VXR_V0V7		1
-#define KERNEL_VXR_V8V15	2
-#define KERNEL_VXR_V16V23	4
-#define KERNEL_VXR_V24V31	8
-#define KERNEL_FPR		16
-#define KERNEL_FPC		256
+#define KERNEL_FPC		1
+#define KERNEL_VXR_V0V7		2
+#define KERNEL_VXR_V8V15	4
+#define KERNEL_VXR_V16V23	8
+#define KERNEL_VXR_V24V31	16
 
 #define KERNEL_VXR_LOW		(KERNEL_VXR_V0V7|KERNEL_VXR_V8V15)
 #define KERNEL_VXR_MID		(KERNEL_VXR_V8V15|KERNEL_VXR_V16V23)
 #define KERNEL_VXR_HIGH		(KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
 
-#define KERNEL_FPU_MASK		(KERNEL_VXR_LOW|KERNEL_VXR_HIGH|KERNEL_FPR)
+#define KERNEL_VXR		(KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
+#define KERNEL_FPR		(KERNEL_FPC|KERNEL_VXR_V0V7)
 
 struct kernel_fpu;
@@ -87,18 +87,28 @@ struct kernel_fpu;
  * Prefer using the kernel_fpu_begin()/kernel_fpu_end() pair of functions.
 */
 void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
-void __kernel_fpu_end(struct kernel_fpu *state);
+void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
 
 static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 {
 	preempt_disable();
-	__kernel_fpu_begin(state, flags);
+	state->mask = S390_lowcore.fpu_flags;
+	if (!test_cpu_flag(CIF_FPU))
+		/* Save user space FPU state and register contents */
+		save_fpu_regs();
+	else if (state->mask & flags)
+		/* Save FPU/vector register in-use by the kernel */
+		__kernel_fpu_begin(state, flags);
+	S390_lowcore.fpu_flags |= flags;
 }
 
-static inline void kernel_fpu_end(struct kernel_fpu *state)
+static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
 {
-	__kernel_fpu_end(state);
+	S390_lowcore.fpu_flags = state->mask;
+	if (state->mask & flags)
+		/* Restore FPU/vector register in-use by the kernel */
+		__kernel_fpu_end(state, flags);
 	preempt_enable();
 }
...
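This reworked begin/end pair is where the reduced overhead for consecutive in-kernel FPU users comes from: the flags declare which register sets a section clobbers, S390_lowcore.fpu_flags tracks what is currently in use, and registers are saved only when two kernel-level sections actually overlap. A sketch of a caller, modeled on the crc32-vx hunk earlier in this diff (the function itself is hypothetical):

	#include <asm/fpu/api.h>

	static u32 example_vx_section(u32 crc, const u8 *data, size_t len)
	{
		struct kernel_fpu vxstate;

		/* this section only clobbers %v0-%v15 */
		kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);
		/* ... vector instructions working on data ... */
		kernel_fpu_end(&vxstate, KERNEL_VXR_LOW); /* same flags as begin */
		return crc;
	}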
@@ -129,7 +129,8 @@ struct lowcore {
 	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
 	__u64	gmap;				/* 0x0398 */
 	__u32	spinlock_lockval;		/* 0x03a0 */
-	__u8	pad_0x03a0[0x0400-0x03a4];	/* 0x03a4 */
+	__u32	fpu_flags;			/* 0x03a4 */
+	__u8	pad_0x03a8[0x0400-0x03a8];	/* 0x03a8 */
 
 	/* Per cpu primary space access list */
 	__u32	paste[16];			/* 0x0400 */
...
@@ -12,6 +12,7 @@ typedef struct {
 	struct list_head pgtable_list;
 	spinlock_t gmap_lock;
 	struct list_head gmap_list;
+	unsigned long gmap_asce;
 	unsigned long asce;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
...
@@ -21,6 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.flush_count, 0);
+	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste;
...
@@ -11,6 +11,7 @@
 #include <asm-generic/pci.h>
 #include <asm/pci_clp.h>
 #include <asm/pci_debug.h>
+#include <asm/sclp.h>
 
 #define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		0x10000000
@@ -117,6 +118,7 @@ struct zpci_dev {
 	spinlock_t	iommu_bitmap_lock;
 	unsigned long	*iommu_bitmap;
+	unsigned long	*lazy_bitmap;
 	unsigned long	iommu_size;
 	unsigned long	iommu_pages;
 	unsigned int	next_bit;
@@ -216,6 +218,9 @@ void zpci_debug_init_device(struct zpci_dev *, const char *);
 void zpci_debug_exit_device(struct zpci_dev *);
 void zpci_debug_info(struct zpci_dev *, struct seq_file *);
 
+/* Error reporting */
+int zpci_report_error(struct pci_dev *, struct zpci_report_error_header *);
+
 #ifdef CONFIG_NUMA
 
 /* Returns the node based on PCI bus */
...
@@ -874,35 +874,31 @@ static inline pte_t pte_mkhuge(pte_t pte)
 }
 #endif
 
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
-{
-	unsigned long pto = (unsigned long) ptep;
-
-	/* Invalidation + global TLB flush for the pte */
-	asm volatile(
-		"	ipte	%2,%3"
-		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
-}
+#define IPTE_GLOBAL	0
+#define IPTE_LOCAL	1
 
-static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
 {
 	unsigned long pto = (unsigned long) ptep;
 
-	/* Invalidation + local TLB flush for the pte */
+	/* Invalidation + TLB flush for the pte */
 	asm volatile(
-		"	.insn rrf,0xb2210000,%2,%3,0,1"
-		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+		"	.insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
+		: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
+		  [m4] "i" (local));
 }
 
-static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
+static inline void __ptep_ipte_range(unsigned long address, int nr,
+				     pte_t *ptep, int local)
 {
 	unsigned long pto = (unsigned long) ptep;
 
-	/* Invalidate a range of ptes + global TLB flush of the ptes */
+	/* Invalidate a range of ptes + TLB flush of the ptes */
 	do {
 		asm volatile(
-			"	.insn rrf,0xb2210000,%2,%0,%1,0"
-			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
+			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
+			: [r2] "+a" (address), [r3] "+a" (nr)
+			: [r1] "a" (pto), [m4] "i" (local) : "memory");
 	} while (nr != 255);
 }
@@ -1239,53 +1235,33 @@ static inline void __pmdp_csp(pmd_t *pmdp)
 			    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
 }
 
-static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
-{
-	unsigned long sto;
-
-	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
-	asm volatile(
-		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
-		: "=m" (*pmdp)
-		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
-		: "cc" );
-}
-
-static inline void __pudp_idte(unsigned long address, pud_t *pudp)
-{
-	unsigned long r3o;
-
-	r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
-	r3o |= _ASCE_TYPE_REGION3;
-	asm volatile(
-		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
-		: "=m" (*pudp)
-		: "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
-		: "cc");
-}
+#define IDTE_GLOBAL	0
+#define IDTE_LOCAL	1
 
-static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
+static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
 {
 	unsigned long sto;
 
 	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
 	asm volatile(
-		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
-		: "=m" (*pmdp)
-		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+		: "+m" (*pmdp)
+		: [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
+		  [m4] "i" (local)
 		: "cc" );
 }
 
-static inline void __pudp_idte_local(unsigned long address, pud_t *pudp)
+static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
 {
 	unsigned long r3o;
 
 	r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
 	r3o |= _ASCE_TYPE_REGION3;
 	asm volatile(
-		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
-		: "=m" (*pudp)
-		: "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
+		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+		: "+m" (*pudp)
+		: [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
+		  [m4] "i" (local)
 		: "cc");
 }
...
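With the _local variants folded in, the local/global decision moves to the call sites. Roughly, a caller that knows the mm has only ever run on the current CPU can request a local flush; a sketch of the intended call-site pattern (not a hunk from this commit):

	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte(addr, ptep, IPTE_LOCAL);	/* flush this CPU only */
	else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);	/* flush on all CPUs */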
@@ -26,17 +26,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
 		: : "a" (2048), "a" (asce) : "cc");
 }
 
-/*
- * Flush TLB entries for a specific ASCE on the local CPU
- */
-static inline void __tlb_flush_idte_local(unsigned long asce)
-{
-	/* Local TLB flush for the mm */
-	asm volatile(
-		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
-		: : "a" (2048), "a" (asce) : "cc");
-}
-
 #ifdef CONFIG_SMP
 void smp_ptlb_all(void);
@@ -65,35 +54,33 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
 		/* Global TLB flush */
 		__tlb_flush_global();
 		/* Reset TLB flush mask */
-		if (MACHINE_HAS_TLB_LC)
-			cpumask_copy(mm_cpumask(mm),
-				     &mm->context.cpu_attach_mask);
+		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	}
 	atomic_dec(&mm->context.flush_count);
 	preempt_enable();
 }
 
-/*
- * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
- * when more than one asce (e.g. gmap) ran on this mm.
- */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
-	/*
-	 * If the machine has IDTE we prefer to do a per mm flush
-	 * on all cpus instead of doing a local flush if the mm
-	 * only ran on the local cpu.
-	 */
+	unsigned long gmap_asce;
+
 	preempt_disable();
 	atomic_inc(&mm->context.flush_count);
-	if (MACHINE_HAS_TLB_LC &&
-	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		__tlb_flush_idte_local(asce);
+	gmap_asce = READ_ONCE(mm->context.gmap_asce);
+	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+		if (gmap_asce)
+			__tlb_flush_idte(gmap_asce);
+		__tlb_flush_idte(mm->context.asce);
 	} else {
-		if (MACHINE_HAS_IDTE)
-			__tlb_flush_idte(asce);
-		else
-			__tlb_flush_global();
-		/* Reset TLB flush mask */
-		if (MACHINE_HAS_TLB_LC)
-			cpumask_copy(mm_cpumask(mm),
-				     &mm->context.cpu_attach_mask);
+		__tlb_flush_full(mm);
 	}
+	/* Reset TLB flush mask */
+	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	atomic_dec(&mm->context.flush_count);
 	preempt_enable();
 }
@@ -112,36 +99,17 @@ static inline void __tlb_flush_kernel(void)
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.
 */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
-	if (MACHINE_HAS_TLB_LC)
-		__tlb_flush_idte_local(asce);
-	else
-		__tlb_flush_local();
+	__tlb_flush_local();
 }
 
 static inline void __tlb_flush_kernel(void)
 {
-	if (MACHINE_HAS_TLB_LC)
-		__tlb_flush_idte_local(init_mm.context.asce);
-	else
-		__tlb_flush_local();
+	__tlb_flush_local();
 }
 #endif
 
-static inline void __tlb_flush_mm(struct mm_struct * mm)
-{
-	/*
-	 * If the machine has IDTE we prefer to do a per mm flush
-	 * on all cpus instead of doing a local flush if the mm
-	 * only ran on the local cpu.
-	 */
-	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_asce(mm, mm->context.asce);
-	else
-		__tlb_flush_full(mm);
-}
-
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
 	if (mm->context.flush_mm) {
...
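The new __tlb_flush_mm() keys off mm->context.gmap_asce, the field added to the mmu context earlier in this diff. The contract implied by the code above, summarized as a sketch (the setters live in the gmap code, which is not part of this excerpt):

	/*
	 * mm->context.gmap_asce == 0:    no guest ASCE attached;
	 *                                flush only mm->context.asce
	 * mm->context.gmap_asce == asce: one guest ASCE in use; flush it
	 *                                and then the mm ASCE via IDTE
	 * mm->context.gmap_asce == -1UL: set of guest ASCEs unknown;
	 *                                fall back to __tlb_flush_full()
	 */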
@@ -16,15 +16,13 @@
 
 /* Macros to generate vector instruction byte code */
 
-#define REG_NUM_INVALID	255
-
 /* GR_NUM - Retrieve general-purpose register number
  *
  * @opd: Operand to store register number
  * @r64: String designation register in the format "%rN"
 */
 .macro	GR_NUM	opd gr
-	\opd = REG_NUM_INVALID
+	\opd = 255
 	.ifc	\gr,%r0
 		\opd = 0
 	.endif
@@ -73,14 +71,11 @@
 	.ifc	\gr,%r15
 		\opd = 15
 	.endif
-	.if \opd == REG_NUM_INVALID
-	.error "Invalid general-purpose register designation: \gr"
+	.if \opd == 255
+		\opd = \gr
 	.endif
 .endm
 
-/* VX_R() - Macro to encode the VX_NUM into the instruction */
-#define VX_R(v)		(v & 0x0F)
-
 /* VX_NUM - Retrieve vector register number
  *
  * @opd: Operand to store register number
@@ -88,11 +83,10 @@
  *
  * The vector register number is used for as input number to the
  * instruction and, as well as, to compute the RXB field of the
- * instruction. To encode the particular vector register number,
- * use the VX_R(v) macro to extract the instruction opcode.
+ * instruction.
 */
 .macro	VX_NUM	opd vxr
-	\opd = REG_NUM_INVALID
+	\opd = 255
 	.ifc	\vxr,%v0
 		\opd = 0
 	.endif
@@ -189,8 +183,8 @@
 	.ifc	\vxr,%v31
 		\opd = 31
 	.endif
-	.if \opd == REG_NUM_INVALID
-	.error "Invalid vector register designation: \vxr"
+	.if \opd == 255
+		\opd = \vxr
 	.endif
 .endm
@@ -251,7 +245,7 @@
 /* VECTOR GENERATE BYTE MASK */
 .macro	VGBM	vr imm2
 	VX_NUM	v1, \vr
-	.word	(0xE700 | (VX_R(v1) << 4))
+	.word	(0xE700 | ((v1&15) << 4))
 	.word	\imm2
 	MRXBOPC	0, 0x44, v1
 .endm
@@ -267,7 +261,7 @@
 	VX_NUM	v1, \v
 	GR_NUM	b2, "%r0"
 	GR_NUM	r3, \gr
-	.word	0xE700 | (VX_R(v1) << 4) | r3
+	.word	0xE700 | ((v1&15) << 4) | r3
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	\m, 0x22, v1
 .endm
@@ -284,12 +278,21 @@
 	VLVG	\v, \gr, \index, 3
 .endm
 
+/* VECTOR LOAD REGISTER */
+.macro	VLR	v1, v2
+	VX_NUM	v1, \v1
+	VX_NUM	v2, \v2
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	0
+	MRXBOPC	0, 0x56, v1, v2
+.endm
+
 /* VECTOR LOAD */
 .macro	VL	v, disp, index="%r0", base
 	VX_NUM	v1, \v
 	GR_NUM	x2, \index
 	GR_NUM	b2, \base
-	.word	0xE700 | (VX_R(v1) << 4) | x2
+	.word	0xE700 | ((v1&15) << 4) | x2
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	0, 0x06, v1
 .endm
@@ -299,7 +302,7 @@
 	VX_NUM	v1, \vr1
 	GR_NUM	x2, \index
 	GR_NUM	b2, \base
-	.word	0xE700 | (VX_R(v1) << 4) | x2
+	.word	0xE700 | ((v1&15) << 4) | x2
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	\m3, \opc, v1
 .endm
@@ -319,7 +322,7 @@
 /* VECTOR LOAD ELEMENT IMMEDIATE */
 .macro	VLEIx	vr1, imm2, m3, opc
 	VX_NUM	v1, \vr1
-	.word	0xE700 | (VX_R(v1) << 4)
+	.word	0xE700 | ((v1&15) << 4)
 	.word	\imm2
 	MRXBOPC	\m3, \opc, v1
 .endm
@@ -341,7 +344,7 @@
 	GR_NUM	r1, \gr
 	GR_NUM	b2, \base
 	VX_NUM	v3, \vr
-	.word	0xE700 | (r1 << 4) | VX_R(v3)
+	.word	0xE700 | (r1 << 4) | (v3&15)
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	\m, 0x21, v3
 .endm
@@ -363,7 +366,7 @@
 	VX_NUM	v1, \vfrom
 	VX_NUM	v3, \vto
 	GR_NUM	b2, \base	/* Base register */
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+	.word	0xE700 | ((v1&15) << 4) | (v3&15)
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	0, 0x36, v1, v3
 .endm
@@ -373,7 +376,7 @@
 	VX_NUM	v1, \vfrom
 	VX_NUM	v3, \vto
 	GR_NUM	b2, \base	/* Base register */
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+	.word	0xE700 | ((v1&15) << 4) | (v3&15)
 	.word	(b2 << 12) | (\disp)
 	MRXBOPC	0, 0x3E, v1, v3
 .endm
@@ -384,16 +387,16 @@
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
 	VX_NUM	v4, \vr4
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12)
-	MRXBOPC	VX_R(v4), 0x8C, v1, v2, v3, v4
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
+	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
 .endm
 
 /* VECTOR UNPACK LOGICAL LOW */
 .macro	VUPLL	vr1, vr2, m3
 	VX_NUM	v1, \vr1
 	VX_NUM	v2, \vr2
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
 	.word	0x0000
 	MRXBOPC	\m3, 0xD4, v1, v2
 .endm
@@ -410,13 +413,23 @@
 
 /* Vector integer instructions */
 
+/* VECTOR AND */
+.macro	VN	vr1, vr2, vr3
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
+	MRXBOPC	0, 0x68, v1, v2, v3
+.endm
+
 /* VECTOR EXCLUSIVE OR */
 .macro	VX	vr1, vr2, vr3
 	VX_NUM	v1, \vr1
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12)
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
 	MRXBOPC	0, 0x6D, v1, v2, v3
 .endm
@@ -425,8 +438,8 @@
 	VX_NUM	v1, \vr1
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12)
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
 	MRXBOPC	\m4, 0xB4, v1, v2, v3
 .endm
 
 .macro	VGFMB	vr1, vr2, vr3
@@ -448,9 +461,9 @@
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
 	VX_NUM	v4, \vr4
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12) | (\m5 << 8)
-	MRXBOPC	VX_R(v4), 0xBC, v1, v2, v3, v4
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12) | (\m5 << 8)
+	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
 .endm
 
 .macro	VGFMAB	vr1, vr2, vr3, vr4
 	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
@@ -470,11 +483,78 @@
 	VX_NUM	v1, \vr1
 	VX_NUM	v2, \vr2
 	VX_NUM	v3, \vr3
-	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
-	.word	(VX_R(v3) << 12)
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
 	MRXBOPC	0, 0x7D, v1, v2, v3
 .endm
 
+/* VECTOR REPLICATE IMMEDIATE */
+.macro	VREPI	vr1, imm2, m3
+	VX_NUM	v1, \vr1
+	.word	0xE700 | ((v1&15) << 4)
+	.word	\imm2
+	MRXBOPC	\m3, 0x45, v1
+.endm
+.macro	VREPIB	vr1, imm2
+	VREPI	\vr1, \imm2, 0
+.endm
+.macro	VREPIH	vr1, imm2
+	VREPI	\vr1, \imm2, 1
+.endm
+.macro	VREPIF	vr1, imm2
+	VREPI	\vr1, \imm2, 2
+.endm
+.macro	VREPIG	vr1, imm2
+	VREPI	\vr1, \imm2, 3
+.endm
+
+/* VECTOR ADD */
+.macro	VA	vr1, vr2, vr3, m4
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
+	MRXBOPC	\m4, 0xF3, v1, v2, v3
+.endm
+.macro	VAB	vr1, vr2, vr3
+	VA	\vr1, \vr2, \vr3, 0
+.endm
+.macro	VAH	vr1, vr2, vr3
+	VA	\vr1, \vr2, \vr3, 1
+.endm
+.macro	VAF	vr1, vr2, vr3
+	VA	\vr1, \vr2, \vr3, 2
+.endm
+.macro	VAG	vr1, vr2, vr3
+	VA	\vr1, \vr2, \vr3, 3
+.endm
+.macro	VAQ	vr1, vr2, vr3
+	VA	\vr1, \vr2, \vr3, 4
+.endm
+
+/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
+.macro	VESRAV	vr1, vr2, vr3, m4
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
+	MRXBOPC	\m4, 0x7A, v1, v2, v3
+.endm
+.macro	VESRAVB	vr1, vr2, vr3
+	VESRAV	\vr1, \vr2, \vr3, 0
+.endm
+.macro	VESRAVH	vr1, vr2, vr3
+	VESRAV	\vr1, \vr2, \vr3, 1
+.endm
+.macro	VESRAVF	vr1, vr2, vr3
+	VESRAV	\vr1, \vr2, \vr3, 2
+.endm
+.macro	VESRAVG	vr1, vr2, vr3
+	VESRAV	\vr1, \vr2, \vr3, 3
+.endm
+
 #endif	/* __ASSEMBLY__ */
 #endif	/* __ASM_S390_VX_INSN_H */
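The macros added here (VLR, VREPI, VA, VESRAV, VN) are consumed by the RAID6 gen/xor routines introduced elsewhere in this merge. A short illustrative sequence (registers and constants are made up for the example; operand syntax follows the macro definitions above):

	VREPIB	%v24, 0x87		/* replicate a byte constant into %v24 */
	VL	%v1, 0, %r0, %r3	/* load 16 bytes from 0(%r3) */
	VLR	%v2, %v1		/* register-to-register copy */
	VN	%v3, %v1, %v24		/* bitwise AND */
	VX	%v4, %v1, %v2		/* bitwise XOR */
	VAB	%v5, %v4, %v3		/* add, byte elements */
	VESRAVB	%v6, %v5, %v3		/* arithmetic shift right, byte elements */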
@@ -6,6 +6,7 @@ header-y += bitsperlong.h
 header-y += byteorder.h
 header-y += chpid.h
 header-y += chsc.h
+header-y += clp.h
 header-y += cmb.h
 header-y += dasd.h
 header-y += debug.h
...
@@ -48,6 +48,9 @@ AFLAGS_head.o += -march=z900
 endif
 
 GCOV_PROFILE_sclp.o := n
 GCOV_PROFILE_als.o := n
+UBSAN_SANITIZE_als.o := n
+UBSAN_SANITIZE_early.o := n
+UBSAN_SANITIZE_sclp.o := n
 
 obj-y	:= traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
...
@@ -71,9 +71,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
 */
 struct save_area * __init save_area_boot_cpu(void)
 {
-	if (list_empty(&dump_save_areas))
-		return NULL;
-	return list_first_entry(&dump_save_areas, struct save_area, list);
+	return list_first_entry_or_null(&dump_save_areas, struct save_area, list);
 }
 
 /*
...
@@ -13,7 +13,7 @@
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/lockdep.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/pfn.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
...
@@ -26,12 +26,14 @@
 #include <linux/stop_machine.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
+#include <linux/extable.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/hardirq.h>
 #include <linux/ftrace.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
+#include <asm/uaccess.h>
 #include <asm/dis.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe);
...
...@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck); ...@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
* returns 0 if all registers could be validated * returns 0 if all registers could be validated
* returns 1 otherwise * returns 1 otherwise
*/ */
static int notrace s390_validate_registers(union mci mci) static int notrace s390_validate_registers(union mci mci, int umode)
{ {
int kill_task; int kill_task;
u64 zero; u64 zero;
...@@ -110,26 +110,41 @@ static int notrace s390_validate_registers(union mci mci) ...@@ -110,26 +110,41 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.gr) { if (!mci.gr) {
/* /*
* General purpose registers couldn't be restored and have * General purpose registers couldn't be restored and have
* unknown contents. Process needs to be terminated. * unknown contents. Stop system or terminate process.
*/ */
if (!umode)
s390_handle_damage();
kill_task = 1; kill_task = 1;
} }
if (!mci.fp) { if (!mci.fp) {
/* /*
* Floating point registers can't be restored and * Floating point registers can't be restored. If the
* therefore the process needs to be terminated. * kernel currently uses floating point registers the
* system is stopped. If the process has its floating
* point registers loaded it is terminated.
* Otherwise just revalidate the registers.
*/ */
kill_task = 1; if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7)
s390_handle_damage();
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
} }
fpt_save_area = &S390_lowcore.floating_pt_save_area; fpt_save_area = &S390_lowcore.floating_pt_save_area;
fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
if (!mci.fc) { if (!mci.fc) {
/* /*
* Floating point control register can't be restored. * Floating point control register can't be restored.
* Task will be terminated. * If the kernel currently uses the floating point
* registers and needs the FPC register the system is
* stopped. If the process has its floating point
* registers loaded it is terminated. Otherwise the
* FPC is just revalidated.
*/ */
if (S390_lowcore.fpu_flags & KERNEL_FPC)
s390_handle_damage();
asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
kill_task = 1; if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
} else } else
asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
...@@ -159,10 +174,16 @@ static int notrace s390_validate_registers(union mci mci) ...@@ -159,10 +174,16 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.vr) { if (!mci.vr) {
/* /*
* Vector registers can't be restored and therefore * Vector registers can't be restored. If the kernel
* the process needs to be terminated. * currently uses vector registers the system is
* stopped. If the process has its vector registers
* loaded it is terminated. Otherwise just revalidate
* the registers.
*/ */
kill_task = 1; if (S390_lowcore.fpu_flags & KERNEL_VXR)
s390_handle_damage();
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
} }
cr0.val = S390_lowcore.cregs_save_area[0]; cr0.val = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1; cr0.afp = cr0.vx = 1;
...@@ -250,13 +271,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs) ...@@ -250,13 +271,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
struct mcck_struct *mcck; struct mcck_struct *mcck;
unsigned long long tmp; unsigned long long tmp;
union mci mci; union mci mci;
int umode;
nmi_enter(); nmi_enter();
inc_irq_stat(NMI_NMI); inc_irq_stat(NMI_NMI);
mci.val = S390_lowcore.mcck_interruption_code; mci.val = S390_lowcore.mcck_interruption_code;
mcck = this_cpu_ptr(&cpu_mcck); mcck = this_cpu_ptr(&cpu_mcck);
umode = user_mode(regs);
if (mci.sd) { if (mci.sd) {
/* System damage -> stopping machine */ /* System damage -> stopping machine */
...@@ -297,22 +316,14 @@ void notrace s390_do_machine_check(struct pt_regs *regs) ...@@ -297,22 +316,14 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
s390_handle_damage(); s390_handle_damage();
} }
} }
if (s390_validate_registers(mci)) { if (s390_validate_registers(mci, user_mode(regs))) {
if (umode) { /*
/* * Couldn't restore all register contents for the
* Couldn't restore all register contents while in * user space process -> mark task for termination.
* user mode -> mark task for termination. */
*/ mcck->kill_task = 1;
mcck->kill_task = 1; mcck->mcck_code = mci.val;
mcck->mcck_code = mci.val; set_cpu_flag(CIF_MCCK_PENDING);
set_cpu_flag(CIF_MCCK_PENDING);
} else {
/*
* Couldn't restore all register contents while in
* kernel mode -> stopping machine.
*/
s390_handle_damage();
}
} }
if (mci.cd) { if (mci.cd) {
/* Timing facility damage */ /* Timing facility damage */
......
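The reworked machine-check handling condenses to one rule per register class. A sketch of the recurring pattern (not the literal kernel code; the flag names are the ones used in the hunks above):

        /* For a register class (fp, fpc, vector) that failed revalidation: */
        static int validate_class(unsigned long kernel_mask)
        {
                if (S390_lowcore.fpu_flags & kernel_mask)  /* kernel owns live state */
                        s390_handle_damage();              /* -> stop the machine */
                if (!test_cpu_flag(CIF_FPU))               /* user state was live in the cpu */
                        return 1;                          /* -> terminate the task */
                return 0;  /* lazy-restore copy is intact, just revalidate */
        }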
...@@ -454,7 +454,7 @@ void s390_adjust_jiffies(void) ...@@ -454,7 +454,7 @@ void s390_adjust_jiffies(void)
: "Q" (info->capability), "d" (10000000), "d" (0) : "Q" (info->capability), "d" (10000000), "d" (0)
: "cc" : "cc"
); );
kernel_fpu_end(&fpu); kernel_fpu_end(&fpu, KERNEL_FPR);
} else } else
/* /*
* Really old machine without stsi block for basic * Really old machine without stsi block for basic
......
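kernel_fpu_begin()/kernel_fpu_end() now take a mask describing which register sets the code actually clobbers, so only those are saved and restored. A usage sketch with the flags that appear in this merge (KERNEL_FPR for the classic floating point registers, KERNEL_VXR for the vector registers):

        struct kernel_fpu state;

        kernel_fpu_begin(&state, KERNEL_FPR);  /* save the fp registers and fp control */
        /* ... clobber floating point registers ... */
        kernel_fpu_end(&state, KERNEL_FPR);    /* restore exactly what was saved */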
...@@ -50,10 +50,6 @@ ...@@ -50,10 +50,6 @@
#include <asm/cio.h> #include <asm/cio.h>
#include "entry.h" #include "entry.h"
/* change this if you have some constant time drift */
#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
u64 sched_clock_base_cc = -1; /* Force to data section. */ u64 sched_clock_base_cc = -1; /* Force to data section. */
EXPORT_SYMBOL_GPL(sched_clock_base_cc); EXPORT_SYMBOL_GPL(sched_clock_base_cc);
...@@ -282,13 +278,8 @@ extern struct timezone sys_tz; ...@@ -282,13 +278,8 @@ extern struct timezone sys_tz;
void update_vsyscall_tz(void) void update_vsyscall_tz(void)
{ {
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_wmb();
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime; vdso_data->tz_dsttime = sys_tz.tz_dsttime;
smp_wmb();
++vdso_data->tb_update_count;
} }
/* /*
...@@ -318,51 +309,12 @@ void __init time_init(void) ...@@ -318,51 +309,12 @@ void __init time_init(void)
vtime_init(); vtime_init();
} }
/*
* The time is "clock". old is what we think the time is.
* Adjust the value by a multiple of jiffies and add the delta to ntp.
* "delay" is an approximation how long the synchronization took. If
* the time correction is positive, then "delay" is subtracted from
* the time difference and only the remaining part is passed to ntp.
*/
static unsigned long long adjust_time(unsigned long long old,
unsigned long long clock,
unsigned long long delay)
{
unsigned long long delta, ticks;
struct timex adjust;
if (clock > old) {
/* It is later than we thought. */
delta = ticks = clock - old;
delta = ticks = (delta < delay) ? 0 : delta - delay;
delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
adjust.offset = ticks * (1000000 / HZ);
} else {
/* It is earlier than we thought. */
delta = ticks = old - clock;
delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
delta = -delta;
adjust.offset = -ticks * (1000000 / HZ);
}
sched_clock_base_cc += delta;
if (adjust.offset != 0) {
pr_notice("The ETR interface has adjusted the clock "
"by %li microseconds\n", adjust.offset);
adjust.modes = ADJ_OFFSET_SINGLESHOT;
do_adjtimex(&adjust);
}
return delta;
}
static DEFINE_PER_CPU(atomic_t, clock_sync_word); static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(clock_sync_mutex); static DEFINE_MUTEX(clock_sync_mutex);
static unsigned long clock_sync_flags; static unsigned long clock_sync_flags;
#define CLOCK_SYNC_HAS_ETR 0 #define CLOCK_SYNC_HAS_STP 0
#define CLOCK_SYNC_HAS_STP 1 #define CLOCK_SYNC_STP 1
#define CLOCK_SYNC_ETR 2
#define CLOCK_SYNC_STP 3
/* /*
* The get_clock function for the physical clock. It will get the current * The get_clock function for the physical clock. It will get the current
...@@ -384,34 +336,32 @@ int get_phys_clock(unsigned long long *clock) ...@@ -384,34 +336,32 @@ int get_phys_clock(unsigned long long *clock)
if (sw0 == sw1 && (sw0 & 0x80000000U)) if (sw0 == sw1 && (sw0 & 0x80000000U))
/* Success: time is in sync. */ /* Success: time is in sync. */
return 0; return 0;
if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) && if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) && if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return -EACCES; return -EACCES;
return -EAGAIN; return -EAGAIN;
} }
EXPORT_SYMBOL(get_phys_clock); EXPORT_SYMBOL(get_phys_clock);
/* /*
* Make get_sync_clock return -EAGAIN. * Make get_phys_clock() return -EAGAIN.
*/ */
static void disable_sync_clock(void *dummy) static void disable_sync_clock(void *dummy)
{ {
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word); atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
/* /*
* Clear the in-sync bit 2^31. All get_sync_clock calls will * Clear the in-sync bit 2^31. All get_phys_clock calls will
* fail until the sync bit is turned back on. In addition * fail until the sync bit is turned back on. In addition
* increase the "sequence" counter to avoid the race of an * increase the "sequence" counter to avoid the race of an
* etr event and the complete recovery against get_sync_clock. * stp event and the complete recovery against get_phys_clock.
*/ */
atomic_andnot(0x80000000, sw_ptr); atomic_andnot(0x80000000, sw_ptr);
atomic_inc(sw_ptr); atomic_inc(sw_ptr);
} }
/* /*
* Make get_sync_clock return 0 again. * Make get_phys_clock() return 0 again.
* Needs to be called from a context disabled for preemption. * Needs to be called from a context disabled for preemption.
*/ */
static void enable_sync_clock(void) static void enable_sync_clock(void)
...@@ -434,7 +384,7 @@ static inline int check_sync_clock(void) ...@@ -434,7 +384,7 @@ static inline int check_sync_clock(void)
return rc; return rc;
} }
/* Single threaded workqueue used for etr and stp sync events */ /* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq; static struct workqueue_struct *time_sync_wq;
static void __init time_init_wq(void) static void __init time_init_wq(void)
...@@ -448,20 +398,12 @@ struct clock_sync_data { ...@@ -448,20 +398,12 @@ struct clock_sync_data {
atomic_t cpus; atomic_t cpus;
int in_sync; int in_sync;
unsigned long long fixup_cc; unsigned long long fixup_cc;
int etr_port;
struct etr_aib *etr_aib;
}; };
static void clock_sync_cpu(struct clock_sync_data *sync) static void clock_sync_cpu(struct clock_sync_data *sync)
{ {
atomic_dec(&sync->cpus); atomic_dec(&sync->cpus);
enable_sync_clock(); enable_sync_clock();
/*
* This looks like a busy wait loop but it isn't. etr_sync_cpus
* is called on all other cpus while the TOD clocks is stopped.
* __udelay will stop the cpu on an enabled wait psw until the
* TOD is running again.
*/
while (sync->in_sync == 0) { while (sync->in_sync == 0) {
__udelay(1); __udelay(1);
/* /*
...@@ -582,7 +524,7 @@ void stp_queue_work(void) ...@@ -582,7 +524,7 @@ void stp_queue_work(void)
static int stp_sync_clock(void *data) static int stp_sync_clock(void *data)
{ {
static int first; static int first;
unsigned long long old_clock, delta, new_clock, clock_delta; unsigned long long clock_delta;
struct clock_sync_data *stp_sync; struct clock_sync_data *stp_sync;
struct ptff_qto qto; struct ptff_qto qto;
int rc; int rc;
...@@ -605,18 +547,18 @@ static int stp_sync_clock(void *data) ...@@ -605,18 +547,18 @@ static int stp_sync_clock(void *data)
if (stp_info.todoff[0] || stp_info.todoff[1] || if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] || stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) { stp_info.tmd != 2) {
old_clock = get_tod_clock();
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta); rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
if (rc == 0) { if (rc == 0) {
new_clock = old_clock + clock_delta; /* fixup the monotonic sched clock */
delta = adjust_time(old_clock, new_clock, 0); sched_clock_base_cc += clock_delta;
if (ptff_query(PTFF_QTO) && if (ptff_query(PTFF_QTO) &&
ptff(&qto, sizeof(qto), PTFF_QTO) == 0) ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
/* Update LPAR offset */ /* Update LPAR offset */
lpar_offset = qto.tod_epoch_difference; lpar_offset = qto.tod_epoch_difference;
atomic_notifier_call_chain(&s390_epoch_delta_notifier, atomic_notifier_call_chain(&s390_epoch_delta_notifier,
0, &clock_delta); 0, &clock_delta);
fixup_clock_comparator(delta); stp_sync->fixup_cc = clock_delta;
fixup_clock_comparator(clock_delta);
rc = chsc_sstpi(stp_page, &stp_info, rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi)); sizeof(struct stp_sstpi));
if (rc == 0 && stp_info.tmd != 2) if (rc == 0 && stp_info.tmd != 2)
......
...@@ -14,11 +14,12 @@ ...@@ -14,11 +14,12 @@
*/ */
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/kdebug.h> #include <linux/kdebug.h>
#include <linux/module.h> #include <linux/extable.h>
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/fpu/api.h> #include <asm/fpu/api.h>
#include "entry.h" #include "entry.h"
......
...@@ -24,8 +24,9 @@ obj-y += vdso32_wrapper.o ...@@ -24,8 +24,9 @@ obj-y += vdso32_wrapper.o
extra-y += vdso32.lds extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
# Disable gcov profiling for VDSO code # Disable gcov profiling and ubsan for VDSO code
GCOV_PROFILE := n GCOV_PROFILE := n
UBSAN_SANITIZE := n
# Force dependency (incbin is bad) # Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
......
...@@ -24,8 +24,9 @@ obj-y += vdso64_wrapper.o ...@@ -24,8 +24,9 @@ obj-y += vdso64_wrapper.o
extra-y += vdso64.lds extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
# Disable gcov profiling for VDSO code # Disable gcov profiling and ubsan for VDSO code
GCOV_PROFILE := n GCOV_PROFILE := n
UBSAN_SANITIZE := n
# Force dependency (incbin is bad) # Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
......
...@@ -132,10 +132,7 @@ module_param(nested, int, S_IRUGO); ...@@ -132,10 +132,7 @@ module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support"); MODULE_PARM_DESC(nested, "Nested virtualization support");
/* upper facilities limit for kvm */ /* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };
0xffe6000000000000UL,
0x005e000000000000UL,
};
unsigned long kvm_s390_fac_list_mask_size(void) unsigned long kvm_s390_fac_list_mask_size(void)
{ {
...@@ -248,22 +245,33 @@ static void kvm_s390_cpu_feat_init(void) ...@@ -248,22 +245,33 @@ static void kvm_s390_cpu_feat_init(void)
PTFF_QAF); PTFF_QAF);
if (test_facility(17)) { /* MSA */ if (test_facility(17)) { /* MSA */
__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac); __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc); kvm_s390_available_subfunc.kmac);
__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km); __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd); kvm_s390_available_subfunc.kmc);
__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd); __cpacf_query(CPACF_KM, (cpacf_mask_t *)
kvm_s390_available_subfunc.km);
__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
kvm_s390_available_subfunc.kimd);
__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
kvm_s390_available_subfunc.klmd);
} }
if (test_facility(76)) /* MSA3 */ if (test_facility(76)) /* MSA3 */
__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo); __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
kvm_s390_available_subfunc.pckmo);
if (test_facility(77)) { /* MSA4 */ if (test_facility(77)) { /* MSA4 */
__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr); __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf); kvm_s390_available_subfunc.kmctr);
__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo); __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc); kvm_s390_available_subfunc.kmf);
__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
kvm_s390_available_subfunc.kmo);
__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
kvm_s390_available_subfunc.pcc);
} }
if (test_facility(57)) /* MSA5 */ if (test_facility(57)) /* MSA5 */
__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno); __cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
kvm_s390_available_subfunc.ppno);
if (MACHINE_HAS_ESOP) if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP); allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include <linux/kdebug.h> #include <linux/kdebug.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/console.h> #include <linux/console.h>
#include <linux/module.h> #include <linux/extable.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
......
...@@ -94,6 +94,7 @@ static struct gmap *gmap_alloc(unsigned long limit) ...@@ -94,6 +94,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit) struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{ {
struct gmap *gmap; struct gmap *gmap;
unsigned long gmap_asce;
gmap = gmap_alloc(limit); gmap = gmap_alloc(limit);
if (!gmap) if (!gmap)
...@@ -101,6 +102,11 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit) ...@@ -101,6 +102,11 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
gmap->mm = mm; gmap->mm = mm;
spin_lock(&mm->context.gmap_lock); spin_lock(&mm->context.gmap_lock);
list_add_rcu(&gmap->list, &mm->context.gmap_list); list_add_rcu(&gmap->list, &mm->context.gmap_list);
if (list_is_singular(&mm->context.gmap_list))
gmap_asce = gmap->asce;
else
gmap_asce = -1UL;
WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
spin_unlock(&mm->context.gmap_lock); spin_unlock(&mm->context.gmap_lock);
return gmap; return gmap;
} }
...@@ -230,6 +236,7 @@ EXPORT_SYMBOL_GPL(gmap_put); ...@@ -230,6 +236,7 @@ EXPORT_SYMBOL_GPL(gmap_put);
void gmap_remove(struct gmap *gmap) void gmap_remove(struct gmap *gmap)
{ {
struct gmap *sg, *next; struct gmap *sg, *next;
unsigned long gmap_asce;
/* Remove all shadow gmaps linked to this gmap */ /* Remove all shadow gmaps linked to this gmap */
if (!list_empty(&gmap->children)) { if (!list_empty(&gmap->children)) {
...@@ -243,6 +250,14 @@ void gmap_remove(struct gmap *gmap) ...@@ -243,6 +250,14 @@ void gmap_remove(struct gmap *gmap)
/* Remove gmap from the pre-mm list */ /* Remove gmap from the pre-mm list */
spin_lock(&gmap->mm->context.gmap_lock); spin_lock(&gmap->mm->context.gmap_lock);
list_del_rcu(&gmap->list); list_del_rcu(&gmap->list);
if (list_empty(&gmap->mm->context.gmap_list))
gmap_asce = 0;
else if (list_is_singular(&gmap->mm->context.gmap_list))
gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
struct gmap, list)->asce;
else
gmap_asce = -1UL;
WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
spin_unlock(&gmap->mm->context.gmap_lock); spin_unlock(&gmap->mm->context.gmap_lock);
synchronize_rcu(); synchronize_rcu();
/* Put reference */ /* Put reference */
......
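Both update sites keep the cached value consistent under gmap_lock. The apparent invariant, read off the two hunks above (for consumption by the TLB-flush paths):

        /*
         * mm->context.gmap_asce == 0      no gmap attached to this mm
         * mm->context.gmap_asce == asce   exactly one gmap: flush by that ASCE
         * mm->context.gmap_asce == -1UL   several gmaps: fall back to a full flush
         */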
...@@ -309,11 +309,11 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr) ...@@ -309,11 +309,11 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
int i; int i;
if (test_facility(13)) { if (test_facility(13)) {
__ptep_ipte_range(address, nr - 1, pte); __ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
return; return;
} }
for (i = 0; i < nr; i++) { for (i = 0; i < nr; i++) {
__ptep_ipte(address, pte); __ptep_ipte(address, pte, IPTE_GLOBAL);
address += PAGE_SIZE; address += PAGE_SIZE;
pte++; pte++;
} }
......
...@@ -35,9 +35,9 @@ static inline pte_t ptep_flush_direct(struct mm_struct *mm, ...@@ -35,9 +35,9 @@ static inline pte_t ptep_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count); atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC && if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__ptep_ipte_local(addr, ptep); __ptep_ipte(addr, ptep, IPTE_LOCAL);
else else
__ptep_ipte(addr, ptep); __ptep_ipte(addr, ptep, IPTE_GLOBAL);
atomic_dec(&mm->context.flush_count); atomic_dec(&mm->context.flush_count);
return old; return old;
} }
...@@ -56,7 +56,7 @@ static inline pte_t ptep_flush_lazy(struct mm_struct *mm, ...@@ -56,7 +56,7 @@ static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
pte_val(*ptep) |= _PAGE_INVALID; pte_val(*ptep) |= _PAGE_INVALID;
mm->context.flush_mm = 1; mm->context.flush_mm = 1;
} else } else
__ptep_ipte(addr, ptep); __ptep_ipte(addr, ptep, IPTE_GLOBAL);
atomic_dec(&mm->context.flush_count); atomic_dec(&mm->context.flush_count);
return old; return old;
} }
...@@ -301,9 +301,9 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm, ...@@ -301,9 +301,9 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count); atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC && if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__pmdp_idte_local(addr, pmdp); __pmdp_idte(addr, pmdp, IDTE_LOCAL);
else else
__pmdp_idte(addr, pmdp); __pmdp_idte(addr, pmdp, IDTE_GLOBAL);
atomic_dec(&mm->context.flush_count); atomic_dec(&mm->context.flush_count);
return old; return old;
} }
...@@ -322,7 +322,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm, ...@@ -322,7 +322,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID; pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1; mm->context.flush_mm = 1;
} else if (MACHINE_HAS_IDTE) } else if (MACHINE_HAS_IDTE)
__pmdp_idte(addr, pmdp); __pmdp_idte(addr, pmdp, IDTE_GLOBAL);
else else
__pmdp_csp(pmdp); __pmdp_csp(pmdp);
atomic_dec(&mm->context.flush_count); atomic_dec(&mm->context.flush_count);
...@@ -374,9 +374,9 @@ static inline pud_t pudp_flush_direct(struct mm_struct *mm, ...@@ -374,9 +374,9 @@ static inline pud_t pudp_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count); atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC && if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__pudp_idte_local(addr, pudp); __pudp_idte(addr, pudp, IDTE_LOCAL);
else else
__pudp_idte(addr, pudp); __pudp_idte(addr, pudp, IDTE_GLOBAL);
atomic_dec(&mm->context.flush_count); atomic_dec(&mm->context.flush_count);
return old; return old;
} }
...@@ -620,7 +620,7 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr) ...@@ -620,7 +620,7 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
pte = *ptep; pte = *ptep;
if (dirty && (pte_val(pte) & _PAGE_PRESENT)) { if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
pgste = pgste_pte_notify(mm, addr, ptep, pgste); pgste = pgste_pte_notify(mm, addr, ptep, pgste);
__ptep_ipte(addr, ptep); __ptep_ipte(addr, ptep, IPTE_GLOBAL);
if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE)) if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
pte_val(pte) |= _PAGE_PROTECT; pte_val(pte) |= _PAGE_PROTECT;
else else
......
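The __*_local() variants are gone; the flush scope is now an explicit IPTE_LOCAL/IPTE_GLOBAL (or IDTE_*) argument. The guard repeated in every caller above distills to one predicate, written out here as a hypothetical helper for illustration:

        /* A non-broadcast flush is safe only if no other cpu ever ran this mm. */
        static inline int flush_local_only(struct mm_struct *mm)
        {
                return MACHINE_HAS_TLB_LC &&
                       cpumask_equal(mm_cpumask(mm),
                                     cpumask_of(smp_processor_id()));
        }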
...@@ -854,6 +854,15 @@ void zpci_stop_device(struct zpci_dev *zdev) ...@@ -854,6 +854,15 @@ void zpci_stop_device(struct zpci_dev *zdev)
} }
EXPORT_SYMBOL_GPL(zpci_stop_device); EXPORT_SYMBOL_GPL(zpci_stop_device);
int zpci_report_error(struct pci_dev *pdev,
struct zpci_report_error_header *report)
{
struct zpci_dev *zdev = to_zpci(pdev);
return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);
static inline int barsize(u8 size) static inline int barsize(u8 size)
{ {
return (size) ? (1 << size) >> 10 : 0; return (size) ? (1 << size) >> 10 : 0;
......
...@@ -101,8 +101,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain, ...@@ -101,8 +101,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
zpci_dma_exit_device(zdev); zpci_dma_exit_device(zdev);
zdev->dma_table = s390_domain->dma_table; zdev->dma_table = s390_domain->dma_table;
rc = zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
zdev->start_dma + zdev->iommu_size - 1,
(u64) zdev->dma_table); (u64) zdev->dma_table);
if (rc) if (rc)
goto out_restore; goto out_restore;
......
...@@ -212,16 +212,6 @@ static int dasd_state_known_to_new(struct dasd_device *device) ...@@ -212,16 +212,6 @@ static int dasd_state_known_to_new(struct dasd_device *device)
{ {
/* Disable extended error reporting for this device. */ /* Disable extended error reporting for this device. */
dasd_eer_disable(device); dasd_eer_disable(device);
/* Forget the discipline information. */
if (device->discipline) {
if (device->discipline->uncheck_device)
device->discipline->uncheck_device(device);
module_put(device->discipline->owner);
}
device->discipline = NULL;
if (device->base_discipline)
module_put(device->base_discipline->owner);
device->base_discipline = NULL;
device->state = DASD_STATE_NEW; device->state = DASD_STATE_NEW;
if (device->block) if (device->block)
...@@ -336,6 +326,7 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) ...@@ -336,6 +326,7 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
{ {
int rc; int rc;
struct dasd_block *block; struct dasd_block *block;
struct gendisk *disk;
rc = 0; rc = 0;
block = device->block; block = device->block;
...@@ -346,6 +337,9 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) ...@@ -346,6 +337,9 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
if (rc) { if (rc) {
if (rc != -EAGAIN) { if (rc != -EAGAIN) {
device->state = DASD_STATE_UNFMT; device->state = DASD_STATE_UNFMT;
disk = device->block->gdp;
kobject_uevent(&disk_to_dev(disk)->kobj,
KOBJ_CHANGE);
goto out; goto out;
} }
return rc; return rc;
...@@ -2273,6 +2267,15 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) ...@@ -2273,6 +2267,15 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
cqr->intrc = -ENOLINK; cqr->intrc = -ENOLINK;
continue; continue;
} }
/*
* Don't try to start requests if device is in
* offline processing, it might wait forever
*/
if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -ENODEV;
continue;
}
/* /*
* Don't try to start requests if device is stopped * Don't try to start requests if device is stopped
* except path verification requests * except path verification requests
...@@ -3364,6 +3367,22 @@ int dasd_generic_probe(struct ccw_device *cdev, ...@@ -3364,6 +3367,22 @@ int dasd_generic_probe(struct ccw_device *cdev,
} }
EXPORT_SYMBOL_GPL(dasd_generic_probe); EXPORT_SYMBOL_GPL(dasd_generic_probe);
void dasd_generic_free_discipline(struct dasd_device *device)
{
/* Forget the discipline information. */
if (device->discipline) {
if (device->discipline->uncheck_device)
device->discipline->uncheck_device(device);
module_put(device->discipline->owner);
device->discipline = NULL;
}
if (device->base_discipline) {
module_put(device->base_discipline->owner);
device->base_discipline = NULL;
}
}
EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
/* /*
* This will one day be called from a global not_oper handler. * This will one day be called from a global not_oper handler.
* It is also used by driver_unregister during module unload. * It is also used by driver_unregister during module unload.
......
...@@ -617,6 +617,7 @@ dasd_delete_device(struct dasd_device *device) ...@@ -617,6 +617,7 @@ dasd_delete_device(struct dasd_device *device)
/* Wait for reference counter to drop to zero. */ /* Wait for reference counter to drop to zero. */
wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0); wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
dasd_generic_free_discipline(device);
/* Disconnect dasd_device structure from ccw_device structure. */ /* Disconnect dasd_device structure from ccw_device structure. */
cdev = device->cdev; cdev = device->cdev;
device->cdev = NULL; device->cdev = NULL;
......
...@@ -5201,7 +5201,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device, ...@@ -5201,7 +5201,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
cqr->buildclk = get_tod_clock(); cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED; cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr); rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) { if (rc == 0) {
*data = *host_access; *data = *host_access;
} else { } else {
......
...@@ -169,12 +169,12 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) ...@@ -169,12 +169,12 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
device = cqr->startdev; device = cqr->startdev;
if (cqr->intrc == -ETIMEDOUT) { if (cqr->intrc == -ETIMEDOUT) {
dev_err(&device->cdev->dev, dev_err(&device->cdev->dev,
"A timeout error occurred for cqr %p", cqr); "A timeout error occurred for cqr %p\n", cqr);
return; return;
} }
if (cqr->intrc == -ENOLINK) { if (cqr->intrc == -ENOLINK) {
dev_err(&device->cdev->dev, dev_err(&device->cdev->dev,
"A transport error occurred for cqr %p", cqr); "A transport error occurred for cqr %p\n", cqr);
return; return;
} }
/* dump sense data */ /* dump sense data */
......
...@@ -725,6 +725,7 @@ void dasd_block_clear_timer(struct dasd_block *); ...@@ -725,6 +725,7 @@ void dasd_block_clear_timer(struct dasd_block *);
int dasd_cancel_req(struct dasd_ccw_req *); int dasd_cancel_req(struct dasd_ccw_req *);
int dasd_flush_device_queue(struct dasd_device *); int dasd_flush_device_queue(struct dasd_device *);
int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *); int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
void dasd_generic_free_discipline(struct dasd_device *);
void dasd_generic_remove (struct ccw_device *cdev); void dasd_generic_remove (struct ccw_device *cdev);
int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev); int dasd_generic_set_offline (struct ccw_device *cdev);
......
...@@ -124,7 +124,12 @@ con3270_create_status(struct con3270 *cp) ...@@ -124,7 +124,12 @@ con3270_create_status(struct con3270 *cp)
static void static void
con3270_update_string(struct con3270 *cp, struct string *s, int nr) con3270_update_string(struct con3270 *cp, struct string *s, int nr)
{ {
if (s->len >= cp->view.cols - 5) if (s->len < 4) {
/* This indicates a bug, but printing a warning would
* cause a deadlock. */
return;
}
if (s->string[s->len - 4] != TO_RA)
return; return;
raw3270_buffer_address(cp->view.dev, s->string + s->len - 3, raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
cp->view.cols * (nr + 1)); cp->view.cols * (nr + 1));
...@@ -460,11 +465,11 @@ con3270_cline_end(struct con3270 *cp) ...@@ -460,11 +465,11 @@ con3270_cline_end(struct con3270 *cp)
cp->cline->len + 4 : cp->view.cols; cp->cline->len + 4 : cp->view.cols;
s = con3270_alloc_string(cp, size); s = con3270_alloc_string(cp, size);
memcpy(s->string, cp->cline->string, cp->cline->len); memcpy(s->string, cp->cline->string, cp->cline->len);
if (s->len < cp->view.cols - 5) { if (cp->cline->len < cp->view.cols - 5) {
s->string[s->len - 4] = TO_RA; s->string[s->len - 4] = TO_RA;
s->string[s->len - 1] = 0; s->string[s->len - 1] = 0;
} else { } else {
while (--size > cp->cline->len) while (--size >= cp->cline->len)
s->string[size] = cp->view.ascebc[' ']; s->string[size] = cp->view.ascebc[' '];
} }
/* Replace cline with allocated line s and reset cline. */ /* Replace cline with allocated line s and reset cline. */
......
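The padding change above is an off-by-one fix; worked through with illustrative numbers (cp->view.cols = 80, cp->cline->len = 78, so size starts at 80):

        old: while (--size > 78)    writes s->string[79] only; s->string[78] is left uninitialised
        new: while (--size >= 78)   writes s->string[79] and s->string[78]

so the first byte after the copied line content is now padded as well.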
...@@ -312,15 +312,10 @@ static int tape_3592_ioctl_kekl_set(struct tape_device *device, ...@@ -312,15 +312,10 @@ static int tape_3592_ioctl_kekl_set(struct tape_device *device,
return -ENOSYS; return -ENOSYS;
if (!crypt_enabled(device)) if (!crypt_enabled(device))
return -EUNATCH; return -EUNATCH;
ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); ext_kekls = memdup_user((char __user *)arg, sizeof(*ext_kekls));
if (!ext_kekls) if (IS_ERR(ext_kekls))
return -ENOMEM; return PTR_ERR(ext_kekls);
if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) {
rc = -EFAULT;
goto out;
}
rc = tape_3592_kekl_set(device, ext_kekls); rc = tape_3592_kekl_set(device, ext_kekls);
out:
kfree(ext_kekls); kfree(ext_kekls);
return rc; return rc;
} }
......
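memdup_user() collapses the allocate-then-copy_from_user dance into one call that returns an ERR_PTR on failure. The general idiom, as a sketch:

        void *buf = memdup_user(uptr, size);  /* kmalloc + copy_from_user */
        if (IS_ERR(buf))
                return PTR_ERR(buf);          /* -ENOMEM or -EFAULT */
        /* ... use buf ... */
        kfree(buf);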
...@@ -306,10 +306,11 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, ...@@ -306,10 +306,11 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
{ {
struct urdev *urd; struct urdev *urd;
TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", if (!IS_ERR(irb)) {
intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
irb->scsw.cmd.count); intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
irb->scsw.cmd.count);
}
if (!intparm) { if (!intparm) {
TRACE("ur_int_handler: unsolicited interrupt\n"); TRACE("ur_int_handler: unsolicited interrupt\n");
return; return;
......
...@@ -95,12 +95,13 @@ struct chsc_ssd_area { ...@@ -95,12 +95,13 @@ struct chsc_ssd_area {
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{ {
struct chsc_ssd_area *ssd_area; struct chsc_ssd_area *ssd_area;
unsigned long flags;
int ccode; int ccode;
int ret; int ret;
int i; int i;
int mask; int mask;
spin_lock_irq(&chsc_page_lock); spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE); memset(chsc_page, 0, PAGE_SIZE);
ssd_area = chsc_page; ssd_area = chsc_page;
ssd_area->request.length = 0x0010; ssd_area->request.length = 0x0010;
...@@ -144,7 +145,7 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) ...@@ -144,7 +145,7 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
ssd->fla[i] = ssd_area->fla[i]; ssd->fla[i] = ssd_area->fla[i];
} }
out: out:
spin_unlock_irq(&chsc_page_lock); spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret; return ret;
} }
...@@ -832,9 +833,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable) ...@@ -832,9 +833,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
u32 fmt : 4; u32 fmt : 4;
u32 : 16; u32 : 16;
} __attribute__ ((packed)) *secm_area; } __attribute__ ((packed)) *secm_area;
unsigned long flags;
int ret, ccode; int ret, ccode;
spin_lock_irq(&chsc_page_lock); spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE); memset(chsc_page, 0, PAGE_SIZE);
secm_area = chsc_page; secm_area = chsc_page;
secm_area->request.length = 0x0050; secm_area->request.length = 0x0050;
...@@ -864,7 +866,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable) ...@@ -864,7 +866,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code); secm_area->response.code);
out: out:
spin_unlock_irq(&chsc_page_lock); spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret; return ret;
} }
...@@ -992,6 +994,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, ...@@ -992,6 +994,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
int chsc_get_channel_measurement_chars(struct channel_path *chp) int chsc_get_channel_measurement_chars(struct channel_path *chp)
{ {
unsigned long flags;
int ccode, ret; int ccode, ret;
struct { struct {
...@@ -1021,7 +1024,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) ...@@ -1021,7 +1024,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm) if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
return -EINVAL; return -EINVAL;
spin_lock_irq(&chsc_page_lock); spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE); memset(chsc_page, 0, PAGE_SIZE);
scmc_area = chsc_page; scmc_area = chsc_page;
scmc_area->request.length = 0x0010; scmc_area->request.length = 0x0010;
...@@ -1053,7 +1056,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) ...@@ -1053,7 +1056,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
chsc_initialize_cmg_chars(chp, scmc_area->cmcv, chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
(struct cmg_chars *) &scmc_area->data); (struct cmg_chars *) &scmc_area->data);
out: out:
spin_unlock_irq(&chsc_page_lock); spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret; return ret;
} }
...@@ -1134,6 +1137,7 @@ struct css_chsc_char css_chsc_characteristics; ...@@ -1134,6 +1137,7 @@ struct css_chsc_char css_chsc_characteristics;
int __init int __init
chsc_determine_css_characteristics(void) chsc_determine_css_characteristics(void)
{ {
unsigned long flags;
int result; int result;
struct { struct {
struct chsc_header request; struct chsc_header request;
...@@ -1146,7 +1150,7 @@ chsc_determine_css_characteristics(void) ...@@ -1146,7 +1150,7 @@ chsc_determine_css_characteristics(void)
u32 chsc_char[508]; u32 chsc_char[508];
} __attribute__ ((packed)) *scsc_area; } __attribute__ ((packed)) *scsc_area;
spin_lock_irq(&chsc_page_lock); spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE); memset(chsc_page, 0, PAGE_SIZE);
scsc_area = chsc_page; scsc_area = chsc_page;
scsc_area->request.length = 0x0010; scsc_area->request.length = 0x0010;
...@@ -1168,7 +1172,7 @@ chsc_determine_css_characteristics(void) ...@@ -1168,7 +1172,7 @@ chsc_determine_css_characteristics(void)
CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
scsc_area->response.code); scsc_area->response.code);
exit: exit:
spin_unlock_irq(&chsc_page_lock); spin_unlock_irqrestore(&chsc_page_lock, flags);
return result; return result;
} }
......
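spin_unlock_irq() unconditionally re-enables interrupts, which is wrong when the caller was already running with interrupts disabled; the irqsave/irqrestore pair preserves the previous state instead. The converted pattern, as a sketch:

        unsigned long flags;

        spin_lock_irqsave(&chsc_page_lock, flags);      /* remember irq state */
        /* ... build and issue the chsc request ... */
        spin_unlock_irqrestore(&chsc_page_lock, flags); /* restore, never force-enable */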
...@@ -127,7 +127,6 @@ extern int cio_resume (struct subchannel *); ...@@ -127,7 +127,6 @@ extern int cio_resume (struct subchannel *);
extern int cio_halt (struct subchannel *); extern int cio_halt (struct subchannel *);
extern int cio_start (struct subchannel *, struct ccw1 *, __u8); extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8); extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int); extern int cio_set_options (struct subchannel *, int);
extern int cio_update_schib(struct subchannel *sch); extern int cio_update_schib(struct subchannel *sch);
extern int cio_commit_config(struct subchannel *sch); extern int cio_commit_config(struct subchannel *sch);
......
...@@ -310,7 +310,7 @@ static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter) ...@@ -310,7 +310,7 @@ static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
snprintf(name, sizeof(name), "zfcp_q_%s", snprintf(name, sizeof(name), "zfcp_q_%s",
dev_name(&adapter->ccw_device->dev)); dev_name(&adapter->ccw_device->dev));
adapter->work_queue = create_singlethread_workqueue(name); adapter->work_queue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (adapter->work_queue) if (adapter->work_queue)
return 0; return 0;
......
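alloc_ordered_workqueue() keeps the one-work-item-at-a-time ordering of the old single-threaded workqueue, and WQ_MEM_RECLAIM additionally guarantees a rescuer thread, so queued work can make progress even under memory pressure. A sketch of the call, using the printf-style naming the API supports:

        wq = alloc_ordered_workqueue("zfcp_q_%s", WQ_MEM_RECLAIM,
                                     dev_name(&adapter->ccw_device->dev));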
...@@ -103,6 +103,7 @@ extern const struct raid6_calls raid6_avx2x1; ...@@ -103,6 +103,7 @@ extern const struct raid6_calls raid6_avx2x1;
extern const struct raid6_calls raid6_avx2x2; extern const struct raid6_calls raid6_avx2x2;
extern const struct raid6_calls raid6_avx2x4; extern const struct raid6_calls raid6_avx2x4;
extern const struct raid6_calls raid6_tilegx8; extern const struct raid6_calls raid6_tilegx8;
extern const struct raid6_calls raid6_s390vx8;
struct raid6_recov_calls { struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **); void (*data2)(int, size_t, int, int, void **);
...@@ -115,6 +116,7 @@ struct raid6_recov_calls { ...@@ -115,6 +116,7 @@ struct raid6_recov_calls {
extern const struct raid6_recov_calls raid6_recov_intx1; extern const struct raid6_recov_calls raid6_recov_intx1;
extern const struct raid6_recov_calls raid6_recov_ssse3; extern const struct raid6_recov_calls raid6_recov_ssse3;
extern const struct raid6_recov_calls raid6_recov_avx2; extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_calls raid6_neonx1; extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2; extern const struct raid6_calls raid6_neonx2;
......
config ARCH_HAS_UBSAN_SANITIZE_ALL config ARCH_HAS_UBSAN_SANITIZE_ALL
bool bool
config ARCH_WANTS_UBSAN_NO_NULL
def_bool n
config UBSAN config UBSAN
bool "Undefined behaviour sanity checker" bool "Undefined behaviour sanity checker"
help help
...@@ -34,3 +37,11 @@ config UBSAN_ALIGNMENT ...@@ -34,3 +37,11 @@ config UBSAN_ALIGNMENT
This option enables detection of unaligned memory accesses. This option enables detection of unaligned memory accesses.
Enabling this option on architectures that support unaligned Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives. accesses may produce a lot of false positives.
config UBSAN_NULL
bool "Enable checking of null pointers"
depends on UBSAN
default y if !ARCH_WANTS_UBSAN_NO_NULL
help
This option enables detection of memory accesses via a
null pointer.
...@@ -3,3 +3,4 @@ altivec*.c ...@@ -3,3 +3,4 @@ altivec*.c
int*.c int*.c
tables.c tables.c
neon?.c neon?.c
s390vx?.c
...@@ -7,6 +7,7 @@ raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o ...@@ -7,6 +7,7 @@ raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o
raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o
raid6_pq-$(CONFIG_TILEGX) += tilegx8.o raid6_pq-$(CONFIG_TILEGX) += tilegx8.o
raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
hostprogs-y += mktables hostprogs-y += mktables
...@@ -116,6 +117,11 @@ $(obj)/tilegx8.c: UNROLL := 8 ...@@ -116,6 +117,11 @@ $(obj)/tilegx8.c: UNROLL := 8
$(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE $(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll) $(call if_changed,unroll)
targets += s390vx8.c
$(obj)/s390vx8.c: UNROLL := 8
$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
quiet_cmd_mktable = TABLE $@ quiet_cmd_mktable = TABLE $@
cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 ) cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
......
...@@ -68,6 +68,9 @@ const struct raid6_calls * const raid6_algos[] = { ...@@ -68,6 +68,9 @@ const struct raid6_calls * const raid6_algos[] = {
#endif #endif
#if defined(CONFIG_TILEGX) #if defined(CONFIG_TILEGX)
&raid6_tilegx8, &raid6_tilegx8,
#endif
#if defined(CONFIG_S390)
&raid6_s390vx8,
#endif #endif
&raid6_intx1, &raid6_intx1,
&raid6_intx2, &raid6_intx2,
...@@ -94,6 +97,9 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = { ...@@ -94,6 +97,9 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
#endif #endif
#ifdef CONFIG_AS_SSSE3 #ifdef CONFIG_AS_SSSE3
&raid6_recov_ssse3, &raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
&raid6_recov_s390xc,
#endif #endif
&raid6_recov_intx1, &raid6_recov_intx1,
NULL NULL
......
/*
* RAID-6 data recovery in dual failure mode based on the XC instruction.
*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/export.h>
#include <linux/raid/pq.h>
static inline void xor_block(u8 *p1, u8 *p2)
{
typedef struct { u8 _[256]; } addrtype;
asm volatile(
" xc 0(256,%[p1]),0(%[p2])\n"
: "+m" (*(addrtype *) p1) : "m" (*(addrtype *) p2),
[p1] "a" (p1), [p2] "a" (p2) : "cc");
}
/* Recover two failed data blocks. */
static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
u8 *p, *q, *dp, *dq;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
int i;
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/* Compute syndrome with zero for the missing data pages
Use the dead data pages as temporary storage for
delta p and delta q */
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks-2] = p;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
/* Now do it... */
while (bytes) {
xor_block(dp, p);
xor_block(dq, q);
for (i = 0; i < 256; i++)
dq[i] = pbmul[dp[i]] ^ qmul[dq[i]];
xor_block(dp, dq);
p += 256;
q += 256;
dp += 256;
dq += 256;
bytes -= 256;
}
}
/* Recover failure of one data block plus the P block */
static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
int i;
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/* Compute syndrome with zero for the missing data page
Use the dead data page as temporary storage for delta q */
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
/* Now do it... */
while (bytes) {
xor_block(dq, q);
for (i = 0; i < 256; i++)
dq[i] = qmul[dq[i]];
xor_block(p, dq);
p += 256;
q += 256;
dq += 256;
bytes -= 256;
}
}
const struct raid6_recov_calls raid6_recov_s390xc = {
.data2 = raid6_2data_recov_s390xc,
.datap = raid6_datap_recov_s390xc,
.valid = NULL,
.name = "s390xc",
.priority = 1,
};
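The per-byte arithmetic behind the two loops is the standard RAID6 recovery over GF(2^8), as in lib/raid6/recov.c. With P', Q' the syndrome recomputed after zeroing the failed pages and a < b the failed disk indices:

        dp  = P ^ P'                 /* = D_a ^ D_b               */
        dq  = Q ^ Q'                 /* = g^a * D_a ^ g^b * D_b   */
        D_b = pbmul[dp] ^ qmul[dq]   /* pbmul: multiply by (g^(b-a) + 1)^-1 */
                                     /* qmul:  multiply by (g^a + g^b)^-1   */
        D_a = dp ^ D_b

The XC-based xor_block() supplies the ^ over whole 256-byte blocks, leaving only the two table lookups as a byte loop. The data+P case is the degenerate form D_a = qmul[Q ^ Q'] with qmul a multiply by g^-a; P is then rebuilt by xoring D_a back in.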
/*
* raid6_vx$#.c
*
* $#-way unrolled RAID6 gen/xor functions for s390
* based on the vector facility
*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* This file is postprocessed using unroll.awk.
*/
#include <linux/raid/pq.h>
#include <asm/fpu/api.h>
asm(".include \"asm/vx-insn.h\"\n");
#define NSIZE 16
static inline void LOAD_CONST(void)
{
asm volatile("VREPIB %v24,7");
asm volatile("VREPIB %v25,0x1d");
}
/*
* The SHLBYTE() operation shifts each of the 16 bytes in
* vector register y left by 1 bit and stores the result in
* vector register x.
*/
static inline void SHLBYTE(int x, int y)
{
asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y));
}
/*
* For each of the 16 bytes in the vector register y the MASK()
* operation returns 0xFF if the high bit of the byte is 1,
* or 0x00 if the high bit is 0. The result is stored in vector
* register x.
*/
static inline void MASK(int x, int y)
{
asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y));
}
static inline void AND(int x, int y, int z)
{
asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
}
static inline void XOR(int x, int y, int z)
{
asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
}
static inline void LOAD_DATA(int x, int n, u8 *ptr)
{
typedef struct { u8 _[16*n]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VLM %2,%3,0,%r1"
: : "m" (*__ptr), "a" (__ptr), "i" (x), "i" (x + n - 1));
}
static inline void STORE_DATA(int x, int n, u8 *ptr)
{
typedef struct { u8 _[16*n]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
asm volatile ("VSTM %2,%3,0,1"
: "=m" (*__ptr) : "a" (__ptr), "i" (x), "i" (x + n - 1));
}
static inline void COPY_VEC(int x, int y)
{
asm volatile ("VLR %0,%1" : : "i" (x), "i" (y));
}
static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
struct kernel_fpu vxstate;
u8 **dptr, *p, *q;
int d, z, z0;
kernel_fpu_begin(&vxstate, KERNEL_VXR);
LOAD_CONST();
dptr = (u8 **) ptrs;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0 + 1]; /* XOR parity */
q = dptr[z0 + 2]; /* RS syndrome */
for (d = 0; d < bytes; d += $#*NSIZE) {
LOAD_DATA(0,$#,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= 0; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
LOAD_DATA(16,$#,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
STORE_DATA(0,$#,&p[d]);
STORE_DATA(8,$#,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
struct kernel_fpu vxstate;
u8 **dptr, *p, *q;
int d, z, z0;
dptr = (u8 **) ptrs;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks - 2]; /* XOR parity */
q = dptr[disks - 1]; /* RS syndrome */
kernel_fpu_begin(&vxstate, KERNEL_VXR);
LOAD_CONST();
for (d = 0; d < bytes; d += $#*NSIZE) {
/* P/Q data pages */
LOAD_DATA(0,$#,&dptr[z0][d]);
COPY_VEC(8+$$,0+$$);
for (z = z0 - 1; z >= start; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
LOAD_DATA(16,$#,&dptr[z][d]);
XOR(0+$$,0+$$,16+$$);
XOR(8+$$,8+$$,16+$$);
}
/* P/Q left side optimization */
for (z = start - 1; z >= 0; z--) {
MASK(16+$$,8+$$);
AND(16+$$,16+$$,25);
SHLBYTE(8+$$,8+$$);
XOR(8+$$,8+$$,16+$$);
}
LOAD_DATA(16,$#,&p[d]);
XOR(16+$$,16+$$,0+$$);
STORE_DATA(16,$#,&p[d]);
LOAD_DATA(16,$#,&q[d]);
XOR(16+$$,16+$$,8+$$);
STORE_DATA(16,$#,&q[d]);
}
kernel_fpu_end(&vxstate, KERNEL_VXR);
}
static int raid6_s390vx$#_valid(void)
{
return MACHINE_HAS_VX;
}
const struct raid6_calls raid6_s390vx$# = {
raid6_s390vx$#_gen_syndrome,
raid6_s390vx$#_xor_syndrome,
raid6_s390vx$#_valid,
"vx128x$#",
1
};
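Per byte, the MASK/AND/SHLBYTE/XOR sequence is the multiply-by-g step of the syndrome recurrence Q = D_0 ^ g*(D_1 ^ g*(... ^ g*D_z0)). A scalar sketch of one round (0x1d is the reduction constant LOAD_CONST() replicates into %v25):

        /* Scalar equivalent of one MASK/AND/SHLBYTE/XOR round on one byte. */
        static inline unsigned char gf_times2(unsigned char v)
        {
                unsigned char mask = (v & 0x80) ? 0xff : 0x00;  /* MASK: arithmetic shift right by 7 */
                return (unsigned char)(v << 1) ^ (mask & 0x1d); /* SHLBYTE, then AND + XOR */
        }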
...@@ -3,7 +3,6 @@ ifdef CONFIG_UBSAN ...@@ -3,7 +3,6 @@ ifdef CONFIG_UBSAN
CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero) CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable) CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound) CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow) CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds) CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds)
CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size) CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size)
...@@ -14,4 +13,8 @@ ifdef CONFIG_UBSAN ...@@ -14,4 +13,8 @@ ifdef CONFIG_UBSAN
ifdef CONFIG_UBSAN_ALIGNMENT ifdef CONFIG_UBSAN_ALIGNMENT
CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment) CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
endif endif
ifdef CONFIG_UBSAN_NULL
CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
endif
endif endif