Commit 2ec4584e authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "The main bulk of the s390 patches for the 4.10 merge window:

   - Add support for the contiguous memory allocator.

   - The recovery for I/O errors in the dasd device driver is improved;
     the driver now removes channel paths that are not working
     properly.

   - Additional fields are added to /proc/sysinfo: the extended
     partition name and the partition UUID.

   - New naming for PCI devices with system-defined UIDs.

   - The last few remaining alloc_bootmem calls are converted to
     memblock.

   - The thread_info structure is stripped down and moved to the
     task_struct. The only field left in thread_info is the flags field.

   - Rework of the arch topology code to fix a fake NUMA issue.

   - Refactoring of the atomic primitives and a new preempt_count
     implementation.

   - Clocksource steering for the STP sync check offsets.

   - The s390-specific headers are changed to make them usable with
     clang.

   - Bug fixes and cleanup"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (70 commits)
  s390/cpumf: Use configuration level indication for sampling data
  s390: provide memmove implementation
  s390: cleanup arch/s390/kernel Makefile
  s390: fix initrd corruptions with gcov/kcov instrumented kernels
  s390: exclude early C code from gcov profiling
  s390/dasd: channel path aware error recovery
  s390/dasd: extend dasd path handling
  s390: remove unused labels from entry.S
  s390/vmlogrdr: fix IUCV buffer allocation
  s390/crypto: unlock on error in prng_tdes_read()
  s390/sysinfo: show partition extended name and UUID if available
  s390/numa: pin all possible cpus to nodes early
  s390/numa: establish cpu to node mapping early
  s390/topology: use cpu_topology array instead of per cpu variable
  s390/smp: initialize cpu_present_mask in setup_arch
  s390/topology: always use s390 specific sched_domain_topology_level
  s390/smp: use smp_get_base_cpu() helper function
  s390/numa: always use logical cpu and core ids
  s390: Remove VLAIS in ptff() and clear_table()
  s390: fix machine check panic stack switch
  ...
parents aa3ecf38 c19805f8
......@@ -10557,7 +10557,7 @@ F: arch/s390/pci/
F: drivers/pci/hotplug/s390_pci_hpc.c
S390 ZCRYPT DRIVER
M: Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
M: Harald Freudenberger <freude@de.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
......
......@@ -136,6 +136,7 @@ config S390
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
......@@ -169,6 +170,7 @@ config S390
select OLD_SIGSUSPEND3
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select TTY
select VIRT_CPU_ACCOUNTING
select ARCH_HAS_SCALED_CPUTIME
......
......@@ -46,7 +46,7 @@ mover_end:
.align 8
.Lstack:
.quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.Loffset:
.quad 0x11000
.Lmvsize:
......
......@@ -66,6 +66,8 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZPOOL=m
CONFIG_ZBUD=m
......@@ -366,6 +368,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
......@@ -438,7 +442,6 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
......@@ -693,3 +696,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m
......@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
......@@ -434,7 +436,6 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
......@@ -633,3 +634,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m
......@@ -362,6 +362,8 @@ CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
......@@ -434,7 +436,6 @@ CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
CONFIG_VHOST_NET=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
......@@ -632,3 +633,4 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m
......@@ -507,8 +507,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
prng_data->prngws.byte_counter += n;
prng_data->prngws.reseed_counter += n;
if (copy_to_user(ubuf, prng_data->buf, chunk))
return -EFAULT;
if (copy_to_user(ubuf, prng_data->buf, chunk)) {
ret = -EFAULT;
break;
}
nbytes -= chunk;
ret += chunk;
......
......@@ -3,6 +3,7 @@
*
* Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
* License: GPL
*/
#define KMSG_COMPONENT "hypfs"
......@@ -18,7 +19,8 @@
#include <linux/time.h>
#include <linux/parser.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/uio.h>
......@@ -443,7 +445,6 @@ static struct file_system_type hypfs_type = {
.mount = hypfs_mount,
.kill_sb = hypfs_kill_super
};
MODULE_ALIAS_FS("s390_hypfs");
static const struct super_operations hypfs_s_ops = {
.statfs = simple_statfs,
......@@ -497,21 +498,4 @@ static int __init hypfs_init(void)
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
static void __exit hypfs_exit(void)
{
unregister_filesystem(&hypfs_type);
sysfs_remove_mount_point(hypervisor_kobj, "s390");
hypfs_diag0c_exit();
hypfs_sprp_exit();
hypfs_vm_exit();
hypfs_diag_exit();
hypfs_dbfs_exit();
}
module_init(hypfs_init)
module_exit(hypfs_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>");
MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
device_initcall(hypfs_init)
generic-y += asm-offsets.h
generic-y += clkdev.h
generic-y += dma-contiguous.h
generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
......
#include <generated/asm-offsets.h>
/*
* Copyright IBM Corp. 1999, 2009
* Copyright IBM Corp. 1999, 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow,
* Arnd Bergmann <arndb@de.ibm.com>,
*
* Atomic operations that C can't guarantee us.
* Useful for resource counting etc.
* s390 uses 'Compare And Swap' for atomicity in SMP environment.
*
* Arnd Bergmann,
*/
#ifndef __ARCH_S390_ATOMIC__
......@@ -15,62 +10,12 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) { (i) }
#define __ATOMIC_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC_OR "lao"
#define __ATOMIC_AND "lan"
#define __ATOMIC_ADD "laa"
#define __ATOMIC_XOR "lax"
#define __ATOMIC_BARRIER "bcr 14,0\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
int old_val; \
\
typecheck(atomic_t *, ptr); \
asm volatile( \
op_string " %0,%2,%1\n" \
__barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __ATOMIC_OR "or"
#define __ATOMIC_AND "nr"
#define __ATOMIC_ADD "ar"
#define __ATOMIC_XOR "xr"
#define __ATOMIC_BARRIER "\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
int old_val, new_val; \
\
typecheck(atomic_t *, ptr); \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
static inline int atomic_read(const atomic_t *v)
{
int c;
......@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v)
{
return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
return __atomic_add_barrier(i, &v->counter) + i;
}
static inline int atomic_fetch_add(int i, atomic_t *v)
{
return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
return __atomic_add_barrier(i, &v->counter);
}
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
asm volatile(
"asi %0,%1\n"
: "+Q" (v->counter)
: "i" (i)
: "cc", "memory");
__atomic_add_const(i, &v->counter);
return;
}
#endif
__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
__atomic_add(i, &v->counter);
}
#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
......@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
#define ATOMIC_OPS(op, OP) \
#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
__atomic_##op(i, &v->counter); \
} \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \
return __atomic_##op##_barrier(i, &v->counter); \
}
ATOMIC_OPS(and, AND)
ATOMIC_OPS(or, OR)
ATOMIC_OPS(xor, XOR)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
#undef ATOMIC_OPS
......@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
asm volatile(
" cs %0,%2,%1"
: "+d" (old), "+Q" (v->counter)
: "d" (new)
: "cc", "memory");
return old;
return __atomic_cmpxchg(&v->counter, old, new);
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
......@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
#undef __ATOMIC_LOOP
#define ATOMIC64_INIT(i) { (i) }
#define __ATOMIC64_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC64_OR "laog"
#define __ATOMIC64_AND "lang"
#define __ATOMIC64_ADD "laag"
#define __ATOMIC64_XOR "laxg"
#define __ATOMIC64_BARRIER "bcr 14,0\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
long long old_val; \
\
typecheck(atomic64_t *, ptr); \
asm volatile( \
op_string " %0,%2,%1\n" \
__barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __ATOMIC64_OR "ogr"
#define __ATOMIC64_AND "ngr"
#define __ATOMIC64_ADD "agr"
#define __ATOMIC64_XOR "xgr"
#define __ATOMIC64_BARRIER "\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
long long old_val, new_val; \
\
typecheck(atomic64_t *, ptr); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
static inline long long atomic64_read(const atomic64_t *v)
static inline long atomic64_read(const atomic64_t *v)
{
long long c;
long c;
asm volatile(
" lg %0,%1\n"
......@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
return c;
}
static inline void atomic64_set(atomic64_t *v, long long i)
static inline void atomic64_set(atomic64_t *v, long i)
{
asm volatile(
" stg %1,%0\n"
: "=Q" (v->counter) : "d" (i));
}
static inline long long atomic64_add_return(long long i, atomic64_t *v)
static inline long atomic64_add_return(long i, atomic64_t *v)
{
return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
return __atomic64_add_barrier(i, &v->counter) + i;
}
static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
return __atomic64_add_barrier(i, &v->counter);
}
static inline void atomic64_add(long long i, atomic64_t *v)
static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
asm volatile(
"agsi %0,%1\n"
: "+Q" (v->counter)
: "i" (i)
: "cc", "memory");
__atomic64_add_const(i, &v->counter);
return;
}
#endif
__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
__atomic64_add(i, &v->counter);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline long long atomic64_cmpxchg(atomic64_t *v,
long long old, long long new)
static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
asm volatile(
" csg %0,%2,%1"
: "+d" (old), "+Q" (v->counter)
: "d" (new)
: "cc", "memory");
return old;
return __atomic64_cmpxchg(&v->counter, old, new);
}
#define ATOMIC64_OPS(op, OP) \
#define ATOMIC64_OPS(op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
__atomic64_##op(i, &v->counter); \
} \
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
{ \
return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
return __atomic64_##op##_barrier(i, &v->counter); \
}
ATOMIC64_OPS(and, AND)
ATOMIC64_OPS(or, OR)
ATOMIC64_OPS(xor, XOR)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
#undef __ATOMIC64_LOOP
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
{
long long c, old;
long c, old;
c = atomic64_read(v);
for (;;) {
......@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
return c != u;
}
static inline long long atomic64_dec_if_positive(atomic64_t *v)
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
long long c, old, dec;
long c, old, dec;
c = atomic64_read(v);
for (;;) {
......@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_inc(_v) atomic64_add(1, _v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long long)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub(1, _v)
#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
......
/*
* Low level function for atomic operations
*
* Copyright IBM Corp. 1999, 2016
*/
#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static inline op_type op_name(op_type val, op_type *ptr) \
{ \
op_type old; \
\
asm volatile( \
op_string " %[old],%[val],%[ptr]\n" \
op_barrier \
: [old] "=d" (old), [ptr] "+Q" (*ptr) \
: [val] "d" (val) : "cc", "memory"); \
return old; \
} \
#define __ATOMIC_OPS(op_name, op_type, op_string) \
__ATOMIC_OP(op_name, op_type, op_string, "\n") \
__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")
__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")
#undef __ATOMIC_OPS
#undef __ATOMIC_OP
static inline void __atomic_add_const(int val, int *ptr)
{
asm volatile(
" asi %[ptr],%[val]\n"
: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}
static inline void __atomic64_add_const(long val, long *ptr)
{
asm volatile(
" agsi %[ptr],%[val]\n"
: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __ATOMIC_OP(op_name, op_string) \
static inline int op_name(int val, int *ptr) \
{ \
int old, new; \
\
asm volatile( \
"0: lr %[new],%[old]\n" \
op_string " %[new],%[val]\n" \
" cs %[old],%[new],%[ptr]\n" \
" jl 0b" \
: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
return old; \
}
#define __ATOMIC_OPS(op_name, op_string) \
__ATOMIC_OP(op_name, op_string) \
__ATOMIC_OP(op_name##_barrier, op_string)
__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")
#undef __ATOMIC_OPS
#define __ATOMIC64_OP(op_name, op_string) \
static inline long op_name(long val, long *ptr) \
{ \
long old, new; \
\
asm volatile( \
"0: lgr %[new],%[old]\n" \
op_string " %[new],%[val]\n" \
" csg %[old],%[new],%[ptr]\n" \
" jl 0b" \
: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
return old; \
}
#define __ATOMIC64_OPS(op_name, op_string) \
__ATOMIC64_OP(op_name, op_string) \
__ATOMIC64_OP(op_name##_barrier, op_string)
__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")
#undef __ATOMIC64_OPS
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
asm volatile(
" cs %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+Q" (*ptr)
: [new] "d" (new) : "cc", "memory");
return old;
}
static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
asm volatile(
" csg %[old],%[new],%[ptr]"
: [old] "+d" (old), [ptr] "+Q" (*ptr)
: [new] "d" (new) : "cc", "memory");
return old;
}
#endif /* __ARCH_S390_ATOMIC_OPS__ */
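For readers less fluent in the s390 mnemonics, here is a rough user-space model of the semantics the new primitives provide. It is a sketch only, built on the compiler's __atomic builtins instead of the laa/cs instructions; the model_* names are hypothetical, not kernel API:

#include <stdio.h>

/* Model of __atomic_add() / "laa": atomically add, return the OLD value. */
static int model_atomic_add(int val, int *ptr)
{
	return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

/* Model of __atomic_cmpxchg() / "cs": compare-and-swap that returns the
 * previous content of *ptr whether or not the exchange happened. */
static int model_atomic_cmpxchg(int *ptr, int old, int new)
{
	__atomic_compare_exchange_n(ptr, &old, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;	/* on failure, updated to the observed value */
}

int main(void)
{
	int counter = 40;

	printf("%d\n", model_atomic_add(2, &counter));		/* 40 */
	printf("%d\n", counter);				/* 42 */
	printf("%d\n", model_atomic_cmpxchg(&counter, 42, 7));	/* 42 */
	printf("%d\n", counter);				/* 7 */
	return 0;
}

The kernel's _barrier variants differ only in the trailing "bcr 14,0" serialization; the arithmetic is identical.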
......@@ -42,57 +42,9 @@
#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#define __BITOPS_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __BITOPS_OR "laog"
#define __BITOPS_AND "lang"
#define __BITOPS_XOR "laxg"
#define __BITOPS_BARRIER "bcr 14,0\n"
#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
unsigned long __old; \
\
typecheck(unsigned long *, (__addr)); \
asm volatile( \
__op_string " %0,%2,%1\n" \
__barrier \
: "=d" (__old), "+Q" (*(__addr)) \
: "d" (__val) \
: "cc", "memory"); \
__old; \
})
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
#define __BITOPS_BARRIER "\n"
#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
unsigned long __old, __new; \
\
typecheck(unsigned long *, (__addr)); \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
__op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
: "d" (__val) \
: "cc", "memory"); \
__old; \
})
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
......@@ -128,7 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
__atomic64_or(mask, addr);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
......@@ -149,7 +101,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
__atomic64_and(mask, addr);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
......@@ -170,7 +122,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
__atomic64_xor(mask, addr);
}
static inline int
......@@ -180,7 +132,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
old = __atomic64_or_barrier(mask, addr);
return (old & mask) != 0;
}
......@@ -191,7 +143,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
old = __atomic64_and_barrier(mask, addr);
return (old & ~mask) != 0;
}
......@@ -202,7 +154,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
old = __atomic64_xor_barrier(mask, addr);
return (old & mask) != 0;
}
......
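The bitops conversion above keeps the word/mask arithmetic and only swaps in the shared atomic backend. As a sanity check on that arithmetic, a small portable model of test_and_set_bit() (plain array instead of an interlocked OR; in the kernel the OR is __atomic64_or_barrier() as shown):

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

static unsigned long words[4];		/* 256 bits */

static int model_test_and_set_bit(unsigned long nr, unsigned long *ptr)
{
	unsigned long *addr = ptr + nr / BITS_PER_LONG;
	unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));
	unsigned long old = *addr;	/* kernel: __atomic64_or_barrier(mask, addr) */

	*addr |= mask;
	return (old & mask) != 0;
}

int main(void)
{
	printf("%d\n", model_test_and_set_bit(70, words));	/* 0: was clear */
	printf("%d\n", model_test_and_set_bit(70, words));	/* 1: already set */
	return 0;
}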
......@@ -104,7 +104,8 @@ struct hws_basic_entry {
unsigned int P:1; /* 28 PSW Problem state */
unsigned int AS:2; /* 29-30 PSW address-space control */
unsigned int I:1; /* 31 entry valid or invalid */
unsigned int:16;
unsigned int CL:2; /* 32-33 Configuration Level */
unsigned int:14;
unsigned int prim_asn:16; /* primary ASN */
unsigned long long ia; /* Instruction Address */
unsigned long long gpp; /* Guest Program Parameter */
......
......@@ -193,7 +193,7 @@ extern char elf_platform[];
do { \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
current_thread_info()->sys_call_table = \
current->thread.sys_call_table = \
(unsigned long) &sys_call_table; \
} while (0)
#else /* CONFIG_COMPAT */
......@@ -204,11 +204,11 @@ do { \
(current->personality & ~PER_MASK)); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
current_thread_info()->sys_call_table = \
current->thread.sys_call_table = \
(unsigned long) &sys_call_table_emu; \
} else { \
clear_thread_flag(TIF_31BIT); \
current_thread_info()->sys_call_table = \
current->thread.sys_call_table = \
(unsigned long) &sys_call_table; \
} \
} while (0)
......
/*
* Copyright IBM Corp. 2015
*/
#ifndef S390_GEN_FACILITIES_C
#error "This file can only be included by gen_facilities.c"
#endif
#include <linux/kconfig.h>
struct facility_def {
char *name;
int *bits;
};
static struct facility_def facility_defs[] = {
{
/*
* FACILITIES_ALS contains the list of facilities that are
* required to run a kernel that is compiled e.g. with
* -march=<machine>.
*/
.name = "FACILITIES_ALS",
.bits = (int[]){
#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
0, /* N3 instructions */
1, /* z/Arch mode installed */
#endif
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
18, /* long displacement facility */
#endif
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
7, /* stfle */
17, /* message security assist */
21, /* extended-immediate facility */
25, /* store clock fast */
#endif
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
27, /* mvcos */
32, /* compare and swap and store */
33, /* compare and swap and store 2 */
34, /* general extension facility */
35, /* execute extensions */
#endif
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
45, /* fast-BCR, etc. */
#endif
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
49, /* misc-instruction-extensions */
52, /* interlocked facility 2 */
#endif
#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
53, /* load-and-zero-rightmost-byte, etc. */
#endif
-1 /* END */
}
},
{
.name = "FACILITIES_KVM",
.bits = (int[]){
0, /* N3 instructions */
1, /* z/Arch mode installed */
2, /* z/Arch mode active */
3, /* DAT-enhancement */
4, /* idte segment table */
5, /* idte region table */
6, /* ASN-and-LX reuse */
7, /* stfle */
8, /* enhanced-DAT 1 */
9, /* sense-running-status */
10, /* conditional sske */
13, /* ipte-range */
14, /* nonquiescing key-setting */
73, /* transactional execution */
75, /* access-exception-fetch/store indication */
76, /* msa extension 3 */
77, /* msa extension 4 */
78, /* enhanced-DAT 2 */
-1 /* END */
}
},
};
......@@ -97,7 +97,7 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void do_reipl(void);
extern void do_halt(void);
extern void do_poff(void);
extern void ipl_save_parameters(void);
extern void ipl_verify_parameters(void);
extern void ipl_update_parameters(void);
extern size_t append_ipl_vmparm(char *, size_t);
extern size_t append_ipl_scpdata(char *, size_t);
......
......@@ -95,7 +95,7 @@ struct lowcore {
/* Current process. */
__u64 current_task; /* 0x0310 */
__u64 thread_info; /* 0x0318 */
__u8 pad_0x318[0x320-0x318]; /* 0x0318 */
__u64 kernel_stack; /* 0x0320 */
/* Interrupt, panic and restart stack. */
......@@ -126,7 +126,8 @@ struct lowcore {
__u64 percpu_offset; /* 0x0378 */
__u64 vdso_per_cpu_data; /* 0x0380 */
__u64 machine_flags; /* 0x0388 */
__u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
__u32 preempt_count; /* 0x0390 */
__u8 pad_0x0394[0x0398-0x0394]; /* 0x0394 */
__u64 gmap; /* 0x0398 */
__u32 spinlock_lockval; /* 0x03a0 */
__u32 fpu_flags; /* 0x03a4 */
......
......@@ -46,6 +46,8 @@ struct clp_fh_list_entry {
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
extern bool zpci_unique_uid;
/* List PCI functions request */
struct clp_req_list_pci {
struct clp_req_hdr hdr;
......@@ -59,7 +61,8 @@ struct clp_rsp_list_pci {
u64 resume_token;
u32 reserved2;
u16 max_fn;
u8 reserved3;
u8 : 7;
u8 uid_checking : 1;
u8 entry_size;
struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
} __packed;
......
......@@ -27,17 +27,17 @@ extern int page_table_allocate_pgste;
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
typedef struct { char _[n]; } addrtype;
*s = val;
n = (n / 256) - 1;
asm volatile(
" mvc 8(248,%0),0(%0)\n"
"0: mvc 256(256,%0),0(%0)\n"
" la %0,256(%0)\n"
" brct %1,0b\n"
: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
: "m" (*(addrtype *) s));
struct addrtype { char _[256]; };
int i;
for (i = 0; i < n; i += 256) {
*s = val;
asm volatile(
"mvc 8(248,%[s]),0(%[s])\n"
: "+m" (*(struct addrtype *) s)
: [s] "a" (s));
s += 256 / sizeof(long);
}
}
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
......
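The rewritten clear_table() still leans on MVC's destructive left-to-right byte copy: store one 8-byte value, then let an overlapping forward copy replicate it through each 256-byte block. A memmove()-style non-destructive copy would defeat the trick. A portable model of that propagation, illustration only with hypothetical names:

#include <stdio.h>
#include <string.h>

static void fill_block(unsigned long *s, unsigned long val, size_t n)
{
	unsigned char *p = (unsigned char *)s;

	for (size_t i = 0; i < n; i += 256) {
		memcpy(p + i, &val, sizeof(val));	/* *s = val */
		for (size_t k = 0; k < 248; k++)	/* mvc 8(248,%s),0(%s) */
			p[i + 8 + k] = p[i + k];
	}
}

int main(void)
{
	unsigned long tbl[512 / sizeof(unsigned long)];

	fill_block(tbl, 0x20UL, sizeof(tbl));
	printf("%lx %lx\n", tbl[0], tbl[sizeof(tbl) / sizeof(tbl[0]) - 1]);	/* 20 20 */
	return 0;
}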
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H
#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
static inline int preempt_count(void)
{
return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}
static inline void preempt_count_set(int pc)
{
int old, new;
do {
old = READ_ONCE(S390_lowcore.preempt_count);
new = (old & PREEMPT_NEED_RESCHED) |
(pc & ~PREEMPT_NEED_RESCHED);
} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
old, new) != old);
}
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
S390_lowcore.preempt_count = PREEMPT_ENABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
{
__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
static inline void clear_preempt_need_resched(void)
{
__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
static inline bool test_preempt_need_resched(void)
{
return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}
static inline void __preempt_count_add(int val)
{
if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
__atomic_add_const(val, &S390_lowcore.preempt_count);
else
__atomic_add(val, &S390_lowcore.preempt_count);
}
static inline void __preempt_count_sub(int val)
{
__preempt_count_add(-val);
}
static inline bool __preempt_count_dec_and_test(void)
{
return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}
static inline bool should_resched(int preempt_offset)
{
return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
preempt_offset);
}
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#define PREEMPT_ENABLED (0)
static inline int preempt_count(void)
{
return READ_ONCE(S390_lowcore.preempt_count);
}
static inline void preempt_count_set(int pc)
{
S390_lowcore.preempt_count = pc;
}
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
S390_lowcore.preempt_count = PREEMPT_ENABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
{
}
static inline void clear_preempt_need_resched(void)
{
}
static inline bool test_preempt_need_resched(void)
{
return false;
}
static inline void __preempt_count_add(int val)
{
S390_lowcore.preempt_count += val;
}
static inline void __preempt_count_sub(int val)
{
S390_lowcore.preempt_count -= val;
}
static inline bool __preempt_count_dec_and_test(void)
{
return !--S390_lowcore.preempt_count && tif_need_resched();
}
static inline bool should_resched(int preempt_offset)
{
return unlikely(preempt_count() == preempt_offset &&
tif_need_resched());
}
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */
#endif /* __ASM_PREEMPT_H */
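The z196 variant above folds PREEMPT_NEED_RESCHED into the lowcore preempt_count with inverted polarity: the bit set means "no resched needed", so a single compare against zero simultaneously tests "count dropped to zero" and "reschedule requested". A minimal single-threaded model of that trick (no atomics, purely illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* The bit is kept INVERTED: set means "no resched needed". */
static unsigned int pc = 0 + PREEMPT_NEED_RESCHED;	/* PREEMPT_ENABLED */

static void set_need_resched(void)	{ pc &= ~PREEMPT_NEED_RESCHED; }
static bool dec_and_test(void)		{ return --pc == 0; }

int main(void)
{
	pc += 1;			/* preempt_disable() */
	printf("%d\n", dec_and_test());	/* 0: bit still set, count != 0 */
	pc += 1;			/* preempt_disable() again */
	set_need_resched();		/* e.g. from an interrupt */
	printf("%d\n", dec_and_test());	/* 1: dropped to 0 AND resched set */
	return 0;
}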
......@@ -110,14 +110,20 @@ typedef struct {
struct thread_struct {
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
unsigned long user_timer; /* task cputime in user space */
unsigned long system_timer; /* task cputime in kernel space */
unsigned long sys_call_table; /* system call table address */
mm_segment_t mm_segment;
unsigned long gmap_addr; /* address of last gmap fault. */
unsigned int gmap_write_flag; /* gmap fault write indication */
unsigned int gmap_int_code; /* int code of last gmap fault */
unsigned int gmap_pfault; /* signal of a pending guest pfault */
/* Per-thread information related to debugging */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
unsigned long per_flags; /* Flags to control debug behavior */
unsigned int system_call; /* system call number in signal */
unsigned long last_break; /* last breaking-event-address. */
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
struct list_head list;
......
......@@ -101,7 +101,8 @@ struct zpci_report_error_header {
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;
int sclp_get_core_info(struct sclp_core_info *info);
int _sclp_get_core_info_early(struct sclp_core_info *info);
int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);
int sclp_sdias_blk_count(void);
......@@ -119,4 +120,11 @@ void sclp_early_detect(void);
void _sclp_print_early(const char *);
void sclp_ocf_cpc_name_copy(char *dst);
static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
{
if (early)
return _sclp_get_core_info_early(info);
return _sclp_get_core_info(info);
}
#endif /* _ASM_S390_SCLP_H */
......@@ -96,7 +96,8 @@ struct tm_scsw {
u32 dstat:8;
u32 cstat:8;
u32 fcxs:8;
u32 schxs:8;
u32 ifob:1;
u32 sesq:7;
} __attribute__ ((packed));
/**
......@@ -177,6 +178,9 @@ union scsw {
#define SCHN_STAT_INTF_CTRL_CHK 0x02
#define SCHN_STAT_CHAIN_CHECK 0x01
#define SCSW_SESQ_DEV_NOFCX 3
#define SCSW_SESQ_PATH_NOFCX 4
/*
* architectured values for first sense byte
*/
......
......@@ -36,6 +36,7 @@ extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);
#else /* CONFIG_SMP */
......@@ -56,6 +57,7 @@ static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_fill_possible_mask(void) { }
static inline void smp_detect_cpus(void) { }
#endif /* CONFIG_SMP */
......@@ -69,6 +71,12 @@ static inline void smp_stop_cpu(void)
}
}
/* Return thread 0 CPU number as base CPU */
static inline int smp_get_base_cpu(int cpu)
{
return cpu - (cpu % (smp_cpu_mtid + 1));
}
#ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
......
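A quick check of the smp_get_base_cpu() arithmetic introduced above: with smp_cpu_mtid + 1 hardware threads per core, rounding the cpu number down to the start of its group yields the thread-0 sibling. Standalone sketch; the SMT-2 value of smp_cpu_mtid is an assumption:

#include <stdio.h>

static const int smp_cpu_mtid = 1;	/* assumed: two threads per core */

static int smp_get_base_cpu(int cpu)
{
	return cpu - (cpu % (smp_cpu_mtid + 1));
}

int main(void)
{
	for (int cpu = 0; cpu < 6; cpu++)
		printf("cpu %d -> base %d\n", cpu, smp_get_base_cpu(cpu));
	/* 0->0  1->0  2->2  3->2  4->4  5->4 */
	return 0;
}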
......@@ -14,6 +14,7 @@
#define __HAVE_ARCH_MEMCHR /* inline & arch function */
#define __HAVE_ARCH_MEMCMP /* arch function */
#define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
......@@ -32,6 +33,7 @@
extern int memcmp(const void *, const void *, size_t);
extern void *memcpy(void *, const void *, size_t);
extern void *memset(void *, int, size_t);
extern void *memmove(void *, const void *, size_t);
extern int strcmp(const char *,const char *);
extern size_t strlcat(char *, const char *, size_t);
extern size_t strlcpy(char *, const char *, size_t);
......@@ -40,7 +42,6 @@ extern char *strncpy(char *, const char *, size_t);
extern char *strrchr(const char *, int);
extern char *strstr(const char *, const char *);
#undef __HAVE_ARCH_MEMMOVE
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
#undef __HAVE_ARCH_STRNCMP
......
......@@ -107,6 +107,11 @@ struct sysinfo_2_2_2 {
char reserved_3[5];
unsigned short cpus_dedicated;
unsigned short cpus_shared;
char reserved_4[3];
unsigned char vsne;
uuid_be uuid;
char reserved_5[160];
char ext_name[256];
};
#define LPAR_CHAR_DEDICATED (1 << 7)
......@@ -127,7 +132,7 @@ struct sysinfo_3_2_2 {
unsigned int caf;
char cpi[16];
char reserved_1[3];
char ext_name_encoding;
unsigned char evmne;
unsigned int reserved_2;
uuid_be uuid;
} vm[8];
......
......@@ -12,10 +12,10 @@
/*
* Size of kernel stack for each process
*/
#define THREAD_ORDER 2
#define THREAD_SIZE_ORDER 2
#define ASYNC_ORDER 2
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
#ifndef __ASSEMBLY__
......@@ -30,15 +30,7 @@
* - if the contents of this structure are changed, the assembly constants must also be changed
*/
struct thread_info {
struct task_struct *task; /* main task structure */
unsigned long flags; /* low level flags */
unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
unsigned int system_call;
__u64 user_timer;
__u64 system_timer;
unsigned long last_break; /* last breaking-event-address. */
};
/*
......@@ -46,26 +38,14 @@ struct thread_info {
*/
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
return (struct thread_info *) S390_lowcore.thread_info;
}
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define THREAD_SIZE_ORDER THREAD_ORDER
#endif
/*
......
......@@ -52,11 +52,9 @@ static inline void store_clock_comparator(__u64 *time)
void clock_comparator_work(void);
void __init ptff_init(void);
void __init time_early_init(void);
extern unsigned char ptff_function_mask[16];
extern unsigned long lpar_offset;
extern unsigned long initial_leap_seconds;
/* Function codes for the ptff instruction. */
#define PTFF_QAF 0x00 /* query available functions */
......@@ -100,21 +98,28 @@ struct ptff_qui {
unsigned int pad_0x5c[41];
} __packed;
static inline int ptff(void *ptff_block, size_t len, unsigned int func)
{
typedef struct { char _[len]; } addrtype;
register unsigned int reg0 asm("0") = func;
register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
int rc;
asm volatile(
" .word 0x0104\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc), "+m" (*(addrtype *) ptff_block)
: "d" (reg0), "d" (reg1) : "cc");
return rc;
}
/*
* ptff - Perform timing facility function
* @ptff_block: Pointer to ptff parameter block
* @len: Length of parameter block
* @func: Function code
* Returns: Condition code (0 on success)
*/
#define ptff(ptff_block, len, func) \
({ \
struct addrtype { char _[len]; }; \
register unsigned int reg0 asm("0") = func; \
register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
int rc; \
\
asm volatile( \
" .word 0x0104\n" \
" ipm %0\n" \
" srl %0,28\n" \
: "=d" (rc), "+m" (*(struct addrtype *) reg1) \
: "d" (reg0), "d" (reg1) : "cc"); \
rc; \
})
static inline unsigned long long local_tick_disable(void)
{
......
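Background on this ptff() rewrite (part of the clang enablement): a struct whose member size depends on a function parameter is a "variable length array in structure" (VLAIS), a GCC-only extension that clang rejects. Rewritten as a macro, len is the caller's compile-time constant at every expansion, so the struct becomes an ordinary fixed-size type. A sketch of the difference, hypothetical code rather than the kernel's:

/* GCC accepts this, clang does not: "len" is a runtime value, so the
 * struct member is a VLAIS. */
static inline void vlais_version(void *p, unsigned long len)
{
	struct addrtype { char _[len]; };
	(void)(struct addrtype *)p;
}

/* The macro expands with the caller's constant, producing an ordinary
 * fixed-size struct: */
#define macro_version(p, len) do {				\
	struct addrtype { char _[len]; };			\
	(void)(struct addrtype *)(p);				\
} while (0)

int main(void)
{
	char buf[64];

	macro_version(buf, sizeof(buf));	/* len == 64: plain struct */
	return 0;
}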
......@@ -22,21 +22,22 @@ struct cpu_topology_s390 {
cpumask_t drawer_mask;
};
DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
#define topology_sibling_cpumask(cpu) \
(&per_cpu(cpu_topology, cpu).thread_mask)
#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
extern cpumask_t cpus_with_topology;
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
#define mc_capable() 1
void topology_init_early(void);
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
......@@ -46,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
#else /* CONFIG_SCHED_TOPOLOGY */
static inline void topology_init_early(void) { }
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
static inline void topology_expect_change(void) { }
......@@ -65,7 +67,7 @@ static inline void topology_expect_change(void) { }
#define cpu_to_node cpu_to_node
static inline int cpu_to_node(int cpu)
{
return per_cpu(cpu_topology, cpu).node_id;
return cpu_topology[cpu].node_id;
}
/* Returns a pointer to the cpumask of CPUs on node 'node'. */
......
......@@ -37,14 +37,14 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
#define set_fs(x) \
({ \
#define set_fs(x) \
{ \
unsigned long __pto; \
current->thread.mm_segment = (x); \
__pto = current->thread.mm_segment.ar4 ? \
S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
__ctl_load(__pto, 7, 7); \
})
}
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
......
......@@ -33,6 +33,8 @@ struct vdso_data {
__u32 ectg_available; /* ECTG instruction present 0x58 */
__u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
__u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
__u32 ts_dir; /* TOD steering direction 0x64 */
__u64 ts_end; /* TOD steering end 0x68 */
};
struct vdso_per_cpu_data {
......
......@@ -12,6 +12,7 @@ header-y += dasd.h
header-y += debug.h
header-y += errno.h
header-y += fcntl.h
header-y += hypfs.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
......@@ -29,16 +30,16 @@ header-y += ptrace.h
header-y += qeth.h
header-y += resource.h
header-y += schid.h
header-y += sclp_ctl.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
header-y += sie.h
header-y += sigcontext.h
header-y += siginfo.h
header-y += signal.h
header-y += socket.h
header-y += sockios.h
header-y += sclp_ctl.h
header-y += sie.h
header-y += stat.h
header-y += statfs.h
header-y += swab.h
......
......@@ -2,20 +2,47 @@
# Makefile for the linux kernel.
#
KCOV_INSTRUMENT_early.o := n
KCOV_INSTRUMENT_sclp.o := n
KCOV_INSTRUMENT_als.o := n
ifdef CONFIG_FUNCTION_TRACER
# Don't trace early setup code and tracing code
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
# Do not trace tracer code
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
# Do not trace early setup code
CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
endif
GCOV_PROFILE_als.o := n
GCOV_PROFILE_early.o := n
GCOV_PROFILE_sclp.o := n
KCOV_INSTRUMENT_als.o := n
KCOV_INSTRUMENT_early.o := n
KCOV_INSTRUMENT_sclp.o := n
UBSAN_SANITIZE_als.o := n
UBSAN_SANITIZE_early.o := n
UBSAN_SANITIZE_sclp.o := n
#
# Use -march=z900 for sclp.c and als.c to be able to print an error
# message if the kernel is started on a machine which is too old
#
ifneq ($(CC_FLAGS_MARCH),-march=z900)
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
CFLAGS_als.o += -march=z900
CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp.o += -march=z900
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
endif
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
CFLAGS_smp.o := -Wno-nonnull
CFLAGS_smp.o := -Wno-nonnull
#
# Disable tailcall optimizations for stack / callchain walking functions
......@@ -30,27 +57,7 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -w
#
# Use -march=z900 for sclp.c and als.c to be able to print an error
# message if the kernel is started on a machine which is too old
#
CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
ifneq ($(CC_FLAGS_MARCH),-march=z900)
CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp.o += -march=z900
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
CFLAGS_als.o += -march=z900
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
endif
GCOV_PROFILE_sclp.o := n
GCOV_PROFILE_als.o := n
UBSAN_SANITIZE_als.o := n
UBSAN_SANITIZE_early.o := n
UBSAN_SANITIZE_sclp.o := n
CFLAGS_sysinfo.o += -w
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
......
......@@ -25,12 +25,14 @@
int main(void)
{
/* task struct offsets */
OFFSET(__TASK_thread_info, task_struct, stack);
OFFSET(__TASK_stack, task_struct, stack);
OFFSET(__TASK_thread, task_struct, thread);
OFFSET(__TASK_pid, task_struct, pid);
BLANK();
/* thread struct offsets */
OFFSET(__THREAD_ksp, thread_struct, ksp);
OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table);
OFFSET(__THREAD_last_break, thread_struct, last_break);
OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
......@@ -39,14 +41,7 @@ int main(void)
OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
BLANK();
/* thread info offsets */
OFFSET(__TI_task, thread_info, task);
OFFSET(__TI_flags, thread_info, flags);
OFFSET(__TI_sysc_table, thread_info, sys_call_table);
OFFSET(__TI_cpu, thread_info, cpu);
OFFSET(__TI_precount, thread_info, preempt_count);
OFFSET(__TI_user_timer, thread_info, user_timer);
OFFSET(__TI_system_timer, thread_info, system_timer);
OFFSET(__TI_last_break, thread_info, last_break);
OFFSET(__TI_flags, task_struct, thread_info.flags);
BLANK();
/* pt_regs offsets */
OFFSET(__PT_ARGS, pt_regs, args);
......@@ -79,6 +74,8 @@ int main(void)
OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
OFFSET(__VDSO_TS_END, vdso_data, ts_end);
OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
......@@ -159,7 +156,6 @@ int main(void)
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
......@@ -173,6 +169,7 @@ int main(void)
OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_PASTE, lowcore, paste);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
......
......@@ -446,7 +446,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
regs->gprs[6] = task_thread_info(current)->last_break;
regs->gprs[6] = current->thread.last_break;
}
return 0;
......@@ -523,7 +523,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
regs->gprs[5] = current->thread.last_break;
return 0;
}
......
......@@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
psw.addr = (unsigned long) s390_base_pgm_handler;
S390_lowcore.program_new_psw = psw;
s390_base_pgm_handler_fn = early_pgm_check_handler;
S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}
static noinline __init void setup_facility_list(void)
......@@ -391,7 +392,49 @@ static int __init cad_init(void)
}
early_initcall(cad_init);
static __init void rescue_initrd(void)
static __init void memmove_early(void *dst, const void *src, size_t n)
{
unsigned long addr;
long incr;
psw_t old;
if (!n)
return;
incr = 1;
if (dst > src) {
incr = -incr;
dst += n - 1;
src += n - 1;
}
old = S390_lowcore.program_new_psw;
S390_lowcore.program_new_psw.mask = __extract_psw();
asm volatile(
" larl %[addr],1f\n"
" stg %[addr],%[psw_pgm_addr]\n"
"0: mvc 0(1,%[dst]),0(%[src])\n"
" agr %[dst],%[incr]\n"
" agr %[src],%[incr]\n"
" brctg %[n],0b\n"
"1:\n"
: [addr] "=&d" (addr),
[psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr),
[dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
: [incr] "d" (incr)
: "cc", "memory");
S390_lowcore.program_new_psw = old;
}
static __init noinline void ipl_save_parameters(void)
{
void *src, *dst;
src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
dst = (void *) IPL_PARMBLOCK_ORIGIN;
memmove_early(dst, src, PAGE_SIZE);
S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
......@@ -405,7 +448,7 @@ static __init void rescue_initrd(void)
return;
if (INITRD_START >= min_initrd_addr)
return;
memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = min_initrd_addr;
#endif
}
......@@ -467,7 +510,8 @@ void __init startup_init(void)
ipl_save_parameters();
rescue_initrd();
clear_bss_section();
ptff_init();
ipl_verify_parameters();
time_early_init();
init_kernel_storage_key();
lockdep_off();
setup_lowcore_early();
......
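What memmove_early() implements, minus the machinery it exists for: the copy direction is chosen from the pointer order so overlapping moves stay safe, and the kernel version additionally installs a temporary program-check PSW so a faulting access simply terminates the copy (this code runs before exception tables are usable). A portable model of just the copy loop:

#include <stddef.h>
#include <stdio.h>

static void memmove_model(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;
	long incr = 1;

	if (!n)
		return;
	if (dst > src) {	/* overlap-safe: copy backwards */
		incr = -1;
		d += n - 1;
		s += n - 1;
	}
	while (n--) {
		*d = *s;
		d += incr;
		s += incr;
	}
}

int main(void)
{
	char buf[] = "abcdef";

	memmove_model(buf + 1, buf, 5);		/* overlapping move */
	printf("%s\n", buf);			/* "aabcde" */
	return 0;
}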
......@@ -42,7 +42,7 @@ __PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
......@@ -123,8 +123,14 @@ _PIF_WORK = (_PIF_PER_TRAP)
.macro LAST_BREAK scratch
srag \scratch,%r10,23
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
jz .+10
stg %r10,__TI_last_break(%r12)
stg %r10,__TASK_thread+__THREAD_last_break(%r12)
#else
jz .+14
lghi \scratch,__TASK_thread
stg %r10,__THREAD_last_break(\scratch,%r12)
#endif
.endm
.macro REENABLE_IRQS
......@@ -186,14 +192,13 @@ ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lgr %r1,%r2
aghi %r1,__TASK_thread # thread_struct of prev task
lg %r5,__TASK_thread_info(%r3) # get thread_info of next
lg %r5,__TASK_stack(%r3) # start of kernel stack of next
stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
lgr %r1,%r3
aghi %r1,__TASK_thread # thread_struct of next task
lgr %r15,%r5
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
......@@ -274,7 +279,7 @@ ENTRY(system_call)
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
lghi %r14,_PIF_SYSCALL
.Lsysc_per:
lg %r15,__LC_KERNEL_STACK
......@@ -288,7 +293,13 @@ ENTRY(system_call)
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
lg %r10,__TI_sysc_table(%r12) # address of system call table
# load address of system call table
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
lg %r10,__TASK_thread+__THREAD_sysc_table(%r12)
#else
lghi %r13,__TASK_thread
lg %r10,__THREAD_sysc_table(%r13,%r12)
#endif
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz .Lsysc_nr_ok
......@@ -389,7 +400,6 @@ ENTRY(system_call)
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
jno .Lsysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
......@@ -457,7 +467,7 @@ ENTRY(system_call)
#
ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
brasl %r14,schedule_tail
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
......@@ -478,7 +488,7 @@ ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
......@@ -501,7 +511,7 @@ ENTRY(pgm_check_handler)
2: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
lg %r15,__LC_KERNEL_STACK
lg %r14,__TI_task(%r12)
lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
......@@ -567,7 +577,7 @@ ENTRY(io_int_handler)
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
......@@ -626,7 +636,7 @@ ENTRY(io_int_handler)
jo .Lio_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
icm %r0,15,__LC_PREEMPT_COUNT
jnz .Lio_restore # preemption is disabled
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jno .Lio_restore
......@@ -741,7 +751,7 @@ ENTRY(ext_int_handler)
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
......@@ -798,13 +808,10 @@ ENTRY(save_fpu_regs)
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
bor %r14
stfpc __THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
jz .Lsave_fpu_regs_fp # no -> store FP regs
.Lsave_fpu_regs_vx_low:
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
.Lsave_fpu_regs_vx_high:
VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
j .Lsave_fpu_regs_done # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
......@@ -851,9 +858,7 @@ load_fpu_regs:
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
jz .Lload_fpu_regs_fp # -> no VX, load FP regs
.Lload_fpu_regs_vx:
VLM %v0,%v15,0,%r4
.Lload_fpu_regs_vx_high:
VLM %v16,%v31,256,%r4
j .Lload_fpu_regs_done
.Lload_fpu_regs_fp:
......@@ -889,7 +894,7 @@ ENTRY(mcck_int_handler)
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
......@@ -948,7 +953,7 @@ ENTRY(mcck_int_handler)
.Lmcck_panic:
lg %r15,__LC_PANIC_STACK
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
#
......@@ -1085,7 +1090,7 @@ cleanup_critical:
jhe 0f
# set up saved registers r10 and r12
stg %r10,16(%r11) # r10 last break
stg %r12,32(%r11) # r12 thread-info pointer
stg %r12,32(%r11) # r12 task struct pointer
0: # check if the user time update has been done
clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
......@@ -1106,7 +1111,9 @@ cleanup_critical:
lg %r9,16(%r11)
srag %r9,%r9,23
jz 0f
mvc __TI_last_break(8,%r12),16(%r11)
lgr %r9,%r12
aghi %r9,__TASK_thread
mvc __THREAD_last_break(8,%r9),16(%r11)
0: # set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
......
......@@ -315,7 +315,7 @@ ENTRY(startup_kdump)
jg startup_continue
.Lstack:
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
.align 8
6: .long 0x7fffffff,0xffffffff
......
......@@ -32,11 +32,10 @@ ENTRY(startup_continue)
#
# Setup stack
#
larl %r15,init_thread_union
stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
lg %r14,__TI_task(%r15) # cache current in lowcore
larl %r14,init_task
stg %r14,__LC_CURRENT
aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
larl %r15,init_thread_union
aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160
#
......
......@@ -1991,10 +1991,9 @@ void __init ipl_update_parameters(void)
diag308_set_works = 1;
}
void __init ipl_save_parameters(void)
void __init ipl_verify_parameters(void)
{
struct cio_iplinfo iplinfo;
void *src, *dst;
if (cio_get_iplinfo(&iplinfo))
return;
......@@ -2005,10 +2004,6 @@ void __init ipl_save_parameters(void)
if (!iplinfo.is_qdio)
return;
ipl_flags |= IPL_PARMBLOCK_VALID;
src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
dst = (void *)IPL_PARMBLOCK_ORIGIN;
memmove(dst, src, PAGE_SIZE);
S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}
static LIST_HEAD(rcall);
......
......@@ -168,7 +168,7 @@ void do_softirq_own_stack(void)
old = current_stack_pointer();
/* Check against async. stack address range. */
new = S390_lowcore.async_stack;
if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
/* Need to switch to the async. stack. */
new -= STACK_FRAME_OVERHEAD;
((struct stack_frame *) new)->back_chain = old;
......
......@@ -5,7 +5,8 @@
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/facility.h>
......@@ -183,4 +184,4 @@ static int __init lgr_init(void)
lgr_timer_set();
return 0;
}
module_init(lgr_init);
device_initcall(lgr_init);
......@@ -995,39 +995,36 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
regs.int_parm = CPU_MF_INT_SF_PRA;
sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
regs.psw.addr = sfr->basic.ia;
if (sfr->basic.T)
regs.psw.mask |= PSW_MASK_DAT;
if (sfr->basic.W)
regs.psw.mask |= PSW_MASK_WAIT;
if (sfr->basic.P)
regs.psw.mask |= PSW_MASK_PSTATE;
switch (sfr->basic.AS) {
case 0x0:
regs.psw.mask |= PSW_ASC_PRIMARY;
break;
case 0x1:
regs.psw.mask |= PSW_ASC_ACCREG;
break;
case 0x2:
regs.psw.mask |= PSW_ASC_SECONDARY;
break;
case 0x3:
regs.psw.mask |= PSW_ASC_HOME;
break;
}
psw_bits(regs.psw).ia = sfr->basic.ia;
psw_bits(regs.psw).t = sfr->basic.T;
psw_bits(regs.psw).w = sfr->basic.W;
psw_bits(regs.psw).p = sfr->basic.P;
psw_bits(regs.psw).as = sfr->basic.AS;
/*
* A non-zero guest program parameter indicates a guest
* sample.
* Note that some early samples or samples from guests without
* Use the hardware provided configuration level to decide if the
* sample belongs to a guest or host. If that is not available,
* fall back to the following heuristics:
* A non-zero guest program parameter always indicates a guest
* sample. Some early samples or samples from guests without
* lpp usage would be misaccounted to the host. We use the asn
* value as a heuristic to detect most of these guest samples.
* If the value differs from the host hpp value, we assume
* it to be a KVM guest.
* value as an addon heuristic to detect most of these guest samples.
* If the value differs from the host hpp value, we assume to be a
* KVM guest.
*/
if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
switch (sfr->basic.CL) {
case 1: /* logical partition */
sde_regs->in_guest = 0;
break;
case 2: /* virtual machine */
sde_regs->in_guest = 1;
break;
default: /* old machine, use heuristics */
if (sfr->basic.gpp ||
sfr->basic.prim_asn != (u16)sfr->basic.hpp)
sde_regs->in_guest = 1;
break;
}
overflow = 0;
if (perf_exclude_event(event, &regs, sde_regs))
......
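The new switch prefers the hardware-provided configuration level (CL) and only falls back to the gpp/asn heuristics on machines that do not report it. A compilable user-space model of that decision, with field names mirroring the diff and illustrative values:

#include <stdint.h>
#include <stdio.h>

struct basic_entry {
        unsigned int CL;   /* configuration level: 1 = LPAR, 2 = VM */
        uint64_t gpp;      /* guest program parameter */
        uint16_t prim_asn; /* primary ASN */
        uint64_t hpp;      /* host program parameter */
};

static int sample_in_guest(const struct basic_entry *b)
{
        switch (b->CL) {
        case 1: /* logical partition */
                return 0;
        case 2: /* virtual machine */
                return 1;
        default: /* old machine: fall back to the heuristics */
                return b->gpp || b->prim_asn != (uint16_t)b->hpp;
        }
}

int main(void)
{
        struct basic_entry lpar = { .CL = 1, .gpp = 1 };             /* CL wins */
        struct basic_entry host = { .CL = 0, .prim_asn = 5, .hpp = 5 };
        struct basic_entry kvm  = { .CL = 0, .gpp = 1 };

        printf("%d %d %d\n", sample_in_guest(&lpar),
               sample_in_guest(&host), sample_in_guest(&kvm)); /* 0 0 1 */
        return 0;
}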
......@@ -103,7 +103,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
{
struct thread_info *ti;
struct fake_frame
{
struct stack_frame sf;
......@@ -121,9 +120,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
/* Initialize per thread user and system timer values */
ti = task_thread_info(p);
ti->user_timer = 0;
ti->system_timer = 0;
p->thread.user_timer = 0;
p->thread.system_timer = 0;
frame->sf.back_chain = 0;
/* new return point is ret_from_fork */
......
......@@ -461,7 +461,7 @@ long arch_ptrace(struct task_struct *child, long request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
put_user(task_thread_info(child)->last_break,
put_user(child->thread.last_break,
(unsigned long __user *) data);
return 0;
case PTRACE_ENABLE_TE:
......@@ -811,7 +811,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
return 0;
case PTRACE_GET_LAST_BREAK:
put_user(task_thread_info(child)->last_break,
put_user(child->thread.last_break,
(unsigned int __user *) data);
return 0;
}
......@@ -997,10 +997,10 @@ static int s390_last_break_get(struct task_struct *target,
if (count > 0) {
if (kbuf) {
unsigned long *k = kbuf;
*k = task_thread_info(target)->last_break;
*k = target->thread.last_break;
} else {
unsigned long __user *u = ubuf;
if (__put_user(task_thread_info(target)->last_break, u))
if (__put_user(target->thread.last_break, u))
return -EFAULT;
}
}
......@@ -1113,7 +1113,7 @@ static int s390_system_call_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
unsigned int *data = &task_thread_info(target)->system_call;
unsigned int *data = &target->thread.system_call;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
......@@ -1123,7 +1123,7 @@ static int s390_system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned int *data = &task_thread_info(target)->system_call;
unsigned int *data = &target->thread.system_call;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
data, 0, sizeof(unsigned int));
}
......@@ -1327,7 +1327,7 @@ static int s390_compat_last_break_get(struct task_struct *target,
compat_ulong_t last_break;
if (count > 0) {
last_break = task_thread_info(target)->last_break;
last_break = target->thread.last_break;
if (kbuf) {
unsigned long *k = kbuf;
*k = last_break;
......
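PTRACE_GET_LAST_BREAK itself is unchanged for userspace; only the kernel-side source of the value moves from thread_info to thread_struct. A hedged tracer-side sketch (s390 only, the tracee must already be ptrace-stopped; the request number is the arch/s390 uapi value):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GET_LAST_BREAK
#define PTRACE_GET_LAST_BREAK 0x5006 /* arch/s390 uapi ptrace.h value */
#endif

/* The kernel put_user()s the last breaking-event address through the
 * 'data' pointer, now reading it from child->thread.last_break. */
static int read_last_break(pid_t pid, unsigned long *addr)
{
        if (ptrace(PTRACE_GET_LAST_BREAK, pid, NULL, addr) == -1) {
                perror("PTRACE_GET_LAST_BREAK");
                return -1;
        }
        return 0;
}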
......@@ -35,6 +35,7 @@
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
......@@ -303,7 +304,7 @@ static void __init setup_lowcore(void)
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS |
......@@ -324,15 +325,15 @@ static void __init setup_lowcore(void)
lc->kernel_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
lc->preempt_count = S390_lowcore.preempt_count;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
......@@ -349,7 +350,7 @@ static void __init setup_lowcore(void)
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
restart_stack += ASYNC_SIZE;
/*
......@@ -412,7 +413,7 @@ static void __init setup_resources(void)
bss_resource.end = (unsigned long) &__bss_stop - 1;
for_each_memblock(memory, reg) {
res = alloc_bootmem_low(sizeof(*res));
res = memblock_virt_alloc(sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
......@@ -426,7 +427,7 @@ static void __init setup_resources(void)
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
sub_res = alloc_bootmem_low(sizeof(*sub_res));
sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
......@@ -445,7 +446,7 @@ static void __init setup_resources(void)
* part of the System RAM resource.
*/
if (crashk_res.end) {
memblock_add(crashk_res.start, resource_size(&crashk_res));
memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
memblock_reserve(crashk_res.start, resource_size(&crashk_res));
insert_resource(&iomem_resource, &crashk_res);
}
......@@ -903,6 +904,7 @@ void __init setup_arch(char **cmdline_p)
setup_memory_end();
setup_memory();
dma_contiguous_reserve(memory_end);
check_initrd();
reserve_crashkernel();
......@@ -921,6 +923,8 @@ void __init setup_arch(char **cmdline_p)
cpu_detect_mhz_feature();
cpu_init();
numa_setup();
smp_detect_cpus();
topology_init_early();
/*
* Create kernel page tables and switch to virtual addressing.
......
......@@ -359,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
regs->gprs[6] = task_thread_info(current)->last_break;
regs->gprs[6] = current->thread.last_break;
}
return 0;
}
......@@ -430,7 +430,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
regs->gprs[2] = ksig->sig;
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
regs->gprs[5] = current->thread.last_break;
return 0;
}
......@@ -467,13 +467,13 @@ void do_signal(struct pt_regs *regs)
* the debugger may change all our registers, including the system
* call information.
*/
current_thread_info()->system_call =
current->thread.system_call =
test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
if (current_thread_info()->system_call) {
regs->int_code = current_thread_info()->system_call;
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
/* Check for system call restarting. */
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
......@@ -506,8 +506,8 @@ void do_signal(struct pt_regs *regs)
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
if (current_thread_info()->system_call) {
regs->int_code = current_thread_info()->system_call;
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
/* Restart with sys_restart_syscall */
......
......@@ -19,6 +19,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
......@@ -259,16 +260,14 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
struct lowcore *lc = pcpu->lowcore;
struct thread_info *ti = task_thread_info(tsk);
lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid;
lc->user_timer = ti->user_timer;
lc->system_timer = ti->system_timer;
lc->user_timer = tsk->thread.user_timer;
lc->system_timer = tsk->thread.system_timer;
lc->steal_timer = 0;
}
......@@ -662,14 +661,12 @@ int smp_cpu_get_polarization(int cpu)
return pcpu_devices[cpu].polarization;
}
static struct sclp_core_info *smp_get_core_info(void)
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
static int use_sigp_detection;
struct sclp_core_info *info;
int address;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info && (use_sigp_detection || sclp_get_core_info(info))) {
if (use_sigp_detection || sclp_get_core_info(info, early)) {
use_sigp_detection = 1;
for (address = 0;
address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
......@@ -683,7 +680,6 @@ static struct sclp_core_info *smp_get_core_info(void)
}
info->combined = info->configured;
}
return info;
}
static int smp_add_present_cpu(int cpu);
......@@ -724,17 +720,15 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
return nr;
}
static void __init smp_detect_cpus(void)
void __init smp_detect_cpus(void)
{
unsigned int cpu, mtid, c_cpus, s_cpus;
struct sclp_core_info *info;
u16 address;
/* Get CPU information */
info = smp_get_core_info();
if (!info)
panic("smp_detect_cpus failed to allocate memory\n");
info = memblock_virt_alloc(sizeof(*info), 8);
smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
address = stap();
......@@ -770,7 +764,7 @@ static void __init smp_detect_cpus(void)
get_online_cpus();
__smp_rescan_cpus(info, 0);
put_online_cpus();
kfree(info);
memblock_free_early((unsigned long)info, sizeof(*info));
}
/*
......@@ -807,7 +801,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
base = cpu - (cpu % (smp_cpu_mtid + 1));
base = smp_get_base_cpu(cpu);
for (i = 0; i <= smp_cpu_mtid; i++) {
if (base + i < nr_cpu_ids)
if (cpu_online(base + i))
......@@ -907,7 +901,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
smp_detect_cpus();
}
void __init smp_prepare_boot_cpu(void)
......@@ -973,7 +966,7 @@ static ssize_t cpu_configure_store(struct device *dev,
rc = -EBUSY;
/* disallow configuration changes of online cpus and cpu 0 */
cpu = dev->id;
cpu -= cpu % (smp_cpu_mtid + 1);
cpu = smp_get_base_cpu(cpu);
if (cpu == 0)
goto out;
for (i = 0; i <= smp_cpu_mtid; i++)
......@@ -1106,9 +1099,10 @@ int __ref smp_rescan_cpus(void)
struct sclp_core_info *info;
int nr;
info = smp_get_core_info();
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
smp_get_core_info(info, 0);
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
nr = __smp_rescan_cpus(info, 1);
......
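Several open-coded cpu - (cpu % (smp_cpu_mtid + 1)) computations in this file become smp_get_base_cpu() calls. A tiny model of the helper (thread count per core assumed):

#include <stdio.h>

static int smp_cpu_mtid = 1; /* assumed: 2 hardware threads per core */

/* first hardware thread of the core that 'cpu' belongs to */
static int smp_get_base_cpu(int cpu)
{
        return cpu - (cpu % (smp_cpu_mtid + 1));
}

int main(void)
{
        for (int cpu = 0; cpu < 6; cpu++)
                printf("cpu %d -> base %d\n", cpu, smp_get_base_cpu(cpu));
        return 0; /* bases: 0 0 2 2 4 4 */
}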
......@@ -194,7 +194,7 @@ pgm_check_entry:
/* Suspend CPU not available -> panic */
larl %r15,init_thread_union
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
larl %r2,.Lpanic_string
larl %r3,_sclp_print_early
lghi %r1,0
......
......@@ -56,6 +56,20 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
}
EXPORT_SYMBOL(stsi);
static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
{
switch (encoding) {
case 1: /* EBCDIC */
EBCASC(name, len);
break;
case 2: /* UTF-8 */
break;
default:
return false;
}
return true;
}
static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
{
int i;
......@@ -207,24 +221,19 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
seq_printf(m, "LPAR CPUs S-MTID: %d\n", info->mt_stid);
seq_printf(m, "LPAR CPUs PS-MTID: %d\n", info->mt_psmtid);
}
if (convert_ext_name(info->vsne, info->ext_name, sizeof(info->ext_name))) {
seq_printf(m, "LPAR Extended Name: %-.256s\n", info->ext_name);
seq_printf(m, "LPAR UUID: %pUb\n", &info->uuid);
}
}
static void print_ext_name(struct seq_file *m, int lvl,
struct sysinfo_3_2_2 *info)
{
if (info->vm[lvl].ext_name_encoding == 0)
return;
if (info->ext_names[lvl][0] == 0)
return;
switch (info->vm[lvl].ext_name_encoding) {
case 1: /* EBCDIC */
EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
break;
case 2: /* UTF-8 */
break;
default:
size_t len = sizeof(info->ext_names[lvl]);
if (!convert_ext_name(info->vm[lvl].evmne, info->ext_names[lvl], len))
return;
}
seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl,
info->ext_names[lvl]);
}
......
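Once convert_ext_name() accepts the encoding, the extended partition name and UUID become visible in /proc/sysinfo. A small reader that simply echoes those lines on a machine that provides them (field labels taken from the seq_printf calls above):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/sysinfo", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "Extended Name") || strstr(line, "UUID"))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}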
......@@ -59,19 +59,27 @@ ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
EXPORT_SYMBOL(s390_epoch_delta_notifier);
unsigned char ptff_function_mask[16];
unsigned long lpar_offset;
unsigned long initial_leap_seconds;
static unsigned long long lpar_offset;
static unsigned long long initial_leap_seconds;
static unsigned long long tod_steering_end;
static long long tod_steering_delta;
/*
* Get time offsets with PTFF
*/
void __init ptff_init(void)
void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
/* Initialize TOD steering parameters */
tod_steering_end = sched_clock_base_cc;
vdso_data->ts_end = tod_steering_end;
if (!test_facility(28))
return;
ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
/* get LPAR offset */
......@@ -80,7 +88,7 @@ void __init ptff_init(void)
/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
initial_leap_seconds = (unsigned long)
initial_leap_seconds = (unsigned long long)
((long) qui.old_leap * 4096000000L);
}
......@@ -123,18 +131,6 @@ void clock_comparator_work(void)
cd->event_handler(cd);
}
/*
* Fixup the clock comparator.
*/
static void fixup_clock_comparator(unsigned long long delta)
{
/* If nobody is waiting there's nothing to fix. */
if (S390_lowcore.clock_comparator == -1ULL)
return;
S390_lowcore.clock_comparator += delta;
set_clock_comparator(S390_lowcore.clock_comparator);
}
static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
......@@ -215,7 +211,21 @@ void read_boot_clock64(struct timespec64 *ts)
static cycle_t read_tod_clock(struct clocksource *cs)
{
return get_tod_clock();
unsigned long long now, adj;
preempt_disable(); /* protect from changes to steering parameters */
now = get_tod_clock();
adj = tod_steering_end - now;
if (unlikely((s64) adj >= 0))
/*
* manually steer by 1 cycle every 2^16 cycles. This
* corresponds to shifting the tod delta by 15. 1s is
* therefore steered in ~9h. The adjust will decrease
* over time, until it finally reaches 0.
*/
now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
preempt_enable();
return now;
}
static struct clocksource clocksource_tod = {
......@@ -384,6 +394,55 @@ static inline int check_sync_clock(void)
return rc;
}
/*
* Apply clock delta to the global data structures.
* This is called once on the CPU that performed the clock sync.
*/
static void clock_sync_global(unsigned long long delta)
{
unsigned long now, adj;
struct ptff_qto qto;
/* Fixup the monotonic sched clock. */
sched_clock_base_cc += delta;
/* Adjust TOD steering parameters. */
vdso_data->tb_update_count++;
now = get_tod_clock();
adj = tod_steering_end - now;
if (unlikely((s64) adj >= 0))
/* Calculate how much of the old adjustment is left. */
tod_steering_delta = (tod_steering_delta < 0) ?
-(adj >> 15) : (adj >> 15);
tod_steering_delta += delta;
if ((abs(tod_steering_delta) >> 48) != 0)
panic("TOD clock sync offset %lli is too large to drift\n",
tod_steering_delta);
tod_steering_end = now + (abs(tod_steering_delta) << 15);
vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
vdso_data->ts_end = tod_steering_end;
vdso_data->tb_update_count++;
/* Update LPAR offset. */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
lpar_offset = qto.tod_epoch_difference;
/* Call the TOD clock change notifier. */
atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
}
/*
* Apply clock delta to the per-CPU data structures of this CPU.
* This is called for each online CPU after the call to clock_sync_global.
*/
static void clock_sync_local(unsigned long long delta)
{
/* Add the delta to the clock comparator. */
if (S390_lowcore.clock_comparator != -1ULL) {
S390_lowcore.clock_comparator += delta;
set_clock_comparator(S390_lowcore.clock_comparator);
}
/* Adjust the last_update_clock time-stamp. */
S390_lowcore.last_update_clock += delta;
}
/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;
......@@ -397,31 +456,9 @@ static void __init time_init_wq(void)
struct clock_sync_data {
atomic_t cpus;
int in_sync;
unsigned long long fixup_cc;
unsigned long long clock_delta;
};
static void clock_sync_cpu(struct clock_sync_data *sync)
{
atomic_dec(&sync->cpus);
enable_sync_clock();
while (sync->in_sync == 0) {
__udelay(1);
/*
* A different cpu changes *in_sync. Therefore use
* barrier() to force memory access.
*/
barrier();
}
if (sync->in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
disable_sync_clock(NULL);
/*
* This round of TOD syncing is done. Set the clock comparator
* to the next tick and let the processor continue.
*/
fixup_clock_comparator(sync->fixup_cc);
}
/*
* Server Time Protocol (STP) code.
*/
......@@ -523,54 +560,46 @@ void stp_queue_work(void)
static int stp_sync_clock(void *data)
{
static int first;
struct clock_sync_data *sync = data;
unsigned long long clock_delta;
struct clock_sync_data *stp_sync;
struct ptff_qto qto;
static int first;
int rc;
stp_sync = data;
if (xchg(&first, 1) == 1) {
/* Slave */
clock_sync_cpu(stp_sync);
return 0;
}
/* Wait until all other cpus entered the sync function. */
while (atomic_read(&stp_sync->cpus) != 0)
cpu_relax();
enable_sync_clock();
rc = 0;
if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
if (rc == 0) {
/* fixup the monotonic sched clock */
sched_clock_base_cc += clock_delta;
if (ptff_query(PTFF_QTO) &&
ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
/* Update LPAR offset */
lpar_offset = qto.tod_epoch_difference;
atomic_notifier_call_chain(&s390_epoch_delta_notifier,
0, &clock_delta);
stp_sync->fixup_cc = clock_delta;
fixup_clock_comparator(clock_delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
if (rc == 0 && stp_info.tmd != 2)
rc = -EAGAIN;
if (xchg(&first, 1) == 0) {
/* Wait until all other cpus entered the sync function. */
while (atomic_read(&sync->cpus) != 0)
cpu_relax();
rc = 0;
if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
&clock_delta);
if (rc == 0) {
sync->clock_delta = clock_delta;
clock_sync_global(clock_delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
if (rc == 0 && stp_info.tmd != 2)
rc = -EAGAIN;
}
}
sync->in_sync = rc ? -EAGAIN : 1;
xchg(&first, 0);
} else {
/* Slave */
atomic_dec(&sync->cpus);
/* Wait for in_sync to be set. */
while (READ_ONCE(sync->in_sync) == 0)
__udelay(1);
}
if (rc) {
if (sync->in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
disable_sync_clock(NULL);
stp_sync->in_sync = -EAGAIN;
} else
stp_sync->in_sync = 1;
xchg(&first, 0);
/* Apply clock delta to per-CPU fields of this CPU. */
clock_sync_local(sync->clock_delta);
return 0;
}
......
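Taken together, clock_sync_global() and read_tod_clock() implement a simple steering scheme: the sync applies the delta at once, and readers back out the not-yet-amortized part of the jump at one unit per 2^16 units until tod_steering_end is reached. The vdso fast paths further down perform the same computation in assembly. A user-space model of the arithmetic (unit sizes illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t tod_steering_end;
static int64_t tod_steering_delta;

static void clock_sync(uint64_t now, int64_t delta)
{
        tod_steering_delta = delta;
        tod_steering_end = now + ((uint64_t)llabs(delta) << 15);
}

static uint64_t read_steered(uint64_t raw)
{
        uint64_t adj = tod_steering_end - raw;

        if ((int64_t)adj < 0)   /* steering finished */
                return raw;
        /* back out the part of the jump not yet amortized */
        return raw + (tod_steering_delta < 0 ? (adj >> 15) : -(adj >> 15));
}

int main(void)
{
        clock_sync(1000, 64);   /* clock jumped forward by 64 units */
        /* right after the sync: still shows the pre-jump time (936) */
        printf("%llu\n", (unsigned long long)read_steered(1000));
        /* at the end of the interval: raw value, fully steered */
        printf("%llu\n", (unsigned long long)read_steered(tod_steering_end));
        return 0;
}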
......@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
......@@ -41,15 +42,17 @@ static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);
/*
* Socket/Book linked lists and per_cpu(cpu_topology) updates are
* Socket/Book linked lists and cpu_topology updates are
* protected by "sched_domains_mutex".
*/
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;
DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
cpumask_t cpus_with_topology;
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
......@@ -97,7 +100,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
if (lcpu < 0)
continue;
for (i = 0; i <= smp_cpu_mtid; i++) {
topo = &per_cpu(cpu_topology, lcpu + i);
topo = &cpu_topology[lcpu + i];
topo->drawer_id = drawer->id;
topo->book_id = book->id;
topo->socket_id = socket->id;
......@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
cpumask_set_cpu(lcpu + i, &drawer->mask);
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
cpumask_set_cpu(lcpu + i, &cpus_with_topology);
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
}
......@@ -220,7 +224,7 @@ static void update_cpu_masks(void)
int cpu;
for_each_possible_cpu(cpu) {
topo = &per_cpu(cpu_topology, cpu);
topo = &cpu_topology[cpu];
topo->thread_mask = cpu_thread_map(cpu);
topo->core_mask = cpu_group_map(&socket_info, cpu);
topo->book_mask = cpu_group_map(&book_info, cpu);
......@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
topo->socket_id = cpu;
topo->book_id = cpu;
topo->drawer_id = cpu;
if (cpu_present(cpu))
cpumask_set_cpu(cpu, &cpus_with_topology);
}
}
numa_update_cpu_topology();
......@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
stsi(info, 15, 1, min(topology_max_mnest, 4));
}
int arch_update_cpu_topology(void)
static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
struct device *dev;
int cpu, rc = 0;
int rc = 0;
cpumask_clear(&cpus_with_topology);
if (MACHINE_HAS_TOPOLOGY) {
rc = 1;
store_topology(info);
......@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
topology_update_polarization_simple();
return rc;
}
int arch_update_cpu_topology(void)
{
struct device *dev;
int cpu, rc;
rc = __arch_update_cpu_topology();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
......@@ -394,23 +409,23 @@ int topology_cpu_init(struct cpu *cpu)
static const struct cpumask *cpu_thread_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).thread_mask;
return &cpu_topology[cpu].thread_mask;
}
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).core_mask;
return &cpu_topology[cpu].core_mask;
}
static const struct cpumask *cpu_book_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).book_mask;
return &cpu_topology[cpu].book_mask;
}
static const struct cpumask *cpu_drawer_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).drawer_mask;
return &cpu_topology[cpu].drawer_mask;
}
static int __init early_parse_topology(char *p)
......@@ -438,19 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
mask = mask->next;
}
}
static int __init s390_topology_init(void)
void __init topology_init_early(void)
{
struct sysinfo_15_1_x *info;
int i;
set_sched_topology(s390_topology);
if (!MACHINE_HAS_TOPOLOGY)
return 0;
tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
goto out;
tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
......@@ -460,10 +476,9 @@ static int __init s390_topology_init(void)
alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3);
set_sched_topology(s390_topology);
return 0;
out:
__arch_update_cpu_topology();
}
early_initcall(s390_topology_init);
static int __init topology_init(void)
{
......
......@@ -99,8 +99,27 @@ __kernel_clock_gettime:
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
stcke 0(%r15) /* Store TOD clock */
lm %r0,%r1,1(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
s %r0,1(%r15) /* ts_steering_end - now */
sl %r1,5(%r15)
brc 3,22f
ahi %r0,-1
22: ltr %r0,%r0 /* past end of steering? */
jm 24f
srdl %r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 23f
lcr %r0,%r0 /* negative TOD offset */
lcr %r1,%r1
je 23f
ahi %r0,-1
23: a %r0,1(%r15) /* add TOD timestamp */
al %r1,5(%r15)
brc 12,25f
ahi %r0,1
j 25f
24: lm %r0,%r1,1(%r15) /* load TOD timestamp */
25: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
ahi %r0,-1
......
......@@ -31,8 +31,27 @@ __kernel_gettimeofday:
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
stcke 0(%r15) /* Store TOD clock */
lm %r0,%r1,1(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
s %r0,1(%r15)
sl %r1,5(%r15)
brc 3,14f
ahi %r0,-1
14: ltr %r0,%r0 /* past end of steering? */
jm 16f
srdl %r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 15f
lcr %r0,%r0 /* negative TOD offset */
lcr %r1,%r1
je 15f
ahi %r0,-1
15: a %r0,1(%r15) /* add TOD timestamp */
al %r1,5(%r15)
brc 12,17f
ahi %r0,1
j 17f
16: lm %r0,%r1,1(%r15) /* load TOD timestamp */
17: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1
......
......@@ -83,8 +83,17 @@ __kernel_clock_gettime:
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stcke 0(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r1,1(%r15)
lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
slgr %r0,%r1 /* ts_steering_end - now */
ltgr %r0,%r0 /* past end of steering ? */
jm 17f
srlg %r0,%r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 18f
lcgr %r0,%r0 /* negative TOD offset */
18: algr %r1,%r0 /* add steering offset */
17: lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
......
......@@ -31,7 +31,16 @@ __kernel_gettimeofday:
jnz 0b
stcke 0(%r15) /* Store TOD clock */
lg %r1,1(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
lg %r0,__VDSO_TS_END(%r5) /* TOD steering end time */
slgr %r0,%r1 /* ts_steering_end - now */
ltgr %r0,%r0 /* past end of steering ? */
jm 6f
srlg %r0,%r0,15 /* 1 per 2^16 */
tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
jz 7f
lcgr %r0,%r0 /* negative TOD offset */
7: algr %r1,%r0 /* add steering offset */
6: sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
......
......@@ -96,7 +96,6 @@ static void update_mt_scaling(void)
*/
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
u64 user_scaled, system_scaled;
......@@ -119,13 +118,13 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
user = S390_lowcore.user_timer - ti->user_timer;
user = S390_lowcore.user_timer - tsk->thread.user_timer;
S390_lowcore.steal_timer -= user;
ti->user_timer = S390_lowcore.user_timer;
tsk->thread.user_timer = S390_lowcore.user_timer;
system = S390_lowcore.system_timer - ti->system_timer;
system = S390_lowcore.system_timer - tsk->thread.system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
tsk->thread.system_timer = S390_lowcore.system_timer;
user_scaled = user;
system_scaled = system;
......@@ -153,15 +152,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
void vtime_task_switch(struct task_struct *prev)
{
struct thread_info *ti;
do_account_vtime(prev, 0);
ti = task_thread_info(prev);
ti->user_timer = S390_lowcore.user_timer;
ti->system_timer = S390_lowcore.system_timer;
ti = task_thread_info(current);
S390_lowcore.user_timer = ti->user_timer;
S390_lowcore.system_timer = ti->system_timer;
prev->thread.user_timer = S390_lowcore.user_timer;
prev->thread.system_timer = S390_lowcore.system_timer;
S390_lowcore.user_timer = current->thread.user_timer;
S390_lowcore.system_timer = current->thread.system_timer;
}
/*
......@@ -181,7 +176,6 @@ void vtime_account_user(struct task_struct *tsk)
*/
void vtime_account_irq_enter(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, system, system_scaled;
timer = S390_lowcore.last_update_timer;
......@@ -193,9 +187,9 @@ void vtime_account_irq_enter(struct task_struct *tsk)
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling();
system = S390_lowcore.system_timer - ti->system_timer;
system = S390_lowcore.system_timer - tsk->thread.system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
tsk->thread.system_timer = S390_lowcore.system_timer;
system_scaled = system;
/* Do MT utilization scaling */
if (smp_cpu_mtid) {
......
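With thread_info gone, the per-task timer snapshots live in thread_struct while the running counters stay in the lowcore; accounting is always "running counter minus snapshot, then refresh the snapshot". A compact model of that pattern:

#include <stdint.h>
#include <stdio.h>

struct thread_struct { uint64_t user_timer, system_timer; };
struct lowcore { uint64_t user_timer, system_timer, steal_timer; };

static struct lowcore lc;

static uint64_t account_user(struct thread_struct *t)
{
        uint64_t user = lc.user_timer - t->user_timer;

        lc.steal_timer -= user;        /* time we ran was not stolen */
        t->user_timer = lc.user_timer; /* new snapshot */
        return user;
}

int main(void)
{
        struct thread_struct task = { .user_timer = 100 };

        lc.user_timer = 250;
        lc.steal_timer = 500;
        printf("accounted %llu user units\n",
               (unsigned long long)account_user(&task)); /* 150 */
        return 0;
}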
......@@ -7,6 +7,45 @@
#include <linux/linkage.h>
#include <asm/export.h>
/*
* void *memmove(void *dest, const void *src, size_t n)
*/
ENTRY(memmove)
ltgr %r4,%r4
lgr %r1,%r2
bzr %r14
clgr %r2,%r3
jnh .Lmemmove_forward
la %r5,0(%r4,%r3)
clgr %r2,%r5
jl .Lmemmove_reverse
.Lmemmove_forward:
aghi %r4,-1
srlg %r0,%r4,8
ltgr %r0,%r0
jz .Lmemmove_rest
.Lmemmove_loop:
mvc 0(256,%r1),0(%r3)
la %r1,256(%r1)
la %r3,256(%r3)
brctg %r0,.Lmemmove_loop
.Lmemmove_rest:
larl %r5,.Lmemmove_mvc
ex %r4,0(%r5)
br %r14
.Lmemmove_reverse:
aghi %r4,-1
.Lmemmove_reverse_loop:
ic %r0,0(%r4,%r3)
stc %r0,0(%r4,%r1)
brctg %r4,.Lmemmove_reverse_loop
ic %r0,0(%r4,%r3)
stc %r0,0(%r4,%r1)
br %r14
.Lmemmove_mvc:
mvc 0(1,%r1),0(%r3)
EXPORT_SYMBOL(memmove)
/*
* memset implementation
*
......
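The assembler routine picks its copy direction the classic way: forward (in 256-byte mvc chunks plus an ex-ed remainder) unless dest starts inside the source range, in which case it copies byte-wise from the end. A C model of the direction choice:

#include <stddef.h>
#include <stdio.h>

static void *memmove_model(void *dest, const void *src, size_t n)
{
        char *d = dest;
        const char *s = src;

        if (!n)
                return dest;
        if (d <= s || d >= s + n) {
                /* forward copy, like the mvc loop */
                while (n--)
                        *d++ = *s++;
        } else {
                /* dest overlaps the tail of src: copy backwards */
                while (n--)
                        d[n] = s[n];
        }
        return dest;
}

int main(void)
{
        char buf[16] = "abcdef";

        memmove_model(buf + 2, buf, 6);
        printf("%s\n", buf); /* ababcdef */
        return 0;
}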
......@@ -733,6 +733,7 @@ static void pfault_interrupt(struct ext_code ext_code,
* return to userspace schedule() to block. */
__set_current_state(TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
set_preempt_need_resched();
}
}
out:
......
......@@ -34,7 +34,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
return alloc_bootmem_align(size, size);
return (void *) memblock_alloc(size, size);
}
static inline pud_t *vmem_pud_alloc(void)
......@@ -61,17 +61,16 @@ pmd_t *vmem_pmd_alloc(void)
pte_t __ref *vmem_pte_alloc(void)
{
unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
pte_t *pte;
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
PTRS_PER_PTE * sizeof(pte_t));
pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
clear_table((unsigned long *) pte, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t));
clear_table((unsigned long *) pte, _PAGE_INVALID, size);
return pte;
}
......
......@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
......@@ -307,13 +308,11 @@ static struct toptree *toptree_new(int id, int nodes)
/*
* Allocate and initialize core to node mapping
*/
static void create_core_to_node_map(void)
static void __ref create_core_to_node_map(void)
{
int i;
emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
if (emu_cores == NULL)
panic("Could not allocate cores to node memory");
emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
......@@ -354,13 +353,13 @@ static struct toptree *toptree_from_topology(void)
phys = toptree_new(TOPTREE_ID_PHYS, 1);
for_each_online_cpu(cpu) {
top = &per_cpu(cpu_topology, cpu);
for_each_cpu(cpu, &cpus_with_topology) {
top = &cpu_topology[cpu];
node = toptree_get_child(phys, 0);
drawer = toptree_get_child(node, top->drawer_id);
book = toptree_get_child(drawer, top->book_id);
mc = toptree_get_child(book, top->socket_id);
core = toptree_get_child(mc, top->core_id);
core = toptree_get_child(mc, smp_get_base_cpu(cpu));
if (!drawer || !book || !mc || !core)
panic("NUMA emulation could not allocate memory");
cpumask_set_cpu(cpu, &core->mask);
......@@ -378,7 +377,7 @@ static void topology_add_core(struct toptree *core)
int cpu;
for_each_cpu(cpu, &core->mask) {
top = &per_cpu(cpu_topology, cpu);
top = &cpu_topology[cpu];
cpumask_copy(&top->thread_mask, &core->mask);
cpumask_copy(&top->core_mask, &core_mc(core)->mask);
cpumask_copy(&top->book_mask, &core_book(core)->mask);
......@@ -425,6 +424,27 @@ static void print_node_to_core_map(void)
}
}
static void pin_all_possible_cpus(void)
{
int core_id, node_id, cpu;
static int initialized;
if (initialized)
return;
print_node_to_core_map();
node_id = 0;
for_each_possible_cpu(cpu) {
core_id = smp_get_base_cpu(cpu);
if (emu_cores->to_node_id[core_id] != NODE_ID_FREE)
continue;
pin_core_to_node(core_id, node_id);
cpu_topology[cpu].node_id = node_id;
node_id = (node_id + 1) % emu_nodes;
}
print_node_to_core_map();
initialized = 1;
}
/*
* Transfer physical topology into a NUMA topology and modify CPU masks
* according to the NUMA topology.
......@@ -442,7 +462,7 @@ static void emu_update_cpu_topology(void)
toptree_free(phys);
toptree_to_topology(numa);
toptree_free(numa);
print_node_to_core_map();
pin_all_possible_cpus();
}
/*
......
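pin_all_possible_cpus() hands every core that the topology information did not place to the emulated nodes in round-robin order, so possible-but-not-present CPUs get a stable node assignment early. A runnable model (core size and node count assumed):

#include <stdio.h>

#define NODE_ID_FREE -1
#define NR_CPUS 8

static int threads_per_core = 2; /* assumed smp_cpu_mtid + 1 */
static int emu_nodes = 2;
static int to_node_id[NR_CPUS];

int main(void)
{
        int node_id = 0;

        for (int i = 0; i < NR_CPUS; i++)
                to_node_id[i] = NODE_ID_FREE;
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                int core_id = cpu - (cpu % threads_per_core);

                if (to_node_id[core_id] != NODE_ID_FREE)
                        continue;
                to_node_id[core_id] = node_id;
                node_id = (node_id + 1) % emu_nodes;
        }
        for (int i = 0; i < NR_CPUS; i += threads_per_core)
                printf("core %d -> node %d\n", i, to_node_id[i]);
        return 0; /* cores 0,2,4,6 -> nodes 0,1,0,1 */
}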
......@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/list_sort.h>
......@@ -25,10 +26,14 @@
* RETURNS:
* Pointer to the new tree node or NULL on error
*/
struct toptree *toptree_alloc(int level, int id)
struct toptree __ref *toptree_alloc(int level, int id)
{
struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
struct toptree *res;
if (slab_is_available())
res = kzalloc(sizeof(*res), GFP_KERNEL);
else
res = memblock_virt_alloc(sizeof(*res), 8);
if (!res)
return res;
......@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
* cleanly using toptree_remove. Possible children are freed
* recursively. In the end @cand itself is freed.
*/
void toptree_free(struct toptree *cand)
void __ref toptree_free(struct toptree *cand)
{
struct toptree *child, *tmp;
......@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
toptree_remove(cand);
toptree_for_each_child_safe(child, tmp, cand)
toptree_free(child);
kfree(cand);
if (slab_is_available())
kfree(cand);
else
memblock_free_early((unsigned long)cand, sizeof(*cand));
}
/**
......
......@@ -722,6 +722,11 @@ struct dev_pm_ops pcibios_pm_ops = {
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
if (zpci_unique_uid) {
zdev->domain = (u16) zdev->uid;
return 0;
}
spin_lock(&zpci_domain_lock);
zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
if (zdev->domain == ZPCI_NR_DEVICES) {
......@@ -735,6 +740,9 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
static void zpci_free_domain(struct zpci_dev *zdev)
{
if (zpci_unique_uid)
return;
spin_lock(&zpci_domain_lock);
clear_bit(zdev->domain, zpci_domain);
spin_unlock(&zpci_domain_lock);
......
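With system-defined UIDs the UID doubles as the PCI domain number and the bitmap allocator is bypassed, which is what keeps the device names stable across reboots. A user-space model of the allocation policy (bitmap shrunk to one 64-bit word, no locking):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ZPCI_NR_DEVICES 64

static bool zpci_unique_uid;
static uint64_t domain_bitmap; /* stand-in for the kernel bitmap + lock */

static int alloc_domain(uint32_t uid)
{
        if (zpci_unique_uid)
                return (uint16_t)uid; /* stable, firmware-defined */
        for (int i = 0; i < ZPCI_NR_DEVICES; i++)
                if (!(domain_bitmap & (1ULL << i))) {
                        domain_bitmap |= 1ULL << i;
                        return i;
                }
        return -1; /* out of domains */
}

int main(void)
{
        zpci_unique_uid = true;
        printf("%d\n", alloc_domain(0x17)); /* 23, from the UID */
        zpci_unique_uid = false;
        printf("%d %d\n", alloc_domain(0), alloc_domain(0)); /* 0 1 */
        return 0;
}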
......@@ -22,6 +22,8 @@
#include <asm/clp.h>
#include <uapi/asm/clp.h>
bool zpci_unique_uid;
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
struct {
......@@ -315,6 +317,7 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
goto out;
}
zpci_unique_uid = rrb->response.uid_checking;
WARN_ON_ONCE(rrb->response.entry_size !=
sizeof(struct clp_fh_list_entry));
......
......@@ -69,7 +69,7 @@ static void pci_sw_counter_show(struct seq_file *m)
int i;
for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
seq_printf(m, "%26s:\t%lu\n", pci_sw_names[i],
atomic64_read(counter));
}
......
......@@ -181,14 +181,17 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
/*
* With zdev->tlb_refresh == 0, rpcit is not required to establish new
* translations when previously invalid translation-table entries are
* validated. With lazy unmap, it also is skipped for previously valid
* validated. With lazy unmap, rpcit is skipped for previously valid
* entries, but a global rpcit is then required before any address can
* be re-used, i.e. after each iommu bitmap wrap-around.
*/
if (!zdev->tlb_refresh &&
(!s390_iommu_strict ||
((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
return 0;
if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
if (!zdev->tlb_refresh)
return 0;
} else {
if (!s390_iommu_strict)
return 0;
}
return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
PAGE_ALIGN(size));
......@@ -257,7 +260,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
if (offset == -1) {
if (!zdev->tlb_refresh && !s390_iommu_strict) {
if (!s390_iommu_strict) {
/* global flush before DMA addresses are reused */
if (zpci_refresh_global(zdev))
goto out_error;
......@@ -292,7 +295,7 @@ static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
if (!zdev->iommu_bitmap)
goto out;
if (zdev->tlb_refresh || s390_iommu_strict)
if (s390_iommu_strict)
bitmap_clear(zdev->iommu_bitmap, offset, size);
else
bitmap_set(zdev->lazy_bitmap, offset, size);
......@@ -388,8 +391,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
return NULL;
pa = page_to_phys(page);
memset((void *) pa, 0, size);
map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, map)) {
free_pages(pa, get_order(size));
......@@ -419,6 +420,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
enum dma_data_direction dir)
{
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
dma_addr_t dma_addr_base, dma_addr;
int flags = ZPCI_PTE_VALID;
......@@ -426,8 +428,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long pa = 0;
int ret;
size = PAGE_ALIGN(size);
dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
dma_addr_base = dma_alloc_address(dev, nr_pages);
if (dma_addr_base == DMA_ERROR_CODE)
return -ENOMEM;
......@@ -436,26 +437,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
flags |= ZPCI_TABLE_PROTECTED;
for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
pa = page_to_phys(sg_page(s)) + s->offset;
ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
pa = page_to_phys(sg_page(s));
ret = __dma_update_trans(zdev, pa, dma_addr,
s->offset + s->length, flags);
if (ret)
goto unmap;
dma_addr += s->length;
dma_addr += s->offset + s->length;
}
ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
if (ret)
goto unmap;
*handle = dma_addr_base;
atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
atomic64_add(nr_pages, &zdev->mapped_pages);
return ret;
unmap:
dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
ZPCI_PTE_INVALID);
dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
dma_free_address(dev, dma_addr_base, nr_pages);
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
return ret;
......@@ -564,7 +566,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
rc = -ENOMEM;
goto free_dma_table;
}
if (!zdev->tlb_refresh && !s390_iommu_strict) {
if (!s390_iommu_strict) {
zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->lazy_bitmap) {
rc = -ENOMEM;
......
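The restructured condition separates the two reasons RPCIT can be skipped: map operations skip it when the device needs no refresh for freshly validated entries, and unmap operations skip it in lazy (non-strict) mode until the global flush at bitmap wrap-around. As a truth-table model:

#include <stdbool.h>
#include <stdio.h>

static bool need_rpcit(bool validating, bool tlb_refresh, bool strict)
{
        if (validating)    /* previously invalid -> valid */
                return tlb_refresh;
        return strict;     /* unmap: lazy mode defers to the global
                              flush at bitmap wrap-around */
}

int main(void)
{
        printf("%d\n", need_rpcit(true, false, true));  /* 0: no refresh needed */
        printf("%d\n", need_rpcit(false, true, false)); /* 0: lazy unmap */
        printf("%d\n", need_rpcit(false, true, true));  /* 1: strict unmap */
        return 0;
}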
......@@ -9,7 +9,5 @@ define filechk_facilities.h
$(obj)/gen_facilities
endef
$(obj)/gen_facilities.o: $(srctree)/arch/s390/tools/gen_facilities.c
include/generated/facilities.h: $(obj)/gen_facilities FORCE
$(call filechk,facilities.h)
......@@ -7,13 +7,83 @@
*
*/
#define S390_GEN_FACILITIES_C
#include <strings.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <asm/facilities_src.h>
struct facility_def {
char *name;
int *bits;
};
static struct facility_def facility_defs[] = {
{
/*
* FACILITIES_ALS contains the list of facilities that are
* required to run a kernel that is compiled e.g. with
* -march=<machine>.
*/
.name = "FACILITIES_ALS",
.bits = (int[]){
#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
0, /* N3 instructions */
1, /* z/Arch mode installed */
#endif
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
18, /* long displacement facility */
#endif
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
7, /* stfle */
17, /* message security assist */
21, /* extended-immediate facility */
25, /* store clock fast */
#endif
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
27, /* mvcos */
32, /* compare and swap and store */
33, /* compare and swap and store 2 */
34, /* general extension facility */
35, /* execute extensions */
#endif
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
45, /* fast-BCR, etc. */
#endif
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
49, /* misc-instruction-extensions */
52, /* interlocked facility 2 */
#endif
#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
53, /* load-and-zero-rightmost-byte, etc. */
#endif
-1 /* END */
}
},
{
.name = "FACILITIES_KVM",
.bits = (int[]){
0, /* N3 instructions */
1, /* z/Arch mode installed */
2, /* z/Arch mode active */
3, /* DAT-enhancement */
4, /* idte segment table */
5, /* idte region table */
6, /* ASN-and-LX reuse */
7, /* stfle */
8, /* enhanced-DAT 1 */
9, /* sense-running-status */
10, /* conditional sske */
13, /* ipte-range */
14, /* nonquiescing key-setting */
73, /* transactional execution */
75, /* access-exception-fetch/store indication */
76, /* msa extension 3 */
77, /* msa extension 4 */
78, /* enhanced-DAT 2 */
-1 /* END */
}
},
};
static void print_facility_list(struct facility_def *def)
{
......
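From bit lists like the ones above the generator emits facility mask constants; facility numbers count from the most significant bit, so bit n lands in 64-bit word n/64 at position 63 - (n % 64). A hedged sketch of that packing (the real tool's output format may differ in detail):

#include <stdio.h>

static void print_mask(const char *name, const int *bits)
{
        unsigned long long words[4] = { 0 };
        int max = 0;

        for (int i = 0; bits[i] != -1; i++) {
                words[bits[i] / 64] |= 1ULL << (63 - (bits[i] % 64));
                if (bits[i] / 64 > max)
                        max = bits[i] / 64;
        }
        printf("#define %s", name);
        for (int w = 0; w <= max; w++)
                printf("%s_AC(0x%016llx,UL)", w ? "," : " ", words[w]);
        printf("\n");
}

int main(void)
{
        /* facilities 0, 1 and 18 set bits 63, 62 and 45 of word 0 */
        print_mask("FACILITIES_ALS", (int[]){ 0, 1, 18, -1 });
        return 0;
}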
......@@ -5,12 +5,13 @@
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
*
* License: GPL
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
......@@ -21,10 +22,6 @@
#define SLOT_NAME_SIZE 10
static LIST_HEAD(s390_hotplug_slot_list);
MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
MODULE_LICENSE("GPL");
static int zpci_fn_configured(enum zpci_state state)
{
return state == ZPCI_FN_STATE_CONFIGURED ||
......
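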
......@@ -152,7 +152,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
opm = ccw_device_get_path_mask(device->cdev);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (erp->lpm == 0)
erp->lpm = device->path_data.opm &
erp->lpm = dasd_path_get_opm(device) &
~(erp->irb.esw.esw0.sublog.lpum);
else
erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
......@@ -273,7 +273,7 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
erp->status = DASD_CQR_FILLED;
erp->retries = 10;
erp->lpm = erp->startdev->path_data.opm;
erp->lpm = dasd_path_get_opm(erp->startdev);
erp->function = dasd_3990_erp_action_1_sec;
}
return erp;
......@@ -1926,7 +1926,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
/* reset the lpm and the status to be able to
* try further actions. */
erp->lpm = erp->startdev->path_data.opm;
erp->lpm = dasd_path_get_opm(erp->startdev);
erp->status = DASD_CQR_NEED_ERP;
}
}
......@@ -2208,6 +2208,51 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_inspect_32 */
static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
{
int pos = pathmask_to_pos(lpum);
/* no remaining path, cannot disable */
if (!(dasd_path_get_opm(device) & ~lpum))
return;
dev_err(&device->cdev->dev,
"Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
device->path[pos].cssid, device->path[pos].chpid, lpum);
dasd_path_remove_opm(device, lpum);
dasd_path_add_ifccpm(device, lpum);
device->path[pos].errorclk = 0;
atomic_set(&device->path[pos].error_count, 0);
}
static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
{
struct dasd_device *device = erp->startdev;
__u8 lpum = erp->refers->irb.esw.esw1.lpum;
int pos = pathmask_to_pos(lpum);
unsigned long long clk;
if (!device->path_thrhld)
return;
clk = get_tod_clock();
/*
* check if the last error is longer ago than the timeout,
* if so reset error state
*/
if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC)
>= device->path_interval) {
atomic_set(&device->path[pos].error_count, 0);
device->path[pos].errorclk = 0;
}
atomic_inc(&device->path[pos].error_count);
device->path[pos].errorclk = clk;
/* threshold exceeded disable path if possible */
if (atomic_read(&device->path[pos].error_count) >=
device->path_thrhld)
dasd_3990_erp_disable_path(device, lpum);
}
/*
*****************************************************************************
* main ERP control functions (24 and 32 byte sense)
......@@ -2237,6 +2282,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
| SCHN_STAT_CHN_CTRL_CHK)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"channel or interface control check");
dasd_3990_erp_account_error(erp);
erp = dasd_3990_erp_action_4(erp, NULL);
}
return erp;
......
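dasd_3990_erp_account_error() implements a leaky threshold: the per-path IFCC count is reset once the previous error is older than path_interval, incremented otherwise, and the path is taken out of the operational mask when the count reaches path_thrhld. A model with small illustrative numbers (the driver defaults above are 256 errors / 300 seconds, with TOD timestamps instead of plain seconds):

#include <stdbool.h>
#include <stdio.h>

struct path_state {
        unsigned int error_count;
        unsigned long long errorclk; /* seconds here; TOD in the driver */
};

static unsigned int path_thrhld = 3;
static unsigned int path_interval = 300;

static bool account_error(struct path_state *p, unsigned long long now)
{
        if (now - p->errorclk >= path_interval)
                p->error_count = 0; /* last error too long ago: restart */
        p->error_count++;
        p->errorclk = now;
        return p->error_count >= path_thrhld; /* true: disable the path */
}

int main(void)
{
        struct path_state p = { 0 };

        printf("%d %d %d\n", account_error(&p, 1000),
               account_error(&p, 1001), account_error(&p, 1002)); /* 0 0 1 */
        return 0;
}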
......@@ -94,6 +94,8 @@
#define FCX_MAX_DATA_FACTOR 65536
#define DASD_ECKD_RCD_DATA_SIZE 256
#define DASD_ECKD_PATH_THRHLD 256
#define DASD_ECKD_PATH_INTERVAL 300
/*****************************************************************************
* SECTION: Type Definitions
......@@ -535,8 +537,7 @@ struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
int conf_len;
/* per path configuration data */
struct dasd_conf_data *path_conf_data[8];
/* pointers to specific parts in the conf_data */
struct dasd_ned *ned;
struct dasd_sneq *sneq;
......
......@@ -96,7 +96,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
"default ERP called (%i retries left)",
cqr->retries);
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
cqr->lpm = device->path_data.opm;
cqr->lpm = dasd_path_get_opm(device);
cqr->status = DASD_CQR_FILLED;
} else {
pr_err("%s: default ERP has run out of retries and failed\n",
......
......@@ -168,7 +168,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
device->default_expires = DASD_EXPIRES;
device->default_retries = FBA_DEFAULT_RETRIES;
device->path_data.opm = LPM_ANYPATH;
dasd_path_set_opm(device, LPM_ANYPATH);
readonly = dasd_device_is_ro(device);
if (readonly)
......
......@@ -9,7 +9,6 @@
* Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>
......@@ -1215,13 +1214,4 @@ static int __init tty3215_init(void)
tty3215_driver = driver;
return 0;
}
static void __exit tty3215_exit(void)
{
tty_unregister_driver(tty3215_driver);
put_tty_driver(tty3215_driver);
ccw_driver_unregister(&raw3215_ccw_driver);
}
module_init(tty3215_init);
module_exit(tty3215_exit);
device_initcall(tty3215_init);
......@@ -870,7 +870,7 @@ static int __init vmlogrdr_init(void)
goto cleanup;
for (i=0; i < MAXMINOR; ++i ) {
sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sys_ser[i].buffer) {
rc = -ENOMEM;
break;
......