Commit 6c09931b authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "The main new feature is machine support for System zEC12 including
  transactional memory, runtime instrumentation, support for scm block
  devices via eadm subchannels, and support for CEX4 crypto cards.

  In addition there are some nice improvements: bpf jit compiler, arch
  backend for cmpxchg_double, relative exception table entries, dasd
  partition detection independent from the dasd driver ioctls, and cpu
  cache information in /proc/cpuinfo and /sys/device/cpu.

  And last but not least a series of cleanup patches from Heiko."

Fix up trivial add-add conflict in arch/s390/Kconfig due to commit
b952741c ("cputime: Generalize CONFIG_VIRT_CPU_ACCOUNTING")

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (76 commits)
  s390: update defconfig
  s390/jump label,nss: let shared kernel support depend on !JUMP_LABEL
  s390/disassembler: fix decoding of risblg instruction
  s390/bpf,jit: add support for BPF_S_ANC_ALU_XOR_X instruction
  s390/traps: move call to print_modules() out of show_regs()
  s390/mm: mark free_initrd_mem() as __init
  s390/dasd: check count address during online setting
  drivers/s390/char/monreader.c: fix error return code
  s390/cmpxchg,percpu: implement cmpxchg_double()
  s390/percpu: implement this_cpu_add_return()
  s390/percpu: implement this_cpu_xchg()
  s390/kexec: remove CONFIG_KEXEC
  s390/irq: use designated initializers for irq class array
  s390: add uninitialized_var() to suppress false positive compiler warnings
  s390/crashdump: move fill_cpu_elf_notes() prototype to header file
  s390/process: add missing header include
  s390/ptrace: add missing ifdef
  s390/ipl,decrompressor: disable branch profiling
  s390/perf_events: compile only for CONFIG_64BIT
  s390/tape: remove even more tape block leftovers
  ...
parents b3eda8d0 c397031f
......@@ -5,3 +5,4 @@ obj-$(CONFIG_CRYPTO_HW) += crypto/
obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
obj-$(CONFIG_APPLDATA_BASE) += appldata/
obj-$(CONFIG_MATHEMU) += math-emu/
obj-y += net/
......@@ -11,6 +11,7 @@ targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
sizes.h head$(BITS).o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
......
......@@ -71,34 +71,37 @@ void *memset(void *s, int c, size_t n)
{
char *xs;
if (c == 0)
return __builtin_memset(s, 0, n);
xs = (char *) s;
if (n > 0)
do {
*xs++ = c;
} while (--n > 0);
xs = s;
while (n--)
*xs++ = c;
return s;
}
void *memcpy(void *__dest, __const void *__src, size_t __n)
void *memcpy(void *dest, const void *src, size_t n)
{
return __builtin_memcpy(__dest, __src, __n);
const char *s = src;
char *d = dest;
while (n--)
*d++ = *s++;
return dest;
}
void *memmove(void *__dest, __const void *__src, size_t __n)
void *memmove(void *dest, const void *src, size_t n)
{
char *d;
const char *s;
if (__dest <= __src)
return __builtin_memcpy(__dest, __src, __n);
d = __dest + __n;
s = __src + __n;
while (__n--)
*--d = *--s;
return __dest;
const char *s = src;
char *d = dest;
if (d <= s) {
while (n--)
*d++ = *s++;
} else {
d += n;
s += n;
while (n--)
*--d = *--s;
}
return dest;
}
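
(Editorial aside, not part of the patch: the rewritten helpers open-code the copy loops instead of deferring to the __builtin_* calls, and memmove now picks its copy direction from operand order. A hedged userspace sketch with libc's memmove — buffer contents invented — showing the overlap case the d > s branch protects:)

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8] = "abcdef";

	/* dest > src with overlap: a backward copy, as in the branch
	 * above, keeps buf[1] from being clobbered before it is read. */
	memmove(buf + 1, buf, 5);
	printf("%s\n", buf);	/* prints "aabcdef" */
	return 0;
}
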
static void error(char *x)
......
......@@ -16,8 +16,8 @@ CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEMCG=y
CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
......@@ -32,20 +32,19 @@ CONFIG_EXPERT=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_BINFMT_MISC=m
CONFIG_CMM=m
CONFIG_HZ_100=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
CONFIG_PACKET=y
CONFIG_UNIX=y
......@@ -75,6 +74,7 @@ CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_BPF_JIT=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=m
......@@ -121,7 +121,6 @@ CONFIG_DEBUG_NOTIFIERS=y
CONFIG_RCU_TRACE=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
CONFIG_LATENCYTOP=y
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_BLK_DEV_IO_TRACE=y
......@@ -173,3 +172,4 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRC7=m
CONFIG_CMM=m
......@@ -70,7 +70,7 @@ static inline int appldata_asm(struct appldata_product_id *id,
int ry;
if (!MACHINE_IS_VM)
return -ENOSYS;
return -EOPNOTSUPP;
parm_list.diag = 0xdc;
parm_list.function = fn;
parm_list.parlist_length = sizeof(parm_list);
......
......@@ -125,32 +125,4 @@ struct chsc_cpd_info {
#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
#ifdef __KERNEL__
struct css_general_char {
u64 : 12;
u32 dynio : 1; /* bit 12 */
u32 : 28;
u32 aif : 1; /* bit 41 */
u32 : 3;
u32 mcss : 1; /* bit 45 */
u32 fcs : 1; /* bit 46 */
u32 : 1;
u32 ext_mb : 1; /* bit 48 */
u32 : 7;
u32 aif_tdd : 1; /* bit 56 */
u32 : 1;
u32 qebsm : 1; /* bit 58 */
u32 : 8;
u32 aif_osa : 1; /* bit 67 */
u32 : 14;
u32 cib : 1; /* bit 82 */
u32 : 5;
u32 fcx : 1; /* bit 88 */
u32 : 7;
}__attribute__((packed));
extern struct css_general_char css_general_characteristics;
#endif /* __KERNEL__ */
#endif
......@@ -79,6 +79,18 @@ struct erw {
__u32 res16 : 16;
} __attribute__ ((packed));
/**
* struct erw_eadm - EADM Subchannel extended report word
* @b: aob error
* @r: arsb error
*/
struct erw_eadm {
__u32 : 16;
__u32 b : 1;
__u32 r : 1;
__u32 : 14;
} __packed;
/**
* struct sublog - subchannel logout area
* @res0: reserved
......@@ -169,10 +181,23 @@ struct esw3 {
__u32 zeros[3];
} __attribute__ ((packed));
/**
* struct esw_eadm - EADM Subchannel Extended Status Word (ESW)
* @sublog: subchannel logout
* @erw: extended report word
*/
struct esw_eadm {
__u32 sublog;
struct erw_eadm erw;
__u32 : 32;
__u32 : 32;
__u32 : 32;
} __packed;
/**
* struct irb - interruption response block
* @scsw: subchannel status word
 * @esw: extended status word, 4 formats
 * @esw: extended status word
* @ecw: extended control word
*
* The irb that is handed to the device driver when an interrupt occurs. For
......@@ -191,6 +216,7 @@ struct irb {
struct esw1 esw1;
struct esw2 esw2;
struct esw3 esw3;
struct esw_eadm eadm;
} esw;
__u8 ecw[32];
} __attribute__ ((packed,aligned(4)));
......
......@@ -7,7 +7,9 @@
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
#include <linux/mmdebug.h>
#include <linux/types.h>
#include <linux/bug.h>
extern void __xchg_called_with_bad_pointer(void);
......@@ -203,6 +205,65 @@ static inline unsigned long long __cmpxchg64(void *ptr,
})
#endif /* CONFIG_64BIT */
#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
({ \
register __typeof__(*(p1)) __old1 asm("2") = (o1); \
register __typeof__(*(p2)) __old2 asm("3") = (o2); \
register __typeof__(*(p1)) __new1 asm("4") = (n1); \
register __typeof__(*(p2)) __new2 asm("5") = (n2); \
int cc; \
asm volatile( \
insn " %[old],%[new],%[ptr]\n" \
" ipm %[cc]\n" \
" srl %[cc],28" \
: [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
: [new] "d" (__new1), "d" (__new2), \
[ptr] "Q" (*(p1)), "Q" (*(p2)) \
: "memory", "cc"); \
!cc; \
})
#define __cmpxchg_double_4(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cds")
#define __cmpxchg_double_8(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cdsg")
extern void __cmpxchg_double_called_with_bad_pointer(void);
#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
int __ret; \
switch (sizeof(*(p1))) { \
case 4: \
__ret = __cmpxchg_double_4(p1, p2, o1, o2, n1, n2); \
break; \
case 8: \
__ret = __cmpxchg_double_8(p1, p2, o1, o2, n1, n2); \
break; \
default: \
__cmpxchg_double_called_with_bad_pointer(); \
} \
__ret; \
})
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
__typeof__(p1) __p1 = (p1); \
__typeof__(p2) __p2 = (p2); \
int __ret; \
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
if (sizeof(long) == 4) \
__ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \
else \
__ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
__ret; \
})
#define system_has_cmpxchg_double() 1
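
(Editorial aside: a hedged usage sketch of the new primitive — struct and field names are invented. cmpxchg_double() compares and swaps two adjacent long-sized words in one cds/cdsg and yields nonzero on success, which is why the VM_BUG_ON above insists the second word directly follows the first.)

/* Sketch only; assumes the s390 <asm/cmpxchg.h> defined above. */
struct tagged_head {
	unsigned long first;	/* must be long-sized ...       */
	unsigned long tag;	/* ... and immediately adjacent */
};

static int update_head(struct tagged_head *h,
		       unsigned long old_first, unsigned long old_tag,
		       unsigned long new_first)
{
	/* Returns 1 if both words still held the expected values and
	 * were replaced as a single unit (tag bumped to defeat ABA). */
	return cmpxchg_double(&h->first, &h->tag,
			      old_first, old_tag,
			      new_first, old_tag + 1);
}
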
#include <asm-generic/cmpxchg-local.h>
static inline unsigned long __cmpxchg_local(void *ptr,
......
......@@ -21,11 +21,15 @@
#define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */
#define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */
#define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */
#define CPU_MF_INT_RI_HALTED (1 << 5) /* run-time instr. halted */
#define CPU_MF_INT_RI_BUF_FULL (1 << 4) /* run-time instr. program
buffer full */
#define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA)
#define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \
CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \
CPU_MF_INT_SF_LSDA)
#define CPU_MF_INT_RI_MASK (CPU_MF_INT_RI_HALTED|CPU_MF_INT_RI_BUF_FULL)
/* CPU measurement facility support */
static inline int cpum_cf_avail(void)
......
#ifndef _ASM_CSS_CHARS_H
#define _ASM_CSS_CHARS_H
#include <linux/types.h>
#ifdef __KERNEL__
struct css_general_char {
u64 : 12;
u32 dynio : 1; /* bit 12 */
u32 : 4;
u32 eadm : 1; /* bit 17 */
u32 : 23;
u32 aif : 1; /* bit 41 */
u32 : 3;
u32 mcss : 1; /* bit 45 */
u32 fcs : 1; /* bit 46 */
u32 : 1;
u32 ext_mb : 1; /* bit 48 */
u32 : 7;
u32 aif_tdd : 1; /* bit 56 */
u32 : 1;
u32 qebsm : 1; /* bit 58 */
u32 : 8;
u32 aif_osa : 1; /* bit 67 */
u32 : 12;
u32 eadm_rf : 1; /* bit 80 */
u32 : 1;
u32 cib : 1; /* bit 82 */
u32 : 5;
u32 fcx : 1; /* bit 88 */
u32 : 19;
u32 alt_ssi : 1; /* bit 108 */
} __packed;
extern struct css_general_char css_general_characteristics;
#endif /* __KERNEL__ */
#endif
#ifndef _ASM_S390_EADM_H
#define _ASM_S390_EADM_H
#include <linux/types.h>
#include <linux/device.h>
struct arqb {
u64 data;
u16 fmt:4;
u16:12;
u16 cmd_code;
u16:16;
u16 msb_count;
u32 reserved[12];
} __packed;
#define ARQB_CMD_MOVE 1
struct arsb {
u16 fmt:4;
u32:28;
u8 ef;
u8:8;
u8 ecbi;
u8:8;
u8 fvf;
u16:16;
u8 eqc;
u32:32;
u64 fail_msb;
u64 fail_aidaw;
u64 fail_ms;
u64 fail_scm;
u32 reserved[4];
} __packed;
struct msb {
u8 fmt:4;
u8 oc:4;
u8 flags;
u16:12;
u16 bs:4;
u32 blk_count;
u64 data_addr;
u64 scm_addr;
u64:64;
} __packed;
struct aidaw {
u8 flags;
u32 :24;
u32 :32;
u64 data_addr;
} __packed;
#define MSB_OC_CLEAR 0
#define MSB_OC_READ 1
#define MSB_OC_WRITE 2
#define MSB_OC_RELEASE 3
#define MSB_FLAG_BNM 0x80
#define MSB_FLAG_IDA 0x40
#define MSB_BS_4K 0
#define MSB_BS_1M 1
#define AOB_NR_MSB 124
struct aob {
struct arqb request;
struct arsb response;
struct msb msb[AOB_NR_MSB];
} __packed __aligned(PAGE_SIZE);
struct aob_rq_header {
struct scm_device *scmdev;
char data[0];
};
struct scm_device {
u64 address;
u64 size;
unsigned int nr_max_block;
struct device dev;
struct {
unsigned int persistence:4;
unsigned int oper_state:4;
unsigned int data_state:4;
unsigned int rank:4;
unsigned int release:1;
unsigned int res_id:8;
} __packed attrs;
};
#define OP_STATE_GOOD 1
#define OP_STATE_TEMP_ERR 2
#define OP_STATE_PERM_ERR 3
struct scm_driver {
struct device_driver drv;
int (*probe) (struct scm_device *scmdev);
int (*remove) (struct scm_device *scmdev);
void (*notify) (struct scm_device *scmdev);
void (*handler) (struct scm_device *scmdev, void *data, int error);
};
int scm_driver_register(struct scm_driver *scmdrv);
void scm_driver_unregister(struct scm_driver *scmdrv);
int scm_start_aob(struct aob *aob);
void scm_irq_handler(struct aob *aob, int error);
struct eadm_ops {
int (*eadm_start) (struct aob *aob);
struct module *owner;
};
int scm_get_ref(void);
void scm_put_ref(void);
void register_eadm_ops(struct eadm_ops *ops);
void unregister_eadm_ops(struct eadm_ops *ops);
#endif /* _ASM_S390_EADM_H */
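
(Editorial aside: a minimal hedged sketch of how a driver would hook into this interface — names and bodies are hypothetical; the real consumer is the scm block driver mentioned in the merge message.)

/* Assumes linux/module.h in addition to the header above. */
static int example_probe(struct scm_device *scmdev)
{
	/* Refuse devices that are not fully operational. */
	return scmdev->attrs.oper_state == OP_STATE_GOOD ? 0 : -ENODEV;
}

static int example_remove(struct scm_device *scmdev)
{
	return 0;
}

static void example_handler(struct scm_device *scmdev, void *data, int error)
{
	/* Completion of an AOB previously started with scm_start_aob(). */
}

static struct scm_driver example_scm_driver = {
	.drv = {
		.name	= "example_scm",	/* hypothetical name */
		.owner	= THIS_MODULE,
	},
	.probe	 = example_probe,
	.remove	 = example_remove,
	.handler = example_handler,
};

/* scm_driver_register(&example_scm_driver) from module init,
 * scm_driver_unregister() on exit. */
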
......@@ -101,6 +101,7 @@
#define HWCAP_S390_HPAGE 128
#define HWCAP_S390_ETF3EH 256
#define HWCAP_S390_HIGH_GPRS 512
#define HWCAP_S390_TE 1024
/*
* These are used to set parameters in the core dumps.
......@@ -212,4 +213,6 @@ int arch_setup_additional_pages(struct linux_binprm *, int);
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
#endif
......@@ -140,7 +140,7 @@ struct etr_ptff_qto {
/* Inline assembly helper functions */
static inline int etr_setr(struct etr_eacr *ctrl)
{
int rc = -ENOSYS;
int rc = -EOPNOTSUPP;
asm volatile(
" .insn s,0xb2160000,%1\n"
......@@ -154,7 +154,7 @@ static inline int etr_setr(struct etr_eacr *ctrl)
/* Stores a format 1 aib with 64 bytes */
static inline int etr_stetr(struct etr_aib *aib)
{
int rc = -ENOSYS;
int rc = -EOPNOTSUPP;
asm volatile(
" .insn s,0xb2170000,%1\n"
......@@ -169,7 +169,7 @@ static inline int etr_stetr(struct etr_aib *aib)
static inline int etr_steai(struct etr_aib *aib, unsigned int func)
{
register unsigned int reg0 asm("0") = func;
int rc = -ENOSYS;
int rc = -EOPNOTSUPP;
asm volatile(
" .insn s,0xb2b30000,%1\n"
......@@ -190,7 +190,7 @@ static inline int etr_ptff(void *ptff_block, unsigned int func)
{
register unsigned int reg0 asm("0") = func;
register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
int rc = -ENOSYS;
int rc = -EOPNOTSUPP;
asm volatile(
" .word 0x0104\n"
......
......@@ -19,6 +19,7 @@ enum interruption_class {
EXTINT_IUC,
EXTINT_CMS,
EXTINT_CMC,
EXTINT_CMR,
IOINT_CIO,
IOINT_QAI,
IOINT_DAS,
......@@ -30,6 +31,7 @@ enum interruption_class {
IOINT_CLW,
IOINT_CTC,
IOINT_APB,
IOINT_ADM,
IOINT_CSC,
NMI_NMI,
NR_IRQS,
......
......@@ -14,6 +14,7 @@
/* Regular I/O interrupts. */
#define IO_SCH_ISC 3 /* regular I/O subchannels */
#define CONSOLE_ISC 1 /* console I/O subchannel */
#define EADM_SCH_ISC 4 /* EADM subchannels */
#define CHSC_SCH_ISC 7 /* CHSC subchannels */
/* Adapter interrupts. */
#define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
......
......@@ -329,9 +329,13 @@ struct _lowcore {
__u8 pad_0x1338[0x1340-0x1338]; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
__u64 cregs_save_area[16]; /* 0x1380 */
__u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
/* Transaction abort diagnostic block */
__u8 pgm_tdb[256]; /* 0x1800 */
/* align to the top of the prefix area */
__u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */
__u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
} __packed;
#endif /* CONFIG_32BIT */
......
......@@ -57,7 +57,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
if (addressing_mode != HOME_SPACE_MODE) {
if (s390_user_mode != HOME_SPACE_MODE) {
/* Load primary space page table origin. */
asm volatile(LCTL_OPCODE" 1,1,%0\n"
: : "m" (S390_lowcore.user_asce) );
......
......@@ -20,7 +20,7 @@
#endif
#define arch_this_cpu_to_op(pcp, val, op) \
do { \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
pcp_op_T__ *ptr__; \
......@@ -39,13 +39,19 @@ do { \
} \
} while (prev__ != old__); \
preempt_enable(); \
} while (0)
new__; \
})
#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
......@@ -61,7 +67,7 @@ do { \
#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ ret__; \
......@@ -84,6 +90,44 @@ do { \
#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define arch_this_cpu_xchg(pcp, nval) \
({ \
typeof(pcp) *ptr__; \
typeof(pcp) ret__; \
preempt_disable(); \
ptr__ = __this_cpu_ptr(&(pcp)); \
ret__ = xchg(ptr__, nval); \
preempt_enable(); \
ret__; \
})
#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#ifdef CONFIG_64BIT
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#endif
#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
typeof(pcp1) o1__ = (o1), n1__ = (n1); \
typeof(pcp2) o2__ = (o2), n2__ = (n2); \
typeof(pcp1) *p1__; \
typeof(pcp2) *p2__; \
int ret__; \
preempt_disable(); \
p1__ = __this_cpu_ptr(&(pcp1)); \
p2__ = __this_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
preempt_enable(); \
ret__; \
})
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
#ifdef CONFIG_64BIT
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
#endif
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */
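
(Editorial aside: converting arch_this_cpu_to_op() from a do/while statement into a statement expression that yields new__ is what lets the same macro back both this_cpu_add() and the new this_cpu_add_return(). A hedged usage sketch — counter name invented:)

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_hits);

static unsigned long bump(void)
{
	/* add and observe the post-increment value in one step */
	return this_cpu_add_return(example_hits, 1);
}

static unsigned long drain(void)
{
	/* reset the counter and hand back what had accumulated */
	return this_cpu_xchg(example_hits, 0);
}
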
......@@ -11,12 +11,15 @@
#ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
/*
* Default implementation of macro that returns current
......@@ -75,11 +78,20 @@ struct thread_struct {
unsigned long gmap_addr; /* address of last gmap fault. */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
unsigned long per_flags; /* Flags to control debug behavior */
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait;
struct list_head list;
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
int ri_signum;
#ifdef CONFIG_64BIT
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
#endif
};
#define PER_FLAG_NO_TE 1UL /* Flag to disable transactions. */
typedef struct thread_struct thread_struct;
/*
......@@ -130,6 +142,12 @@ struct task_struct;
struct mm_struct;
struct seq_file;
#ifdef CONFIG_64BIT
extern void show_cacheinfo(struct seq_file *m);
#else
static inline void show_cacheinfo(struct seq_file *m) { }
#endif
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
......@@ -140,6 +158,7 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long thread_saved_pc(struct task_struct *t);
extern void show_code(struct pt_regs *regs);
extern void print_fn_code(unsigned char *code, unsigned long len);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
......@@ -331,23 +350,6 @@ extern void (*s390_base_ext_handler_fn)(void);
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
/*
* Helper macro for exception table entries
*/
#ifndef CONFIG_64BIT
#define EX_TABLE(_fault,_target) \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long " #_fault "," #_target "\n" \
".previous\n"
#else
#define EX_TABLE(_fault,_target) \
".section __ex_table,\"a\"\n" \
" .align 8\n" \
" .quad " #_fault "," #_target "\n" \
".previous\n"
#endif
extern int memcpy_real(void *, void *, size_t);
extern void memcpy_absolute(void *, void *, size_t);
......@@ -358,4 +360,25 @@ extern void memcpy_absolute(void *, void *, size_t);
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
}
#endif /* __ASM_S390_PROCESSOR_H */
/*
* Helper macro for exception table entries
*/
#define EX_TABLE(_fault, _target) \
".section __ex_table,\"a\"\n" \
".align 4\n" \
".long (" #_fault ") - .\n" \
".long (" #_target ") - .\n" \
".previous\n"
#else /* __ASSEMBLY__ */
#define EX_TABLE(_fault, _target) \
.section __ex_table,"a" ; \
.align 4 ; \
.long (_fault) - . ; \
.long (_target) - . ; \
.previous
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
......@@ -235,6 +235,7 @@ typedef struct
#define PSW_MASK_ASC 0x0000C000UL
#define PSW_MASK_CC 0x00003000UL
#define PSW_MASK_PM 0x00000F00UL
#define PSW_MASK_RI 0x00000000UL
#define PSW_MASK_EA 0x00000000UL
#define PSW_MASK_BA 0x00000000UL
......@@ -264,10 +265,11 @@ typedef struct
#define PSW_MASK_ASC 0x0000C00000000000UL
#define PSW_MASK_CC 0x0000300000000000UL
#define PSW_MASK_PM 0x00000F0000000000UL
#define PSW_MASK_RI 0x0000008000000000UL
#define PSW_MASK_EA 0x0000000100000000UL
#define PSW_MASK_BA 0x0000000080000000UL
#define PSW_MASK_USER 0x00003F0180000000UL
#define PSW_MASK_USER 0x00003F8180000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
......@@ -359,17 +361,19 @@ struct per_struct_kernel {
unsigned char access_id; /* PER trap access identification */
};
#define PER_EVENT_MASK 0xE9000000UL
#define PER_EVENT_MASK 0xEB000000UL
#define PER_EVENT_BRANCH 0x80000000UL
#define PER_EVENT_IFETCH 0x40000000UL
#define PER_EVENT_STORE 0x20000000UL
#define PER_EVENT_STORE_REAL 0x08000000UL
#define PER_EVENT_TRANSACTION_END 0x02000000UL
#define PER_EVENT_NULLIFICATION 0x01000000UL
#define PER_CONTROL_MASK 0x00a00000UL
#define PER_CONTROL_MASK 0x00e00000UL
#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL
#define PER_CONTROL_SUSPENSION 0x00400000UL
#define PER_CONTROL_ALTERATION 0x00200000UL
#endif
......@@ -483,6 +487,8 @@ typedef struct
#define PTRACE_GET_LAST_BREAK 0x5006
#define PTRACE_PEEK_SYSTEM_CALL 0x5007
#define PTRACE_POKE_SYSTEM_CALL 0x5008
#define PTRACE_ENABLE_TE 0x5009
#define PTRACE_DISABLE_TE 0x5010
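
(Editorial aside: a hedged userspace sketch of driving the new request — the value comes from the define above; glibc does not know the name, so it is redefined locally. PTRACE_ENABLE_TE works symmetrically.)

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_DISABLE_TE
#define PTRACE_DISABLE_TE 0x5010	/* from the hunk above */
#endif

static long disable_te(pid_t pid)
{
	long rc;

	if (ptrace(PTRACE_ATTACH, pid, 0, 0) == -1)
		return -1;
	waitpid(pid, NULL, 0);
	/* the kernel returns -EIO when the machine lacks the TE facility */
	rc = ptrace((enum __ptrace_request) PTRACE_DISABLE_TE, pid, 0, 0);
	ptrace(PTRACE_DETACH, pid, 0, 0);
	return rc;
}
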
/*
* PT_PROT definition is loosely based on hppa bsd definition in
......
#ifndef _RUNTIME_INSTR_H
#define _RUNTIME_INSTR_H
#define S390_RUNTIME_INSTR_START 0x1
#define S390_RUNTIME_INSTR_STOP 0x2
struct runtime_instr_cb {
__u64 buf_current;
__u64 buf_origin;
__u64 buf_limit;
__u32 valid : 1;
__u32 pstate : 1;
__u32 pstate_set_buf : 1;
__u32 home_space : 1;
__u32 altered : 1;
__u32 : 3;
__u32 pstate_sample : 1;
__u32 sstate_sample : 1;
__u32 pstate_collect : 1;
__u32 sstate_collect : 1;
__u32 : 1;
__u32 halted_int : 1;
__u32 int_requested : 1;
__u32 buffer_full_int : 1;
__u32 key : 4;
__u32 : 9;
__u32 rgs : 3;
__u32 mode : 4;
__u32 next : 1;
__u32 mae : 1;
__u32 : 2;
__u32 call_type_br : 1;
__u32 return_type_br : 1;
__u32 other_type_br : 1;
__u32 bc_other_type : 1;
__u32 emit : 1;
__u32 tx_abort : 1;
__u32 : 2;
__u32 bp_xn : 1;
__u32 bp_xt : 1;
__u32 bp_ti : 1;
__u32 bp_ni : 1;
__u32 suppr_y : 1;
__u32 suppr_z : 1;
__u32 dc_miss_extra : 1;
__u32 lat_lev_ignore : 1;
__u32 ic_lat_lev : 4;
__u32 dc_lat_lev : 4;
__u64 reserved1;
__u64 scaling_factor;
__u64 rsic;
__u64 reserved2;
} __packed __aligned(8);
extern struct runtime_instr_cb runtime_instr_empty_cb;
static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
{
asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */
: : "Q" (*cb));
}
static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
{
asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */
: "=Q" (*cb) : : "cc");
}
static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
{
#ifdef CONFIG_64BIT
if (cb_prev)
store_runtime_instr_cb(cb_prev);
#endif
}
static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
struct runtime_instr_cb *cb_prev)
{
#ifdef CONFIG_64BIT
if (cb_next)
load_runtime_instr_cb(cb_next);
else if (cb_prev)
load_runtime_instr_cb(&runtime_instr_empty_cb);
#endif
}
#ifdef CONFIG_64BIT
extern void exit_thread_runtime_instr(void);
#else
static inline void exit_thread_runtime_instr(void) { }
#endif
#endif /* _RUNTIME_INSTR_H */
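
(Editorial aside: the intended call pattern mirrors the switch_to hunk further below — the outgoing task's control block is snapshotted with STRIC, the incoming task's loaded with LRIC, and loading runtime_instr_empty_cb effectively parks the facility when the next task does not use it. A condensed sketch, assuming task_struct from linux/sched.h:)

static void switch_ri(struct task_struct *prev, struct task_struct *next)
{
	save_ri_cb(prev->thread.ri_cb);		/* STRIC, if RI was active */
	restore_ri_cb(next->thread.ri_cb,	/* LRIC next, or park via  */
		      prev->thread.ri_cb);	/* runtime_instr_empty_cb  */
}
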
/*
* Helper functions for scsw access.
*
* Copyright IBM Corp. 2008, 2009
* Copyright IBM Corp. 2008, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
......@@ -9,7 +9,7 @@
#define _ASM_S390_SCSW_H_
#include <linux/types.h>
#include <asm/chsc.h>
#include <asm/css_chars.h>
#include <asm/cio.h>
/**
......@@ -99,15 +99,47 @@ struct tm_scsw {
u32 schxs:8;
} __attribute__ ((packed));
/**
* struct eadm_scsw - subchannel status word for eadm subchannels
* @key: subchannel key
* @eswf: esw format
* @cc: deferred condition code
* @ectl: extended control
* @fctl: function control
* @actl: activity control
* @stctl: status control
* @aob: AOB address
* @dstat: device status
* @cstat: subchannel status
*/
struct eadm_scsw {
u32 key:4;
u32:1;
u32 eswf:1;
u32 cc:2;
u32:6;
u32 ectl:1;
u32:2;
u32 fctl:3;
u32 actl:7;
u32 stctl:5;
u32 aob;
u32 dstat:8;
u32 cstat:8;
u32:16;
} __packed;
/**
* union scsw - subchannel status word
* @cmd: command-mode SCSW
* @tm: transport-mode SCSW
* @eadm: eadm SCSW
*/
union scsw {
struct cmd_scsw cmd;
struct tm_scsw tm;
} __attribute__ ((packed));
struct eadm_scsw eadm;
} __packed;
#define SCSW_FCTL_CLEAR_FUNC 0x1
#define SCSW_FCTL_HALT_FUNC 0x2
......
......@@ -60,7 +60,7 @@ void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
#define SECONDARY_SPACE_MODE 2
#define HOME_SPACE_MODE 3
extern unsigned int addressing_mode;
extern unsigned int s390_user_mode;
/*
* Machine features detected in head.S
......@@ -80,6 +80,7 @@ extern unsigned int addressing_mode;
#define MACHINE_FLAG_LPAR (1UL << 12)
#define MACHINE_FLAG_SPP (1UL << 13)
#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
#define MACHINE_FLAG_TE (1UL << 15)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
......@@ -98,6 +99,7 @@ extern unsigned int addressing_mode;
#define MACHINE_HAS_PFMF (0)
#define MACHINE_HAS_SPP (0)
#define MACHINE_HAS_TOPOLOGY (0)
#define MACHINE_HAS_TE (0)
#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
......@@ -109,6 +111,7 @@ extern unsigned int addressing_mode;
#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#endif /* CONFIG_64BIT */
#define ZFCPDUMP_HSA_SIZE (32UL<<20)
......
......@@ -30,6 +30,8 @@ extern int smp_vcpu_scheduled(int cpu);
extern void smp_yield_cpu(int cpu);
extern void smp_yield(void);
extern void smp_stop_cpu(void);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
#else /* CONFIG_SMP */
......@@ -43,7 +45,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
func(data);
}
static inline int smp_find_processor_id(int address) { return 0; }
static inline int smp_find_processor_id(u16 address) { return 0; }
static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
......
......@@ -96,7 +96,6 @@ static inline char *strcat(char *dst, const char *src)
static inline char *strcpy(char *dst, const char *src)
{
#if __GNUC__ < 4
register int r0 asm("0") = 0;
char *ret = dst;
......@@ -106,14 +105,10 @@ static inline char *strcpy(char *dst, const char *src)
: "+&a" (dst), "+&a" (src) : "d" (r0)
: "cc", "memory");
return ret;
#else
return __builtin_strcpy(dst, src);
#endif
}
static inline size_t strlen(const char *s)
{
#if __GNUC__ < 4
register unsigned long r0 asm("0") = 0;
const char *tmp = s;
......@@ -122,9 +117,6 @@ static inline size_t strlen(const char *s)
" jo 0b"
: "+d" (r0), "+a" (tmp) : : "cc");
return r0 - (unsigned long) s;
#else
return __builtin_strlen(s);
#endif
}
static inline size_t strnlen(const char * s, size_t n)
......
......@@ -80,10 +80,12 @@ static inline void restore_access_regs(unsigned int *acrs)
if (prev->mm) { \
save_fp_regs(&prev->thread.fp_regs); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
if (next->mm) { \
restore_fp_regs(&next->thread.fp_regs); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
update_per_regs(next); \
} \
prev = __switch_to(prev,next); \
......
......@@ -17,7 +17,10 @@
#include <asm/bitsperlong.h>
struct sysinfo_1_1_1 {
unsigned short :16;
unsigned char p:1;
unsigned char :6;
unsigned char t:1;
unsigned char :8;
unsigned char ccr;
unsigned char cai;
char reserved_0[28];
......@@ -30,9 +33,14 @@ struct sysinfo_1_1_1 {
char model[16];
char model_perm_cap[16];
char model_temp_cap[16];
char model_cap_rating[4];
char model_perm_cap_rating[4];
char model_temp_cap_rating[4];
unsigned int model_cap_rating;
unsigned int model_perm_cap_rating;
unsigned int model_temp_cap_rating;
unsigned char typepct[5];
unsigned char reserved_2[3];
unsigned int ncr;
unsigned int npr;
unsigned int ntr;
};
struct sysinfo_1_2_1 {
......@@ -47,8 +55,9 @@ struct sysinfo_1_2_2 {
char format;
char reserved_0[1];
unsigned short acc_offset;
char reserved_1[24];
unsigned int secondary_capability;
char reserved_1[20];
unsigned int nominal_cap;
unsigned int secondary_cap;
unsigned int capability;
unsigned short cpus_total;
unsigned short cpus_configured;
......@@ -109,6 +118,8 @@ struct sysinfo_3_2_2 {
char reserved_544[3552];
};
extern int topology_max_mnest;
#define TOPOLOGY_CPU_BITS 64
#define TOPOLOGY_NR_MAG 6
......@@ -142,21 +153,7 @@ struct sysinfo_15_1_x {
union topology_entry tle[0];
};
static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
{
register int r0 asm("0") = (fc << 28) | sel1;
register int r1 asm("1") = sel2;
asm volatile(
" stsi 0(%2)\n"
"0: jz 2f\n"
"1: lhi %0,%3\n"
"2:\n"
EX_TABLE(0b, 1b)
: "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
: "cc", "memory");
return r0;
}
int stsi(void *sysinfo, int fc, int sel1, int sel2);
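
(Editorial aside, grounded in the callers updated below: with the out-of-line version, function code 0 makes stsi() return the current configuration level directly, while real queries return 0 on success.)

/* Examples from the hunks below:
 *
 *   if (stsi(NULL, 0, 0, 0) <= 2)		// level: LPAR or below
 *   if (stsi(si, 3, 2, 2) || !si->count)	// nonzero means failure
 */
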
/*
* Service level reporting interface.
......
......@@ -2,8 +2,8 @@
#define _ASM_S390_TOPOLOGY_H
#include <linux/cpumask.h>
#include <asm/sysinfo.h>
struct sysinfo_15_1_x;
struct cpu;
#ifdef CONFIG_SCHED_BOOK
......@@ -51,24 +51,6 @@ static inline void topology_expect_change(void) { }
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
extern int cpu_polarization[];
static inline void cpu_set_polarization(int cpu, int val)
{
#ifdef CONFIG_SCHED_BOOK
cpu_polarization[cpu] = val;
#endif
}
static inline int cpu_read_polarization(int cpu)
{
#ifdef CONFIG_SCHED_BOOK
return cpu_polarization[cpu];
#else
return POLARIZATION_HRZ;
#endif
}
#ifdef CONFIG_SCHED_BOOK
void s390_init_cpu_topology(void);
#else
......
......@@ -76,9 +76,22 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
struct exception_table_entry
{
unsigned long insn, fixup;
int insn, fixup;
};
static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
return (unsigned long)&x->insn + x->insn;
}
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
}
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
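
(Editorial aside: the entries shrink from two unsigned longs to two ints because each field now stores a signed 32-bit offset relative to its own address — presumably viable because kernel text and its fixups sit within +/-2 GB of each other. A worked example of the recovery arithmetic in extable_insn():)

/* Illustrative numbers: an entry whose insn field lives at
 * 0xffff1000 and refers to a faulting instruction at 0xffff3000
 * stores 0xffff3000 - 0xffff1000 = 0x2000, and
 *
 *   (unsigned long) &x->insn + x->insn
 *	== 0xffff1000 + 0x2000 == 0xffff3000
 *
 * which matches the ".long (_fault) - ." emitted by EX_TABLE. */
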
struct uaccess_ops {
size_t (*copy_from_user)(size_t, const void __user *, void *);
size_t (*copy_from_user_small)(size_t, const void __user *, void *);
......
......@@ -277,7 +277,9 @@
#define __NR_setns 339
#define __NR_process_vm_readv 340
#define __NR_process_vm_writev 341
#define NR_syscalls 342
#define __NR_s390_runtime_instr 342
#define __NR_kcmp 343
#define NR_syscalls 344
/*
* There are some system calls that are not present on 64 bit, some
......
......@@ -23,10 +23,11 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
sysinfo.o jump_label.o lgr.o os_info.o
sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
extra-y += head.o vmlinux.lds
extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
......@@ -48,12 +49,11 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o
# Kexec part
S390_KEXEC_OBJS := machine_kexec.o crash.o
S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
ifdef CONFIG_64BIT
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o
obj-y += runtime_instr.o cache.o
endif
# vdso
obj-$(CONFIG_64BIT) += vdso64/
......
......@@ -157,6 +157,8 @@ int main(void)
DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
#endif /* CONFIG_32BIT */
return 0;
......
/*
* Extract CPU cache information and expose them via sysfs.
*
* Copyright IBM Corp. 2012
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/facility.h>
struct cache {
unsigned long size;
unsigned int line_size;
unsigned int associativity;
unsigned int nr_sets;
unsigned int level : 3;
unsigned int type : 2;
unsigned int private : 1;
struct list_head list;
};
struct cache_dir {
struct kobject *kobj;
struct cache_index_dir *index;
};
struct cache_index_dir {
struct kobject kobj;
int cpu;
struct cache *cache;
struct cache_index_dir *next;
};
enum {
CACHE_SCOPE_NOTEXISTS,
CACHE_SCOPE_PRIVATE,
CACHE_SCOPE_SHARED,
CACHE_SCOPE_RESERVED,
};
enum {
CACHE_TYPE_SEPARATE,
CACHE_TYPE_DATA,
CACHE_TYPE_INSTRUCTION,
CACHE_TYPE_UNIFIED,
};
enum {
EXTRACT_TOPOLOGY,
EXTRACT_LINE_SIZE,
EXTRACT_SIZE,
EXTRACT_ASSOCIATIVITY,
};
enum {
CACHE_TI_UNIFIED = 0,
CACHE_TI_INSTRUCTION = 0,
CACHE_TI_DATA,
};
struct cache_info {
unsigned char : 4;
unsigned char scope : 2;
unsigned char type : 2;
};
#define CACHE_MAX_LEVEL 8
union cache_topology {
struct cache_info ci[CACHE_MAX_LEVEL];
unsigned long long raw;
};
static const char * const cache_type_string[] = {
"Data",
"Instruction",
"Unified",
};
static struct cache_dir *cache_dir_cpu[NR_CPUS];
static LIST_HEAD(cache_list);
void show_cacheinfo(struct seq_file *m)
{
struct cache *cache;
int index = 0;
list_for_each_entry(cache, &cache_list, list) {
seq_printf(m, "cache%-11d: ", index);
seq_printf(m, "level=%d ", cache->level);
seq_printf(m, "type=%s ", cache_type_string[cache->type]);
seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
seq_printf(m, "size=%luK ", cache->size >> 10);
seq_printf(m, "line_size=%u ", cache->line_size);
seq_printf(m, "associativity=%d", cache->associativity);
seq_puts(m, "\n");
index++;
}
}
static inline unsigned long ecag(int ai, int li, int ti)
{
unsigned long cmd, val;
cmd = ai << 4 | li << 1 | ti;
asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
: "=d" (val) : "a" (cmd));
return val;
}
static int __init cache_add(int level, int private, int type)
{
struct cache *cache;
int ti;
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (!cache)
return -ENOMEM;
ti = type == CACHE_TYPE_DATA ? CACHE_TI_DATA : CACHE_TI_UNIFIED;
cache->size = ecag(EXTRACT_SIZE, level, ti);
cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
cache->nr_sets = cache->size / cache->associativity;
cache->nr_sets /= cache->line_size;
cache->private = private;
cache->level = level + 1;
cache->type = type - 1;
list_add_tail(&cache->list, &cache_list);
return 0;
}
static void __init cache_build_info(void)
{
struct cache *cache, *next;
union cache_topology ct;
int level, private, rc;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (level = 0; level < CACHE_MAX_LEVEL; level++) {
switch (ct.ci[level].scope) {
case CACHE_SCOPE_NOTEXISTS:
case CACHE_SCOPE_RESERVED:
return;
case CACHE_SCOPE_SHARED:
private = 0;
break;
case CACHE_SCOPE_PRIVATE:
private = 1;
break;
}
if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
rc = cache_add(level, private, CACHE_TYPE_DATA);
rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
} else {
rc = cache_add(level, private, ct.ci[level].type);
}
if (rc)
goto error;
}
return;
error:
list_for_each_entry_safe(cache, next, &cache_list, list) {
list_del(&cache->list);
kfree(cache);
}
}
static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
{
struct cache_dir *cache_dir;
struct kobject *kobj = NULL;
struct device *dev;
dev = get_cpu_device(cpu);
if (!dev)
goto out;
kobj = kobject_create_and_add("cache", &dev->kobj);
if (!kobj)
goto out;
cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
if (!cache_dir)
goto out;
cache_dir->kobj = kobj;
cache_dir_cpu[cpu] = cache_dir;
return cache_dir;
out:
kobject_put(kobj);
return NULL;
}
static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
{
return container_of(kobj, struct cache_index_dir, kobj);
}
static void cache_index_release(struct kobject *kobj)
{
struct cache_index_dir *index;
index = kobj_to_cache_index_dir(kobj);
kfree(index);
}
static ssize_t cache_index_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct kobj_attribute *kobj_attr;
kobj_attr = container_of(attr, struct kobj_attribute, attr);
return kobj_attr->show(kobj, kobj_attr, buf);
}
#define DEFINE_CACHE_ATTR(_name, _format, _value) \
static ssize_t cache_##_name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
char *buf) \
{ \
struct cache_index_dir *index; \
\
index = kobj_to_cache_index_dir(kobj); \
return sprintf(buf, _format, _value); \
} \
static struct kobj_attribute cache_##_name##_attr = \
__ATTR(_name, 0444, cache_##_name##_show, NULL);
DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
{
struct cache_index_dir *index;
int len;
index = kobj_to_cache_index_dir(kobj);
len = type ?
cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
len += sprintf(&buf[len], "\n");
return len;
}
static ssize_t shared_cpu_map_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return shared_cpu_map_func(kobj, 0, buf);
}
static struct kobj_attribute cache_shared_cpu_map_attr =
__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
static ssize_t shared_cpu_list_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return shared_cpu_map_func(kobj, 1, buf);
}
static struct kobj_attribute cache_shared_cpu_list_attr =
__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
static struct attribute *cache_index_default_attrs[] = {
&cache_type_attr.attr,
&cache_size_attr.attr,
&cache_number_of_sets_attr.attr,
&cache_ways_of_associativity_attr.attr,
&cache_level_attr.attr,
&cache_coherency_line_size_attr.attr,
&cache_shared_cpu_map_attr.attr,
&cache_shared_cpu_list_attr.attr,
NULL,
};
static const struct sysfs_ops cache_index_ops = {
.show = cache_index_show,
};
static struct kobj_type cache_index_type = {
.sysfs_ops = &cache_index_ops,
.release = cache_index_release,
.default_attrs = cache_index_default_attrs,
};
static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
struct cache *cache, int index,
int cpu)
{
struct cache_index_dir *index_dir;
int rc;
index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
if (!index_dir)
return -ENOMEM;
index_dir->cache = cache;
index_dir->cpu = cpu;
rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
cache_dir->kobj, "index%d", index);
if (rc)
goto out;
index_dir->next = cache_dir->index;
cache_dir->index = index_dir;
return 0;
out:
kfree(index_dir);
return rc;
}
static int __cpuinit cache_add_cpu(int cpu)
{
struct cache_dir *cache_dir;
struct cache *cache;
int rc, index = 0;
if (list_empty(&cache_list))
return 0;
cache_dir = cache_create_cache_dir(cpu);
if (!cache_dir)
return -ENOMEM;
list_for_each_entry(cache, &cache_list, list) {
if (!cache->private)
break;
rc = cache_create_index_dir(cache_dir, cache, index, cpu);
if (rc)
return rc;
index++;
}
return 0;
}
static void __cpuinit cache_remove_cpu(int cpu)
{
struct cache_index_dir *index, *next;
struct cache_dir *cache_dir;
cache_dir = cache_dir_cpu[cpu];
if (!cache_dir)
return;
index = cache_dir->index;
while (index) {
next = index->next;
kobject_put(&index->kobj);
index = next;
}
kobject_put(cache_dir->kobj);
kfree(cache_dir);
cache_dir_cpu[cpu] = NULL;
}
static int __cpuinit cache_hotplug(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
int cpu = (long)hcpu;
int rc = 0;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
rc = cache_add_cpu(cpu);
if (rc)
cache_remove_cpu(cpu);
break;
case CPU_DEAD:
cache_remove_cpu(cpu);
break;
}
return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static int __init cache_init(void)
{
int cpu;
if (!test_facility(34))
return 0;
cache_build_info();
for_each_online_cpu(cpu)
cache_add_cpu(cpu);
hotcpu_notifier(cache_hotplug, 0);
return 0;
}
device_initcall(cache_init);
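
(Editorial aside: the only arithmetic above is in cache_add(), nr_sets = size / associativity / line_size. A worked example with made-up numbers:)

/* Illustrative only, not a real zEC12 topology: a 64 KB,
 * 4-way cache with 256-byte lines gives
 *
 *   nr_sets = 65536 / 4 / 256 = 64
 *
 * ecag (EXTRACT CPU ATTRIBUTE) supplies size, line_size and
 * associativity; cmd = ai << 4 | li << 1 | ti selects which
 * attribute, cache level and type to extract. */
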
......@@ -1646,3 +1646,16 @@ ENTRY(compat_sys_process_vm_writev_wrapper)
llgf %r0,164(%r15) # unsigned long
stg %r0,160(%r15)
jg compat_sys_process_vm_writev
ENTRY(sys_s390_runtime_instr_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_s390_runtime_instr
ENTRY(sys_kcmp_wrapper)
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # pid_t
lgfr %r4,%r4 # int
llgfr %r5,%r5 # unsigned long
llgfr %r6,%r6 # unsigned long
jg sys_kcmp
/*
* Copyright IBM Corp. 2005
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*
*/
#include <linux/threads.h>
#include <linux/kexec.h>
#include <linux/reboot.h>
void machine_crash_shutdown(struct pt_regs *regs)
{
}
......@@ -13,8 +13,9 @@
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/elf.h>
#include <asm/ipl.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
......
......@@ -315,6 +315,11 @@ enum {
LONG_INSN_POPCNT,
LONG_INSN_RISBHG,
LONG_INSN_RISBLG,
LONG_INSN_RINEXT,
LONG_INSN_RIEMIT,
LONG_INSN_TABORT,
LONG_INSN_TBEGIN,
LONG_INSN_TBEGINC,
};
static char *long_insn_name[] = {
......@@ -329,7 +334,12 @@ static char *long_insn_name[] = {
[LONG_INSN_LLGHRL] = "llghrl",
[LONG_INSN_POPCNT] = "popcnt",
[LONG_INSN_RISBHG] = "risbhg",
[LONG_INSN_RISBLG] = "risblk",
[LONG_INSN_RISBLG] = "risblg",
[LONG_INSN_RINEXT] = "rinext",
[LONG_INSN_RIEMIT] = "riemit",
[LONG_INSN_TABORT] = "tabort",
[LONG_INSN_TBEGIN] = "tbegin",
[LONG_INSN_TBEGINC] = "tbeginc",
};
static struct insn opcode[] = {
......@@ -582,6 +592,17 @@ static struct insn opcode_a7[] = {
{ "", 0, INSTR_INVALID }
};
static struct insn opcode_aa[] = {
#ifdef CONFIG_64BIT
{ { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
{ "rion", 0x01, INSTR_RI_RI },
{ "tric", 0x02, INSTR_RI_RI },
{ "rioff", 0x03, INSTR_RI_RI },
{ { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
#endif
{ "", 0, INSTR_INVALID }
};
static struct insn opcode_b2[] = {
#ifdef CONFIG_64BIT
{ "sske", 0x2b, INSTR_RRF_M0RR },
......@@ -594,6 +615,9 @@ static struct insn opcode_b2[] = {
{ "lpswe", 0xb2, INSTR_S_RD },
{ "srnmt", 0xb9, INSTR_S_RD },
{ "lfas", 0xbd, INSTR_S_RD },
{ "etndg", 0xec, INSTR_RRE_R0 },
{ { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
{ "tend", 0xf8, INSTR_S_RD },
#endif
{ "stidp", 0x02, INSTR_S_RD },
{ "sck", 0x04, INSTR_S_RD },
......@@ -1150,6 +1174,7 @@ static struct insn opcode_e3[] = {
{ "stfh", 0xcb, INSTR_RXY_RRRD },
{ "chf", 0xcd, INSTR_RXY_RRRD },
{ "clhf", 0xcf, INSTR_RXY_RRRD },
{ "ntstg", 0x25, INSTR_RXY_RRRD },
#endif
{ "lrv", 0x1e, INSTR_RXY_RRRD },
{ "lrvh", 0x1f, INSTR_RXY_RRRD },
......@@ -1173,6 +1198,8 @@ static struct insn opcode_e5[] = {
{ "mvhhi", 0x44, INSTR_SIL_RDI },
{ "mvhi", 0x4c, INSTR_SIL_RDI },
{ "mvghi", 0x48, INSTR_SIL_RDI },
{ { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
#endif
{ "lasp", 0x00, INSTR_SSE_RDRD },
{ "tprot", 0x01, INSTR_SSE_RDRD },
......@@ -1210,6 +1237,9 @@ static struct insn opcode_eb[] = {
{ "cliy", 0x55, INSTR_SIY_URD },
{ "oiy", 0x56, INSTR_SIY_URD },
{ "xiy", 0x57, INSTR_SIY_URD },
{ "lric", 0x60, INSTR_RSY_RDRM },
{ "stric", 0x61, INSTR_RSY_RDRM },
{ "mric", 0x62, INSTR_RSY_RDRM },
{ "icmh", 0x80, INSTR_RSE_RURD },
{ "icmh", 0x80, INSTR_RSY_RURD },
{ "icmy", 0x81, INSTR_RSY_RURD },
......@@ -1408,6 +1438,9 @@ static struct insn *find_insn(unsigned char *code)
case 0xa7:
table = opcode_a7;
break;
case 0xaa:
table = opcode_aa;
break;
case 0xb2:
table = opcode_b2;
break;
......@@ -1601,3 +1634,26 @@ void show_code(struct pt_regs *regs)
}
printk("\n");
}
void print_fn_code(unsigned char *code, unsigned long len)
{
char buffer[64], *ptr;
int opsize, i;
while (len) {
ptr = buffer;
opsize = insn_length(*code);
ptr += sprintf(ptr, "%p: ", code);
for (i = 0; i < opsize; i++)
ptr += sprintf(ptr, "%02x", code[i]);
*ptr++ = '\t';
if (i < 4)
*ptr++ = '\t';
ptr += print_insn(ptr, code, (unsigned long) code);
*ptr++ = '\n';
*ptr++ = 0;
printk(buffer);
code += opsize;
len -= opsize;
}
}
......@@ -215,36 +215,54 @@ static noinline __init void init_kernel_storage_key(void)
PAGE_DEFAULT_KEY, 0);
}
static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static noinline __init void detect_machine_type(void)
{
struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;
/* Check current-configuration-level */
if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) {
if (stsi(NULL, 0, 0, 0) <= 2) {
S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
return;
}
/* Get virtual-machine cpu information. */
if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count)
if (stsi(vmms, 3, 2, 2) || !vmms->count)
return;
/* Running under KVM? If not we assume z/VM */
if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3))
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
else
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
static __init void setup_topology(void)
{
#ifdef CONFIG_64BIT
int max_mnest;
if (!test_facility(11))
return;
S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
for (max_mnest = 6; max_mnest > 1; max_mnest--) {
if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
break;
}
topology_max_mnest = max_mnest;
#endif
}
static void early_pgm_check_handler(void)
{
unsigned long addr;
const struct exception_table_entry *fixup;
unsigned long addr;
addr = S390_lowcore.program_old_psw.addr;
fixup = search_exception_tables(addr & PSW_ADDR_INSN);
if (!fixup)
disabled_wait(0);
S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
}
static noinline __init void setup_lowcore_early(void)
......@@ -267,12 +285,10 @@ static noinline __init void setup_facility_list(void)
static noinline __init void setup_hpage(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
if (!test_facility(2) || !test_facility(8))
return;
S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
__ctl_set_bit(0, 23);
#endif
}
static __init void detect_mvpg(void)
......@@ -366,12 +382,12 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(8))
S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
if (test_facility(11))
S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
if (test_facility(27))
S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
if (test_facility(40))
S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
if (test_facility(50) && test_facility(73))
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
#endif
}
......@@ -441,7 +457,6 @@ static void __init setup_boot_command_line(void)
append_to_cmdline(append_ipl_scpdata);
}
/*
* Save ipl parameters, clear bss memory, initialize storage keys
* and create a kernel NSS at startup if the SAVESYS= parm is defined
......@@ -468,6 +483,7 @@ void __init startup_init(void)
detect_diag44();
detect_machine_facilities();
setup_hpage();
setup_topology();
sclp_facilities_detect();
detect_memory_layout(memory_chunk);
#ifdef CONFIG_DYNAMIC_FTRACE
......
......@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
......@@ -412,6 +413,11 @@ ENTRY(pgm_check_handler)
1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
LAST_BREAK %r14
lg %r15,__LC_KERNEL_STACK
lg %r14,__TI_task(%r12)
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 2f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
......@@ -422,13 +428,12 @@ ENTRY(pgm_check_handler)
stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 0f
lg %r1,__TI_task(%r12)
tmhh %r8,0x0001 # kernel per event ?
jz pgm_kprobe
oi __TI_flags+7(%r12),_TIF_PER_TRAP
mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE
mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID
0: REENABLE_IRQS
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
larl %r1,pgm_check_table
......@@ -1004,9 +1009,7 @@ sie_fault:
.Lhost_id:
.quad 0
.section __ex_table,"a"
.quad sie_loop,sie_fault
.previous
EX_TABLE(sie_loop,sie_fault)
#endif
.section .rodata, "a"
......
......@@ -30,33 +30,35 @@ struct irq_class {
};
static const struct irq_class intrclass_names[] = {
{.name = "EXT" },
{.name = "I/O" },
{.name = "CLK", .desc = "[EXT] Clock Comparator" },
{.name = "EXC", .desc = "[EXT] External Call" },
{.name = "EMS", .desc = "[EXT] Emergency Signal" },
{.name = "TMR", .desc = "[EXT] CPU Timer" },
{.name = "TAL", .desc = "[EXT] Timing Alert" },
{.name = "PFL", .desc = "[EXT] Pseudo Page Fault" },
{.name = "DSD", .desc = "[EXT] DASD Diag" },
{.name = "VRT", .desc = "[EXT] Virtio" },
{.name = "SCP", .desc = "[EXT] Service Call" },
{.name = "IUC", .desc = "[EXT] IUCV" },
{.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling" },
{.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter" },
{.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" },
{.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
{.name = "DAS", .desc = "[I/O] DASD" },
{.name = "C15", .desc = "[I/O] 3215" },
{.name = "C70", .desc = "[I/O] 3270" },
{.name = "TAP", .desc = "[I/O] Tape" },
{.name = "VMR", .desc = "[I/O] Unit Record Devices" },
{.name = "LCS", .desc = "[I/O] LCS" },
{.name = "CLW", .desc = "[I/O] CLAW" },
{.name = "CTC", .desc = "[I/O] CTC" },
{.name = "APB", .desc = "[I/O] AP Bus" },
{.name = "CSC", .desc = "[I/O] CHSC Subchannel" },
{.name = "NMI", .desc = "[NMI] Machine Check" },
[EXTERNAL_INTERRUPT] = {.name = "EXT"},
[IO_INTERRUPT] = {.name = "I/O"},
[EXTINT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
[EXTINT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
[EXTINT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
[EXTINT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
[EXTINT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
[EXTINT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
[EXTINT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
[EXTINT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
[EXTINT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
[EXTINT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
[EXTINT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
[EXTINT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
[EXTINT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
[IOINT_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
[IOINT_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
[IOINT_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
[IOINT_C15] = {.name = "C15", .desc = "[I/O] 3215"},
[IOINT_C70] = {.name = "C70", .desc = "[I/O] 3270"},
[IOINT_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
[IOINT_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
[IOINT_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
[IOINT_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
[IOINT_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
[IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
[IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
[IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
};
/*
......
......@@ -547,7 +547,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
*/
entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (entry) {
regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
return 1;
}
......
......@@ -50,16 +50,6 @@ static struct lgr_info lgr_info_last;
static struct lgr_info lgr_info_cur;
static struct debug_info *lgr_dbf;
/*
* Return number of valid stsi levels
*/
static inline int stsi_0(void)
{
int rc = stsi(NULL, 0, 0, 0);
return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
}
/*
* Copy buffer and then convert it to ASCII
*/
......@@ -76,7 +66,7 @@ static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
{
struct sysinfo_1_1_1 *si = (void *) lgr_page;
if (stsi(si, 1, 1, 1) == -ENOSYS)
if (stsi(si, 1, 1, 1))
return;
cpascii(lgr_info->manufacturer, si->manufacturer,
sizeof(si->manufacturer));
......@@ -93,7 +83,7 @@ static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
{
struct sysinfo_2_2_2 *si = (void *) lgr_page;
if (stsi(si, 2, 2, 2) == -ENOSYS)
if (stsi(si, 2, 2, 2))
return;
cpascii(lgr_info->name, si->name, sizeof(si->name));
memcpy(&lgr_info->lpar_number, &si->lpar_number,
......@@ -108,7 +98,7 @@ static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
struct sysinfo_3_2_2 *si = (void *) lgr_page;
int i;
if (stsi(si, 3, 2, 2) == -ENOSYS)
if (stsi(si, 3, 2, 2))
return;
for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) {
cpascii(lgr_info->vm[i].name, si->vm[i].name,
......@@ -124,16 +114,17 @@ static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
*/
static void lgr_info_get(struct lgr_info *lgr_info)
{
int level;
memset(lgr_info, 0, sizeof(*lgr_info));
stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list));
lgr_info->level = stsi_0();
if (lgr_info->level == -ENOSYS)
return;
if (lgr_info->level >= 1)
level = stsi(NULL, 0, 0, 0);
lgr_info->level = level;
if (level >= 1)
lgr_stsi_1_1_1(lgr_info);
if (lgr_info->level >= 2)
if (level >= 2)
lgr_stsi_2_2_2(lgr_info);
if (lgr_info->level >= 3)
if (level >= 3)
lgr_stsi_3_2_2(lgr_info);
}
......
......@@ -21,6 +21,7 @@
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>
......@@ -31,8 +32,6 @@ extern const unsigned long long relocate_kernel_len;
#ifdef CONFIG_CRASH_DUMP
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
/*
* Create ELF notes for one CPU
*/
......@@ -159,7 +158,7 @@ int machine_kexec_prepare(struct kimage *image)
/* Can't replace kernel image since it is read-only. */
if (ipl_flags & IPL_NSS_VALID)
return -ENOSYS;
return -EOPNOTSUPP;
if (image->type == KEXEC_TYPE_CRASH)
return machine_kexec_prepare_kdump();
......@@ -191,6 +190,10 @@ void machine_shutdown(void)
{
}
void machine_crash_shutdown(struct pt_regs *regs)
{
}
/*
* Do normal kexec
*/
......
......@@ -26,10 +26,12 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include "entry.h"
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
......@@ -132,6 +134,7 @@ EXPORT_SYMBOL(kernel_thread);
*/
void exit_thread(void)
{
exit_thread_runtime_instr();
}
void flush_thread(void)
......@@ -170,6 +173,11 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
/* Save access registers to new thread structure. */
save_access_regs(&p->thread.acrs[0]);
/* Don't copy runtime instrumentation info */
p->thread.ri_cb = NULL;
p->thread.ri_signum = 0;
frame->childregs.psw.mask &= ~PSW_MASK_RI;
#ifndef CONFIG_64BIT
/*
* save fprs to current->thread.fp_regs to merge them with
......
......@@ -39,9 +39,9 @@ void __cpuinit cpu_init(void)
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
static const char *hwcap_str[10] = {
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh", "highgprs"
"edat", "etf3eh", "highgprs", "te"
};
unsigned long n = (unsigned long) v - 1;
int i;
......@@ -54,10 +54,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
num_online_cpus(), loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ))%100);
seq_puts(m, "features\t: ");
for (i = 0; i < 10; i++)
for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
seq_printf(m, "%s ", hwcap_str[i]);
seq_puts(m, "\n");
show_cacheinfo(m);
}
get_online_cpus();
if (cpu_online(n)) {
......
......@@ -42,6 +42,7 @@ enum s390_regset {
REGSET_GENERAL,
REGSET_FP,
REGSET_LAST_BREAK,
REGSET_TDB,
REGSET_SYSTEM_CALL,
REGSET_GENERAL_EXTENDED,
};
......@@ -52,6 +53,22 @@ void update_per_regs(struct task_struct *task)
struct thread_struct *thread = &task->thread;
struct per_regs old, new;
#ifdef CONFIG_64BIT
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
unsigned long cr0, cr0_new;
__ctl_store(cr0, 0, 0);
/* Set or clear transaction execution bits 8 and 9 of CR0
 * (bits are numbered from the MSB, hence the 3UL << 54 mask). */
if (task->thread.per_flags & PER_FLAG_NO_TE)
cr0_new = cr0 & ~(3UL << 54);
else
cr0_new = cr0 | (3UL << 54);
/* Only load control register 0 if necessary. */
if (cr0 != cr0_new)
__ctl_load(cr0_new, 0, 0);
}
#endif
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
......@@ -60,6 +77,10 @@ void update_per_regs(struct task_struct *task)
/* merge TIF_SINGLE_STEP into user specified PER registers. */
if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
new.control |= PER_EVENT_IFETCH;
#ifdef CONFIG_64BIT
new.control |= PER_CONTROL_SUSPENSION;
new.control |= PER_EVENT_TRANSACTION_END;
#endif
new.start = 0;
new.end = PSW_ADDR_INSN;
}
......@@ -100,6 +121,7 @@ void ptrace_disable(struct task_struct *task)
memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
clear_tsk_thread_flag(task, TIF_PER_TRAP);
task->thread.per_flags = 0;
}
#ifndef CONFIG_64BIT
......@@ -416,6 +438,16 @@ long arch_ptrace(struct task_struct *child, long request,
put_user(task_thread_info(child)->last_break,
(unsigned long __user *) data);
return 0;
case PTRACE_ENABLE_TE:
if (!MACHINE_HAS_TE)
return -EIO;
child->thread.per_flags &= ~PER_FLAG_NO_TE;
return 0;
case PTRACE_DISABLE_TE:
if (!MACHINE_HAS_TE)
return -EIO;
child->thread.per_flags |= PER_FLAG_NO_TE;
return 0;
default:
/* Removing high order bit from addr (only for 31 bit). */
addr &= PSW_ADDR_INSN;
......@@ -903,6 +935,28 @@ static int s390_last_break_set(struct task_struct *target,
return 0;
}
static int s390_tdb_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
unsigned char *data;
if (!(regs->int_code & 0x200))
return -ENODATA;
data = target->thread.trap_tdb;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}
static int s390_tdb_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return 0;
}
#endif
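For readers following the new TDB regset: user space can fetch the 256-byte transaction diagnostic block of a stopped tracee through the generic regset interface. A hedged sketch (NT_S390_TDB was added to <elf.h> alongside this series; error handling is omitted, and -1 with errno ENODATA is returned when the last program interrupt stored no TDB):

#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Read the tracee's transaction diagnostic block into tdb[]. */
static int read_tdb(pid_t pid, unsigned char tdb[256])
{
	struct iovec iov = { .iov_base = tdb, .iov_len = 256 };

	return ptrace(PTRACE_GETREGSET, pid, NT_S390_TDB, &iov);
}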
static int s390_system_call_get(struct task_struct *target,
......@@ -951,6 +1005,14 @@ static const struct user_regset s390_regsets[] = {
.get = s390_last_break_get,
.set = s390_last_break_set,
},
[REGSET_TDB] = {
.core_note_type = NT_S390_TDB,
.n = 1,
.size = 256,
.align = 1,
.get = s390_tdb_get,
.set = s390_tdb_set,
},
#endif
[REGSET_SYSTEM_CALL] = {
.core_note_type = NT_S390_SYSTEM_CALL,
......@@ -1148,6 +1210,14 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_compat_last_break_get,
.set = s390_compat_last_break_set,
},
[REGSET_TDB] = {
.core_note_type = NT_S390_TDB,
.n = 1,
.size = 256,
.align = 1,
.get = s390_tdb_get,
.set = s390_tdb_set,
},
[REGSET_SYSTEM_CALL] = {
.core_note_type = NT_S390_SYSTEM_CALL,
.n = 1,
......
/*
* Copyright IBM Corp. 2012
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <asm/runtime_instr.h>
#include <asm/cpu_mf.h>
#include <asm/irq.h>
/* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb;
static int runtime_instr_avail(void)
{
return test_facility(64);
}
static void disable_runtime_instr(void)
{
struct pt_regs *regs = task_pt_regs(current);
load_runtime_instr_cb(&runtime_instr_empty_cb);
/*
* Make sure the RI bit is deleted from the PSW. If the user did not
* switch off RI before the system call, the process would otherwise
* get a specification exception.
*/
regs->psw.mask &= ~PSW_MASK_RI;
}
static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
{
cb->buf_limit = 0xfff;
if (s390_user_mode == HOME_SPACE_MODE)
cb->home_space = 1;
cb->int_requested = 1;
cb->pstate = 1;
cb->pstate_set_buf = 1;
cb->pstate_sample = 1;
cb->pstate_collect = 1;
cb->key = PAGE_DEFAULT_KEY;
cb->valid = 1;
}
void exit_thread_runtime_instr(void)
{
struct task_struct *task = current;
if (!task->thread.ri_cb)
return;
disable_runtime_instr();
kfree(task->thread.ri_cb);
task->thread.ri_signum = 0;
task->thread.ri_cb = NULL;
}
static void runtime_instr_int_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct siginfo info;
if (!(param32 & CPU_MF_INT_RI_MASK))
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_CMR]++;
if (!current->thread.ri_cb)
return;
if (current->thread.ri_signum < SIGRTMIN ||
current->thread.ri_signum > SIGRTMAX) {
WARN_ON_ONCE(1);
return;
}
memset(&info, 0, sizeof(info));
info.si_signo = current->thread.ri_signum;
info.si_code = SI_QUEUE;
if (param32 & CPU_MF_INT_RI_BUF_FULL)
info.si_int = ENOBUFS;
else if (param32 & CPU_MF_INT_RI_HALTED)
info.si_int = ECANCELED;
else
return; /* unknown reason */
send_sig_info(current->thread.ri_signum, &info, current);
}
SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
{
struct runtime_instr_cb *cb;
if (!runtime_instr_avail())
return -EOPNOTSUPP;
if (command == S390_RUNTIME_INSTR_STOP) {
preempt_disable();
exit_thread_runtime_instr();
preempt_enable();
return 0;
}
if (command != S390_RUNTIME_INSTR_START ||
(signum < SIGRTMIN || signum > SIGRTMAX))
return -EINVAL;
if (!current->thread.ri_cb) {
cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
} else {
cb = current->thread.ri_cb;
memset(cb, 0, sizeof(*cb));
}
init_runtime_instr_cb(cb);
current->thread.ri_signum = signum;
/* now load the control block to make it available */
preempt_disable();
current->thread.ri_cb = cb;
load_runtime_instr_cb(cb);
preempt_enable();
return 0;
}
static int __init runtime_instr_init(void)
{
int rc;
if (!runtime_instr_avail())
return 0;
measurement_alert_subclass_register();
rc = register_external_interrupt(0x1407, runtime_instr_int_handler);
if (rc)
measurement_alert_subclass_unregister();
else
pr_info("Runtime instrumentation facility initialized\n");
return rc;
}
device_initcall(runtime_instr_init);
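To illustrate the user-space side: runtime instrumentation is started and stopped through the s390_runtime_instr system call wired up later in this series (syscall number 342 on s390), with buffer-full/halted events delivered as the chosen real-time signal. A minimal sketch; the S390_RUNTIME_INSTR_* values mirror what asm/runtime_instr.h is expected to define and are assumptions here:

#include <signal.h>
#include <unistd.h>

#define S390_RUNTIME_INSTR_START	0x1	/* assumed constant */
#define S390_RUNTIME_INSTR_STOP		0x2	/* assumed constant */

int main(void)
{
	/* enable RI for this task; events arrive as SIGRTMIN */
	if (syscall(342, S390_RUNTIME_INSTR_START, SIGRTMIN) != 0)
		return 1;
	/* ... code to be instrumented runs here ... */
	syscall(342, S390_RUNTIME_INSTR_STOP, 0);
	return 0;
}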
......@@ -8,3 +8,5 @@ EXPORT_SYMBOL(_mcount);
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
EXPORT_SYMBOL(sie64a);
#endif
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
......@@ -302,10 +302,10 @@ static int __init parse_vmalloc(char *arg)
}
early_param("vmalloc", parse_vmalloc);
unsigned int addressing_mode = HOME_SPACE_MODE;
EXPORT_SYMBOL_GPL(addressing_mode);
unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
EXPORT_SYMBOL_GPL(s390_user_mode);
static int set_amode_primary(void)
static void __init set_user_mode_primary(void)
{
psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
......@@ -313,48 +313,30 @@ static int set_amode_primary(void)
psw32_user_bits =
(psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
#endif
if (MACHINE_HAS_MVCOS) {
memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
return 1;
} else {
memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
return 0;
}
}
/*
* Switch kernel/user addressing modes?
*/
static int __init early_parse_switch_amode(char *p)
{
addressing_mode = PRIMARY_SPACE_MODE;
return 0;
uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
}
early_param("switch_amode", early_parse_switch_amode);
static int __init early_parse_user_mode(char *p)
{
if (p && strcmp(p, "primary") == 0)
addressing_mode = PRIMARY_SPACE_MODE;
s390_user_mode = PRIMARY_SPACE_MODE;
else if (!p || strcmp(p, "home") == 0)
addressing_mode = HOME_SPACE_MODE;
s390_user_mode = HOME_SPACE_MODE;
else
return 1;
return 0;
}
early_param("user_mode", early_parse_user_mode);
static void setup_addressing_mode(void)
static void __init setup_addressing_mode(void)
{
if (addressing_mode == PRIMARY_SPACE_MODE) {
if (set_amode_primary())
pr_info("Address spaces switched, "
"mvcos available\n");
else
pr_info("Address spaces switched, "
"mvcos not available\n");
}
if (s390_user_mode != PRIMARY_SPACE_MODE)
return;
set_user_mode_primary();
if (MACHINE_HAS_MVCOS)
pr_info("Address spaces switched, mvcos available\n");
else
pr_info("Address spaces switched, mvcos not available\n");
}
void *restart_stack __attribute__((__section__(".data")));
......@@ -602,9 +584,7 @@ static void __init setup_memory_end(void)
static void __init setup_vmcoreinfo(void)
{
#ifdef CONFIG_KEXEC
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
#endif
}
#ifdef CONFIG_CRASH_DUMP
......@@ -980,6 +960,12 @@ static void __init setup_hwcaps(void)
* HWCAP_S390_HIGH_GPRS is bit 9.
*/
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
/*
* Transactional execution support HWCAP_S390_TE is bit 10.
*/
if (test_facility(50) && test_facility(73))
elf_hwcap |= HWCAP_S390_TE;
#endif
get_cpu_id(&cpu_id);
......
......@@ -66,7 +66,7 @@ struct pcpu {
unsigned long panic_stack; /* panic stack for the cpu */
unsigned long ec_mask; /* bit mask for ec_xxx functions */
int state; /* physical cpu state */
u32 status; /* last status received via sigp */
int polarization; /* physical polarization */
u16 address; /* physical cpu address */
};
......@@ -74,6 +74,10 @@ static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];
/*
* The smp_cpu_state_mutex must be held when changing the state or polarization
* member of a pcpu data structure within the pcpu_devices array.
*/
DEFINE_MUTEX(smp_cpu_state_mutex);
/*
......@@ -99,7 +103,7 @@ static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
int cc;
while (1) {
cc = __pcpu_sigp(addr, order, parm, status);
cc = __pcpu_sigp(addr, order, parm, NULL);
if (cc != SIGP_CC_BUSY)
return cc;
cpu_relax();
......@@ -111,7 +115,7 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
int cc, retry;
for (retry = 0; ; retry++) {
cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
if (cc != SIGP_CC_BUSY)
break;
if (retry >= 3)
......@@ -122,16 +126,18 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
static inline int pcpu_stopped(struct pcpu *pcpu)
{
u32 uninitialized_var(status);
if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
0, &pcpu->status) != SIGP_CC_STATUS_STORED)
0, &status) != SIGP_CC_STATUS_STORED)
return 0;
return !!(pcpu->status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}
static inline int pcpu_running(struct pcpu *pcpu)
{
if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
0, &pcpu->status) != SIGP_CC_STATUS_STORED)
0, NULL) != SIGP_CC_STATUS_STORED)
return 1;
/* Status stored condition code is equivalent to cpu not running. */
return 0;
......@@ -586,6 +592,16 @@ static inline void smp_get_save_area(int cpu, u16 address) { }
#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
void smp_cpu_set_polarization(int cpu, int val)
{
pcpu_devices[cpu].polarization = val;
}
int smp_cpu_get_polarization(int cpu)
{
return pcpu_devices[cpu].polarization;
}
static struct sclp_cpu_info *smp_get_cpu_info(void)
{
static int use_sigp_detection;
......@@ -628,7 +644,7 @@ static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
pcpu->address = info->cpu[i].address;
pcpu->state = (cpu >= info->configured) ?
CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
set_cpu_present(cpu, true);
if (sysfs_add && smp_add_present_cpu(cpu) != 0)
set_cpu_present(cpu, false);
......@@ -796,7 +812,7 @@ void __init smp_prepare_boot_cpu(void)
pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
S390_lowcore.percpu_offset = __per_cpu_offset[0];
cpu_set_polarization(0, POLARIZATION_UNKNOWN);
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);
set_cpu_online(0, true);
}
......@@ -862,7 +878,7 @@ static ssize_t cpu_configure_store(struct device *dev,
if (rc)
break;
pcpu->state = CPU_STATE_STANDBY;
cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
topology_expect_change();
break;
case 1:
......@@ -872,7 +888,7 @@ static ssize_t cpu_configure_store(struct device *dev,
if (rc)
break;
pcpu->state = CPU_STATE_CONFIGURED;
cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
topology_expect_change();
break;
default:
......@@ -959,23 +975,17 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
struct device *s = &c->dev;
int err = 0;
switch (action) {
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
break;
}
return notifier_from_errno(err);
}
static struct notifier_block __cpuinitdata smp_cpu_nb = {
.notifier_call = smp_cpu_notify,
};
static int __devinit smp_add_present_cpu(int cpu)
{
struct cpu *c = &pcpu_devices[cpu].cpu;
......@@ -1050,7 +1060,7 @@ static int __init s390_smp_init(void)
{
int cpu, rc;
register_cpu_notifier(&smp_cpu_nb);
hotcpu_notifier(smp_cpu_notify, 0);
#ifdef CONFIG_HOTPLUG_CPU
rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
if (rc)
......
......@@ -350,3 +350,5 @@ SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
......@@ -329,7 +329,7 @@ static unsigned long clock_sync_flags;
* The synchronous get_clock function. It will write the current clock
* value to the clock pointer and return 0 if the clock is in sync with
* the external time source. If the clock mode is local it will return
* -ENOSYS and -EAGAIN if the clock is not in sync with the external
* -EOPNOTSUPP and -EAGAIN if the clock is not in sync with the external
* reference.
*/
int get_sync_clock(unsigned long long *clock)
......@@ -347,7 +347,7 @@ int get_sync_clock(unsigned long long *clock)
return 0;
if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -ENOSYS;
return -EOPNOTSUPP;
if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return -EACCES;
......
......@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/sysinfo.h>
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
......@@ -44,9 +45,6 @@ static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
/* smp_cpu_state_mutex must be held when accessing this array */
int cpu_polarization[NR_CPUS];
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
cpumask_t mask;
......@@ -75,10 +73,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
{
unsigned int cpu;
for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
cpu < TOPOLOGY_CPU_BITS;
cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
{
for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
unsigned int rcpu;
int lcpu;
......@@ -94,7 +89,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
} else {
cpu_core_id[lcpu] = core->id;
}
cpu_set_polarization(lcpu, tl_cpu->pp);
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
}
}
return core;
......@@ -201,7 +196,7 @@ static void topology_update_polarization_simple(void)
mutex_lock(&smp_cpu_state_mutex);
for_each_possible_cpu(cpu)
cpu_set_polarization(cpu, POLARIZATION_HRZ);
smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
mutex_unlock(&smp_cpu_state_mutex);
}
......@@ -231,7 +226,7 @@ int topology_set_cpu_management(int fc)
if (rc)
return -EBUSY;
for_each_possible_cpu(cpu)
cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
return rc;
}
......@@ -250,12 +245,10 @@ static void update_cpu_core_map(void)
void store_topology(struct sysinfo_15_1_x *info)
{
int rc;
rc = stsi(info, 15, 1, 3);
if (rc != -ENOSYS)
return;
stsi(info, 15, 1, 2);
if (topology_max_mnest >= 3)
stsi(info, 15, 1, 3);
else
stsi(info, 15, 1, 2);
}
int arch_update_cpu_topology(void)
......@@ -415,7 +408,7 @@ static ssize_t cpu_polarization_show(struct device *dev,
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
switch (cpu_read_polarization(cpu)) {
switch (smp_cpu_get_polarization(cpu)) {
case POLARIZATION_HRZ:
count = sprintf(buf, "horizontal\n");
break;
......
......@@ -57,6 +57,23 @@ static int kstack_depth_to_print = 12;
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	unsigned long address;

	/*
	 * For a transaction abort (bit 0x200 set in the program interrupt
	 * code) the faulting address is the aborted-transaction instruction
	 * address stored at offset 24 of the TDB; otherwise it is taken
	 * from the PSW.
	 */
	if (regs->int_code & 0x200)
		address = *(unsigned long *)(current->thread.trap_tdb + 24);
	else
		address = regs->psw.addr;
	/* Rewind by the instruction length code in the upper 16 bits. */
	return (void __user *)
		((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#else
	return (void __user *)
		((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#endif
}
/*
* For show_trace we have three different stacks to consider:
* - the panic stack which is used if the kernel stack has overflowed
......@@ -214,7 +231,6 @@ void show_registers(struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
print_modules();
printk("CPU: %d %s %s %.*s\n",
task_thread_info(current)->cpu, print_tainted(),
init_utsname()->release,
......@@ -254,6 +270,7 @@ void die(struct pt_regs *regs, const char *str)
#endif
printk("\n");
notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
print_modules();
show_regs(regs);
bust_spinlocks(0);
add_taint(TAINT_DIE);
......@@ -285,12 +302,6 @@ int is_valid_bugaddr(unsigned long addr)
return 1;
}
static inline void __user *get_psw_address(struct pt_regs *regs)
{
return (void __user *)
((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
}
static void __kprobes do_trap(struct pt_regs *regs,
int si_signo, int si_code, char *str)
{
......@@ -304,14 +315,14 @@ static void __kprobes do_trap(struct pt_regs *regs,
info.si_signo = si_signo;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = get_psw_address(regs);
info.si_addr = get_trap_ip(regs);
force_sig_info(si_signo, &info, current);
report_user_fault(regs, si_signo);
} else {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup)
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
else {
enum bug_trap_type btt;
......@@ -381,6 +392,11 @@ DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
"translation exception")
#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
"transaction constraint exception")
#endif
static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
int si_code = 0;
......@@ -408,7 +424,7 @@ static void __kprobes illegal_op(struct pt_regs *regs)
__u16 __user *location;
int signal = 0;
location = get_psw_address(regs);
location = get_trap_ip(regs);
if (user_mode(regs)) {
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
......@@ -476,7 +492,7 @@ void specification_exception(struct pt_regs *regs)
__u16 __user *location = NULL;
int signal = 0;
location = (__u16 __user *) get_psw_address(regs);
location = (__u16 __user *) get_trap_ip(regs);
if (user_mode(regs)) {
get_user(*((__u16 *) opcode), location);
......@@ -525,7 +541,7 @@ static void data_exception(struct pt_regs *regs)
__u16 __user *location;
int signal = 0;
location = get_psw_address(regs);
location = get_trap_ip(regs);
if (MACHINE_HAS_IEEE)
asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
......@@ -641,6 +657,7 @@ void __init trap_init(void)
pgm_check_table[0x12] = &translation_exception;
pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
pgm_check_table[0x18] = &transaction_exception;
pgm_check_table[0x38] = &do_asce_exception;
pgm_check_table[0x39] = &do_dat_exception;
pgm_check_table[0x3A] = &do_dat_exception;
......
......@@ -85,7 +85,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
static void vdso_init_data(struct vdso_data *vd)
{
vd->ectg_available =
addressing_mode != HOME_SPACE_MODE && test_facility(31);
s390_user_mode != HOME_SPACE_MODE && test_facility(31);
}
#ifdef CONFIG_64BIT
......@@ -102,7 +102,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
lowcore->vdso_per_cpu_data = __LC_PASTE;
if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
return 0;
segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
......@@ -147,7 +147,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste;
if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
return;
psal = (u32 *)(addr_t) lowcore->paste[4];
......@@ -165,7 +165,7 @@ static void vdso_init_cr5(void)
{
unsigned long cr5;
if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
return;
cr5 = offsetof(struct _lowcore, paste);
__ctl_load(cr5, 5, 5);
......
......@@ -378,9 +378,8 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,
long cpu = (long) hcpu;
idle = &per_cpu(s390_idle, cpu);
switch (action) {
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DYING:
case CPU_DYING_FROZEN:
idle->nohz_delay = 0;
default:
break;
......
......@@ -5,7 +5,7 @@ source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
def_bool y
prompt "Virtualization"
prompt "KVM"
---help---
Say Y here to get to see options for using your Linux host to run other
operating systems inside virtual machines (guests).
......
......@@ -211,7 +211,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
spin_unlock(&fi->lock);
/* deal with other level 3 hypervisors */
if (stsi(mem, 3, 2, 2) == -ENOSYS)
if (stsi(mem, 3, 2, 2))
mem->count = 0;
if (mem->count < 8)
mem->count++;
......@@ -259,7 +259,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
mem = get_zeroed_page(GFP_KERNEL);
if (!mem)
goto out_fail;
if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
if (stsi((void *) mem, fc, sel1, sel2))
goto out_mem;
break;
case 3:
......
......@@ -4,6 +4,7 @@
lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
obj-y += usercopy.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o
lib-$(CONFIG_SMP) += spinlock.o
/*
* String handling functions.
*
* Copyright IBM Corp. 2012
*/
#include <linux/linkage.h>
/*
* memset implementation
*
* This code corresponds to the C construct below. We do distinguish
* between clearing (c == 0) and setting a memory array (c != 0) simply
* because nearly all memset invocations in the kernel clear memory and
* the xc instruction is preferred in such cases.
*
* void *memset(void *s, int c, size_t n)
* {
* if (likely(c == 0))
* return __builtin_memset(s, 0, n);
* return __builtin_memset(s, c, n);
* }
*/
ENTRY(memset)
basr %r5,%r0
.Lmemset_base:
ltr %r4,%r4
bzr %r14
ltr %r3,%r3
jnz .Lmemset_fill
ahi %r4,-1
lr %r3,%r4
srl %r3,8
ltr %r3,%r3
lr %r1,%r2
je .Lmemset_clear_rest
.Lmemset_clear_loop:
xc 0(256,%r1),0(%r1)
la %r1,256(%r1)
brct %r3,.Lmemset_clear_loop
.Lmemset_clear_rest:
ex %r4,.Lmemset_xc-.Lmemset_base(%r5)
br %r14
.Lmemset_fill:
stc %r3,0(%r2)
chi %r4,1
lr %r1,%r2
ber %r14
ahi %r4,-2
lr %r3,%r4
srl %r3,8
ltr %r3,%r3
je .Lmemset_fill_rest
.Lmemset_fill_loop:
mvc 1(256,%r1),0(%r1)
la %r1,256(%r1)
brct %r3,.Lmemset_fill_loop
.Lmemset_fill_rest:
ex %r4,.Lmemset_mvc-.Lmemset_base(%r5)
br %r14
.Lmemset_xc:
xc 0(1,%r1),0(%r1)
.Lmemset_mvc:
mvc 1(1,%r1),0(%r1)
/*
* memcpy implementation
*
* void *memcpy(void *dest, const void *src, size_t n)
*/
ENTRY(memcpy)
basr %r5,%r0
.Lmemcpy_base:
ltr %r4,%r4
bzr %r14
ahi %r4,-1
lr %r0,%r4
srl %r0,8
ltr %r0,%r0
lr %r1,%r2
jnz .Lmemcpy_loop
.Lmemcpy_rest:
ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
br %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)
la %r1,256(%r1)
la %r3,256(%r3)
brct %r0,.Lmemcpy_loop
j .Lmemcpy_rest
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
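A note on the overlapping-mvc trick the fill paths above rely on: mvc moves bytes strictly left to right, one byte at a time, so an overlapping move whose destination starts one byte past its source replicates the first byte across the whole range. A hedged C model of that semantics (illustrative, not kernel code):

/* models MVC: byte-wise copy, lowest address first */
static void mvc_model(unsigned char *dst, const unsigned char *src,
		      unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		dst[i] = src[i];
}

/* memset-style fill: seed s[0], then copy with dst == src + 1 */
static void fill_model(unsigned char *s, unsigned char c, unsigned int n)
{
	s[0] = c;
	if (n > 1)
		mvc_model(s + 1, s, n - 1);	/* propagates s[0] */
}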
/*
* String handling functions.
*
* Copyright IBM Corp. 2012
*/
#include <linux/linkage.h>
/*
* memset implementation
*
* This code corresponds to the C construct below. We do distinguish
* between clearing (c == 0) and setting a memory array (c != 0) simply
* because nearly all memset invocations in the kernel clear memory and
* the xc instruction is preferred in such cases.
*
* void *memset(void *s, int c, size_t n)
* {
* if (likely(c == 0))
* return __builtin_memset(s, 0, n);
* return __builtin_memset(s, c, n);
* }
*/
ENTRY(memset)
ltgr %r4,%r4
bzr %r14
ltgr %r3,%r3
jnz .Lmemset_fill
aghi %r4,-1
srlg %r3,%r4,8
ltgr %r3,%r3
lgr %r1,%r2
jz .Lmemset_clear_rest
.Lmemset_clear_loop:
xc 0(256,%r1),0(%r1)
la %r1,256(%r1)
brctg %r3,.Lmemset_clear_loop
.Lmemset_clear_rest:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
br %r14
.Lmemset_fill:
stc %r3,0(%r2)
cghi %r4,1
lgr %r1,%r2
ber %r14
aghi %r4,-2
srlg %r3,%r4,8
ltgr %r3,%r3
jz .Lmemset_fill_rest
.Lmemset_fill_loop:
mvc 1(256,%r1),0(%r1)
la %r1,256(%r1)
brctg %r3,.Lmemset_fill_loop
.Lmemset_fill_rest:
larl %r3,.Lmemset_mvc
ex %r4,0(%r3)
br %r14
.Lmemset_xc:
xc 0(1,%r1),0(%r1)
.Lmemset_mvc:
mvc 1(1,%r1),0(%r1)
/*
* memcpy implementation
*
* void *memcpy(void *dest, const void *src, size_t n)
*/
ENTRY(memcpy)
ltgr %r4,%r4
bzr %r14
aghi %r4,-1
srlg %r5,%r4,8
ltgr %r5,%r5
lgr %r1,%r2
jnz .Lmemcpy_loop
.Lmemcpy_rest:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
br %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)
la %r1,256(%r1)
la %r3,256(%r3)
brctg %r5,.Lmemcpy_loop
j .Lmemcpy_rest
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
......@@ -43,11 +43,7 @@ static inline char *__strnend(const char *s, size_t n)
*/
size_t strlen(const char *s)
{
#if __GNUC__ < 4
return __strend(s) - s;
#else
return __builtin_strlen(s);
#endif
}
EXPORT_SYMBOL(strlen);
......@@ -73,7 +69,6 @@ EXPORT_SYMBOL(strnlen);
*/
char *strcpy(char *dest, const char *src)
{
#if __GNUC__ < 4
register int r0 asm("0") = 0;
char *ret = dest;
......@@ -82,9 +77,6 @@ char *strcpy(char *dest, const char *src)
: "+&a" (dest), "+&a" (src) : "d" (r0)
: "cc", "memory" );
return ret;
#else
return __builtin_strcpy(dest, src);
#endif
}
EXPORT_SYMBOL(strcpy);
......@@ -106,7 +98,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
if (size) {
size_t len = (ret >= size) ? size-1 : ret;
dest[len] = '\0';
__builtin_memcpy(dest, src, len);
memcpy(dest, src, len);
}
return ret;
}
......@@ -124,8 +116,8 @@ EXPORT_SYMBOL(strlcpy);
char *strncpy(char *dest, const char *src, size_t n)
{
size_t len = __strnend(src, n) - src;
__builtin_memset(dest + len, 0, n - len);
__builtin_memcpy(dest, src, len);
memset(dest + len, 0, n - len);
memcpy(dest, src, len);
return dest;
}
EXPORT_SYMBOL(strncpy);
......@@ -171,7 +163,7 @@ size_t strlcat(char *dest, const char *src, size_t n)
if (len >= n)
len = n - 1;
dest[len] = '\0';
__builtin_memcpy(dest, src, len);
memcpy(dest, src, len);
}
return res;
}
......@@ -194,7 +186,7 @@ char *strncat(char *dest, const char *src, size_t n)
char *p = __strend(dest);
p[len] = '\0';
__builtin_memcpy(p, src, len);
memcpy(p, src, len);
return dest;
}
EXPORT_SYMBOL(strncat);
......@@ -348,41 +340,3 @@ void *memscan(void *s, int c, size_t n)
return (void *) ret;
}
EXPORT_SYMBOL(memscan);
/**
* memcpy - Copy one area of memory to another
* @dest: Where to copy to
* @src: Where to copy from
* @n: The size of the area.
*
* returns a pointer to @dest
*/
void *memcpy(void *dest, const void *src, size_t n)
{
return __builtin_memcpy(dest, src, n);
}
EXPORT_SYMBOL(memcpy);
/**
* memset - Fill a region of memory with the given value
* @s: Pointer to the start of the area.
* @c: The byte to fill the area with
* @n: The size of the area.
*
* returns a pointer to @s
*/
void *memset(void *s, int c, size_t n)
{
char *xs;
if (c == 0)
return __builtin_memset(s, 0, n);
xs = (char *) s;
if (n > 0)
do {
*xs++ = c;
} while (--n > 0);
return s;
}
EXPORT_SYMBOL(memset);
......@@ -3,7 +3,7 @@
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
page-states.o gup.o
page-states.o gup.o extable.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
/*
* Search one exception table for an entry corresponding to the
* given instruction address, and return the address of the entry,
* or NULL if none is found.
* We use a binary search, and thus we assume that the table is
* already sorted.
*/
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
const struct exception_table_entry *last,
unsigned long value)
{
const struct exception_table_entry *mid;
unsigned long addr;
while (first <= last) {
mid = ((last - first) >> 1) + first;
addr = extable_insn(mid);
if (addr < value)
first = mid + 1;
else if (addr > value)
last = mid - 1;
else
return mid;
}
return NULL;
}
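The extable_insn()/extable_fixup() accessors used above come with the switch to relative exception table entries: each 32-bit field holds a signed offset from its own address instead of an absolute pointer, which halves the table on 64-bit and makes it position independent. A sketch of what such accessors look like (the real definitions live in an arch header; field names match the code above):

static inline unsigned long
extable_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long
extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}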
/*
* The exception table needs to be sorted so that the binary
* search that we use to find entries in it works properly.
* This is used both for the kernel exception table and for
* the exception tables of modules that get loaded.
*
*/
static int cmp_ex(const void *a, const void *b)
{
const struct exception_table_entry *x = a, *y = b;
/* This compare is only valid after normalization. */
return x->insn - y->insn;
}
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
struct exception_table_entry *p;
int i;
/* Normalize entries to be relative to the start of the section
   (each entry is 8 bytes, hence the i += 8 stride) */
for (p = start, i = 0; p < finish; p++, i += 8)
p->insn += i;
sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
/* Denormalize all entries */
for (p = start, i = 0; p < finish; p++, i += 8)
p->insn -= i;
}
#ifdef CONFIG_MODULES
/*
* If the exception table is sorted, any entries referring to the
* module init area will be at the beginning or the end.
*/
void trim_init_extable(struct module *m)
{
/* Trim the beginning */
while (m->num_exentries &&
within_module_init(extable_insn(&m->extable[0]), m)) {
m->extable++;
m->num_exentries--;
}
/* Trim the end */
while (m->num_exentries &&
within_module_init(extable_insn(&m->extable[m->num_exentries-1]), m))
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
......@@ -111,7 +111,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
if (trans_exc_code == 2)
/* Access via secondary space, set_fs setting decides */
return current->thread.mm_segment.ar4;
if (addressing_mode == HOME_SPACE_MODE)
if (s390_user_mode == HOME_SPACE_MODE)
/* User space if the access has been done via home space. */
return trans_exc_code == 3;
/*
......@@ -163,7 +163,7 @@ static noinline void do_no_context(struct pt_regs *regs)
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup) {
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
return;
}
......@@ -628,9 +628,8 @@ static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
struct thread_struct *thread, *next;
struct task_struct *tsk;
switch (action) {
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DEAD:
case CPU_DEAD_FROZEN:
spin_lock_irq(&pfault_lock);
list_for_each_entry_safe(thread, next, &pfault_list, list) {
thread->pfault_wait = 0;
......
......@@ -154,6 +154,43 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
return 1;
}
/*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
* back to the regular GUP.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
unsigned long next, flags;
pgd_t *pgdp, pgd;
int nr = 0;
start &= PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(void __user *)start, len)))
return 0;
local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
pgd = *pgdp;
barrier();
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
break;
if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
break;
} while (pgdp++, addr = next, addr != end);
local_irq_restore(flags);
return nr;
}
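For context, a hedged sketch of how a caller might use this IRQ-safe variant; a real caller would fall back to the regular, sleeping get_user_pages() for whatever the fast path could not pin:

#include <linux/mm.h>

static int pin_pages_sketch(unsigned long uaddr, int nr_pages,
			    struct page **pages)
{
	/* lockless attempt; may pin fewer than nr_pages */
	int pinned = __get_user_pages_fast(uaddr, nr_pages, 1, pages);

	if (pinned < nr_pages)
		return -EFAULT;	/* simplified: no slow-path fallback */
	return pinned;
}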
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
......
......@@ -42,7 +42,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
static unsigned long setup_zero_pages(void)
static unsigned long __init setup_zero_pages(void)
{
struct cpuid cpu_id;
unsigned int order;
......@@ -212,7 +212,7 @@ void free_initmem(void)
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
free_init_pages("initrd memory", start, end);
}
......
......@@ -609,8 +609,8 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
*/
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
struct page *page;
unsigned long *table;
unsigned long *uninitialized_var(table);
struct page *uninitialized_var(page);
unsigned int mask, bit;
if (mm_has_pgste(mm))
......@@ -796,7 +796,7 @@ int s390_enable_sie(void)
struct mm_struct *mm, *old_mm;
/* Do we have switched amode? If no, we cannot do sie */
if (addressing_mode == HOME_SPACE_MODE)
if (s390_user_mode == HOME_SPACE_MODE)
return -EINVAL;
/* Do we have pgstes? if yes, we are done */
......
......@@ -107,7 +107,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pm_dir = pmd_offset(pu_dir, address);
#ifdef CONFIG_64BIT
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
(address + HPAGE_SIZE <= start + size) &&
(address >= HPAGE_SIZE)) {
......
#
# Arch-specific network modules
#
obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
/*
* BPF Jit compiler for s390, help functions.
*
* Copyright IBM Corp. 2012
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/linkage.h>
/*
* Calling convention:
* registers %r2, %r6-%r8, %r10-%r11, %r13, %r15 are call saved
* %r2: skb pointer
* %r3: offset parameter
* %r5: BPF A accumulator
* %r8: return address
* %r9: save register for skb pointer
* %r10: skb->data
* %r11: skb->len - skb->data_len (headlen)
* %r12: BPF X accumulator
*
* skb_copy_bits takes 4 parameters:
* %r2 = skb pointer
* %r3 = offset into skb data
* %r4 = length to copy
* %r5 = pointer to temp buffer
*/
#define SKBDATA %r8
/* A = *(u32 *) (skb->data+K+X) */
ENTRY(sk_load_word_ind)
ar %r3,%r12 # offset += X
bmr %r8 # < 0 -> return with cc
/* A = *(u32 *) (skb->data+K) */
ENTRY(sk_load_word)
llgfr %r1,%r3 # extend offset
ahi %r3,4 # offset + 4
clr %r11,%r3 # hlen <= offset + 4 ?
jl sk_load_word_slow
l %r5,0(%r1,%r10) # get word from skb
xr %r1,%r1 # set cc to zero
br %r8
sk_load_word_slow:
lgr %r9,%r2 # save %r2
lhi %r4,4 # 4 bytes
la %r5,160(%r15) # pointer to temp buffer
brasl %r14,skb_copy_bits # get data from skb
l %r5,160(%r15) # load result from temp buffer
ltgr %r2,%r2 # set cc to (%r2 != 0)
lgr %r2,%r9 # restore %r2
br %r8
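Expressed in C, the word loader above does roughly the following (a hedged sketch; skb_headlen() equals skb->len - skb->data_len, matching the %r11 convention documented in the header comment):

#include <linux/skbuff.h>

/* load the big-endian 32-bit word at offset k of the packet */
static int sk_load_word_sketch(const struct sk_buff *skb, u32 k, u32 *val)
{
	u32 tmp;

	if (k + sizeof(u32) <= skb_headlen(skb)) {
		*val = *(u32 *)(skb->data + k);	/* fast path: linear head */
		return 0;
	}
	if (skb_copy_bits(skb, k, &tmp, sizeof(tmp)) < 0)
		return -1;			/* offset out of range */
	*val = tmp;
	return 0;
}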
/* A = *(u16 *) (skb->data+K+X) */
ENTRY(sk_load_half_ind)
ar %r3,%r12 # offset += X
bmr %r8 # < 0 -> return with cc
/* A = *(u16 *) (skb->data+K) */
ENTRY(sk_load_half)
llgfr %r1,%r3 # extend offset
ahi %r3,2 # offset + 2
clr %r11,%r3 # hlen <= offset + 2 ?
jl sk_load_half_slow
llgh %r5,0(%r1,%r10) # get half from skb
xr %r1,%r1 # set cc to zero
br %r8
sk_load_half_slow:
lgr %r9,%r2 # save %r2
lhi %r4,2 # 2 bytes
la %r5,162(%r15) # pointer to temp buffer
brasl %r14,skb_copy_bits # get data from skb
xc 160(2,%r15),160(%r15)
l %r5,160(%r15) # load result from temp buffer
ltgr %r2,%r2 # set cc to (%r2 != 0)
lgr %r2,%r9 # restore %r2
br %r8
/* A = *(u8 *) (skb->data+K+X) */
ENTRY(sk_load_byte_ind)
ar %r3,%r12 # offset += X
bmr %r8 # < 0 -> return with cc
/* A = *(u8 *) (skb->data+K) */
ENTRY(sk_load_byte)
llgfr %r1,%r3 # extend offset
clr %r11,%r3 # hlen < offset ?
jle sk_load_byte_slow
lhi %r5,0
ic %r5,0(%r1,%r10) # get byte from skb
xr %r1,%r1 # set cc to zero
br %r8
sk_load_byte_slow:
lgr %r9,%r2 # save %r2
lhi %r4,1 # 1 byte
la %r5,163(%r15) # pointer to temp buffer
brasl %r14,skb_copy_bits # get data from skb
xc 160(3,%r15),160(%r15)
l %r5,160(%r15) # load result from temp buffer
ltgr %r2,%r2 # set cc to (%r2 != 0)
lgr %r2,%r9 # restore %r2
br %r8
/* A = (*(u8 *)(skb->data+K) & 0xf) << 2 */
ENTRY(sk_load_byte_msh)
llgfr %r1,%r3 # extend offset
clr %r11,%r3 # hlen < offset ?
jle sk_load_byte_msh_slow
lhi %r12,0
ic %r12,0(%r1,%r10) # get byte from skb
nill %r12,0x0f
sll %r12,2
xr %r1,%r1 # set cc to zero
br %r8
sk_load_byte_msh_slow:
lgr %r9,%r2 # save %r2
lhi %r4,1 # 1 byte
la %r5,163(%r15) # pointer to temp buffer
brasl %r14,skb_copy_bits # get data from skb
xc 160(3,%r15),160(%r15)
l %r12,160(%r15) # load result from temp buffer
nill %r12,0x0f
sll %r12,2
ltgr %r2,%r2 # set cc to (%r2 != 0)
lgr %r2,%r9 # restore %r2
br %r8
......@@ -70,3 +70,21 @@ config DASD_EER
This driver provides a character device interface to the
DASD extended error reporting. This is only needed if you want to
use applications written for the EER facility.
config SCM_BLOCK
def_tristate m
prompt "Support for Storage Class Memory"
depends on S390 && BLOCK && EADM_SCH && SCM_BUS
help
Block device driver for Storage Class Memory (SCM). This driver
provides a block device interface for each available SCM increment.
To compile this driver as a module, choose M here: the
module will be called scm_block.
config SCM_BLOCK_CLUSTER_WRITE
def_bool y
prompt "SCM force cluster writes"
depends on SCM_BLOCK
help
Force writes to Storage Class Memory (SCM) to be done in clusters.
......@@ -17,3 +17,9 @@ obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
scm_block-objs := scm_drv.o scm_blk.o
ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
scm_block-objs += scm_blk_cluster.o
endif
obj-$(CONFIG_SCM_BLOCK) += scm_block.o
......@@ -292,12 +292,12 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
#else
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
return -ENOSYS;
return -ENOTTY;
}
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
return -ENOSYS;
return -ENOTTY;
}
#endif
......