Commit 1ee07ef6 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "This patch set contains the main portion of the changes for 3.18 in
  regard to the s390 architecture.  It is a bit bigger than usual,
  mainly because of a new driver and the vector extension patches.

  The interesting bits are:
   - Quite a bit of work on the tracing front.  Uprobes support is
     added, and the ftrace code is reworked to win back some of the
     performance lost when CONFIG_FTRACE is enabled.
   - To improve boot time with CONFIG_DEBUG_PAGEALLOC, support for the
     IPTE range facility is added.
   - The rwlock code is refactored to improve writer fairness and to be
     able to use the interlocked-access instructions.
   - The kernel part for the support of the vector extension is added.
   - The device driver to access the CD/DVD on the HMC is added; this
     will hopefully come in handy to improve the installation process.
   - Add support for control-unit initiated reconfiguration.
   - The crypto device driver is enhanced to enable the additional AP
     domains and to allow the new crypto hardware to be used.
   - Bug fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (39 commits)
  s390/ftrace: simplify enabling/disabling of ftrace_graph_caller
  s390/ftrace: remove 31 bit ftrace support
  s390/kdump: add support for vector extension
  s390/disassembler: add vector instructions
  s390: add support for vector extension
  s390/zcrypt: Toleration of new crypto hardware
  s390/idle: consolidate idle functions and definitions
  s390/nohz: use a per-cpu flag for arch_needs_cpu
  s390/vtime: do not reset idle data on CPU hotplug
  s390/dasd: add support for control unit initiated reconfiguration
  s390/dasd: fix infinite loop during format
  s390/mm: make use of ipte range facility
  s390/setup: correct 4-level kernel page table detection
  s390/topology: call set_sched_topology early
  s390/uprobes: architecture backend for uprobes
  s390/uprobes: common library for kprobes and uprobes
  s390/rwlock: use the interlocked-access facility 1 instructions
  s390/rwlock: improve writer fairness
  s390/rwlock: remove interrupt-enabling rwlock variant.
  s390/mm: remove change bit override support
  ...
parents 77654908 0cccdda8
......@@ -300,6 +300,7 @@ architectures:
- arm
- ppc
- mips
- s390
3. Configuring Kprobes
......
......@@ -58,6 +58,9 @@ config NO_IOPORT_MAP
config PCI_QUIRKS
def_bool n
config ARCH_SUPPORTS_UPROBES
def_bool 64BIT
config S390
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
......@@ -97,6 +100,7 @@ config S390
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS2
select DYNAMIC_FTRACE if FUNCTION_TRACER
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES if !SMP
select GENERIC_FIND_FIRST_BIT
......@@ -113,10 +117,11 @@ config S390
select HAVE_CMPXCHG_LOCAL
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE if 64BIT
select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
select HAVE_FUNCTION_TRACER if 64BIT
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
......
......@@ -35,13 +35,16 @@ endif
export LD_BFD
cflags-$(CONFIG_MARCH_G5) += -march=g5
cflags-$(CONFIG_MARCH_Z900) += -march=z900
cflags-$(CONFIG_MARCH_Z990) += -march=z990
cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109
cflags-$(CONFIG_MARCH_Z10) += -march=z10
cflags-$(CONFIG_MARCH_Z196) += -march=z196
cflags-$(CONFIG_MARCH_ZEC12) += -march=zEC12
mflags-$(CONFIG_MARCH_G5) := -march=g5
mflags-$(CONFIG_MARCH_Z900) := -march=z900
mflags-$(CONFIG_MARCH_Z990) := -march=z990
mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
mflags-$(CONFIG_MARCH_Z10) := -march=z10
mflags-$(CONFIG_MARCH_Z196) := -march=z196
mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
aflags-y += $(mflags-y)
cflags-y += $(mflags-y)
cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
......
......@@ -15,11 +15,13 @@
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0)
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0)
#define __ASM_BARRIER "bcr 15,0\n"
#endif
#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
......
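
Defining the barrier both as mb() and as the assembler string __ASM_BARRIER lets other inline assembly splice the serializing instruction directly into its own sequence, which is exactly what the reworked arch_spin_unlock() further down does. A minimal sketch of that pattern (hypothetical helper name, assuming the macros above):

static inline void example_store_release(unsigned int *p, unsigned int v)
{
	/* serialize all prior accesses, then publish the new value */
	asm volatile(
		__ASM_BARRIER
		"st %1,%0\n"
		: "+Q" (*p)
		: "d" (v)
		: "cc", "memory");
}
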
......@@ -8,8 +8,6 @@
#define _S390_CPUTIME_H
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/div64.h>
......@@ -167,28 +165,8 @@ static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
return clock;
}
struct s390_idle_data {
int nohz_delay;
unsigned int sequence;
unsigned long long idle_count;
unsigned long long idle_time;
unsigned long long clock_idle_enter;
unsigned long long clock_idle_exit;
unsigned long long timer_idle_enter;
unsigned long long timer_idle_exit;
};
cputime64_t arch_cpu_idle_time(int cpu);
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
cputime64_t s390_get_idle_time(int cpu);
#define arch_idle_time(cpu) s390_get_idle_time(cpu)
static inline int s390_nohz_delay(int cpu)
{
return __get_cpu_var(s390_idle).nohz_delay != 0;
}
#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
#endif /* _S390_CPUTIME_H */
......@@ -13,12 +13,13 @@
#define OPERAND_FPR 0x2 /* Operand printed as %fx */
#define OPERAND_AR 0x4 /* Operand printed as %ax */
#define OPERAND_CR 0x8 /* Operand printed as %cx */
#define OPERAND_DISP 0x10 /* Operand printed as displacement */
#define OPERAND_BASE 0x20 /* Operand printed as base register */
#define OPERAND_INDEX 0x40 /* Operand printed as index register */
#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
#define OPERAND_VR 0x10 /* Operand printed as %vx */
#define OPERAND_DISP 0x20 /* Operand printed as displacement */
#define OPERAND_BASE 0x40 /* Operand printed as base register */
#define OPERAND_INDEX 0x80 /* Operand printed as index register */
#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
struct s390_operand {
......
......@@ -102,6 +102,7 @@
#define HWCAP_S390_ETF3EH 256
#define HWCAP_S390_HIGH_GPRS 512
#define HWCAP_S390_TE 1024
#define HWCAP_S390_VXRS 2048
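
User space can test for the vector extension at runtime through the auxiliary vector; a minimal sketch (illustrative only, using glibc's getauxval and the HWCAP_S390_VXRS value defined above):

#include <sys/auxv.h>
#include <stdio.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("vector extension %savailable\n",
	       (hwcap & 2048 /* HWCAP_S390_VXRS */) ? "" : "not ");
	return 0;
}
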
/*
* These are used to set parameters in the core dumps.
......@@ -225,6 +226,6 @@ int arch_setup_additional_pages(struct linux_binprm *, int);
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
#endif
......@@ -4,6 +4,7 @@
#ifndef __ASSEMBLY__
extern void _mcount(void);
extern char ftrace_graph_caller_end;
struct dyn_arch_ftrace { };
......@@ -17,10 +18,8 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_64BIT
#define MCOUNT_INSN_SIZE 12
#else
#define MCOUNT_INSN_SIZE 22
#endif
#define MCOUNT_INSN_SIZE 18
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif /* _ASM_S390_FTRACE_H */
/*
* Copyright IBM Corp. 2014
*
* Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _S390_IDLE_H
#define _S390_IDLE_H
#include <linux/types.h>
#include <linux/device.h>
struct s390_idle_data {
unsigned int sequence;
unsigned long long idle_count;
unsigned long long idle_time;
unsigned long long clock_idle_enter;
unsigned long long clock_idle_exit;
unsigned long long timer_idle_enter;
unsigned long long timer_idle_exit;
};
extern struct device_attribute dev_attr_idle_count;
extern struct device_attribute dev_attr_idle_time_us;
#endif /* _S390_IDLE_H */
......@@ -89,12 +89,12 @@ extern u32 ipl_flags;
extern u32 dump_prefix_page;
struct dump_save_areas {
struct save_area **areas;
struct save_area_ext **areas;
int count;
};
extern struct dump_save_areas dump_save_areas;
struct save_area *dump_save_area_create(int cpu);
struct save_area_ext *dump_save_area_create(int cpu);
extern void do_reipl(void);
extern void do_halt(void);
......
......@@ -51,6 +51,7 @@ enum interruption_class {
IRQEXT_CMS,
IRQEXT_CMC,
IRQEXT_CMR,
IRQEXT_FTP,
IRQIO_CIO,
IRQIO_QAI,
IRQIO_DAS,
......
......@@ -84,6 +84,10 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
int probe_is_prohibited_opcode(u16 *insn);
int probe_get_fixup_type(u16 *insn);
int probe_is_insn_relative_long(u16 *insn);
#define flush_insn_slot(p) do { } while (0)
#endif /* _ASM_S390_KPROBES_H */
......@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/types.h>
#ifdef CONFIG_32BIT
......@@ -31,6 +32,11 @@ struct save_area {
u32 ctrl_regs[16];
} __packed;
struct save_area_ext {
struct save_area sa;
__vector128 vx_regs[32];
};
struct _lowcore {
psw_t restart_psw; /* 0x0000 */
psw_t restart_old_psw; /* 0x0008 */
......@@ -183,6 +189,11 @@ struct save_area {
u64 ctrl_regs[16];
} __packed;
struct save_area_ext {
struct save_area sa;
__vector128 vx_regs[32];
};
struct _lowcore {
__u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
......@@ -310,7 +321,10 @@ struct _lowcore {
/* Extended facility list */
__u64 stfle_fac_list[32]; /* 0x0f00 */
__u8 pad_0x1000[0x11b8-0x1000]; /* 0x1000 */
__u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
/* Pointer to vector register save area */
__u64 vector_save_area_addr; /* 0x11b0 */
/* 64 bit extparam used for pfault/diag 250: defined by architecture */
__u64 ext_params2; /* 0x11B8 */
......@@ -334,9 +348,10 @@ struct _lowcore {
/* Transaction abort diagnostic block */
__u8 pgm_tdb[256]; /* 0x1800 */
__u8 pad_0x1900[0x1c00-0x1900]; /* 0x1900 */
/* align to the top of the prefix area */
__u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
/* Software defined save area for vector registers */
__u8 vector_save_area[1024]; /* 0x1c00 */
} __packed;
#endif /* CONFIG_32BIT */
......
......@@ -38,7 +38,7 @@ struct mci {
__u32 pm : 1; /* 22 psw program mask and cc validity */
__u32 ia : 1; /* 23 psw instruction address validity */
__u32 fa : 1; /* 24 failing storage address validity */
__u32 : 1; /* 25 */
__u32 vr : 1; /* 25 vector register validity */
__u32 ec : 1; /* 26 external damage code validity */
__u32 fp : 1; /* 27 floating point register validity */
__u32 gr : 1; /* 28 general register validity */
......
......@@ -217,7 +217,6 @@ extern unsigned long MODULES_END;
*/
/* Hardware bits in the page table entry */
#define _PAGE_CO 0x100 /* HW Change-bit override */
#define _PAGE_PROTECT 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
......@@ -234,8 +233,8 @@ extern unsigned long MODULES_END;
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
_PAGE_DIRTY | _PAGE_YOUNG)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
_PAGE_YOUNG)
/*
* handle_pte_fault uses pte_present, pte_none and pte_file to find out the
......@@ -354,7 +353,6 @@ extern unsigned long MODULES_END;
#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
......@@ -371,7 +369,6 @@ extern unsigned long MODULES_END;
#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
#define _SEGMENT_ENTRY_CO 0x0100 /* change-recording override */
#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
......@@ -873,8 +870,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pgste = pgste_set_pte(ptep, pgste, entry);
pgste_set_unlock(ptep, pgste);
} else {
if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
pte_val(entry) |= _PAGE_CO;
*ptep = entry;
}
}
......@@ -1044,6 +1039,22 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}
static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
#ifndef CONFIG_64BIT
/* pto in ESA mode must point to the start of the segment table */
pto &= 0x7ffffc00;
#endif
/* Invalidate a range of ptes + global TLB flush of the ptes */
do {
asm volatile(
" .insn rrf,0xb2210000,%2,%0,%1,0"
: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
} while (nr != 255);
}
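
A caller passes the number of additional ptes beyond the first, i.e. the page count minus one; each execution of the wrapped instruction invalidates as many consecutive entries as it can and signals completion by wrapping the counter to 255, which the loop above polls for. A hypothetical caller sketch (the function name is an assumption, not part of the patch):

static inline void example_flush_pte_range(unsigned long addr,
					   pte_t *ptep, int npages)
{
	/* invalidate npages consecutive ptes starting at addr;
	 * assumes all entries belong to the same page table */
	__ptep_ipte_range(addr, npages - 1, ptep);
}
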
static inline void ptep_flush_direct(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
......
......@@ -13,9 +13,11 @@
#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING)
#define _CIF_ASCE (1<<CIF_ASCE)
#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
#ifndef __ASSEMBLY__
......@@ -43,6 +45,8 @@ static inline int test_cpu_flag(int flag)
return !!(S390_lowcore.cpu_flags & (1U << flag));
}
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
......@@ -113,6 +117,7 @@ struct thread_struct {
int ri_signum;
#ifdef CONFIG_64BIT
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
__vector128 *vxrs; /* Vector register save area */
#endif
};
......@@ -285,7 +290,12 @@ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
return (psw.addr - ilc) & mask;
#endif
}
/*
* Function to stop a processor until the next interrupt occurs
*/
void enabled_wait(void);
/*
* Function to drop a processor into disabled wait state
*/
......
......@@ -161,6 +161,12 @@ static inline long regs_return_value(struct pt_regs *regs)
return regs->gprs[2];
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->psw.addr = val | PSW_ADDR_AMODE;
}
int regs_query_register_offset(const char *name);
const char *regs_query_register_name(unsigned int offset);
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
......
......@@ -55,8 +55,8 @@ extern void detect_memory_memblock(void);
#define MACHINE_FLAG_LPP (1UL << 13)
#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
#define MACHINE_FLAG_TE (1UL << 15)
#define MACHINE_FLAG_RRBM (1UL << 16)
#define MACHINE_FLAG_TLB_LC (1UL << 17)
#define MACHINE_FLAG_VX (1UL << 18)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
......@@ -78,8 +78,8 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_LPP (0)
#define MACHINE_HAS_TOPOLOGY (0)
#define MACHINE_HAS_TE (0)
#define MACHINE_HAS_RRBM (0)
#define MACHINE_HAS_TLB_LC (0)
#define MACHINE_HAS_VX (0)
#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
......@@ -91,8 +91,8 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
#endif /* CONFIG_64BIT */
/*
......
......@@ -15,6 +15,7 @@
#define SIGP_SET_ARCHITECTURE 18
#define SIGP_COND_EMERGENCY_SIGNAL 19
#define SIGP_SENSE_RUNNING 21
#define SIGP_STORE_ADDITIONAL_STATUS 23
/* SIGP condition codes */
#define SIGP_CC_ORDER_CODE_ACCEPTED 0
......@@ -33,9 +34,10 @@
#ifndef __ASSEMBLY__
static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
u32 *status)
{
register unsigned int reg1 asm ("1") = parm;
register unsigned long reg1 asm ("1") = parm;
int cc;
asm volatile(
......
......@@ -29,7 +29,6 @@ extern int smp_find_processor_id(u16 address);
extern int smp_store_status(int cpu);
extern int smp_vcpu_scheduled(int cpu);
extern void smp_yield_cpu(int cpu);
extern void smp_yield(void);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern void smp_fill_possible_mask(void);
......@@ -50,7 +49,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
static inline void smp_fill_possible_mask(void) { }
#endif /* CONFIG_SMP */
......
......@@ -37,11 +37,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
* (the type definitions are in asm/spinlock_types.h)
*/
void arch_lock_relax(unsigned int cpu);
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_relax(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
static inline void arch_spin_relax(arch_spinlock_t *lock)
{
arch_lock_relax(lock->lock);
}
static inline u32 arch_spin_lockval(int cpu)
{
return ~cpu;
......@@ -64,11 +70,6 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
_raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
{
return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
if (!arch_spin_trylock_once(lp))
......@@ -91,7 +92,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
arch_spin_tryrelease_once(lp);
typecheck(unsigned int, lp->lock);
asm volatile(
__ASM_BARRIER
"st %1,%0\n"
: "+Q" (lp->lock)
: "d" (0)
: "cc", "memory");
}
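
Semantically the new unlock is a plain release store of zero instead of the compare-and-swap used before; in portable C11 terms (an analogy only, not kernel code):

#include <stdatomic.h>

static inline void example_spin_unlock(atomic_uint *lock)
{
	/* same ordering guarantee: all critical-section accesses
	 * complete before the lock word becomes visible as 0 */
	atomic_store_explicit(lock, 0, memory_order_release);
}
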
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
......@@ -123,13 +130,12 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
*/
#define arch_write_can_lock(x) ((x)->lock == 0)
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
unsigned int old = ACCESS_ONCE(rw->lock);
......@@ -144,16 +150,82 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
_raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __RAW_OP_OR "lao"
#define __RAW_OP_AND "lan"
#define __RAW_OP_ADD "laa"
#define __RAW_LOCK(ptr, op_val, op_string) \
({ \
unsigned int old_val; \
\
typecheck(unsigned int *, ptr); \
asm volatile( \
op_string " %0,%2,%1\n" \
"bcr 14,0\n" \
: "=d" (old_val), "+Q" (*ptr) \
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
#define __RAW_UNLOCK(ptr, op_val, op_string) \
({ \
unsigned int old_val; \
\
typecheck(unsigned int *, ptr); \
asm volatile( \
"bcr 14,0\n" \
op_string " %0,%2,%1\n" \
: "=d" (old_val), "+Q" (*ptr) \
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
if (!arch_read_trylock_once(rw))
unsigned int old;
old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
if ((int) old < 0)
_raw_read_lock_wait(rw);
}
static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int old;
old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
if (old != 0)
_raw_write_lock_wait(rw, old);
rw->owner = SPINLOCK_LOCKVAL;
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
rw->owner = 0;
__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
if (!arch_read_trylock_once(rw))
_raw_read_lock_wait_flags(rw, flags);
_raw_read_lock_wait(rw);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
......@@ -169,19 +241,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
if (!arch_write_trylock_once(rw))
_raw_write_lock_wait(rw);
}
static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
if (!arch_write_trylock_once(rw))
_raw_write_lock_wait_flags(rw, flags);
rw->owner = SPINLOCK_LOCKVAL;
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
typecheck(unsigned int, rw->lock);
rw->owner = 0;
asm volatile(
__ASM_BARRIER
"st %1,%0\n"
: "+Q" (rw->lock)
: "d" (0)
: "cc", "memory");
}
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
if (!arch_read_trylock_once(rw))
......@@ -191,12 +268,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
if (!arch_write_trylock_once(rw))
return _raw_write_trylock_retry(rw);
if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
return 0;
rw->owner = SPINLOCK_LOCKVAL;
return 1;
}
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
static inline void arch_read_relax(arch_rwlock_t *rw)
{
arch_lock_relax(rw->owner);
}
static inline void arch_write_relax(arch_rwlock_t *rw)
{
arch_lock_relax(rw->owner);
}
#endif /* __ASM_SPINLOCK_H */
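
Both rwlock variants rely on the same lock-word encoding: each reader atomically adds 1, a writer sets bit 31, so a negative word always involves a writer. An illustrative decoder (not part of the patch):

static int example_rwlock_state(unsigned int lock)
{
	if (lock == 0)
		return 0;		/* free */
	if ((int) lock < 0)
		return -1;		/* writer holds it or waits for readers to drain */
	return (int) lock;		/* number of active readers */
}
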
......@@ -13,6 +13,7 @@ typedef struct {
typedef struct {
unsigned int lock;
unsigned int owner;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
......
......@@ -103,6 +103,61 @@ static inline void restore_fp_regs(freg_t *fprs)
asm volatile("ld 15,%0" : : "Q" (fprs[15]));
}
static inline void save_vx_regs(__vector128 *vxrs)
{
typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
asm volatile(
" la 1,%0\n"
" .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
: "=Q" (*(addrtype *) vxrs) : : "1");
}
static inline void save_vx_regs_safe(__vector128 *vxrs)
{
unsigned long cr0, flags;
flags = arch_local_irq_save();
__ctl_store(cr0, 0, 0);
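	/* Assumed mapping for the two bits below: CR0 bits 17 and 18
	 * (LSB-based) are the vector-enablement and AFP-register
	 * controls; both must be on or the vstm in save_vx_regs()
	 * would fault. */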
__ctl_set_bit(0, 17);
__ctl_set_bit(0, 18);
save_vx_regs(vxrs);
__ctl_load(cr0, 0, 0);
arch_local_irq_restore(flags);
}
static inline void restore_vx_regs(__vector128 *vxrs)
{
typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
asm volatile(
" la 1,%0\n"
" .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
" .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
: : "Q" (*(addrtype *) vxrs) : "1");
}
static inline void save_fp_vx_regs(struct task_struct *task)
{
#ifdef CONFIG_64BIT
if (task->thread.vxrs)
save_vx_regs(task->thread.vxrs);
else
#endif
save_fp_regs(task->thread.fp_regs.fprs);
}
static inline void restore_fp_vx_regs(struct task_struct *task)
{
#ifdef CONFIG_64BIT
if (task->thread.vxrs)
restore_vx_regs(task->thread.vxrs);
else
#endif
restore_fp_regs(task->thread.fp_regs.fprs);
}
static inline void save_access_regs(unsigned int *acrs)
{
typedef struct { int _[NUM_ACRS]; } acrstype;
......@@ -120,16 +175,16 @@ static inline void restore_access_regs(unsigned int *acrs)
#define switch_to(prev,next,last) do { \
if (prev->mm) { \
save_fp_ctl(&prev->thread.fp_regs.fpc); \
save_fp_regs(prev->thread.fp_regs.fprs); \
save_fp_vx_regs(prev); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
if (next->mm) { \
update_cr_regs(next); \
restore_fp_ctl(&next->thread.fp_regs.fpc); \
restore_fp_regs(next->thread.fp_regs.fprs); \
restore_fp_vx_regs(next); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
update_cr_regs(next); \
} \
prev = __switch_to(prev,next); \
} while (0)
......
......@@ -84,11 +84,13 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
#define TIF_SECCOMP 5 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_UPROBE 7 /* breakpointed or single-stepping */
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
#define TIF_SINGLE_STEP 19 /* This task is single stepped */
#define TIF_BLOCK_STEP 20 /* This task is block stepped */
#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
......@@ -97,6 +99,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
......
/*
* User-space Probes (UProbes) for s390
*
* Copyright IBM Corp. 2014
* Author(s): Jan Willeke,
*/
#ifndef _ASM_UPROBES_H
#define _ASM_UPROBES_H
#include <linux/notifier.h>
typedef u16 uprobe_opcode_t;
#define UPROBE_XOL_SLOT_BYTES 256 /* cache aligned */
#define UPROBE_SWBP_INSN 0x0002
#define UPROBE_SWBP_INSN_SIZE 2
struct arch_uprobe {
union{
uprobe_opcode_t insn[3];
uprobe_opcode_t ixol[3];
};
unsigned int saved_per : 1;
unsigned int saved_int_code;
};
struct arch_uprobe_task {
};
int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm,
unsigned long addr);
int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
void *data);
void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
struct pt_regs *regs);
#endif /* _ASM_UPROBES_H */
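
Arming a user-space probe conceptually means replacing the first halfword of the target instruction with the trapping opcode defined above; an illustrative sketch (the real kernel patches a page-cache copy through the generic uprobes code, never a direct store):

static void example_arm_uprobe(unsigned short *insn)
{
	/* 0x0002 (UPROBE_SWBP_INSN) is an invalid opcode that traps */
	*insn = 0x0002;
}
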
......@@ -22,13 +22,17 @@ struct vdso_data {
__u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */
__u64 xtime_clock_sec; /* Kernel time 0x10 */
__u64 xtime_clock_nsec; /* 0x18 */
__u64 wtom_clock_sec; /* Wall to monotonic clock 0x20 */
__u64 wtom_clock_nsec; /* 0x28 */
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */
__u32 ectg_available; /* ECTG instruction present 0x38 */
__u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
__u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
__u64 xtime_coarse_sec; /* Coarse kernel time 0x20 */
__u64 xtime_coarse_nsec; /* 0x28 */
__u64 wtom_clock_sec; /* Wall to monotonic clock 0x30 */
__u64 wtom_clock_nsec; /* 0x38 */
__u64 wtom_coarse_sec; /* Coarse wall to monotonic 0x40 */
__u64 wtom_coarse_nsec; /* 0x48 */
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x50 */
__u32 tz_dsttime; /* Type of dst correction 0x54 */
__u32 ectg_available; /* ECTG instruction present 0x58 */
__u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
__u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
};
struct vdso_per_cpu_data {
......
......@@ -28,6 +28,4 @@ extern int del_virt_timer(struct vtimer_list *timer);
extern void init_cpu_vtimer(void);
extern void vtime_init(void);
extern void vtime_stop_cpu(void);
#endif /* _ASM_S390_TIMER_H */
......@@ -7,10 +7,14 @@
#define _ASM_S390_SIGCONTEXT_H
#include <linux/compiler.h>
#include <linux/types.h>
#define __NUM_GPRS 16
#define __NUM_FPRS 16
#define __NUM_ACRS 16
#define __NUM_GPRS 16
#define __NUM_FPRS 16
#define __NUM_ACRS 16
#define __NUM_VXRS 32
#define __NUM_VXRS_LOW 16
#define __NUM_VXRS_HIGH 16
#ifndef __s390x__
......@@ -59,6 +63,16 @@ typedef struct
_s390_fp_regs fpregs;
} _sigregs;
typedef struct
{
#ifndef __s390x__
unsigned long gprs_high[__NUM_GPRS];
#endif
unsigned long long vxrs_low[__NUM_VXRS_LOW];
__vector128 vxrs_high[__NUM_VXRS_HIGH];
unsigned char __reserved[128];
} _sigregs_ext;
struct sigcontext
{
unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS];
......
......@@ -17,6 +17,10 @@
typedef unsigned long addr_t;
typedef __signed__ long saddr_t;
typedef struct {
__u32 u[4];
} __vector128;
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_S390_TYPES_H */
......@@ -7,10 +7,15 @@
#ifndef _ASM_S390_UCONTEXT_H
#define _ASM_S390_UCONTEXT_H
#define UC_EXTENDED 0x00000001
#ifndef __s390x__
#define UC_GPRS_HIGH 1 /* uc_mcontext_ext has valid high gprs */
#define UC_VXRS 2 /* uc_mcontext_ext has valid vector regs */
/*
* The struct ucontext_extended describes how the registers are stored
* on a rt signal frame. Please note that the structure is not fixed,
* if new CPU registers are added to the user state the size of the
* struct ucontext_extended will increase.
*/
struct ucontext_extended {
unsigned long uc_flags;
struct ucontext *uc_link;
......@@ -19,11 +24,9 @@ struct ucontext_extended {
sigset_t uc_sigmask;
/* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
unsigned char __unused[128 - sizeof(sigset_t)];
unsigned long uc_gprs_high[16];
_sigregs_ext uc_mcontext_ext;
};
#endif
struct ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
......
......@@ -28,7 +28,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y := traps.o time.o process.o base.o early.o setup.o vtime.o
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
......@@ -52,11 +52,9 @@ obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y)
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
ifdef CONFIG_64BIT
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
......
......@@ -9,7 +9,7 @@
#include <linux/kbuild.h>
#include <linux/kvm_host.h>
#include <linux/sched.h>
#include <asm/cputime.h>
#include <asm/idle.h>
#include <asm/vdso.h>
#include <asm/pgtable.h>
......@@ -62,8 +62,12 @@ int main(void)
DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct vdso_data, wtom_coarse_sec));
DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
......@@ -73,8 +77,11 @@ int main(void)
/* constants used by the vdso */
DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
BLANK();
/* idle data offsets */
DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
......
......@@ -50,6 +50,14 @@ typedef struct
_s390_fp_regs32 fpregs;
} _sigregs32;
typedef struct
{
__u32 gprs_high[__NUM_GPRS];
__u64 vxrs_low[__NUM_VXRS_LOW];
__vector128 vxrs_high[__NUM_VXRS_HIGH];
__u8 __reserved[128];
} _sigregs_ext32;
#define _SIGCONTEXT_NSIG32 64
#define _SIGCONTEXT_NSIG_BPW32 32
#define __SIGNAL_FRAMESIZE32 96
......@@ -72,6 +80,7 @@ struct ucontext32 {
compat_sigset_t uc_sigmask;
/* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
unsigned char __unused[128 - sizeof(compat_sigset_t)];
_sigregs_ext32 uc_mcontext_ext;
};
struct stat64_emu31;
......
......@@ -46,9 +46,9 @@ struct dump_save_areas dump_save_areas;
/*
* Allocate and add a save area for a CPU
*/
struct save_area *dump_save_area_create(int cpu)
struct save_area_ext *dump_save_area_create(int cpu)
{
struct save_area **save_areas, *save_area;
struct save_area_ext **save_areas, *save_area;
save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
if (!save_area)
......@@ -385,10 +385,46 @@ static void *nt_s390_prefix(void *ptr, struct save_area *sa)
sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
}
/*
* Initialize vxrs high note (full 128 bit VX registers 16-31)
*/
static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs)
{
return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16],
16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME);
}
/*
* Initialize vxrs low note (lower halves of VX registers 0-15)
*/
static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
{
Elf64_Nhdr *note;
u64 len;
int i;
note = (Elf64_Nhdr *)ptr;
note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1;
note->n_descsz = 16 * 8;
note->n_type = NT_S390_VXRS_LOW;
len = sizeof(Elf64_Nhdr);
memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz);
len = roundup(len + note->n_namesz, 4);
ptr += len;
/* Copy lower halves of SIMD registers 0-15 */
for (i = 0; i < 16; i++) {
memcpy(ptr, &vx_regs[i], 8);
ptr += 8;
}
return ptr;
}
/*
* Fill ELF notes for one CPU with save area registers
*/
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vx_regs)
{
ptr = nt_prstatus(ptr, sa);
ptr = nt_fpregset(ptr, sa);
......@@ -397,6 +433,10 @@ void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
ptr = nt_s390_tod_preg(ptr, sa);
ptr = nt_s390_ctrs(ptr, sa);
ptr = nt_s390_prefix(ptr, sa);
if (MACHINE_HAS_VX && vx_regs) {
ptr = nt_s390_vx_low(ptr, vx_regs);
ptr = nt_s390_vx_high(ptr, vx_regs);
}
return ptr;
}
......@@ -484,7 +524,7 @@ static int get_cpu_cnt(void)
int i, cpus = 0;
for (i = 0; i < dump_save_areas.count; i++) {
if (dump_save_areas.areas[i]->pref_reg == 0)
if (dump_save_areas.areas[i]->sa.pref_reg == 0)
continue;
cpus++;
}
......@@ -530,17 +570,17 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
*/
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
struct save_area *sa;
struct save_area_ext *sa_ext;
void *ptr_start = ptr;
int i;
ptr = nt_prpsinfo(ptr);
for (i = 0; i < dump_save_areas.count; i++) {
sa = dump_save_areas.areas[i];
if (sa->pref_reg == 0)
sa_ext = dump_save_areas.areas[i];
if (sa_ext->sa.pref_reg == 0)
continue;
ptr = fill_cpu_elf_notes(ptr, sa);
ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
}
ptr = nt_vmcoreinfo(ptr);
memset(phdr, 0, sizeof(*phdr));
......@@ -581,7 +621,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
mem_chunk_cnt = get_mem_chunk_cnt();
alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
mem_chunk_cnt * sizeof(Elf64_Phdr);
hdr = kzalloc_panic(alloc_size);
/* Init elf header */
......
......@@ -390,10 +390,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
if (test_facility(50) && test_facility(73))
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
if (test_facility(66))
S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
#endif
}
......
......@@ -4,7 +4,7 @@
#include <linux/types.h>
#include <linux/signal.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
#include <asm/idle.h>
extern void *restart_stack;
extern unsigned long suspend_zero_pages;
......@@ -21,6 +21,8 @@ void psw_idle(struct s390_idle_data *, unsigned long);
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
int alloc_vector_registers(struct task_struct *tsk);
void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
......@@ -43,8 +45,10 @@ void special_op_exception(struct pt_regs *regs);
void specification_exception(struct pt_regs *regs);
void transaction_exception(struct pt_regs *regs);
void translation_exception(struct pt_regs *regs);
void vector_exception(struct pt_regs *regs);
void do_per_trap(struct pt_regs *regs);
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs);
void do_signal(struct pt_regs *regs);
......
......@@ -42,7 +42,8 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_UPROBE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
......@@ -265,6 +266,10 @@ sysc_work:
jo sysc_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
#ifdef CONFIG_UPROBES
tm __TI_flags+7(%r12),_TIF_UPROBE
jo sysc_uprobe_notify
#endif
tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
jo sysc_singlestep
tm __TI_flags+7(%r12),_TIF_SIGPENDING
......@@ -322,6 +327,16 @@ sysc_notify_resume:
larl %r14,sysc_return
jg do_notify_resume
#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
sysc_uprobe_notify:
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return
jg uprobe_notify_resume
#endif
#
# _PIF_PER_TRAP is set, call do_per_trap
#
......
/*
* Dynamic function tracer architecture backend.
*
* Copyright IBM Corp. 2009
* Copyright IBM Corp. 2009,2014
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>
......@@ -17,100 +17,76 @@
#include <asm/asm-offsets.h>
#include "entry.h"
#ifdef CONFIG_DYNAMIC_FTRACE
void mcount_replace_code(void);
void ftrace_disable_code(void);
void ftrace_enable_insn(void);
#ifdef CONFIG_64BIT
/*
 * The 64-bit mcount code looks like this:
 * stg %r14,8(%r15) # offset 0
 * > larl %r1,<&counter> # offset 6
 * > brasl %r14,_mcount # offset 12
 * lg %r14,8(%r15) # offset 18
 * Total length is 24 bytes. The middle two instructions of the mcount
 * block get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 64-bit enabled ftrace code block looks like this:
 * stg %r14,8(%r15) # offset 0
 * > lg %r1,__LC_FTRACE_FUNC # offset 6
 * > lgr %r0,%r0 # offset 12
 * > basr %r14,%r1 # offset 16
 * lg %r14,8(%r15) # offset 18
 * The return points of the mcount/ftrace function have the same offset 18.
 * The 64-bit disabled ftrace code block looks like this:
 * stg %r14,8(%r15) # offset 0
 * > jg .+18 # offset 6
 * > lgr %r0,%r0 # offset 12
 * > basr %r14,%r1 # offset 16
 * lg %r14,8(%r15) # offset 18
 * The mcount code looks like this:
 * stg %r14,8(%r15) # offset 0
 * larl %r1,<&counter> # offset 6
 * brasl %r14,_mcount # offset 12
 * lg %r14,8(%r15) # offset 18
 * Total length is 24 bytes. The complete mcount block initially gets replaced
 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
 * only patch the jg/lg instruction within the block.
 * Note: we do not patch the first instruction to an unconditional branch,
 * since that would break kprobes/jprobes. It is easier to leave the larl
 * instruction in and only modify the second instruction.
 * The enabled ftrace code block looks like this:
 * larl %r0,.+24 # offset 0
 * > lg %r1,__LC_FTRACE_FUNC # offset 6
 * br %r1 # offset 12
 * brcl 0,0 # offset 14
 * brc 0,0 # offset 20
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block. The disabled ftrace code block looks
 * like this:
 * larl %r0,.+24 # offset 0
 * > jg .+18 # offset 6
 * br %r1 # offset 12
 * brcl 0,0 # offset 14
 * brc 0,0 # offset 20
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
" .align 4\n"
"mcount_replace_code:\n"
" larl %r0,0f\n"
"ftrace_disable_code:\n"
" jg 0f\n"
" lgr %r0,%r0\n"
" basr %r14,%r1\n"
" br %r1\n"
" brcl 0,0\n"
" brc 0,0\n"
"0:\n"
" .align 4\n"
"ftrace_enable_insn:\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
#define MCOUNT_BLOCK_SIZE 24
#define MCOUNT_INSN_OFFSET 6
#define FTRACE_INSN_SIZE 6
#else /* CONFIG_64BIT */
/*
* The 31-bit mcount code looks like this:
* st %r14,4(%r15) # offset 0
* > bras %r1,0f # offset 4
* > .long _mcount # offset 8
* > .long <&counter> # offset 12
* > 0: l %r14,0(%r1) # offset 16
* > l %r1,4(%r1) # offset 20
* basr %r14,%r14 # offset 24
* l %r14,4(%r15) # offset 26
* Total length is 30 bytes. The twenty bytes starting from offset 4
* to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
* The 31-bit enabled ftrace code block looks like this:
* st %r14,4(%r15) # offset 0
* > l %r14,__LC_FTRACE_FUNC # offset 4
* > j 0f # offset 8
* > .fill 12,1,0x07 # offset 12
* 0: basr %r14,%r14 # offset 24
* l %r14,4(%r14) # offset 26
* The return points of the mcount/ftrace function have the same offset 26.
* The 31-bit disabled ftrace code block looks like this:
* st %r14,4(%r15) # offset 0
* > j .+26 # offset 4
* > j 0f # offset 8
* > .fill 12,1,0x07 # offset 12
* 0: basr %r14,%r14 # offset 24
* l %r14,4(%r14) # offset 26
* The j instruction branches to offset 30 to skip as many instructions
* as possible.
*/
asm(
" .align 4\n"
"ftrace_disable_code:\n"
" j 1f\n"
" j 0f\n"
" .fill 12,1,0x07\n"
"0: basr %r14,%r14\n"
"1:\n"
" .align 4\n"
"ftrace_enable_insn:\n"
" l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
#define FTRACE_INSN_SIZE 4
#endif /* CONFIG_64BIT */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
/* Initial replacement of the whole mcount block */
if (addr == MCOUNT_ADDR) {
if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
mcount_replace_code,
MCOUNT_BLOCK_SIZE))
return -EPERM;
return 0;
}
if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
MCOUNT_INSN_SIZE))
return -EPERM;
......@@ -135,8 +111,6 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addresses
......@@ -162,31 +136,26 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
return parent;
}
#ifdef CONFIG_DYNAMIC_FTRACE
/*
* Patch the kernel code at ftrace_graph_caller location. The instruction
* there is branch relative and save to prepare_ftrace_return. To disable
* the call to prepare_ftrace_return we patch the bras offset to point
* directly after the instructions. To enable the call we calculate
* the original offset to prepare_ftrace_return and put it back.
* there is branch relative on condition. To enable the ftrace graph code
* block, we simply patch the mask field of the instruction to zero and
* turn the instruction into a nop.
* To disable the ftrace graph code the mask field will be patched to
* all ones, which turns the instruction into an unconditional branch.
*/
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned short offset;
u8 op = 0x04; /* set mask field to zero */
offset = ((void *) prepare_ftrace_return -
(void *) ftrace_graph_caller) / 2;
return probe_kernel_write((void *) ftrace_graph_caller + 2,
&offset, sizeof(offset));
return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}
int ftrace_disable_ftrace_graph_caller(void)
{
static unsigned short offset = 0x0002;
u8 op = 0xf4; /* set mask field to all ones */
return probe_kernel_write((void *) ftrace_graph_caller + 2,
&offset, sizeof(offset));
return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
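
The byte being written sits at offset 1 of the branch-relative-on-condition instruction, where the condition mask occupies the high nibble and the 0x4 opcode extension the low nibble (this layout holds for both the 4-byte brc and the 6-byte brcl forms). An encoding sketch (hypothetical helper):

static unsigned char example_brc_mask_byte(unsigned char mask)
{
	/* byte 1 of "brc/brcl M,target" is (M << 4) | 0x4, so
	 * mask 0 yields 0x04 (never taken, i.e. a nop) and
	 * mask 15 yields 0xf4 (always taken) */
	return (unsigned char)((mask << 4) | 0x04);
}
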
......@@ -398,7 +398,7 @@ ENTRY(startup_kdump)
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
.insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
.insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
jz 0f
la %r0,1
......
/*
* Idle functions for s390.
*
* Copyright IBM Corp. 2014
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"
static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
void __kprobes enabled_wait(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
unsigned long long idle_time;
unsigned long psw_mask;
trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
/* Account time spent with enabled wait psw loaded as idle time. */
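	/* Writer side of the open-coded sequence count: the increment
	 * below makes 'sequence' odd, so the sysfs readers further down
	 * retry until the closing increment marks the update complete. */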
idle->sequence++;
smp_wmb();
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
account_idle_time(idle_time);
smp_wmb();
idle->sequence++;
}
static ssize_t show_idle_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long idle_count;
unsigned int sequence;
do {
sequence = ACCESS_ONCE(idle->sequence);
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return sprintf(buf, "%llu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long now, idle_time, idle_enter, idle_exit;
unsigned int sequence;
do {
now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
cputime64_t arch_cpu_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
unsigned long long now, idle_enter, idle_exit;
unsigned int sequence;
do {
now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
void arch_cpu_idle_enter(void)
{
local_mcck_disable();
}
void arch_cpu_idle(void)
{
if (!test_cpu_flag(CIF_MCCK_PENDING))
/* Halt the cpu and keep track of cpu time accounting. */
enabled_wait();
local_irq_enable();
}
void arch_cpu_idle_exit(void)
{
local_mcck_enable();
if (test_cpu_flag(CIF_MCCK_PENDING))
s390_handle_mcck();
}
void arch_cpu_idle_dead(void)
{
cpu_die();
}
......@@ -70,6 +70,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
{.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
......@@ -258,7 +259,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
ext_code = *(struct ext_code *) &regs->int_code;
if (ext_code.code != EXT_IRQ_CLK_COMP)
__get_cpu_var(s390_idle).nohz_delay = 1;
set_cpu_flag(CIF_NOHZ_DELAY);
index = ext_hash(ext_code.code);
rcu_read_lock();
......
......@@ -58,161 +58,13 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
.insn_size = MAX_INSN_SIZE,
};
static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
if (!is_known_insn((unsigned char *)insn))
return -EINVAL;
switch (insn[0] >> 8) {
case 0x0c: /* bassm */
case 0x0b: /* bsm */
case 0x83: /* diag */
case 0x44: /* ex */
case 0xac: /* stnsm */
case 0xad: /* stosm */
return -EINVAL;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x00: /* exrl */
return -EINVAL;
}
}
switch (insn[0]) {
case 0x0101: /* pr */
case 0xb25a: /* bsa */
case 0xb240: /* bakr */
case 0xb258: /* bsg */
case 0xb218: /* pc */
case 0xb228: /* pt */
case 0xb98d: /* epsw */
return -EINVAL;
}
return 0;
}
static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
{
/* default fixup method */
int fixup = FIXUP_PSW_NORMAL;
switch (insn[0] >> 8) {
case 0x05: /* balr */
case 0x0d: /* basr */
fixup = FIXUP_RETURN_REGISTER;
/* if r2 = 0, no branch will be taken */
if ((insn[0] & 0x0f) == 0)
fixup |= FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x06: /* bctr */
case 0x07: /* bcr */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x45: /* bal */
case 0x4d: /* bas */
fixup = FIXUP_RETURN_REGISTER;
break;
case 0x47: /* bc */
case 0x46: /* bct */
case 0x86: /* bxh */
case 0x87: /* bxle */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x82: /* lpsw */
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xb2: /* lpswe */
if ((insn[0] & 0xff) == 0xb2)
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xa7: /* bras */
if ((insn[0] & 0x0f) == 0x05)
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xc0:
if ((insn[0] & 0x0f) == 0x05) /* brasl */
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xeb:
switch (insn[2] & 0xff) {
case 0x44: /* bxhg */
case 0x45: /* bxleg */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
case 0xe3: /* bctg */
if ((insn[2] & 0xff) == 0x46)
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0xec:
switch (insn[2] & 0xff) {
case 0xe5: /* clgrb */
case 0xe6: /* cgrb */
case 0xf6: /* crb */
case 0xf7: /* clrb */
case 0xfc: /* cgib */
case 0xfd: /* cglib */
case 0xfe: /* cib */
case 0xff: /* clib */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
}
return fixup;
}
static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
{
/* Check if we have a RIL-b or RIL-c format instruction which
* we need to modify in order to avoid instruction emulation. */
switch (insn[0] >> 8) {
case 0xc0:
if ((insn[0] & 0x0f) == 0x00) /* larl */
return true;
break;
case 0xc4:
switch (insn[0] & 0x0f) {
case 0x02: /* llhrl */
case 0x04: /* lghrl */
case 0x05: /* lhrl */
case 0x06: /* llghrl */
case 0x07: /* sthrl */
case 0x08: /* lgrl */
case 0x0b: /* stgrl */
case 0x0c: /* lgfrl */
case 0x0d: /* lrl */
case 0x0e: /* llgfrl */
case 0x0f: /* strl */
return true;
}
break;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x02: /* pfdrl */
case 0x04: /* cghrl */
case 0x05: /* chrl */
case 0x06: /* clghrl */
case 0x07: /* clhrl */
case 0x08: /* cgrl */
case 0x0a: /* clgrl */
case 0x0c: /* cgfrl */
case 0x0d: /* crl */
case 0x0e: /* clgfrl */
case 0x0f: /* clrl */
return true;
}
break;
}
return false;
}
static void __kprobes copy_instruction(struct kprobe *p)
{
s64 disp, new_disp;
u64 addr, new_addr;
memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
if (!is_insn_relative_long(p->ainsn.insn))
if (!probe_is_insn_relative_long(p->ainsn.insn))
return;
/*
* For pc-relative instructions in RIL-b or RIL-c format patch the
......@@ -276,7 +128,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if ((unsigned long) p->addr & 0x01)
return -EINVAL;
/* Make sure the probe isn't going on a difficult instruction */
if (is_prohibited_opcode(p->addr))
if (probe_is_prohibited_opcode(p->addr))
return -EINVAL;
if (s390_get_insn_slot(p))
return -ENOMEM;
......@@ -605,7 +457,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
int fixup = get_fixup_type(p->ainsn.insn);
int fixup = probe_get_fixup_type(p->ainsn.insn);
if (fixup & FIXUP_PSW_NORMAL)
ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
......@@ -789,11 +641,6 @@ void __kprobes jprobe_return(void)
asm volatile(".word 0x0002");
}
static void __used __kprobes jprobe_return_end(void)
{
asm volatile("bcr 0,0");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
......
......@@ -25,6 +25,7 @@
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
......@@ -43,7 +44,7 @@ static void add_elf_notes(int cpu)
memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
ptr = fill_cpu_elf_notes(ptr, sa);
ptr = fill_cpu_elf_notes(ptr, sa, NULL);
memset(ptr, 0, sizeof(struct elf_note));
}
......@@ -53,8 +54,11 @@ static void add_elf_notes(int cpu)
static void setup_regs(void)
{
unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
struct _lowcore *lc;
int cpu, this_cpu;
/* Get lowcore pointer from store status of this CPU (absolute zero) */
lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
this_cpu = smp_find_processor_id(stap());
add_elf_notes(this_cpu);
for_each_online_cpu(cpu) {
......@@ -64,6 +68,8 @@ static void setup_regs(void)
continue;
add_elf_notes(cpu);
}
if (MACHINE_HAS_VX)
save_vx_regs_safe((void *) lc->vector_save_area_addr);
/* Copy dump CPU store status info to absolute zero */
memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
......
......@@ -8,62 +8,72 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>
.section .kprobes.text, "ax"
ENTRY(ftrace_stub)
br %r14
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
lgr %r1,%r15
aghi %r15,-STACK_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
stg %r0,(STACK_PTREGS_PSW+8)(%r15)
stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
lgrl %r1,ftrace_trace_function
#else
lgr %r2,%r0
aghi %r2,-MCOUNT_INSN_SIZE
larl %r4,function_trace_op
lg %r4,0(%r4)
larl %r1,ftrace_trace_function
lg %r1,0(%r1)
#endif
stm %r2,%r5,16(%r15)
bras %r1,1f
0: .long ftrace_trace_function
1: st %r14,56(%r15)
lr %r0,%r15
ahi %r15,-96
l %r3,100(%r15)
la %r2,0(%r14)
st %r0,__SF_BACKCHAIN(%r15)
la %r3,0(%r3)
ahi %r2,-MCOUNT_INSN_SIZE
l %r14,0b-0b(%r1)
l %r14,0(%r14)
basr %r14,%r14
lgr %r3,%r14
la %r5,STACK_PTREGS(%r15)
basr %r14,%r1
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
l %r2,100(%r15)
l %r3,152(%r15)
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller.
ENTRY(ftrace_graph_caller)
# The bras instruction gets runtime patched to call prepare_ftrace_return.
# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
# bras %r14,prepare_ftrace_return
bras %r14,0f
0: st %r2,100(%r15)
j ftrace_graph_caller_end
lg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
lg %r3,(STACK_PTREGS_PSW+8)(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
ftrace_graph_caller_end:
.globl ftrace_graph_caller_end
#endif
ahi %r15,96
l %r14,56(%r15)
lm %r2,%r5,16(%r15)
br %r14
lg %r1,(STACK_PTREGS_PSW+8)(%r15)
lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
br %r1
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(return_to_handler)
stm %r2,%r5,16(%r15)
st %r14,56(%r15)
lr %r0,%r15
ahi %r15,-96
st %r0,__SF_BACKCHAIN(%r15)
bras %r1,0f
.long ftrace_return_to_handler
0: l %r2,0b-0b(%r1)
basr %r14,%r2
lr %r14,%r2
ahi %r15,96
lm %r2,%r5,16(%r15)
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15)
brasl %r14,ftrace_return_to_handler
aghi %r15,STACK_FRAME_OVERHEAD
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
br %r14
#endif
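In the reworked code the graph-caller patch site is a single j instruction (brc 15), so enabling the graph tracer only has to turn that j into a nop (brc 0) by rewriting the condition-code mask nibble. A sketch of what the enable/disable helpers amount to; the byte offset and the probe_kernel_write() approach are an illustration, the authoritative helpers live in arch/s390/kernel/ftrace.c:

    #include <linux/uaccess.h>

    extern void ftrace_graph_caller(void);

    /* j == brc 15,...; nop == brc 0,...  The mask sits in the high
     * nibble of the second instruction byte. */
    int ftrace_enable_ftrace_graph_caller(void)
    {
            u8 op = 0x04;	/* brc 0: never taken, fall through */

            return probe_kernel_write((void *) ftrace_graph_caller + 1,
                                      &op, sizeof(op));
    }

    int ftrace_disable_ftrace_graph_caller(void)
    {
            u8 op = 0xf4;	/* brc 15: always taken, skip graph code */

            return probe_kernel_write((void *) ftrace_graph_caller + 1,
                                      &op, sizeof(op));
    }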
/*
* Copyright IBM Corp. 2008, 2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
.section .kprobes.text, "ax"
ENTRY(ftrace_stub)
br %r14
ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
ENTRY(ftrace_caller)
#endif
stmg %r2,%r5,32(%r15)
stg %r14,112(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
lgr %r2,%r14
lg %r3,168(%r15)
aghi %r2,-MCOUNT_INSN_SIZE
larl %r14,ftrace_trace_function
lg %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
lg %r2,168(%r15)
lg %r3,272(%r15)
ENTRY(ftrace_graph_caller)
# The bras instruction gets runtime patched to call prepare_ftrace_return.
# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
# bras %r14,prepare_ftrace_return
bras %r14,0f
0: stg %r2,168(%r15)
#endif
aghi %r15,160
lmg %r2,%r5,32(%r15)
lg %r14,112(%r15)
br %r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(return_to_handler)
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
aghi %r15,-160
stg %r1,__SF_BACKCHAIN(%r15)
brasl %r14,ftrace_return_to_handler
aghi %r15,160
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
br %r14
#endif
......@@ -20,6 +20,7 @@
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
struct mcck_struct {
int kill_task;
......@@ -163,6 +164,21 @@ static int notrace s390_revalidate_registers(struct mci *mci)
" ld 15,120(%0)\n"
: : "a" (fpt_save_area));
}
#ifdef CONFIG_64BIT
/* Revalidate vector registers */
if (MACHINE_HAS_VX && current->thread.vxrs) {
if (!mci->vr) {
/*
* Vector registers can't be restored and therefore
* the process needs to be terminated.
*/
kill_task = 1;
}
restore_vx_regs((__vector128 *)
S390_lowcore.vector_save_area_addr);
}
#endif
/* Revalidate access registers */
asm volatile(
" lam 0,15,0(%0)"
......
......@@ -49,7 +49,7 @@ PGM_CHECK_DEFAULT /* 17 */
PGM_CHECK_64BIT(transaction_exception) /* 18 */
PGM_CHECK_DEFAULT /* 19 */
PGM_CHECK_DEFAULT /* 1a */
PGM_CHECK_DEFAULT /* 1b */
PGM_CHECK_64BIT(vector_exception) /* 1b */
PGM_CHECK(space_switch_exception) /* 1c */
PGM_CHECK(hfp_sqrt_exception) /* 1d */
PGM_CHECK_DEFAULT /* 1e */
......
......@@ -61,30 +61,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}
void arch_cpu_idle(void)
{
local_mcck_disable();
if (test_cpu_flag(CIF_MCCK_PENDING)) {
local_mcck_enable();
local_irq_enable();
return;
}
/* Halt the cpu and keep track of cpu time accounting. */
vtime_stop_cpu();
local_irq_enable();
}
void arch_cpu_idle_exit(void)
{
if (test_cpu_flag(CIF_MCCK_PENDING))
s390_handle_mcck();
}
void arch_cpu_idle_dead(void)
{
cpu_die();
}
extern void __kprobes kernel_thread_starter(void);
/*
......
......@@ -23,7 +23,6 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
*/
void cpu_init(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
struct cpuid *id = &__get_cpu_var(cpu_id);
get_cpu_id(id);
......@@ -31,7 +30,6 @@ void cpu_init(void)
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
memset(idle, 0, sizeof(*idle));
}
/*
......@@ -41,7 +39,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh", "highgprs", "te"
"edat", "etf3eh", "highgprs", "te", "vx"
};
unsigned long n = (unsigned long) v - 1;
int i;
......
......@@ -343,6 +343,9 @@ static void __init setup_lowcore(void)
__ctl_set_bit(14, 29);
}
#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
......@@ -452,8 +455,8 @@ static void __init setup_memory_end(void)
#ifdef CONFIG_64BIT
vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
if (tmp <= (1UL << 42))
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
vmax = 1UL << 42; /* 3-level kernel page table */
else
vmax = 1UL << 53; /* 4-level kernel page table */
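A worked example of the corrected bound, assuming 4 KiB pages and a 64 byte struct page: each page of physical memory needs PAGE_SIZE + sizeof(struct page) = 4160 bytes of kernel address space, so 2 TiB of memory gives tmp = 2^29 * 4160, roughly 2.03 TiB; adding the default 128 GiB vmalloc area and MODULES_LEN stays well below 1UL << 42 (4 TiB), so the cheaper 3-level page table is chosen. The old expression folded vmalloc_size into tmp but ignored MODULES_LEN, so a borderline configuration could pick the 3-level table and then run out of address space for modules.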
......@@ -765,6 +768,12 @@ static void __init setup_hwcaps(void)
*/
if (test_facility(50) && test_facility(73))
elf_hwcap |= HWCAP_S390_TE;
/*
* Vector extension HWCAP_S390_VXRS is bit 11.
*/
if (test_facility(129))
elf_hwcap |= HWCAP_S390_VXRS;
#endif
get_cpu_id(&cpu_id);
......
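User space can test the new capability through the ELF auxiliary vector; a minimal check, with the mask value derived from the "bit 11" comment above:

    #include <stdio.h>
    #include <sys/auxv.h>

    #define HWCAP_S390_VXRS (1UL << 11)	/* the "vx" bit */

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            printf("vector extension %savailable\n",
                   (hwcap & HWCAP_S390_VXRS) ? "" : "not ");
            return 0;
    }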
......@@ -45,6 +45,7 @@
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include "entry.h"
enum {
......@@ -82,7 +83,8 @@ DEFINE_MUTEX(smp_cpu_state_mutex);
/*
* Signal processor helper functions.
*/
static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
u32 *status)
{
int cc;
......@@ -178,6 +180,9 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
goto out;
}
#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
if (vdso_alloc_per_cpu(lc))
goto out;
#endif
......@@ -333,12 +338,6 @@ int smp_vcpu_scheduled(int cpu)
return pcpu_running(pcpu_devices + cpu);
}
void smp_yield(void)
{
if (MACHINE_HAS_DIAG44)
asm volatile("diag 0,0,0x44");
}
void smp_yield_cpu(int cpu)
{
if (MACHINE_HAS_DIAG9C)
......@@ -517,35 +516,53 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
static void __init smp_get_save_area(int cpu, u16 address)
{
void *lc = pcpu_devices[0].lowcore;
struct save_area *save_area;
struct save_area_ext *sa_ext;
unsigned long vx_sa;
if (is_kdump_kernel())
return;
if (!OLDMEM_BASE && (address == boot_cpu_address ||
ipl_info.type != IPL_TYPE_FCP_DUMP))
return;
save_area = dump_save_area_create(cpu);
if (!save_area)
sa_ext = dump_save_area_create(cpu);
if (!sa_ext)
panic("could not allocate memory for save area\n");
if (address == boot_cpu_address) {
/* Copy the registers of the boot cpu. */
copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
SAVE_AREA_BASE - PAGE_SIZE, 0);
if (MACHINE_HAS_VX)
save_vx_regs_safe(sa_ext->vx_regs);
return;
}
/* Get the registers of a non-boot cpu. */
__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
if (!MACHINE_HAS_VX)
return;
/* Get the VX registers */
vx_sa = __get_free_page(GFP_KERNEL);
if (!vx_sa)
panic("could not allocate memory for VX save area\n");
__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
free_page(vx_sa);
}
int smp_store_status(int cpu)
{
unsigned long vx_sa;
struct pcpu *pcpu;
pcpu = pcpu_devices + cpu;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
if (!MACHINE_HAS_VX)
return 0;
vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
vx_sa, NULL);
return 0;
}
......@@ -667,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
cpu_init();
preempt_disable();
init_cpu_timer();
init_cpu_vtimer();
vtime_init();
pfault_init();
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
......@@ -726,6 +743,7 @@ int __cpu_disable(void)
cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
__ctl_load(cregs, 0, 15);
clear_cpu_flag(CIF_NOHZ_DELAY);
return 0;
}
......@@ -898,42 +916,6 @@ static struct attribute_group cpu_common_attr_group = {
.attrs = cpu_common_attrs,
};
static ssize_t show_idle_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long idle_count;
unsigned int sequence;
do {
sequence = ACCESS_ONCE(idle->sequence);
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long now, idle_time, idle_enter, idle_exit;
unsigned int sequence;
do {
now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
static struct attribute *cpu_online_attrs[] = {
&dev_attr_idle_count.attr,
&dev_attr_idle_time_us.attr,
......
......@@ -232,6 +232,19 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtom_clock_nsec -= nsecps;
vdso_data->wtom_clock_sec++;
}
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec =
(long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
vdso_data->wtom_coarse_sec =
vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
vdso_data->wtom_coarse_nsec =
vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
vdso_data->wtom_coarse_sec++;
}
vdso_data->tk_mult = tk->tkr.mult;
vdso_data->tk_shift = tk->tkr.shift;
smp_wmb();
......
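With the coarse fields exported, the COARSE clock ids can be served from the vdso data page alone, without a system call and without reading the TOD clock. A minimal consumer:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            /* Served from the vdso data page: no system call, no TOD
             * clock read, at the cost of tick-level resolution. */
            if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0)
                    printf("%lld.%09ld\n", (long long) ts.tv_sec, ts.tv_nsec);
            return 0;
    }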
......@@ -464,15 +464,17 @@ static struct sched_domain_topology_level s390_topology[] = {
static int __init topology_init(void)
{
if (!MACHINE_HAS_TOPOLOGY) {
if (MACHINE_HAS_TOPOLOGY)
set_topology_timer();
else
topology_update_polarization_simple();
goto out;
}
set_topology_timer();
out:
set_sched_topology(s390_topology);
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
static int __init early_topology_init(void)
{
set_sched_topology(s390_topology);
return 0;
}
early_initcall(early_topology_init);
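Registering the topology levels from an early_initcall ensures that set_sched_topology() has run before the secondary CPUs come up and the scheduler builds its domains; the topology timer and the sysfs dispatching attribute can safely stay in the later device_initcall.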
......@@ -18,6 +18,8 @@
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/switch_to.h>
#include "entry.h"
int show_unhandled_signals = 1;
......@@ -58,15 +60,10 @@ int is_valid_bugaddr(unsigned long addr)
return 1;
}
static void __kprobes do_trap(struct pt_regs *regs,
int si_signo, int si_code, char *str)
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
siginfo_t info;
if (notify_die(DIE_TRAP, str, regs, 0,
regs->int_code, si_signo) == NOTIFY_STOP)
return;
if (user_mode(regs)) {
info.si_signo = si_signo;
info.si_errno = 0;
......@@ -90,6 +87,15 @@ static void __kprobes do_trap(struct pt_regs *regs,
}
}
static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code,
char *str)
{
if (notify_die(DIE_TRAP, str, regs, 0,
regs->int_code, si_signo) == NOTIFY_STOP)
return;
do_report_trap(regs, si_signo, si_code, str);
}
void __kprobes do_per_trap(struct pt_regs *regs)
{
siginfo_t info;
......@@ -178,6 +184,7 @@ void __kprobes illegal_op(struct pt_regs *regs)
siginfo_t info;
__u8 opcode[6];
__u16 __user *location;
int is_uprobe_insn = 0;
int signal = 0;
location = get_trap_ip(regs);
......@@ -194,6 +201,10 @@ void __kprobes illegal_op(struct pt_regs *regs)
force_sig_info(SIGTRAP, &info, current);
} else
signal = SIGILL;
#ifdef CONFIG_UPROBES
} else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
is_uprobe_insn = 1;
#endif
#ifdef CONFIG_MATHEMU
} else if (opcode[0] == 0xb3) {
if (get_user(*((__u16 *) (opcode+2)), location+1))
......@@ -219,11 +230,13 @@ void __kprobes illegal_op(struct pt_regs *regs)
#endif
} else
signal = SIGILL;
} else {
/*
* If we get an illegal op in kernel mode, send it through the
* kprobes notifier. If kprobes doesn't pick it up, SIGILL
*/
}
/*
* We got either an illegal op in kernel mode, or user space trapped
* on a uprobes illegal instruction. See if kprobes or uprobes picks
* it up. If not, SIGILL.
*/
if (is_uprobe_insn || !user_mode(regs)) {
if (notify_die(DIE_BPT, "bpt", regs, 0,
3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
......@@ -292,6 +305,74 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
#endif
#ifdef CONFIG_64BIT
int alloc_vector_registers(struct task_struct *tsk)
{
__vector128 *vxrs;
int i;
/* Allocate vector register save area. */
vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS,
GFP_KERNEL|__GFP_REPEAT);
if (!vxrs)
return -ENOMEM;
preempt_disable();
if (tsk == current)
save_fp_regs(tsk->thread.fp_regs.fprs);
/* Copy the 16 floating point registers */
for (i = 0; i < 16; i++)
*(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i];
tsk->thread.vxrs = vxrs;
if (tsk == current) {
__ctl_set_bit(0, 17);
restore_vx_regs(vxrs);
}
preempt_enable();
return 0;
}
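The copy loop works because vector registers V0-V15 architecturally overlay the floating-point registers: FPR i occupies the leftmost 64 bits of VR i. Seeding the freshly allocated save area from the current FP contents therefore makes enabling the facility transparent to a task that was already using floating point.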
void vector_exception(struct pt_regs *regs)
{
int si_code, vic;
if (!MACHINE_HAS_VX) {
do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
return;
}
/* get vector interrupt code from fpc */
asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
switch (vic) {
case 1: /* invalid vector operation */
si_code = FPE_FLTINV;
break;
case 2: /* division by zero */
si_code = FPE_FLTDIV;
break;
case 3: /* overflow */
si_code = FPE_FLTOVF;
break;
case 4: /* underflow */
si_code = FPE_FLTUND;
break;
case 5: /* inexact */
si_code = FPE_FLTRES;
break;
default: /* unknown cause */
si_code = 0;
}
do_trap(regs, SIGFPE, si_code, "vector exception");
}
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
return 1;
}
__setup("novx", disable_vector_extension);
#endif
void data_exception(struct pt_regs *regs)
{
__u16 __user *location;
......@@ -357,6 +438,18 @@ void data_exception(struct pt_regs *regs)
}
}
#endif
#ifdef CONFIG_64BIT
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !current->thread.vxrs &&
(current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
alloc_vector_registers(current);
/* Vector data exception is suppressing, rewind psw. */
regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
clear_pt_regs_flag(regs, PIF_PER_TRAP);
return;
}
#endif
if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
signal = SIGFPE;
else
......
/*
* User-space Probes (UProbes) for s390
*
* Copyright IBM Corp. 2014
* Author(s): Jan Willeke,
*/
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/kdebug.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/dis.h>
#include "entry.h"
#define UPROBE_TRAP_NR UINT_MAX
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long addr)
{
return probe_is_prohibited_opcode(auprobe->insn);
}
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT)
return -EINVAL;
if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT)
return -EINVAL;
clear_pt_regs_flag(regs, PIF_PER_TRAP);
auprobe->saved_per = psw_bits(regs->psw).r;
auprobe->saved_int_code = regs->int_code;
regs->int_code = UPROBE_TRAP_NR;
regs->psw.addr = current->utask->xol_vaddr;
set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
update_cr_regs(current);
return 0;
}
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
struct pt_regs *regs = task_pt_regs(tsk);
if (regs->int_code != UPROBE_TRAP_NR)
return true;
return false;
}
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int fixup = probe_get_fixup_type(auprobe->insn);
struct uprobe_task *utask = current->utask;
clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
update_cr_regs(current);
psw_bits(regs->psw).r = auprobe->saved_per;
regs->int_code = auprobe->saved_int_code;
if (fixup & FIXUP_PSW_NORMAL)
regs->psw.addr += utask->vaddr - utask->xol_vaddr;
if (fixup & FIXUP_RETURN_REGISTER) {
int reg = (auprobe->insn[0] & 0xf0) >> 4;
regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
}
if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
int ilen = insn_length(auprobe->insn[0] >> 8);
if (regs->psw.addr - utask->xol_vaddr == ilen)
regs->psw.addr = utask->vaddr + ilen;
}
/* If per tracing was active generate trap */
if (regs->psw.mask & PSW_MASK_PER)
do_per_trap(regs);
return 0;
}
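For illustration, stepping a `bras %r14,label` (4 bytes, displacement counted in halfwords) out of line leaves the PSW at the slot-relative branch target and %r14 at xol_vaddr + 4; FIXUP_PSW_NORMAL and FIXUP_RETURN_REGISTER both add vaddr - xol_vaddr, rebasing them to the intended target and to vaddr + 4. FIXUP_BRANCH_NOT_TAKEN covers conditional branches such as bcr: if the PSW advanced by exactly the instruction length, the branch fell through, and the PSW is simply pointed at vaddr + ilen.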
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
void *data)
{
struct die_args *args = data;
struct pt_regs *regs = args->regs;
if (!user_mode(regs))
return NOTIFY_DONE;
if (regs->int_code & 0x200) /* Trap during transaction */
return NOTIFY_DONE;
switch (val) {
case DIE_BPT:
if (uprobe_pre_sstep_notifier(regs))
return NOTIFY_STOP;
break;
case DIE_SSTEP:
if (uprobe_post_sstep_notifier(regs))
return NOTIFY_STOP;
default:
break;
}
return NOTIFY_DONE;
}
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
clear_thread_flag(TIF_UPROBE_SINGLESTEP);
regs->int_code = auprobe->saved_int_code;
regs->psw.addr = current->utask->vaddr;
}
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
struct pt_regs *regs)
{
unsigned long orig;
orig = regs->gprs[14];
regs->gprs[14] = trampoline;
return orig;
}
/* Instruction Emulation */
static void adjust_psw_addr(psw_t *psw, unsigned long len)
{
psw->addr = __rewind_psw(*psw, -len);
}
#define EMU_ILLEGAL_OP 1
#define EMU_SPECIFICATION 2
#define EMU_ADDRESSING 3
#define emu_load_ril(ptr, output) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
else \
*(output) = input; \
__rc; \
})
#define emu_store_ril(ptr, input) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (put_user(*(input), ptr)) \
__rc = EMU_ADDRESSING; \
__rc; \
})
#define emu_cmp_ril(regs, ptr, cmp) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
else if (input > *(cmp)) \
psw_bits((regs)->psw).cc = 1; \
else if (input < *(cmp)) \
psw_bits((regs)->psw).cc = 2; \
else \
psw_bits((regs)->psw).cc = 0; \
__rc; \
})
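The cc values mirror the hardware compare: cc 0 for equal operands, cc 1 when the first (register) operand is low, cc 2 when it is high; hence cc 1 above when the in-memory operand is the greater one.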
struct insn_ril {
u8 opc0;
u8 reg : 4;
u8 opc1 : 4;
s32 disp;
} __packed;
union split_register {
u64 u64;
u32 u32[2];
u16 u16[4];
s64 s64;
s32 s32[2];
s16 s16[4];
};
/*
* pc relative instructions are emulated, since parameters may not be
* accessible from the xol area due to range limitations.
*/
static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
union split_register *rx;
struct insn_ril *insn;
unsigned int ilen;
void *uptr;
int rc = 0;
insn = (struct insn_ril *) &auprobe->insn;
rx = (union split_register *) &regs->gprs[insn->reg];
uptr = (void *)(regs->psw.addr + (insn->disp * 2));
ilen = insn_length(insn->opc0);
switch (insn->opc0) {
case 0xc0:
switch (insn->opc1) {
case 0x00: /* larl */
rx->u64 = (unsigned long)uptr;
break;
}
break;
case 0xc4:
switch (insn->opc1) {
case 0x02: /* llhrl */
rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
break;
case 0x04: /* lghrl */
rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
break;
case 0x05: /* lhrl */
rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
break;
case 0x06: /* llghrl */
rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
break;
case 0x08: /* lgrl */
rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
break;
case 0x0c: /* lgfrl */
rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
break;
case 0x0d: /* lrl */
rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
break;
case 0x0e: /* llgfrl */
rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
break;
case 0x07: /* sthrl */
rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
break;
case 0x0b: /* stgrl */
rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
break;
case 0x0f: /* strl */
rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
case 0xc6:
switch (insn->opc1) {
case 0x02: /* pfdrl */
if (!test_facility(34))
rc = EMU_ILLEGAL_OP;
break;
case 0x04: /* cghrl */
rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
break;
case 0x05: /* chrl */
rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
break;
case 0x06: /* clghrl */
rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
break;
case 0x07: /* clhrl */
rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
break;
case 0x08: /* cgrl */
rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
break;
case 0x0a: /* clgrl */
rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
break;
case 0x0c: /* cgfrl */
rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
break;
case 0x0d: /* crl */
rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
break;
case 0x0e: /* clgfrl */
rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
break;
case 0x0f: /* clrl */
rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
}
adjust_psw_addr(&regs->psw, ilen);
switch (rc) {
case EMU_ILLEGAL_OP:
regs->int_code = ilen << 16 | 0x0001;
do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
break;
case EMU_SPECIFICATION:
regs->int_code = ilen << 16 | 0x0006;
do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
break;
case EMU_ADDRESSING:
regs->int_code = ilen << 16 | 0x0005;
do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
break;
}
}
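To see why these cannot be single-stepped from the xol slot, take `lrl %r1,x` at vaddr 0x40000000 with a displacement of 0x100 halfwords: the intended operand address is 0x40000000 + 2 * 0x100 = 0x40000200. Executed from the slot, the access would go to slot + 0x200 instead, which is the wrong address and may not even be mappable, since the RIL displacement only reaches about +-4 GiB from the instruction. The emulation above therefore applies the displacement to the original PSW address and performs the access with get_user()/put_user().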
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) ||
((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) &&
!is_compat_task())) {
regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
return true;
}
if (probe_is_insn_relative_long(auprobe->insn)) {
handle_insn_ril(auprobe, regs);
return true;
}
return false;
}
......@@ -19,14 +19,20 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
.cfi_startproc
basr %r1,0
la %r1,4f-.(%r1)
chi %r2,__CLOCK_REALTIME
je 0f
chi %r2,__CLOCK_MONOTONIC
je 0f
la %r1,5f-4f(%r1)
chi %r2,__CLOCK_REALTIME_COARSE
je 0f
chi %r2,__CLOCK_MONOTONIC_COARSE
jne 3f
0: ltr %r3,%r3
jz 2f /* res == NULL */
basr %r1,0
1: l %r0,4f-1b(%r1)
1: l %r0,0(%r1)
xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */
st %r0,4(%r3) /* store tp->tv_usec */
2: lhi %r2,0
......@@ -35,5 +41,6 @@ __kernel_clock_getres:
svc 0
br %r14
4: .long __CLOCK_REALTIME_RES
5: .long __CLOCK_COARSE_RES
.cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
......@@ -21,8 +21,12 @@ __kernel_clock_gettime:
.cfi_startproc
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
chi %r2,__CLOCK_REALTIME_COARSE
je 10f
chi %r2,__CLOCK_REALTIME
je 11f
chi %r2,__CLOCK_MONOTONIC_COARSE
je 9f
chi %r2,__CLOCK_MONOTONIC
jne 19f
......@@ -30,8 +34,8 @@ __kernel_clock_gettime:
1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
stck 24(%r15) /* Store TOD clock */
lm %r0,%r1,24(%r15)
stcke 24(%r15) /* Store TOD clock */
lm %r0,%r1,25(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,2f
......@@ -68,12 +72,32 @@ __kernel_clock_gettime:
lhi %r2,0
br %r14
/* CLOCK_MONOTONIC_COARSE */
9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 9b
l %r2,__VDSO_WTOM_CRS_SEC+4(%r5)
l %r1,__VDSO_WTOM_CRS_NSEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 9b
j 8b
/* CLOCK_REALTIME_COARSE */
10: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 10b
l %r2,__VDSO_XTIME_CRS_SEC+4(%r5)
l %r1,__VDSO_XTIME_CRS_NSEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 10b
j 17f
/* CLOCK_REALTIME */
11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
stck 24(%r15) /* Store TOD clock */
lm %r0,%r1,24(%r15)
stcke 24(%r15) /* Store TOD clock */
lm %r0,%r1,25(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
......
......@@ -29,8 +29,8 @@ __kernel_gettimeofday:
l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
stck 24(%r15) /* Store TOD clock */
lm %r0,%r1,24(%r15)
stcke 24(%r15) /* Store TOD clock */
lm %r0,%r1,25(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
......
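The stck to stcke switch is the reason for the shifted offsets: stcke stores a 16 byte extended TOD value whose first byte is the epoch index, so loading 8 bytes from offset 25 (31 bit) or 49 (64 bit) picks up exactly the clock bits that stck used to store, while leaving room to honour the epoch byte when the TOD clock eventually wraps.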
......@@ -19,6 +19,12 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
.cfi_startproc
larl %r1,4f
cghi %r2,__CLOCK_REALTIME_COARSE
je 0f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 0f
larl %r1,3f
cghi %r2,__CLOCK_REALTIME
je 0f
cghi %r2,__CLOCK_MONOTONIC
......@@ -32,7 +38,6 @@ __kernel_clock_getres:
jz 2f
0: ltgr %r3,%r3
jz 1f /* res == NULL */
larl %r1,3f
lg %r0,0(%r1)
xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
stg %r0,8(%r3) /* store tp->tv_usec */
......@@ -42,5 +47,6 @@ __kernel_clock_getres:
svc 0
br %r14
3: .quad __CLOCK_REALTIME_RES
4: .quad __CLOCK_COARSE_RES
.cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
......@@ -20,12 +20,16 @@
__kernel_clock_gettime:
.cfi_startproc
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME_COARSE
je 4f
cghi %r2,__CLOCK_REALTIME
je 5f
cghi %r2,__CLOCK_THREAD_CPUTIME_ID
je 9f
cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
je 9f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 3f
cghi %r2,__CLOCK_MONOTONIC
jne 12f
......@@ -33,10 +37,10 @@ __kernel_clock_gettime:
0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
stck 48(%r15) /* Store TOD clock */
stcke 48(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r0,__VDSO_WTOM_SEC(%r5)
lg %r1,48(%r15)
lg %r1,49(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_WTOM_NSEC(%r5)
......@@ -54,13 +58,33 @@ __kernel_clock_gettime:
lghi %r2,0
br %r14
/* CLOCK_MONOTONIC_COARSE */
3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 3b
lg %r0,__VDSO_WTOM_CRS_SEC(%r5)
lg %r1,__VDSO_WTOM_CRS_NSEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 3b
j 2b
/* CLOCK_REALTIME_COARSE */
4: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 4b
lg %r0,__VDSO_XTIME_CRS_SEC(%r5)
lg %r1,__VDSO_XTIME_CRS_NSEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 4b
j 7f
/* CLOCK_REALTIME */
5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
stck 48(%r15) /* Store TOD clock */
stcke 48(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r1,48(%r15)
lg %r1,49(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
......
......@@ -28,8 +28,8 @@ __kernel_gettimeofday:
lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
stcke 48(%r15) /* Store TOD clock */
lg %r1,49(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
......
......@@ -6,27 +6,18 @@
*/
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/irq.h>
#include "entry.h"
static void virt_timer_expire(void);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
......@@ -152,49 +143,6 @@ void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);
void __kprobes vtime_stop_cpu(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
unsigned long long idle_time;
unsigned long psw_mask;
trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
idle->nohz_delay = 0;
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
/* Account time spent with enabled wait psw loaded as idle time. */
idle->sequence++;
smp_wmb();
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
account_idle_time(idle_time);
smp_wmb();
idle->sequence++;
}
cputime64_t s390_get_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
unsigned long long now, idle_enter, idle_exit;
unsigned int sequence;
do {
now = get_tod_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
/*
* Sorted add to a list. List is linear searched until first bigger
* element is found.
......@@ -372,31 +320,8 @@ EXPORT_SYMBOL(del_virt_timer);
/*
* Start the virtual CPU timer on the current CPU.
*/
void init_cpu_vtimer(void)
void vtime_init(void)
{
/* set initial cpu timer */
set_vtimer(VTIMER_MAX_SLICE);
}
static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
struct s390_idle_data *idle;
long cpu = (long) hcpu;
idle = &per_cpu(s390_idle, cpu);
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DYING:
idle->nohz_delay = 0;
default:
break;
}
return NOTIFY_OK;
}
void __init vtime_init(void)
{
/* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer();
cpu_notifier(s390_nohz_notify, 0);
}
......@@ -6,3 +6,5 @@ lib-y += delay.o string.o uaccess.o find.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_SMP) += spinlock.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
......@@ -43,7 +43,7 @@ static void __udelay_disabled(unsigned long long usecs)
lockdep_off();
do {
set_clock_comparator(end);
vtime_stop_cpu();
enabled_wait();
} while (get_tod_clock_fast() < end);
lockdep_on();
__ctl_load(cr0, 0, 0);
......@@ -62,7 +62,7 @@ static void __udelay_enabled(unsigned long long usecs)
clock_saved = local_tick_disable();
set_clock_comparator(end);
}
vtime_stop_cpu();
enabled_wait();
if (clock_saved)
local_tick_enable(clock_saved);
} while (get_tod_clock_fast() < end);
......
/*
* Common helper functions for kprobes and uprobes
*
* Copyright IBM Corp. 2014
*/
#include <linux/kprobes.h>
#include <asm/dis.h>
int probe_is_prohibited_opcode(u16 *insn)
{
if (!is_known_insn((unsigned char *)insn))
return -EINVAL;
switch (insn[0] >> 8) {
case 0x0c: /* bassm */
case 0x0b: /* bsm */
case 0x83: /* diag */
case 0x44: /* ex */
case 0xac: /* stnsm */
case 0xad: /* stosm */
return -EINVAL;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x00: /* exrl */
return -EINVAL;
}
}
switch (insn[0]) {
case 0x0101: /* pr */
case 0xb25a: /* bsa */
case 0xb240: /* bakr */
case 0xb258: /* bsg */
case 0xb218: /* pc */
case 0xb228: /* pt */
case 0xb98d: /* epsw */
case 0xe560: /* tbegin */
case 0xe561: /* tbeginc */
case 0xb2f8: /* tend */
return -EINVAL;
}
return 0;
}
int probe_get_fixup_type(u16 *insn)
{
/* default fixup method */
int fixup = FIXUP_PSW_NORMAL;
switch (insn[0] >> 8) {
case 0x05: /* balr */
case 0x0d: /* basr */
fixup = FIXUP_RETURN_REGISTER;
/* if r2 = 0, no branch will be taken */
if ((insn[0] & 0x0f) == 0)
fixup |= FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x06: /* bctr */
case 0x07: /* bcr */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x45: /* bal */
case 0x4d: /* bas */
fixup = FIXUP_RETURN_REGISTER;
break;
case 0x47: /* bc */
case 0x46: /* bct */
case 0x86: /* bxh */
case 0x87: /* bxle */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0x82: /* lpsw */
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xb2: /* lpswe */
if ((insn[0] & 0xff) == 0xb2)
fixup = FIXUP_NOT_REQUIRED;
break;
case 0xa7: /* bras */
if ((insn[0] & 0x0f) == 0x05)
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xc0:
if ((insn[0] & 0x0f) == 0x05) /* brasl */
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xeb:
switch (insn[2] & 0xff) {
case 0x44: /* bxhg */
case 0x45: /* bxleg */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
case 0xe3: /* bctg */
if ((insn[2] & 0xff) == 0x46)
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
case 0xec:
switch (insn[2] & 0xff) {
case 0xe5: /* clgrb */
case 0xe6: /* cgrb */
case 0xf6: /* crb */
case 0xf7: /* clrb */
case 0xfc: /* cgib */
case 0xfd: /* cglib */
case 0xfe: /* cib */
case 0xff: /* clib */
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
}
break;
}
return fixup;
}
int probe_is_insn_relative_long(u16 *insn)
{
/* Check if we have a RIL-b or RIL-c format instruction which
* we need to modify in order to avoid instruction emulation. */
switch (insn[0] >> 8) {
case 0xc0:
if ((insn[0] & 0x0f) == 0x00) /* larl */
return true;
break;
case 0xc4:
switch (insn[0] & 0x0f) {
case 0x02: /* llhrl */
case 0x04: /* lghrl */
case 0x05: /* lhrl */
case 0x06: /* llghrl */
case 0x07: /* sthrl */
case 0x08: /* lgrl */
case 0x0b: /* stgrl */
case 0x0c: /* lgfrl */
case 0x0d: /* lrl */
case 0x0e: /* llgfrl */
case 0x0f: /* strl */
return true;
}
break;
case 0xc6:
switch (insn[0] & 0x0f) {
case 0x02: /* pfdrl */
case 0x04: /* cghrl */
case 0x05: /* chrl */
case 0x06: /* clghrl */
case 0x07: /* clhrl */
case 0x08: /* cgrl */
case 0x0a: /* clgrl */
case 0x0c: /* cgfrl */
case 0x0d: /* crl */
case 0x0e: /* clgfrl */
case 0x0f: /* clrl */
return true;
}
break;
}
return false;
}
......@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
void arch_spin_relax(arch_spinlock_t *lp)
{
unsigned int cpu = lp->lock;
if (cpu != 0) {
if (MACHINE_IS_VM || MACHINE_IS_KVM ||
!smp_vcpu_scheduled(~cpu))
smp_yield_cpu(~cpu);
}
}
EXPORT_SYMBOL(arch_spin_relax);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
int count;
......@@ -122,15 +111,21 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
unsigned int owner, old;
int count = spin_retry;
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
owner = 0;
while (1) {
if (count-- <= 0) {
smp_yield();
if (owner && !smp_vcpu_scheduled(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
owner = ACCESS_ONCE(rw->owner);
if ((int) old < 0)
continue;
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
......@@ -139,28 +134,6 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_lock_wait);
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
int count = spin_retry;
local_irq_restore(flags);
while (1) {
if (count-- <= 0) {
smp_yield();
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
if ((int) old < 0)
continue;
local_irq_disable();
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return;
local_irq_restore(flags);
}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
......@@ -177,46 +150,62 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
void _raw_write_lock_wait(arch_rwlock_t *rw)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
unsigned int old;
unsigned int owner, old;
int count = spin_retry;
owner = 0;
while (1) {
if (count-- <= 0) {
smp_yield();
if (owner && !smp_vcpu_scheduled(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
if (old)
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return;
owner = ACCESS_ONCE(rw->owner);
smp_rmb();
if ((int) old >= 0) {
prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
old = prev;
}
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
unsigned int owner, old, prev;
int count = spin_retry;
local_irq_restore(flags);
prev = 0x80000000;
owner = 0;
while (1) {
if (count-- <= 0) {
smp_yield();
if (owner && !smp_vcpu_scheduled(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
if (old)
continue;
local_irq_disable();
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return;
local_irq_restore(flags);
owner = ACCESS_ONCE(rw->owner);
if ((int) old >= 0 &&
_raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
prev = old;
else
smp_rmb();
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
EXPORT_SYMBOL(_raw_write_lock_wait);
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
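In the new scheme the rwlock word doubles as a reader counter with a writer bit:

    bit 31      : writer bit (0x80000000)
    bits 0 - 30 : reader count

For writer fairness, a waiting writer first sets the writer bit, using the interlocked OR on z196 and newer or a compare-and-swap otherwise; the word turns negative, so newly arriving readers in _raw_read_lock_wait back off, and the writer then spins only until the reader count drains to zero. The prev test keeps it queued behind a writer that already owned the bit. The new owner field records the holder's CPU so a waiter can yield directly to it via arch_lock_relax() below, which is what made the unconditional smp_yield() removable.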
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
......@@ -233,3 +222,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
void arch_lock_relax(unsigned int cpu)
{
if (!cpu)
return;
if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
return;
smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);
......@@ -54,7 +54,6 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
return;
}
seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
seq_putc(m, '\n');
}
......@@ -129,7 +128,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
}
#ifdef CONFIG_64BIT
#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
#else
#define _PMD_PROT_MASK 0
#endif
......@@ -157,7 +156,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
}
#ifdef CONFIG_64BIT
#define _PUD_PROT_MASK (_REGION3_ENTRY_RO | _REGION3_ENTRY_CO)
#define _PUD_PROT_MASK _REGION3_ENTRY_RO
#else
#define _PUD_PROT_MASK 0
#endif
......
......@@ -88,7 +88,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) |= pte_page(pte)[1].index;
} else
pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
*(pmd_t *) ptep = pmd;
}
......
......@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgtable.h>
#include <asm/page.h>
......@@ -103,27 +104,50 @@ int set_memory_x(unsigned long addr, int numpages)
}
#ifdef CONFIG_DEBUG_PAGEALLOC
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
int i;
if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
__ptep_ipte_range(address, nr - 1, pte);
return;
}
for (i = 0; i < nr; i++) {
__ptep_ipte(address, pte);
address += PAGE_SIZE;
pte++;
}
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long address;
int nr, i, j;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i;
for (i = 0; i < numpages; i++) {
for (i = 0; i < numpages;) {
address = page_to_phys(page + i);
pgd = pgd_offset_k(address);
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
if (!enable) {
__ptep_ipte(address, pte);
pte_val(*pte) = _PAGE_INVALID;
continue;
nr = (unsigned long)pte >> ilog2(sizeof(long));
nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
nr = min(numpages - i, nr);
if (enable) {
for (j = 0; j < nr; j++) {
pte_val(*pte) = __pa(address);
address += PAGE_SIZE;
pte++;
}
} else {
ipte_range(pte, address, nr);
}
pte_val(*pte) = __pa(address);
i += nr;
}
}
......
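The pointer arithmetic clamps each batch to the end of the current page table, since the IPTE range form (facility 13) can only sweep consecutive entries of a single page table. With 256 entries per s390 page table, disabling 1000 pages that start at entry 200 is issued as batches of 56, 256, 256, 256 and 176 invalidations; machines without the facility, and 31 bit kernels, fall back to one __ptep_ipte() per page.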
......@@ -236,8 +236,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
if (!new_page)
goto out;
pmd_val(*pm_dir) = __pa(new_page) |
_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
_SEGMENT_ENTRY_CO;
_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
......@@ -253,9 +252,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pt_dir = pte_offset_kernel(pm_dir, address);
if (pte_none(*pt_dir)) {
unsigned long new_page;
void *new_page;
new_page =__pa(vmem_alloc_pages(0));
new_page = vmemmap_alloc_block(PAGE_SIZE, node);
if (!new_page)
goto out;
pte_val(*pt_dir) =
......@@ -263,7 +262,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
}
address += PAGE_SIZE;
}
memset((void *)start, 0, end - start);
ret = 0;
out:
return ret;
......
......@@ -1432,6 +1432,29 @@ static ssize_t dasd_reservation_state_store(struct device *dev,
static DEVICE_ATTR(last_known_reservation_state, 0644,
dasd_reservation_state_show, dasd_reservation_state_store);
static ssize_t dasd_pm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
u8 opm, nppm, cablepm, cuirpm, hpfpm;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return sprintf(buf, "0\n");
opm = device->path_data.opm;
nppm = device->path_data.npm;
cablepm = device->path_data.cablepm;
cuirpm = device->path_data.cuirpm;
hpfpm = device->path_data.hpfpm;
dasd_put_device(device);
return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
cablepm, cuirpm, hpfpm);
}
static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
......@@ -1450,6 +1473,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
&dev_attr_safe_offline.attr,
&dev_attr_path_masks.attr,
NULL,
};
......
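The new attribute makes the per-path state visible from sysfs, for example (bus id illustrative):

    $ cat /sys/bus/ccw/devices/0.0.7e20/path_masks
    80 00 00 00 00

The five hex masks are, in order, the operational, non-preferred, miscabled, CUIR-quiesced and HPF-disabled channel path masks.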
......@@ -33,3 +33,6 @@ obj-$(CONFIG_S390_VMUR) += vmur.o
zcore_mod-objs := sclp_sdias.o zcore.o
obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o
hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
obj-$(CONFIG_HMC_DRV) += hmcdrv.o