Commit d586c86d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull second set of s390 patches from Martin Schwidefsky:
 "The second part of Heikos uaccess rework, the page table walker for
  uaccess is now a thing of the past (yay!)

  The code change to fix the theoretical TLB flush problem allows us to
  add a TLB flush optimization for zEC12, this machine has new
  instructions that allow to do CPU local TLB flushes for single pages
  and for all pages of a specific address space.

  Plus the usual bug fixing and some more cleanup"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/uaccess: rework uaccess code - fix locking issues
  s390/mm,tlb: optimize TLB flushing for zEC12
  s390/mm,tlb: safeguard against speculative TLB creation
  s390/irq: Use defines for external interruption codes
  s390/irq: Add defines for external interruption codes
  s390/sclp: add timeout for queued requests
  kvm/s390: also set guest pages back to stable on kexec/kdump
  lcs: Add missing destroy_timer_on_stack()
  s390/tape: Add missing destroy_timer_on_stack()
  s390/tape: Use del_timer_sync()
  s390/3270: fix crash with multiple reset device requests
  s390/bitops,atomic: add missing memory barriers
  s390/zcrypt: add length check for aligned data to avoid overflow in msg-type 6
parents e9f37d3a 457f2180
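
The zEC12 TLB optimization described above relies on mm->context.attach_count
being used as two packed 16-bit counters, together with the new per-mm
cpu_attach_mask. A minimal sketch of the resulting local-vs-global flush
decision, condensed from the ptep_flush_direct(), pmdp_flush_direct() and
__tlb_flush_asce() hunks below (the helper name flush_can_be_local is
illustrative only, it is not part of the patches):

/*
 * attach_count: bits 0-15  = number of CPUs that have the mm attached,
 *               bits 16-31 = flush operations currently in progress;
 *               attachers that see the upper half non-zero defer their
 *               ASCE update and set TIF_TLB_WAIT instead.
 */
static bool flush_can_be_local(struct mm_struct *mm)
{
        int active = (mm == current->active_mm) ? 1 : 0;
        int count = atomic_add_return(0x10000, &mm->context.attach_count);

        /* If true, the caller issues IPTE/IDTE with the local-clearing bit,
         * otherwise the broadcast form; either way it subtracts 0x10000
         * from attach_count again once the flush is done. */
        return MACHINE_HAS_TLB_LC &&
               (count & 0xffff) <= active &&        /* no other CPU attached */
               cpumask_equal(mm_cpumask(mm),        /* no other CPU can hold */
                             cpumask_of(smp_processor_id())); /* stale TLB entries */
}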
...@@ -15,23 +15,29 @@ ...@@ -15,23 +15,29 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h> #include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
#define __ATOMIC_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC_OR "lao" #define __ATOMIC_OR "lao"
#define __ATOMIC_AND "lan" #define __ATOMIC_AND "lan"
#define __ATOMIC_ADD "laa" #define __ATOMIC_ADD "laa"
#define __ATOMIC_BARRIER "bcr 14,0\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string) \ #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \ ({ \
int old_val; \ int old_val; \
\ \
typecheck(atomic_t *, ptr); \ typecheck(atomic_t *, ptr); \
asm volatile( \ asm volatile( \
__barrier \
op_string " %0,%2,%1\n" \ op_string " %0,%2,%1\n" \
__barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \ : "=d" (old_val), "+Q" ((ptr)->counter) \
: "d" (op_val) \ : "d" (op_val) \
: "cc", "memory"); \ : "cc", "memory"); \
...@@ -43,8 +49,9 @@ ...@@ -43,8 +49,9 @@
#define __ATOMIC_OR "or" #define __ATOMIC_OR "or"
#define __ATOMIC_AND "nr" #define __ATOMIC_AND "nr"
#define __ATOMIC_ADD "ar" #define __ATOMIC_ADD "ar"
#define __ATOMIC_BARRIER "\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string) \ #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \ ({ \
int old_val, new_val; \ int old_val, new_val; \
\ \
...@@ -82,7 +89,7 @@ static inline void atomic_set(atomic_t *v, int i) ...@@ -82,7 +89,7 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t *v)
{ {
return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i; return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
} }
static inline void atomic_add(int i, atomic_t *v) static inline void atomic_add(int i, atomic_t *v)
...@@ -94,12 +101,10 @@ static inline void atomic_add(int i, atomic_t *v) ...@@ -94,12 +101,10 @@ static inline void atomic_add(int i, atomic_t *v)
: "+Q" (v->counter) : "+Q" (v->counter)
: "i" (i) : "i" (i)
: "cc", "memory"); : "cc", "memory");
} else { return;
atomic_add_return(i, v);
} }
#else
atomic_add_return(i, v);
#endif #endif
__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
} }
#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0) #define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
...@@ -115,12 +120,12 @@ static inline void atomic_add(int i, atomic_t *v) ...@@ -115,12 +120,12 @@ static inline void atomic_add(int i, atomic_t *v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND); __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
} }
static inline void atomic_set_mask(unsigned int mask, atomic_t *v) static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{ {
__ATOMIC_LOOP(v, mask, __ATOMIC_OR); __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
} }
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
...@@ -157,19 +162,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -157,19 +162,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define __ATOMIC64_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC64_OR "laog" #define __ATOMIC64_OR "laog"
#define __ATOMIC64_AND "lang" #define __ATOMIC64_AND "lang"
#define __ATOMIC64_ADD "laag" #define __ATOMIC64_ADD "laag"
#define __ATOMIC64_BARRIER "bcr 14,0\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string) \ #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \ ({ \
long long old_val; \ long long old_val; \
\ \
typecheck(atomic64_t *, ptr); \ typecheck(atomic64_t *, ptr); \
asm volatile( \ asm volatile( \
__barrier \
op_string " %0,%2,%1\n" \ op_string " %0,%2,%1\n" \
__barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \ : "=d" (old_val), "+Q" ((ptr)->counter) \
: "d" (op_val) \ : "d" (op_val) \
: "cc", "memory"); \ : "cc", "memory"); \
...@@ -181,8 +191,9 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -181,8 +191,9 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define __ATOMIC64_OR "ogr" #define __ATOMIC64_OR "ogr"
#define __ATOMIC64_AND "ngr" #define __ATOMIC64_AND "ngr"
#define __ATOMIC64_ADD "agr" #define __ATOMIC64_ADD "agr"
#define __ATOMIC64_BARRIER "\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string) \ #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \ ({ \
long long old_val, new_val; \ long long old_val, new_val; \
\ \
...@@ -220,17 +231,32 @@ static inline void atomic64_set(atomic64_t *v, long long i) ...@@ -220,17 +231,32 @@ static inline void atomic64_set(atomic64_t *v, long long i)
static inline long long atomic64_add_return(long long i, atomic64_t *v) static inline long long atomic64_add_return(long long i, atomic64_t *v)
{ {
return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i; return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
asm volatile(
"agsi %0,%1\n"
: "+Q" (v->counter)
: "i" (i)
: "cc", "memory");
return;
}
#endif
__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
} }
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{ {
__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND); __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
} }
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{ {
__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR); __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
} }
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
...@@ -334,25 +360,13 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) ...@@ -334,25 +360,13 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
} while (atomic64_cmpxchg(v, old, new) != old); } while (atomic64_cmpxchg(v, old, new) != old);
} }
#endif /* CONFIG_64BIT */
static inline void atomic64_add(long long i, atomic64_t *v) static inline void atomic64_add(long long i, atomic64_t *v)
{ {
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
asm volatile(
"agsi %0,%1\n"
: "+Q" (v->counter)
: "i" (i)
: "cc", "memory");
} else {
atomic64_add_return(i, v);
}
#else
atomic64_add_return(i, v); atomic64_add_return(i, v);
#endif
} }
#endif /* CONFIG_64BIT */
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{ {
long long c, old; long long c, old;
......
...@@ -47,14 +47,18 @@ ...@@ -47,14 +47,18 @@
#include <linux/typecheck.h> #include <linux/typecheck.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <asm/barrier.h>
#define __BITOPS_NO_BARRIER "\n"
#ifndef CONFIG_64BIT #ifndef CONFIG_64BIT
#define __BITOPS_OR "or" #define __BITOPS_OR "or"
#define __BITOPS_AND "nr" #define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr" #define __BITOPS_XOR "xr"
#define __BITOPS_BARRIER "\n"
#define __BITOPS_LOOP(__addr, __val, __op_string) \ #define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \ ({ \
unsigned long __old, __new; \ unsigned long __old, __new; \
\ \
...@@ -67,7 +71,7 @@ ...@@ -67,7 +71,7 @@
" jl 0b" \ " jl 0b" \
: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
: "d" (__val) \ : "d" (__val) \
: "cc"); \ : "cc", "memory"); \
__old; \ __old; \
}) })
...@@ -78,17 +82,20 @@ ...@@ -78,17 +82,20 @@
#define __BITOPS_OR "laog" #define __BITOPS_OR "laog"
#define __BITOPS_AND "lang" #define __BITOPS_AND "lang"
#define __BITOPS_XOR "laxg" #define __BITOPS_XOR "laxg"
#define __BITOPS_BARRIER "bcr 14,0\n"
#define __BITOPS_LOOP(__addr, __val, __op_string) \ #define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \ ({ \
unsigned long __old; \ unsigned long __old; \
\ \
typecheck(unsigned long *, (__addr)); \ typecheck(unsigned long *, (__addr)); \
asm volatile( \ asm volatile( \
__barrier \
__op_string " %0,%2,%1\n" \ __op_string " %0,%2,%1\n" \
__barrier \
: "=d" (__old), "+Q" (*(__addr)) \ : "=d" (__old), "+Q" (*(__addr)) \
: "d" (__val) \ : "d" (__val) \
: "cc"); \ : "cc", "memory"); \
__old; \ __old; \
}) })
...@@ -97,8 +104,9 @@ ...@@ -97,8 +104,9 @@
#define __BITOPS_OR "ogr" #define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr" #define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr" #define __BITOPS_XOR "xgr"
#define __BITOPS_BARRIER "\n"
#define __BITOPS_LOOP(__addr, __val, __op_string) \ #define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \ ({ \
unsigned long __old, __new; \ unsigned long __old, __new; \
\ \
...@@ -111,7 +119,7 @@ ...@@ -111,7 +119,7 @@
" jl 0b" \ " jl 0b" \
: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
: "d" (__val) \ : "d" (__val) \
: "cc"); \ : "cc", "memory"); \
__old; \ __old; \
}) })
...@@ -149,12 +157,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr) ...@@ -149,12 +157,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
"oi %0,%b1\n" "oi %0,%b1\n"
: "+Q" (*caddr) : "+Q" (*caddr)
: "i" (1 << (nr & 7)) : "i" (1 << (nr & 7))
: "cc"); : "cc", "memory");
return; return;
} }
#endif #endif
mask = 1UL << (nr & (BITS_PER_LONG - 1)); mask = 1UL << (nr & (BITS_PER_LONG - 1));
__BITOPS_LOOP(addr, mask, __BITOPS_OR); __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
} }
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr) static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
...@@ -170,12 +178,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr) ...@@ -170,12 +178,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
"ni %0,%b1\n" "ni %0,%b1\n"
: "+Q" (*caddr) : "+Q" (*caddr)
: "i" (~(1 << (nr & 7))) : "i" (~(1 << (nr & 7)))
: "cc"); : "cc", "memory");
return; return;
} }
#endif #endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
__BITOPS_LOOP(addr, mask, __BITOPS_AND); __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
} }
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr) static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
...@@ -191,12 +199,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr) ...@@ -191,12 +199,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
"xi %0,%b1\n" "xi %0,%b1\n"
: "+Q" (*caddr) : "+Q" (*caddr)
: "i" (1 << (nr & 7)) : "i" (1 << (nr & 7))
: "cc"); : "cc", "memory");
return; return;
} }
#endif #endif
mask = 1UL << (nr & (BITS_PER_LONG - 1)); mask = 1UL << (nr & (BITS_PER_LONG - 1));
__BITOPS_LOOP(addr, mask, __BITOPS_XOR); __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
} }
static inline int static inline int
...@@ -206,8 +214,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr) ...@@ -206,8 +214,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask; unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1)); mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __BITOPS_LOOP(addr, mask, __BITOPS_OR); old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
barrier();
return (old & mask) != 0; return (old & mask) != 0;
} }
...@@ -218,8 +225,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr) ...@@ -218,8 +225,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask; unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
old = __BITOPS_LOOP(addr, mask, __BITOPS_AND); old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
barrier();
return (old & ~mask) != 0; return (old & ~mask) != 0;
} }
...@@ -230,8 +236,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr) ...@@ -230,8 +236,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask; unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1)); mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR); old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
barrier();
return (old & mask) != 0; return (old & mask) != 0;
} }
......
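
A note on the atomic/bitops hunks above: on z196 and newer the interlocked-access
instructions (laa, lao, laog, ...) update memory atomically but, per the "add
missing memory barriers" patch, do not by themselves give the full barrier
semantics that the value-returning and test_and_* operations are documented to
provide. Hence the new __barrier argument, which expands to "bcr 14,0" (a
serializing no-op) around those variants and to nothing for the void ones; on
older machines the compare-and-swap loop already serializes, so the define stays
empty there. A small example of code that silently depends on the implied
barriers -- it is not from the patch -- is a trivial bit lock:

static unsigned long my_lock_word;              /* bit 0 is the lock bit */

static void my_bit_lock(void)
{
        /* test_and_set_bit() must act as an acquire barrier, otherwise loads
         * and stores from the critical section could be reordered before the
         * lock is actually taken; this is what __BITOPS_BARRIER guarantees. */
        while (test_and_set_bit(0, &my_lock_word))
                cpu_relax();
}

static void my_bit_unlock(void)
{
        clear_bit_unlock(0, &my_lock_word);     /* release side */
}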
#ifndef _ASM_S390_FUTEX_H #ifndef _ASM_S390_FUTEX_H
#define _ASM_S390_FUTEX_H #define _ASM_S390_FUTEX_H
#include <linux/futex.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/futex.h>
#include <asm/mmu_context.h>
#include <asm/errno.h> #include <asm/errno.h>
int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval); #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old); asm volatile( \
" sacf 256\n" \
"0: l %1,0(%6)\n" \
"1:"insn \
"2: cs %1,%2,0(%6)\n" \
"3: jl 1b\n" \
" lhi %0,0\n" \
"4: sacf 768\n" \
EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
"=m" (*uaddr) \
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc");
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{ {
...@@ -14,13 +27,37 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ...@@ -14,13 +27,37 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20; int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20; int cmparg = (encoded_op << 20) >> 20;
int oldval, ret; int oldval = 0, newval, ret;
update_primary_asce(current);
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
pagefault_disable(); pagefault_disable();
ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval); switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("lr %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("lr %2,%1\nar %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("lr %2,%1\nor %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
ret, oldval, newval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable(); pagefault_enable();
if (!ret) { if (!ret) {
...@@ -37,4 +74,23 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) ...@@ -37,4 +74,23 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
return ret; return ret;
} }
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret;
update_primary_asce(current);
asm volatile(
" sacf 256\n"
"0: cs %1,%4,0(%5)\n"
"1: la %0,0\n"
"2: sacf 768\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory");
*uval = oldval;
return ret;
}
#endif /* _ASM_S390_FUTEX_H */ #endif /* _ASM_S390_FUTEX_H */
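
The inline futex code above is fairly dense. Here is a hedged C rendering of
what __futex_atomic_op() does for the FUTEX_OP_ADD case, leaving out the
sacf 256 / sacf 768 switch to secondary-space mode (through which the user
memory is reached) and the EX_TABLE based -EFAULT handling;
compare_and_swap_user() is a hypothetical stand-in for the CS instruction,
which also refreshes the old value when the swap fails:

/* Editorial sketch, not kernel code. */
static int futex_add_sketch(u32 __user *uaddr, int oparg, int *old)
{
        u32 oldval, newval;

        oldval = *uaddr;                        /* "l  %1,0(%6)"             */
        do {
                newval = oldval + oparg;        /* "lr %2,%1" + "ar %2,%5"   */
        } while (!compare_and_swap_user(uaddr, &oldval, newval));
                                                /* "cs %1,%2,0(%6)", "jl 1b" */
        *old = oldval;
        /* success: "lhi %0,0"; a fault jumps to label 4 via the exception
         * table and leaves the preloaded -EFAULT in the return value */
        return 0;
}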
...@@ -16,6 +16,20 @@ ...@@ -16,6 +16,20 @@
/* This number is used when no interrupt has been assigned */ /* This number is used when no interrupt has been assigned */
#define NO_IRQ 0 #define NO_IRQ 0
/* External interruption codes */
#define EXT_IRQ_INTERRUPT_KEY 0x0040
#define EXT_IRQ_CLK_COMP 0x1004
#define EXT_IRQ_CPU_TIMER 0x1005
#define EXT_IRQ_WARNING_TRACK 0x1007
#define EXT_IRQ_MALFUNC_ALERT 0x1200
#define EXT_IRQ_EMERGENCY_SIG 0x1201
#define EXT_IRQ_EXTERNAL_CALL 0x1202
#define EXT_IRQ_TIMING_ALERT 0x1406
#define EXT_IRQ_MEASURE_ALERT 0x1407
#define EXT_IRQ_SERVICE_SIG 0x2401
#define EXT_IRQ_CP_SERVICE 0x2603
#define EXT_IRQ_IUCV 0x4000
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/hardirq.h> #include <linux/hardirq.h>
...@@ -77,8 +91,8 @@ struct ext_code { ...@@ -77,8 +91,8 @@ struct ext_code {
typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long); typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
int register_external_interrupt(u16 code, ext_int_handler_t handler); int register_external_irq(u16 code, ext_int_handler_t handler);
int unregister_external_interrupt(u16 code, ext_int_handler_t handler); int unregister_external_irq(u16 code, ext_int_handler_t handler);
enum irq_subclass { enum irq_subclass {
IRQ_SUBCLASS_MEASUREMENT_ALERT = 5, IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
......
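
The irq.h hunk above introduces symbolic names for the external interruption
codes and renames the registration API from register_external_interrupt() to
register_external_irq(). A minimal, hypothetical user of the new interface
(the real conversions follow further down in perf_cpum_cf.c, smp.c, time.c
and friends):

static void my_measurement_alert(struct ext_code ext_code,
                                 unsigned int param32, unsigned long param64)
{
        /* handle the measurement-alert external interruption */
}

static int __init my_setup(void)
{
        /* formerly: register_external_interrupt(0x1407, ...) */
        return register_external_irq(EXT_IRQ_MEASURE_ALERT,
                                     my_measurement_alert);
}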
#ifndef __MMU_H #ifndef __MMU_H
#define __MMU_H #define __MMU_H
#include <linux/cpumask.h>
#include <linux/errno.h> #include <linux/errno.h>
typedef struct { typedef struct {
cpumask_t cpu_attach_mask;
atomic_t attach_count; atomic_t attach_count;
unsigned int flush_mm; unsigned int flush_mm;
spinlock_t list_lock; spinlock_t list_lock;
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
static inline int init_new_context(struct task_struct *tsk, static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm) struct mm_struct *mm)
{ {
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.attach_count, 0); atomic_set(&mm->context.attach_count, 0);
mm->context.flush_mm = 0; mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
...@@ -29,41 +30,61 @@ static inline int init_new_context(struct task_struct *tsk, ...@@ -29,41 +30,61 @@ static inline int init_new_context(struct task_struct *tsk,
#define destroy_context(mm) do { } while (0) #define destroy_context(mm) do { } while (0)
#ifndef CONFIG_64BIT static inline void update_user_asce(struct mm_struct *mm, int load_primary)
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{ {
pgd_t *pgd = mm->pgd; pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
/* Load primary space page table origin. */ if (load_primary)
asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce)); __ctl_load(S390_lowcore.user_asce, 1, 1);
set_fs(current->thread.mm_segment); set_fs(current->thread.mm_segment);
} }
static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
{
S390_lowcore.user_asce = S390_lowcore.kernel_asce;
if (load_primary)
__ctl_load(S390_lowcore.user_asce, 1, 1);
__ctl_load(S390_lowcore.user_asce, 7, 7);
}
static inline void update_primary_asce(struct task_struct *tsk)
{
unsigned long asce;
__ctl_store(asce, 1, 1);
if (asce != S390_lowcore.kernel_asce)
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
set_tsk_thread_flag(tsk, TIF_ASCE);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk) struct task_struct *tsk)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
update_primary_asce(tsk);
if (prev == next) if (prev == next)
return; return;
if (MACHINE_HAS_TLB_LC)
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
if (atomic_inc_return(&next->context.attach_count) >> 16) { if (atomic_inc_return(&next->context.attach_count) >> 16) {
/* Delay update_mm until all TLB flushes are done. */ /* Delay update_user_asce until all TLB flushes are done. */
set_tsk_thread_flag(tsk, TIF_TLB_WAIT); set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
/* Clear old ASCE by loading the kernel ASCE. */
clear_user_asce(next, 0);
} else { } else {
cpumask_set_cpu(cpu, mm_cpumask(next)); cpumask_set_cpu(cpu, mm_cpumask(next));
update_mm(next, tsk); update_user_asce(next, 0);
if (next->context.flush_mm) if (next->context.flush_mm)
/* Flush pending TLBs */ /* Flush pending TLBs */
__tlb_flush_mm(next); __tlb_flush_mm(next);
} }
atomic_dec(&prev->context.attach_count); atomic_dec(&prev->context.attach_count);
WARN_ON(atomic_read(&prev->context.attach_count) < 0); WARN_ON(atomic_read(&prev->context.attach_count) < 0);
if (MACHINE_HAS_TLB_LC)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
} }
#define finish_arch_post_lock_switch finish_arch_post_lock_switch #define finish_arch_post_lock_switch finish_arch_post_lock_switch
...@@ -80,7 +101,7 @@ static inline void finish_arch_post_lock_switch(void) ...@@ -80,7 +101,7 @@ static inline void finish_arch_post_lock_switch(void)
cpu_relax(); cpu_relax();
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
update_mm(mm, tsk); update_user_asce(mm, 0);
if (mm->context.flush_mm) if (mm->context.flush_mm)
__tlb_flush_mm(mm); __tlb_flush_mm(mm);
preempt_enable(); preempt_enable();
......
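
To summarize the mmu_context changes above: update_mm() is split into
update_user_asce()/clear_user_asce(), and the new update_primary_asce() forces
the kernel ASCE into CR1 and sets TIF_ASCE, so the user ASCE is only put back
lazily on the next exit to user space. A hedged C rendering of that exit-path
fixup (the actual implementation is the assembler sysc_uaccess/io_uaccess code
added to entry.S and entry64.S below; the function name is illustrative):

static void return_to_user_asce_fixup(struct thread_info *ti)
{
        if (test_and_clear_ti_thread_flag(ti, TIF_ASCE))
                /* restore the user ASCE in CR1 that update_primary_asce()
                 * replaced with the kernel ASCE while in the kernel */
                __ctl_load(S390_lowcore.user_asce, 1, 1);
}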
...@@ -1070,12 +1070,35 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) ...@@ -1070,12 +1070,35 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
} }
static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
#ifndef CONFIG_64BIT
/* pto in ESA mode must point to the start of the segment table */
pto &= 0x7ffffc00;
#endif
/* Invalidation + local TLB flush for the pte */
asm volatile(
" .insn rrf,0xb2210000,%2,%3,0,1"
: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}
static inline void ptep_flush_direct(struct mm_struct *mm, static inline void ptep_flush_direct(struct mm_struct *mm,
unsigned long address, pte_t *ptep) unsigned long address, pte_t *ptep)
{ {
int active, count;
if (pte_val(*ptep) & _PAGE_INVALID) if (pte_val(*ptep) & _PAGE_INVALID)
return; return;
active = (mm == current->active_mm) ? 1 : 0;
count = atomic_add_return(0x10000, &mm->context.attach_count);
if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__ptep_ipte_local(address, ptep);
else
__ptep_ipte(address, ptep); __ptep_ipte(address, ptep);
atomic_sub(0x10000, &mm->context.attach_count);
} }
static inline void ptep_flush_lazy(struct mm_struct *mm, static inline void ptep_flush_lazy(struct mm_struct *mm,
...@@ -1384,35 +1407,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) ...@@ -1384,35 +1407,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0) #define pte_unmap(pte) do { } while (0)
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
unsigned long sto = (unsigned long) pmdp -
pmd_index(address) * sizeof(pmd_t);
if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
asm volatile(
" .insn rrf,0xb98e0000,%2,%3,0,0"
: "=m" (*pmdp)
: "m" (*pmdp), "a" (sto),
"a" ((address & HPAGE_MASK))
: "cc"
);
}
}
static inline void __pmd_csp(pmd_t *pmdp)
{
register unsigned long reg2 asm("2") = pmd_val(*pmdp);
register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
_SEGMENT_ENTRY_INVALID;
register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
asm volatile(
" csp %1,%3"
: "=m" (*pmdp)
: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{ {
...@@ -1481,18 +1475,80 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) ...@@ -1481,18 +1475,80 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
} }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
register unsigned long reg2 asm("2") = pmd_val(*pmdp);
register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
_SEGMENT_ENTRY_INVALID;
register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
asm volatile(
" csp %1,%3"
: "=m" (*pmdp)
: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}
static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
unsigned long sto;
sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
asm volatile(
" .insn rrf,0xb98e0000,%2,%3,0,0"
: "=m" (*pmdp)
: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
: "cc" );
}
static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
unsigned long sto;
sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
asm volatile(
" .insn rrf,0xb98e0000,%2,%3,0,1"
: "=m" (*pmdp)
: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
: "cc" );
}
static inline void pmdp_flush_direct(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
int active, count;
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
return;
if (!MACHINE_HAS_IDTE) {
__pmdp_csp(pmdp);
return;
}
active = (mm == current->active_mm) ? 1 : 0;
count = atomic_add_return(0x10000, &mm->context.attach_count);
if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
__pmdp_idte_local(address, pmdp);
else
__pmdp_idte(address, pmdp);
atomic_sub(0x10000, &mm->context.attach_count);
}
static inline void pmdp_flush_lazy(struct mm_struct *mm, static inline void pmdp_flush_lazy(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp) unsigned long address, pmd_t *pmdp)
{ {
int active, count; int active, count;
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
return;
active = (mm == current->active_mm) ? 1 : 0; active = (mm == current->active_mm) ? 1 : 0;
count = atomic_add_return(0x10000, &mm->context.attach_count); count = atomic_add_return(0x10000, &mm->context.attach_count);
if ((count & 0xffff) <= active) { if ((count & 0xffff) <= active) {
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID; pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1; mm->context.flush_mm = 1;
} else } else if (MACHINE_HAS_IDTE)
__pmd_idte(address, pmdp); __pmdp_idte(address, pmdp);
else
__pmdp_csp(pmdp);
atomic_sub(0x10000, &mm->context.attach_count); atomic_sub(0x10000, &mm->context.attach_count);
} }
...@@ -1545,7 +1601,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, ...@@ -1545,7 +1601,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
pmd_t pmd; pmd_t pmd;
pmd = *pmdp; pmd = *pmdp;
__pmd_idte(address, pmdp); pmdp_flush_direct(vma->vm_mm, address, pmdp);
*pmdp = pmd_mkold(pmd); *pmdp = pmd_mkold(pmd);
return pmd_young(pmd); return pmd_young(pmd);
} }
...@@ -1556,7 +1612,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, ...@@ -1556,7 +1612,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
{ {
pmd_t pmd = *pmdp; pmd_t pmd = *pmdp;
__pmd_idte(address, pmdp); pmdp_flush_direct(mm, address, pmdp);
pmd_clear(pmdp); pmd_clear(pmdp);
return pmd; return pmd;
} }
...@@ -1572,7 +1628,7 @@ static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma, ...@@ -1572,7 +1628,7 @@ static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
static inline void pmdp_invalidate(struct vm_area_struct *vma, static inline void pmdp_invalidate(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp) unsigned long address, pmd_t *pmdp)
{ {
__pmd_idte(address, pmdp); pmdp_flush_direct(vma->vm_mm, address, pmdp);
} }
#define __HAVE_ARCH_PMDP_SET_WRPROTECT #define __HAVE_ARCH_PMDP_SET_WRPROTECT
...@@ -1582,7 +1638,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, ...@@ -1582,7 +1638,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
pmd_t pmd = *pmdp; pmd_t pmd = *pmdp;
if (pmd_write(pmd)) { if (pmd_write(pmd)) {
__pmd_idte(address, pmdp); pmdp_flush_direct(mm, address, pmdp);
set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
} }
} }
......
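
A short gloss on the raw .insn forms used in the pgtable.h hunks above:
0xb221 is IPTE (invalidate page table entry) and 0xb98e is IDTE (invalidate
DAT table entry); the last .insn operand fills the M4 field, where 1 selects
the local-clearing form (this CPU's TLB only) and 0 the usual broadcast form.
The local forms need the local-TLB-clearing facility, facility bit 51, which
detect_machine_facilities() below caches as MACHINE_FLAG_TLB_LC. A hypothetical
combined form of __ptep_ipte()/__ptep_ipte_local(), not part of this series
(the 31-bit pto fixup is omitted, and "local" must be a compile-time constant
for the "i" constraint to work):

static inline void __ptep_ipte_m4(unsigned long address, pte_t *ptep,
                                  const int local)
{
        unsigned long pto = (unsigned long) ptep;

        /* IPTE with M4 = local: 1 flushes only this CPU's TLB entry */
        asm volatile(
                "       .insn   rrf,0xb2210000,%2,%3,0,%4"
                : "=m" (*ptep)
                : "m" (*ptep), "a" (pto), "a" (address), "i" (local));
}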
...@@ -68,6 +68,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, ...@@ -68,6 +68,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_FLAG_TOPOLOGY (1UL << 14) #define MACHINE_FLAG_TOPOLOGY (1UL << 14)
#define MACHINE_FLAG_TE (1UL << 15) #define MACHINE_FLAG_TE (1UL << 15)
#define MACHINE_FLAG_RRBM (1UL << 16) #define MACHINE_FLAG_RRBM (1UL << 16)
#define MACHINE_FLAG_TLB_LC (1UL << 17)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
...@@ -90,6 +91,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, ...@@ -90,6 +91,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_HAS_TOPOLOGY (0) #define MACHINE_HAS_TOPOLOGY (0)
#define MACHINE_HAS_TE (0) #define MACHINE_HAS_TE (0)
#define MACHINE_HAS_RRBM (0) #define MACHINE_HAS_RRBM (0)
#define MACHINE_HAS_TLB_LC (0)
#else /* CONFIG_64BIT */ #else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1) #define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1) #define MACHINE_HAS_CSP (1)
...@@ -102,6 +104,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, ...@@ -102,6 +104,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) #define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM) #define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
/* /*
......
...@@ -132,6 +132,7 @@ static inline void restore_access_regs(unsigned int *acrs) ...@@ -132,6 +132,7 @@ static inline void restore_access_regs(unsigned int *acrs)
update_cr_regs(next); \ update_cr_regs(next); \
} \ } \
prev = __switch_to(prev,next); \ prev = __switch_to(prev,next); \
update_primary_asce(current); \
} while (0) } while (0)
#define finish_arch_switch(prev) do { \ #define finish_arch_switch(prev) do { \
......
...@@ -82,6 +82,7 @@ static inline struct thread_info *current_thread_info(void) ...@@ -82,6 +82,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SIGPENDING 2 /* signal pending */ #define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_TLB_WAIT 4 /* wait for TLB flush completion */ #define TIF_TLB_WAIT 4 /* wait for TLB flush completion */
#define TIF_ASCE 5 /* primary asce needs fixup / uaccess */
#define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */ #define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */
#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ #define TIF_MCCK_PENDING 7 /* machine check handling is pending */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */
...@@ -99,6 +100,7 @@ static inline struct thread_info *current_thread_info(void) ...@@ -99,6 +100,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_TLB_WAIT (1<<TIF_TLB_WAIT) #define _TIF_TLB_WAIT (1<<TIF_TLB_WAIT)
#define _TIF_ASCE (1<<TIF_ASCE)
#define _TIF_PER_TRAP (1<<TIF_PER_TRAP) #define _TIF_PER_TRAP (1<<TIF_PER_TRAP)
#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) #define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
......
...@@ -57,8 +57,6 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb, ...@@ -57,8 +57,6 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
tlb->end = end; tlb->end = end;
tlb->fullmm = !(start | (end+1)); tlb->fullmm = !(start | (end+1));
tlb->batch = NULL; tlb->batch = NULL;
if (tlb->fullmm)
__tlb_flush_mm(mm);
} }
static inline void tlb_flush_mmu(struct mmu_gather *tlb) static inline void tlb_flush_mmu(struct mmu_gather *tlb)
...@@ -96,9 +94,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) ...@@ -96,9 +94,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long address) unsigned long address)
{ {
if (!tlb->fullmm) page_table_free_rcu(tlb, (unsigned long *) pte);
return page_table_free_rcu(tlb, (unsigned long *) pte);
page_table_free(tlb->mm, (unsigned long *) pte);
} }
/* /*
...@@ -114,9 +110,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, ...@@ -114,9 +110,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31)) if (tlb->mm->context.asce_limit <= (1UL << 31))
return; return;
if (!tlb->fullmm) tlb_remove_table(tlb, pmd);
return tlb_remove_table(tlb, pmd);
crst_table_free(tlb->mm, (unsigned long *) pmd);
#endif #endif
} }
...@@ -133,9 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, ...@@ -133,9 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 42)) if (tlb->mm->context.asce_limit <= (1UL << 42))
return; return;
if (!tlb->fullmm) tlb_remove_table(tlb, pud);
return tlb_remove_table(tlb, pud);
crst_table_free(tlb->mm, (unsigned long *) pud);
#endif #endif
} }
......
...@@ -7,19 +7,41 @@ ...@@ -7,19 +7,41 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
/* /*
* Flush all tlb entries on the local cpu. * Flush all TLB entries on the local CPU.
*/ */
static inline void __tlb_flush_local(void) static inline void __tlb_flush_local(void)
{ {
asm volatile("ptlb" : : : "memory"); asm volatile("ptlb" : : : "memory");
} }
#ifdef CONFIG_SMP
/* /*
* Flush all tlb entries on all cpus. * Flush TLB entries for a specific ASCE on all CPUs
*/ */
static inline void __tlb_flush_idte(unsigned long asce)
{
/* Global TLB flush for the mm */
asm volatile(
" .insn rrf,0xb98e0000,0,%0,%1,0"
: : "a" (2048), "a" (asce) : "cc");
}
/*
* Flush TLB entries for a specific ASCE on the local CPU
*/
static inline void __tlb_flush_idte_local(unsigned long asce)
{
/* Local TLB flush for the mm */
asm volatile(
" .insn rrf,0xb98e0000,0,%0,%1,1"
: : "a" (2048), "a" (asce) : "cc");
}
#ifdef CONFIG_SMP
void smp_ptlb_all(void); void smp_ptlb_all(void);
/*
* Flush all TLB entries on all CPUs.
*/
static inline void __tlb_flush_global(void) static inline void __tlb_flush_global(void)
{ {
register unsigned long reg2 asm("2"); register unsigned long reg2 asm("2");
...@@ -42,36 +64,89 @@ static inline void __tlb_flush_global(void) ...@@ -42,36 +64,89 @@ static inline void __tlb_flush_global(void)
: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
} }
/*
* Flush TLB entries for a specific mm on all CPUs (in case gmap is used
* this implicates multiple ASCEs!).
*/
static inline void __tlb_flush_full(struct mm_struct *mm) static inline void __tlb_flush_full(struct mm_struct *mm)
{ {
cpumask_t local_cpumask;
preempt_disable(); preempt_disable();
/* atomic_add(0x10000, &mm->context.attach_count);
* If the process only ran on the local cpu, do a local flush. if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
*/ /* Local TLB flush */
cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
__tlb_flush_local(); __tlb_flush_local();
} else {
/* Global TLB flush */
__tlb_flush_global();
/* Reset TLB flush mask */
if (MACHINE_HAS_TLB_LC)
cpumask_copy(mm_cpumask(mm),
&mm->context.cpu_attach_mask);
}
atomic_sub(0x10000, &mm->context.attach_count);
preempt_enable();
}
/*
* Flush TLB entries for a specific ASCE on all CPUs.
*/
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
int active, count;
preempt_disable();
active = (mm == current->active_mm) ? 1 : 0;
count = atomic_add_return(0x10000, &mm->context.attach_count);
if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
__tlb_flush_idte_local(asce);
} else {
if (MACHINE_HAS_IDTE)
__tlb_flush_idte(asce);
else else
__tlb_flush_global(); __tlb_flush_global();
/* Reset TLB flush mask */
if (MACHINE_HAS_TLB_LC)
cpumask_copy(mm_cpumask(mm),
&mm->context.cpu_attach_mask);
}
atomic_sub(0x10000, &mm->context.attach_count);
preempt_enable(); preempt_enable();
} }
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_IDTE)
__tlb_flush_idte((unsigned long) init_mm.pgd |
init_mm.context.asce_bits);
else
__tlb_flush_global();
}
#else #else
#define __tlb_flush_full(mm) __tlb_flush_local()
#define __tlb_flush_global() __tlb_flush_local() #define __tlb_flush_global() __tlb_flush_local()
#endif #define __tlb_flush_full(mm) __tlb_flush_local()
/* /*
* Flush all tlb entries of a page table on all cpus. * Flush TLB entries for a specific ASCE on all CPUs.
*/ */
static inline void __tlb_flush_idte(unsigned long asce) static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{ {
asm volatile( if (MACHINE_HAS_TLB_LC)
" .insn rrf,0xb98e0000,0,%0,%1,0" __tlb_flush_idte_local(asce);
: : "a" (2048), "a" (asce) : "cc" ); else
__tlb_flush_local();
} }
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_TLB_LC)
__tlb_flush_idte_local((unsigned long) init_mm.pgd |
init_mm.context.asce_bits);
else
__tlb_flush_local();
}
#endif
static inline void __tlb_flush_mm(struct mm_struct * mm) static inline void __tlb_flush_mm(struct mm_struct * mm)
{ {
/* /*
...@@ -80,7 +155,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) ...@@ -80,7 +155,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* only ran on the local cpu. * only ran on the local cpu.
*/ */
if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
__tlb_flush_idte((unsigned long) mm->pgd | __tlb_flush_asce(mm, (unsigned long) mm->pgd |
mm->context.asce_bits); mm->context.asce_bits);
else else
__tlb_flush_full(mm); __tlb_flush_full(mm);
...@@ -130,7 +205,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, ...@@ -130,7 +205,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
static inline void flush_tlb_kernel_range(unsigned long start, static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end) unsigned long end)
{ {
__tlb_flush_mm(&init_mm); __tlb_flush_kernel();
} }
#endif /* _S390_TLBFLUSH_H */ #endif /* _S390_TLBFLUSH_H */
...@@ -92,8 +92,6 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x) ...@@ -92,8 +92,6 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
#define ARCH_HAS_SORT_EXTABLE #define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE #define ARCH_HAS_SEARCH_EXTABLE
int __handle_fault(unsigned long, unsigned long, int);
/** /**
* __copy_from_user: - Copy a block of data from user space, with less checking. * __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space. * @to: Destination address, in kernel space.
......
...@@ -136,6 +136,7 @@ int main(void) ...@@ -136,6 +136,7 @@ int main(void)
DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn)); DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data)); DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source)); DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
......
...@@ -386,6 +386,8 @@ static __init void detect_machine_facilities(void) ...@@ -386,6 +386,8 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TE; S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
if (test_facility(66)) if (test_facility(66))
S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM; S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
#endif #endif
} }
......
...@@ -38,9 +38,9 @@ __PT_R14 = __PT_GPRS + 56 ...@@ -38,9 +38,9 @@ __PT_R14 = __PT_GPRS + 56
__PT_R15 = __PT_GPRS + 60 __PT_R15 = __PT_GPRS + 60
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP ) _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING) _TIF_MCCK_PENDING | _TIF_ASCE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT) _TIF_SYSCALL_TRACEPOINT)
_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
...@@ -241,6 +241,8 @@ sysc_work: ...@@ -241,6 +241,8 @@ sysc_work:
jo sysc_sigpending jo sysc_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume jo sysc_notify_resume
tm __TI_flags+3(%r12),_TIF_ASCE
jo sysc_uaccess
j sysc_return # beware of critical section cleanup j sysc_return # beware of critical section cleanup
# #
...@@ -259,6 +261,14 @@ sysc_mcck_pending: ...@@ -259,6 +261,14 @@ sysc_mcck_pending:
la %r14,BASED(sysc_return) la %r14,BASED(sysc_return)
br %r1 # TIF bit will be cleared by handler br %r1 # TIF bit will be cleared by handler
#
# _TIF_ASCE is set, load user space asce
#
sysc_uaccess:
ni __TI_flags+3(%r12),255-_TIF_ASCE
lctl %c1,%c1,__LC_USER_ASCE # load primary asce
j sysc_return
# #
# _TIF_SIGPENDING is set, call do_signal # _TIF_SIGPENDING is set, call do_signal
# #
...@@ -522,6 +532,8 @@ io_work_tif: ...@@ -522,6 +532,8 @@ io_work_tif:
jo io_sigpending jo io_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
jo io_notify_resume jo io_notify_resume
tm __TI_flags+3(%r12),_TIF_ASCE
jo io_uaccess
j io_return # beware of critical section cleanup j io_return # beware of critical section cleanup
# #
...@@ -534,6 +546,14 @@ io_mcck_pending: ...@@ -534,6 +546,14 @@ io_mcck_pending:
TRACE_IRQS_OFF TRACE_IRQS_OFF
j io_return j io_return
#
# _TIF_ASCE is set, load user space asce
#
io_uaccess:
ni __TI_flags+3(%r12),255-_TIF_ASCE
lctl %c1,%c1,__LC_USER_ASCE # load primary asce
j io_return
# #
# _TIF_NEED_RESCHED is set, call schedule # _TIF_NEED_RESCHED is set, call schedule
# #
......
...@@ -43,9 +43,9 @@ STACK_SIZE = 1 << STACK_SHIFT ...@@ -43,9 +43,9 @@ STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP ) _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING) _TIF_MCCK_PENDING | _TIF_ASCE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT) _TIF_SYSCALL_TRACEPOINT)
_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
...@@ -275,6 +275,8 @@ sysc_work: ...@@ -275,6 +275,8 @@ sysc_work:
jo sysc_sigpending jo sysc_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume jo sysc_notify_resume
tm __TI_flags+7(%r12),_TIF_ASCE
jo sysc_uaccess
j sysc_return # beware of critical section cleanup j sysc_return # beware of critical section cleanup
# #
...@@ -291,6 +293,14 @@ sysc_mcck_pending: ...@@ -291,6 +293,14 @@ sysc_mcck_pending:
larl %r14,sysc_return larl %r14,sysc_return
jg s390_handle_mcck # TIF bit will be cleared by handler jg s390_handle_mcck # TIF bit will be cleared by handler
#
# _TIF_ASCE is set, load user space asce
#
sysc_uaccess:
ni __TI_flags+7(%r12),255-_TIF_ASCE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j sysc_return
# #
# _TIF_SIGPENDING is set, call do_signal # _TIF_SIGPENDING is set, call do_signal
# #
...@@ -559,6 +569,8 @@ io_work_tif: ...@@ -559,6 +569,8 @@ io_work_tif:
jo io_sigpending jo io_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo io_notify_resume jo io_notify_resume
tm __TI_flags+7(%r12),_TIF_ASCE
jo io_uaccess
j io_return # beware of critical section cleanup j io_return # beware of critical section cleanup
# #
...@@ -570,6 +582,14 @@ io_mcck_pending: ...@@ -570,6 +582,14 @@ io_mcck_pending:
TRACE_IRQS_OFF TRACE_IRQS_OFF
j io_return j io_return
#
# _TIF_ASCE is set, load user space asce
#
io_uaccess:
ni __TI_flags+7(%r12),255-_TIF_ASCE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j io_return
# #
# _TIF_NEED_RESCHED is set, call schedule # _TIF_NEED_RESCHED is set, call schedule
# #
......
...@@ -207,7 +207,7 @@ static inline int ext_hash(u16 code) ...@@ -207,7 +207,7 @@ static inline int ext_hash(u16 code)
return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1); return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1);
} }
int register_external_interrupt(u16 code, ext_int_handler_t handler) int register_external_irq(u16 code, ext_int_handler_t handler)
{ {
struct ext_int_info *p; struct ext_int_info *p;
unsigned long flags; unsigned long flags;
...@@ -225,9 +225,9 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler) ...@@ -225,9 +225,9 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler)
spin_unlock_irqrestore(&ext_int_hash_lock, flags); spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0; return 0;
} }
EXPORT_SYMBOL(register_external_interrupt); EXPORT_SYMBOL(register_external_irq);
int unregister_external_interrupt(u16 code, ext_int_handler_t handler) int unregister_external_irq(u16 code, ext_int_handler_t handler)
{ {
struct ext_int_info *p; struct ext_int_info *p;
unsigned long flags; unsigned long flags;
...@@ -243,7 +243,7 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler) ...@@ -243,7 +243,7 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
spin_unlock_irqrestore(&ext_int_hash_lock, flags); spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0; return 0;
} }
EXPORT_SYMBOL(unregister_external_interrupt); EXPORT_SYMBOL(unregister_external_irq);
static irqreturn_t do_ext_interrupt(int irq, void *dummy) static irqreturn_t do_ext_interrupt(int irq, void *dummy)
{ {
...@@ -253,7 +253,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy) ...@@ -253,7 +253,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
int index; int index;
ext_code = *(struct ext_code *) &regs->int_code; ext_code = *(struct ext_code *) &regs->int_code;
if (ext_code.code != 0x1004) if (ext_code.code != EXT_IRQ_CLK_COMP)
__get_cpu_var(s390_idle).nohz_delay = 1; __get_cpu_var(s390_idle).nohz_delay = 1;
index = ext_hash(ext_code.code); index = ext_hash(ext_code.code);
......
...@@ -673,7 +673,8 @@ static int __init cpumf_pmu_init(void) ...@@ -673,7 +673,8 @@ static int __init cpumf_pmu_init(void)
ctl_clear_bit(0, 48); ctl_clear_bit(0, 48);
/* register handler for measurement-alert interruptions */ /* register handler for measurement-alert interruptions */
rc = register_external_interrupt(0x1407, cpumf_measurement_alert); rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (rc) { if (rc) {
pr_err("Registering for CPU-measurement alerts " pr_err("Registering for CPU-measurement alerts "
"failed with rc=%i\n", rc); "failed with rc=%i\n", rc);
...@@ -684,7 +685,8 @@ static int __init cpumf_pmu_init(void) ...@@ -684,7 +685,8 @@ static int __init cpumf_pmu_init(void)
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
if (rc) { if (rc) {
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
unregister_external_interrupt(0x1407, cpumf_measurement_alert); unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
goto out; goto out;
} }
perf_cpu_notifier(cpumf_pmu_notifier); perf_cpu_notifier(cpumf_pmu_notifier);
......
...@@ -1621,7 +1621,8 @@ static int __init init_cpum_sampling_pmu(void) ...@@ -1621,7 +1621,8 @@ static int __init init_cpum_sampling_pmu(void)
pr_err("Registering for s390dbf failed\n"); pr_err("Registering for s390dbf failed\n");
debug_register_view(sfdbg, &debug_sprintf_view); debug_register_view(sfdbg, &debug_sprintf_view);
err = register_external_interrupt(0x1407, cpumf_measurement_alert); err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (err) { if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_ALRT); pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
goto out; goto out;
...@@ -1630,7 +1631,8 @@ static int __init init_cpum_sampling_pmu(void) ...@@ -1630,7 +1631,8 @@ static int __init init_cpum_sampling_pmu(void)
err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
if (err) { if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_PERF); pr_cpumsf_err(RS_INIT_FAILURE_PERF);
unregister_external_interrupt(0x1407, cpumf_measurement_alert); unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
goto out; goto out;
} }
perf_cpu_notifier(cpumf_pmu_notifier); perf_cpu_notifier(cpumf_pmu_notifier);
......
...@@ -138,7 +138,8 @@ static int __init runtime_instr_init(void) ...@@ -138,7 +138,8 @@ static int __init runtime_instr_init(void)
return 0; return 0;
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
rc = register_external_interrupt(0x1407, runtime_instr_int_handler); rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
runtime_instr_int_handler);
if (rc) if (rc)
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
else else
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/irq.h>
LC_EXT_NEW_PSW = 0x58 # addr of ext int handler LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit
...@@ -73,9 +74,9 @@ _sclp_wait_int: ...@@ -73,9 +74,9 @@ _sclp_wait_int:
lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt
.LwaitS1: .LwaitS1:
lh %r7,LC_EXT_INT_CODE lh %r7,LC_EXT_INT_CODE
chi %r7,0x1004 # timeout? chi %r7,EXT_IRQ_CLK_COMP # timeout?
je .LtimeoutS1 je .LtimeoutS1
chi %r7,0x2401 # service int? chi %r7,EXT_IRQ_SERVICE_SIG # service int?
jne .LloopS1 jne .LloopS1
sr %r2,%r2 sr %r2,%r2
l %r3,LC_EXT_INT_PARAM l %r3,LC_EXT_INT_PARAM
......
...@@ -236,6 +236,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) ...@@ -236,6 +236,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{ {
struct _lowcore *lc = pcpu->lowcore; struct _lowcore *lc = pcpu->lowcore;
if (MACHINE_HAS_TLB_LC)
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
atomic_inc(&init_mm.context.attach_count); atomic_inc(&init_mm.context.attach_count);
lc->cpu_nr = cpu; lc->cpu_nr = cpu;
lc->percpu_offset = __per_cpu_offset[cpu]; lc->percpu_offset = __per_cpu_offset[cpu];
...@@ -760,6 +763,9 @@ void __cpu_die(unsigned int cpu) ...@@ -760,6 +763,9 @@ void __cpu_die(unsigned int cpu)
cpu_relax(); cpu_relax();
pcpu_free_lowcore(pcpu); pcpu_free_lowcore(pcpu);
atomic_dec(&init_mm.context.attach_count); atomic_dec(&init_mm.context.attach_count);
cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
if (MACHINE_HAS_TLB_LC)
cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
} }
void __noreturn cpu_die(void) void __noreturn cpu_die(void)
...@@ -785,10 +791,10 @@ void __init smp_fill_possible_mask(void) ...@@ -785,10 +791,10 @@ void __init smp_fill_possible_mask(void)
void __init smp_prepare_cpus(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus)
{ {
/* request the 0x1201 emergency signal external interrupt */ /* request the 0x1201 emergency signal external interrupt */
if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201"); panic("Couldn't request external interrupt 0x1201");
/* request the 0x1202 external call external interrupt */ /* request the 0x1202 external call external interrupt */
if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202"); panic("Couldn't request external interrupt 0x1202");
smp_detect_cpus(); smp_detect_cpus();
} }
......
...@@ -262,11 +262,11 @@ void __init time_init(void) ...@@ -262,11 +262,11 @@ void __init time_init(void)
stp_reset(); stp_reset();
/* request the clock comparator external interrupt */ /* request the clock comparator external interrupt */
if (register_external_interrupt(0x1004, clock_comparator_interrupt)) if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
panic("Couldn't request external interrupt 0x1004"); panic("Couldn't request external interrupt 0x1004");
/* request the timing alert external interrupt */ /* request the timing alert external interrupt */
if (register_external_interrupt(0x1406, timing_alert_interrupt)) if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406"); panic("Couldn't request external interrupt 0x1406");
if (clocksource_register(&clocksource_tod) != 0) if (clocksource_register(&clocksource_tod) != 0)
......
...@@ -167,6 +167,10 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) ...@@ -167,6 +167,10 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
switch (subcode) { switch (subcode) {
case 0:
case 1:
page_table_reset_pgste(current->mm, 0, TASK_SIZE);
return -EOPNOTSUPP;
case 3: case 3:
vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
page_table_reset_pgste(current->mm, 0, TASK_SIZE); page_table_reset_pgste(current->mm, 0, TASK_SIZE);
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
# Makefile for s390-specific library files.. # Makefile for s390-specific library files..
# #
lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o lib-y += delay.o string.o uaccess.o find.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_SMP) += spinlock.o lib-$(CONFIG_SMP) += spinlock.o
/*
* Copyright IBM Corp. 2007
*
*/
#ifndef __ARCH_S390_LIB_UACCESS_H
#define __ARCH_S390_LIB_UACCESS_H
unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n);
unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n);
unsigned long copy_in_user_pt(void __user *to, const void __user *from, unsigned long n);
unsigned long clear_user_pt(void __user *to, unsigned long n);
unsigned long strnlen_user_pt(const char __user *src, unsigned long count);
long strncpy_from_user_pt(char *dst, const char __user *src, long count);
#endif /* __ARCH_S390_LIB_UACCESS_H */
...@@ -106,21 +106,24 @@ void bust_spinlocks(int yes) ...@@ -106,21 +106,24 @@ void bust_spinlocks(int yes)
* Returns the address space associated with the fault. * Returns the address space associated with the fault.
* Returns 0 for kernel space and 1 for user space. * Returns 0 for kernel space and 1 for user space.
*/ */
static inline int user_space_fault(unsigned long trans_exc_code) static inline int user_space_fault(struct pt_regs *regs)
{ {
unsigned long trans_exc_code;
/* /*
* The lowest two bits of the translation exception * The lowest two bits of the translation exception
* identification indicate which paging table was used. * identification indicate which paging table was used.
*/ */
trans_exc_code &= 3; trans_exc_code = regs->int_parm_long & 3;
if (trans_exc_code == 2) if (trans_exc_code == 3) /* home space -> kernel */
/* Access via secondary space, set_fs setting decides */ return 0;
if (user_mode(regs))
return 1;
if (trans_exc_code == 2) /* secondary space -> set_fs */
return current->thread.mm_segment.ar4; return current->thread.mm_segment.ar4;
/* if (current->flags & PF_VCPU)
* Access via primary space or access register is from user space return 1;
* and access via home space is from the kernel. return 0;
*/
return trans_exc_code != 3;
} }
static inline void report_user_fault(struct pt_regs *regs, long signr) static inline void report_user_fault(struct pt_regs *regs, long signr)
...@@ -172,7 +175,7 @@ static noinline void do_no_context(struct pt_regs *regs) ...@@ -172,7 +175,7 @@ static noinline void do_no_context(struct pt_regs *regs)
* terminate things with extreme prejudice. * terminate things with extreme prejudice.
*/ */
address = regs->int_parm_long & __FAIL_ADDR_MASK; address = regs->int_parm_long & __FAIL_ADDR_MASK;
if (!user_space_fault(regs->int_parm_long)) if (!user_space_fault(regs))
printk(KERN_ALERT "Unable to handle kernel pointer dereference" printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address); " at virtual kernel address %p\n", (void *)address);
else else
...@@ -296,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access) ...@@ -296,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context. * user context.
*/ */
fault = VM_FAULT_BADCONTEXT; fault = VM_FAULT_BADCONTEXT;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
goto out; goto out;
address = trans_exc_code & __FAIL_ADDR_MASK; address = trans_exc_code & __FAIL_ADDR_MASK;
...@@ -441,30 +444,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs) ...@@ -441,30 +444,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
do_fault_error(regs, fault); do_fault_error(regs, fault);
} }
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
struct pt_regs regs;
int access, fault;
/* Emulate a uaccess fault from kernel mode. */
regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
if (!irqs_disabled())
regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
regs.psw.addr = (unsigned long) __builtin_return_address(0);
regs.psw.addr |= PSW_ADDR_AMODE;
regs.int_code = pgm_int_code;
regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
access = write ? VM_WRITE : VM_READ;
fault = do_exception(&regs, access);
/*
* Since the fault happened in kernel mode while performing a uaccess
* all we need to do now is emulating a fixup in case "fault" is not
* zero.
* For the calling uaccess functions this results always in -EFAULT.
*/
return fault ? -EFAULT : 0;
}
#ifdef CONFIG_PFAULT #ifdef CONFIG_PFAULT
/* /*
* 'pfault' pseudo page faults routines. * 'pfault' pseudo page faults routines.
...@@ -645,7 +624,7 @@ static int __init pfault_irq_init(void) ...@@ -645,7 +624,7 @@ static int __init pfault_irq_init(void)
{ {
int rc; int rc;
rc = register_external_interrupt(0x2603, pfault_interrupt); rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
if (rc) if (rc)
goto out_extint; goto out_extint;
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
...@@ -656,7 +635,7 @@ static int __init pfault_irq_init(void) ...@@ -656,7 +635,7 @@ static int __init pfault_irq_init(void)
return 0; return 0;
out_pfault: out_pfault:
unregister_external_interrupt(0x2603, pfault_interrupt); unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint: out_extint:
pfault_disable = 1; pfault_disable = 1;
return rc; return rc;
......
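As a reading aid (this block is not in the patch), the decision that the reworked user_space_fault() encodes can be written out as follows; the meaning of the low two bits of the translation-exception identification is the standard s390 convention.

/* trans_exc_code & 3 (s390 translation-exception identification):
 *   0 - primary space    1 - access-register mode
 *   2 - secondary space  3 - home space
 *
 * Reworked decision, simplified:
 *   home space                        -> kernel fault
 *   PSW in user mode                  -> user fault
 *   secondary space (kernel uaccess)  -> set_fs() decides
 *                                        (current->thread.mm_segment.ar4)
 *   task with PF_VCPU set (KVM host)  -> treated as a user/guest fault
 *   everything else                   -> kernel fault
 */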
...@@ -123,10 +123,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, ...@@ -123,10 +123,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
pmd_t *pmdp = (pmd_t *) ptep; pmd_t *pmdp = (pmd_t *) ptep;
pte_t pte = huge_ptep_get(ptep); pte_t pte = huge_ptep_get(ptep);
if (MACHINE_HAS_IDTE) pmdp_flush_direct(mm, addr, pmdp);
__pmd_idte(addr, pmdp);
else
__pmd_csp(pmdp);
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
return pte; return pte;
} }
......
...@@ -124,8 +124,6 @@ void __init paging_init(void) ...@@ -124,8 +124,6 @@ void __init paging_init(void)
__ctl_load(S390_lowcore.kernel_asce, 13, 13); __ctl_load(S390_lowcore.kernel_asce, 13, 13);
arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
atomic_set(&init_mm.context.attach_count, 1);
sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init(); sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
...@@ -136,6 +134,11 @@ void __init paging_init(void) ...@@ -136,6 +134,11 @@ void __init paging_init(void)
void __init mem_init(void) void __init mem_init(void)
{ {
if (MACHINE_HAS_TLB_LC)
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(0, mm_cpumask(&init_mm));
atomic_set(&init_mm.context.attach_count, 1);
max_mapnr = max_low_pfn; max_mapnr = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
......
...@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg) ...@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg)
struct mm_struct *mm = arg; struct mm_struct *mm = arg;
if (current->active_mm == mm) if (current->active_mm == mm)
update_mm(mm, current); update_user_asce(mm, 1);
__tlb_flush_local(); __tlb_flush_local();
} }
...@@ -107,8 +107,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) ...@@ -107,8 +107,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{ {
pgd_t *pgd; pgd_t *pgd;
if (current->active_mm == mm) if (current->active_mm == mm) {
clear_user_asce(mm, 1);
__tlb_flush_mm(mm); __tlb_flush_mm(mm);
}
while (mm->context.asce_limit > limit) { while (mm->context.asce_limit > limit) {
pgd = mm->pgd; pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
...@@ -132,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) ...@@ -132,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
crst_table_free(mm, (unsigned long *) pgd); crst_table_free(mm, (unsigned long *) pgd);
} }
if (current->active_mm == mm) if (current->active_mm == mm)
update_mm(mm, current); update_user_asce(mm, 1);
} }
#endif #endif
...@@ -198,7 +200,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) ...@@ -198,7 +200,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
static void gmap_flush_tlb(struct gmap *gmap) static void gmap_flush_tlb(struct gmap *gmap)
{ {
if (MACHINE_HAS_IDTE) if (MACHINE_HAS_IDTE)
__tlb_flush_idte((unsigned long) gmap->table | __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
_ASCE_TYPE_REGION1); _ASCE_TYPE_REGION1);
else else
__tlb_flush_global(); __tlb_flush_global();
...@@ -217,7 +219,7 @@ void gmap_free(struct gmap *gmap) ...@@ -217,7 +219,7 @@ void gmap_free(struct gmap *gmap)
/* Flush tlb. */ /* Flush tlb. */
if (MACHINE_HAS_IDTE) if (MACHINE_HAS_IDTE)
__tlb_flush_idte((unsigned long) gmap->table | __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
_ASCE_TYPE_REGION1); _ASCE_TYPE_REGION1);
else else
__tlb_flush_global(); __tlb_flush_global();
......
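The cpu_attach_mask bookkeeping added in the smp.c and mm/init.c hunks above pays off at flush time. The following is an illustrative sketch only, under the assumption that a flush helper may stay CPU-local when no other CPU has ever attached the address space; the real helpers live in asm/tlbflush.h and are not part of the hunks shown here.

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <asm/setup.h>          /* MACHINE_HAS_TLB_LC */
#include <asm/tlbflush.h>       /* __tlb_flush_local(), __tlb_flush_global() */

/* Illustrative decision between a CPU-local and a global TLB flush. */
static inline void example_flush_mm(struct mm_struct *mm)
{
        if (MACHINE_HAS_TLB_LC &&
            cpumask_equal(&mm->context.cpu_attach_mask,
                          cpumask_of(smp_processor_id())))
                __tlb_flush_local();    /* only this CPU can hold entries */
        else
                __tlb_flush_global();   /* another CPU may have attached the mm */
}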
...@@ -138,7 +138,6 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) ...@@ -138,7 +138,6 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
} }
ret = 0; ret = 0;
out: out:
flush_tlb_kernel_range(start, end);
return ret; return ret;
} }
...@@ -265,7 +264,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) ...@@ -265,7 +264,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
memset((void *)start, 0, end - start); memset((void *)start, 0, end - start);
ret = 0; ret = 0;
out: out:
flush_tlb_kernel_range(start, end);
return ret; return ret;
} }
......
...@@ -1033,7 +1033,7 @@ int hwsampler_setup(void) ...@@ -1033,7 +1033,7 @@ int hwsampler_setup(void)
max_sampler_rate = cb->qsi.max_sampl_rate; max_sampler_rate = cb->qsi.max_sampl_rate;
} }
} }
register_external_interrupt(0x1407, hws_ext_handler); register_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);
hws_state = HWS_DEALLOCATED; hws_state = HWS_DEALLOCATED;
rc = 0; rc = 0;
...@@ -1068,7 +1068,7 @@ int hwsampler_shutdown(void) ...@@ -1068,7 +1068,7 @@ int hwsampler_shutdown(void)
hws_wq = NULL; hws_wq = NULL;
} }
unregister_external_interrupt(0x1407, hws_ext_handler); unregister_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);
hws_state = HWS_INIT; hws_state = HWS_INIT;
rc = 0; rc = 0;
} }
......
...@@ -646,7 +646,7 @@ dasd_diag_init(void) ...@@ -646,7 +646,7 @@ dasd_diag_init(void)
ASCEBC(dasd_diag_discipline.ebcname, 4); ASCEBC(dasd_diag_discipline.ebcname, 4);
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_interrupt(0x2603, dasd_ext_handler); register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline; dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0; return 0;
} }
...@@ -654,7 +654,7 @@ dasd_diag_init(void) ...@@ -654,7 +654,7 @@ dasd_diag_init(void)
static void __exit static void __exit
dasd_diag_cleanup(void) dasd_diag_cleanup(void)
{ {
unregister_external_interrupt(0x2603, dasd_ext_handler); unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
dasd_diag_discipline_pointer = NULL; dasd_diag_discipline_pointer = NULL;
} }
......
...@@ -632,6 +632,8 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data) ...@@ -632,6 +632,8 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
raw3270_size_device_done(rp); raw3270_size_device_done(rp);
} else } else
raw3270_writesf_readpart(rp); raw3270_writesf_readpart(rp);
memset(&rp->init_reset, 0, sizeof(rp->init_reset));
memset(&rp->init_data, 0, sizeof(rp->init_data));
} }
static int static int
...@@ -639,9 +641,10 @@ __raw3270_reset_device(struct raw3270 *rp) ...@@ -639,9 +641,10 @@ __raw3270_reset_device(struct raw3270 *rp)
{ {
int rc; int rc;
/* Check if reset is already pending */
if (rp->init_reset.view)
return -EBUSY;
/* Store reset data stream to init_data/init_reset */ /* Store reset data stream to init_data/init_reset */
memset(&rp->init_reset, 0, sizeof(rp->init_reset));
memset(&rp->init_data, 0, sizeof(rp->init_data));
rp->init_data[0] = TW_KR; rp->init_data[0] = TW_KR;
rp->init_reset.ccw.cmd_code = TC_EWRITEA; rp->init_reset.ccw.cmd_code = TC_EWRITEA;
rp->init_reset.ccw.flags = CCW_FLAG_SLI; rp->init_reset.ccw.flags = CCW_FLAG_SLI;
...@@ -850,7 +853,7 @@ raw3270_create_device(struct ccw_device *cdev) ...@@ -850,7 +853,7 @@ raw3270_create_device(struct ccw_device *cdev)
char *ascebc; char *ascebc;
int rc; int rc;
rp = kmalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
if (!rp) if (!rp)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ascebc = kmalloc(256, GFP_KERNEL); ascebc = kmalloc(256, GFP_KERNEL);
......
...@@ -91,6 +91,9 @@ static struct sclp_req sclp_suspend_req; ...@@ -91,6 +91,9 @@ static struct sclp_req sclp_suspend_req;
/* Timer for request retries. */ /* Timer for request retries. */
static struct timer_list sclp_request_timer; static struct timer_list sclp_request_timer;
/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;
/* Internal state: is the driver initialized? */ /* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t { static volatile enum sclp_init_state_t {
sclp_init_state_uninitialized, sclp_init_state_uninitialized,
...@@ -215,6 +218,76 @@ sclp_request_timeout(unsigned long data) ...@@ -215,6 +218,76 @@ sclp_request_timeout(unsigned long data)
sclp_process_queue(); sclp_process_queue();
} }
/*
* Returns the expire value in jiffies of the next pending request timeout,
* if any. Needs to be called with sclp_lock.
*/
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
unsigned long expires_next = 0;
struct sclp_req *req;
list_for_each_entry(req, &sclp_req_queue, list) {
if (!req->queue_expires)
continue;
if (!expires_next ||
(time_before(req->queue_expires, expires_next)))
expires_next = req->queue_expires;
}
return expires_next;
}
/*
* Returns expired request, if any, and removes it from the list.
*/
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
unsigned long flags, now;
struct sclp_req *req;
spin_lock_irqsave(&sclp_lock, flags);
now = jiffies;
/* Don't need list_for_each_safe because we break out after list_del */
list_for_each_entry(req, &sclp_req_queue, list) {
if (!req->queue_expires)
continue;
if (time_before_eq(req->queue_expires, now)) {
if (req->status == SCLP_REQ_QUEUED) {
req->status = SCLP_REQ_QUEUED_TIMEOUT;
list_del(&req->list);
goto out;
}
}
}
req = NULL;
out:
spin_unlock_irqrestore(&sclp_lock, flags);
return req;
}
/*
* Timeout handler for queued requests. Removes request from list and
* invokes callback. This timer can be set per request in situations where
* waiting too long would be harmful to the system, e.g. during SE reboot.
*/
static void sclp_req_queue_timeout(unsigned long data)
{
unsigned long flags, expires_next;
struct sclp_req *req;
do {
req = __sclp_req_queue_remove_expired_req();
if (req && req->callback)
req->callback(req, req->callback_data);
} while (req);
spin_lock_irqsave(&sclp_lock, flags);
expires_next = __sclp_req_queue_find_next_timeout();
if (expires_next)
mod_timer(&sclp_queue_timer, expires_next);
spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Try to start a request. Return zero if the request was successfully /* Try to start a request. Return zero if the request was successfully
* started or if it will be started at a later time. Return non-zero otherwise. * started or if it will be started at a later time. Return non-zero otherwise.
* Called while sclp_lock is locked. */ * Called while sclp_lock is locked. */
...@@ -317,6 +390,13 @@ sclp_add_request(struct sclp_req *req) ...@@ -317,6 +390,13 @@ sclp_add_request(struct sclp_req *req)
req->start_count = 0; req->start_count = 0;
list_add_tail(&req->list, &sclp_req_queue); list_add_tail(&req->list, &sclp_req_queue);
rc = 0; rc = 0;
if (req->queue_timeout) {
req->queue_expires = jiffies + req->queue_timeout * HZ;
if (!timer_pending(&sclp_queue_timer) ||
time_after(sclp_queue_timer.expires, req->queue_expires))
mod_timer(&sclp_queue_timer, req->queue_expires);
} else
req->queue_expires = 0;
/* Start if request is first in list */ /* Start if request is first in list */
if (sclp_running_state == sclp_running_state_idle && if (sclp_running_state == sclp_running_state_idle &&
req->list.prev == &sclp_req_queue) { req->list.prev == &sclp_req_queue) {
...@@ -892,7 +972,7 @@ sclp_check_interface(void) ...@@ -892,7 +972,7 @@ sclp_check_interface(void)
spin_lock_irqsave(&sclp_lock, flags); spin_lock_irqsave(&sclp_lock, flags);
/* Prepare init mask command */ /* Prepare init mask command */
rc = register_external_interrupt(0x2401, sclp_check_handler); rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
if (rc) { if (rc) {
spin_unlock_irqrestore(&sclp_lock, flags); spin_unlock_irqrestore(&sclp_lock, flags);
return rc; return rc;
...@@ -925,7 +1005,7 @@ sclp_check_interface(void) ...@@ -925,7 +1005,7 @@ sclp_check_interface(void)
} else } else
rc = -EBUSY; rc = -EBUSY;
} }
unregister_external_interrupt(0x2401, sclp_check_handler); unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
spin_unlock_irqrestore(&sclp_lock, flags); spin_unlock_irqrestore(&sclp_lock, flags);
return rc; return rc;
} }
...@@ -1113,6 +1193,8 @@ sclp_init(void) ...@@ -1113,6 +1193,8 @@ sclp_init(void)
INIT_LIST_HEAD(&sclp_reg_list); INIT_LIST_HEAD(&sclp_reg_list);
list_add(&sclp_state_change_event.list, &sclp_reg_list); list_add(&sclp_state_change_event.list, &sclp_reg_list);
init_timer(&sclp_request_timer); init_timer(&sclp_request_timer);
init_timer(&sclp_queue_timer);
sclp_queue_timer.function = sclp_req_queue_timeout;
/* Check interface */ /* Check interface */
spin_unlock_irqrestore(&sclp_lock, flags); spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_check_interface(); rc = sclp_check_interface();
...@@ -1124,7 +1206,7 @@ sclp_init(void) ...@@ -1124,7 +1206,7 @@ sclp_init(void)
if (rc) if (rc)
goto fail_init_state_uninitialized; goto fail_init_state_uninitialized;
/* Register interrupt handler */ /* Register interrupt handler */
rc = register_external_interrupt(0x2401, sclp_interrupt_handler); rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
if (rc) if (rc)
goto fail_unregister_reboot_notifier; goto fail_unregister_reboot_notifier;
sclp_init_state = sclp_init_state_initialized; sclp_init_state = sclp_init_state_initialized;
......
...@@ -133,6 +133,11 @@ struct sclp_req { ...@@ -133,6 +133,11 @@ struct sclp_req {
/* Callback that is called after reaching final status. */ /* Callback that is called after reaching final status. */
void (*callback)(struct sclp_req *, void *data); void (*callback)(struct sclp_req *, void *data);
void *callback_data; void *callback_data;
int queue_timeout; /* request queue timeout (sec), set by
caller of sclp_add_request(), if
needed */
/* Internal fields */
unsigned long queue_expires; /* request queue timeout (jiffies) */
}; };
#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */ #define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */
...@@ -140,6 +145,9 @@ struct sclp_req { ...@@ -140,6 +145,9 @@ struct sclp_req {
#define SCLP_REQ_RUNNING 0x02 /* request is currently running */ #define SCLP_REQ_RUNNING 0x02 /* request is currently running */
#define SCLP_REQ_DONE 0x03 /* request is completed successfully */ #define SCLP_REQ_DONE 0x03 /* request is completed successfully */
#define SCLP_REQ_FAILED 0x05 /* request is finally failed */ #define SCLP_REQ_FAILED 0x05 /* request is finally failed */
#define SCLP_REQ_QUEUED_TIMEOUT 0x06 /* request on queue timed out */
#define SCLP_QUEUE_INTERVAL 5 /* timeout interval for request queue */
/* function pointers that a high level driver has to use for registration */ /* function pointers that a high level driver has to use for registration */
/* of some routines it wants to be called from the low level driver */ /* of some routines it wants to be called from the low level driver */
...@@ -173,6 +181,7 @@ int sclp_deactivate(void); ...@@ -173,6 +181,7 @@ int sclp_deactivate(void);
int sclp_reactivate(void); int sclp_reactivate(void);
int sclp_service_call(sclp_cmdw_t command, void *sccb); int sclp_service_call(sclp_cmdw_t command, void *sccb);
int sclp_sync_request(sclp_cmdw_t command, void *sccb); int sclp_sync_request(sclp_cmdw_t command, void *sccb);
int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
int sclp_sdias_init(void); int sclp_sdias_init(void);
void sclp_sdias_exit(void); void sclp_sdias_exit(void);
......
...@@ -36,6 +36,11 @@ static void sclp_sync_callback(struct sclp_req *req, void *data) ...@@ -36,6 +36,11 @@ static void sclp_sync_callback(struct sclp_req *req, void *data)
} }
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb) int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
return sclp_sync_request_timeout(cmd, sccb, 0);
}
int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{ {
struct completion completion; struct completion completion;
struct sclp_req *request; struct sclp_req *request;
...@@ -44,6 +49,8 @@ int sclp_sync_request(sclp_cmdw_t cmd, void *sccb) ...@@ -44,6 +49,8 @@ int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
request = kzalloc(sizeof(*request), GFP_KERNEL); request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request) if (!request)
return -ENOMEM; return -ENOMEM;
if (timeout)
request->queue_timeout = timeout;
request->command = cmd; request->command = cmd;
request->sccb = sccb; request->sccb = sccb;
request->status = SCLP_REQ_FILLED; request->status = SCLP_REQ_FILLED;
...@@ -110,7 +117,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info) ...@@ -110,7 +117,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info)
if (!sccb) if (!sccb)
return -ENOMEM; return -ENOMEM;
sccb->header.length = sizeof(*sccb); sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb); rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
SCLP_QUEUE_INTERVAL);
if (rc) if (rc)
goto out; goto out;
if (sccb->header.response_code != 0x0010) { if (sccb->header.response_code != 0x0010) {
...@@ -144,7 +152,7 @@ static int do_cpu_configure(sclp_cmdw_t cmd) ...@@ -144,7 +152,7 @@ static int do_cpu_configure(sclp_cmdw_t cmd)
if (!sccb) if (!sccb)
return -ENOMEM; return -ENOMEM;
sccb->header.length = sizeof(*sccb); sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request(cmd, sccb); rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
if (rc) if (rc)
goto out; goto out;
switch (sccb->header.response_code) { switch (sccb->header.response_code) {
...@@ -214,7 +222,7 @@ static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) ...@@ -214,7 +222,7 @@ static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
return -ENOMEM; return -ENOMEM;
sccb->header.length = PAGE_SIZE; sccb->header.length = PAGE_SIZE;
sccb->rn = rn; sccb->rn = rn;
rc = sclp_sync_request(cmd, sccb); rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
if (rc) if (rc)
goto out; goto out;
switch (sccb->header.response_code) { switch (sccb->header.response_code) {
...@@ -269,7 +277,8 @@ static int sclp_attach_storage(u8 id) ...@@ -269,7 +277,8 @@ static int sclp_attach_storage(u8 id)
if (!sccb) if (!sccb)
return -ENOMEM; return -ENOMEM;
sccb->header.length = PAGE_SIZE; sccb->header.length = PAGE_SIZE;
rc = sclp_sync_request(0x00080001 | id << 8, sccb); rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
SCLP_QUEUE_INTERVAL);
if (rc) if (rc)
goto out; goto out;
switch (sccb->header.response_code) { switch (sccb->header.response_code) {
......
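A minimal sketch (not taken from the patch) of how an asynchronous caller could use the new per-request queue timeout directly; it assumes access to the driver-private sclp.h, reuses SCLP_QUEUE_INTERVAL as the timeout value, and my_callback/my_issue_request are hypothetical names.

/* Hypothetical asynchronous SCLP request with a queue timeout. */
static void my_callback(struct sclp_req *req, void *data)
{
        if (req->status == SCLP_REQ_QUEUED_TIMEOUT)
                pr_warn("request timed out while queued\n");
        complete((struct completion *) data);
}

static int my_issue_request(struct sclp_req *req, void *sccb,
                            struct completion *done)
{
        req->command = SCLP_CMDW_READ_CPU_INFO;   /* any valid command word */
        req->sccb = sccb;
        req->status = SCLP_REQ_FILLED;
        req->callback = my_callback;
        req->callback_data = done;
        req->queue_timeout = SCLP_QUEUE_INTERVAL; /* seconds; 0 disables it */
        return sclp_add_request(req);
}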
...@@ -78,7 +78,8 @@ tape_std_assign(struct tape_device *device) ...@@ -78,7 +78,8 @@ tape_std_assign(struct tape_device *device)
rc = tape_do_io_interruptible(device, request); rc = tape_do_io_interruptible(device, request);
del_timer(&timeout); del_timer_sync(&timeout);
destroy_timer_on_stack(&timeout);
if (rc != 0) { if (rc != 0) {
DBF_EVENT(3, "%08x: assign failed - device might be busy\n", DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
......
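For context, this is the complete on-stack timer pattern that the tape fix above (and the matching lcs fix further down) rounds off. The setup with init_timer_on_stack() is assumed, since only the teardown lines appear in the hunk, and my_timeout/my_wait_with_timeout are illustrative names.

#include <linux/timer.h>
#include <linux/jiffies.h>

static void my_timeout(unsigned long data)
{
        /* e.g. wake up a waiter or cancel pending I/O */
}

static int my_wait_with_timeout(void)
{
        struct timer_list timer;

        init_timer_on_stack(&timer);
        timer.function = my_timeout;
        timer.data = 0;
        timer.expires = jiffies + 5 * HZ;
        add_timer(&timer);

        /* ... wait for the I/O or an interrupt ... */

        del_timer_sync(&timer);         /* handler is guaranteed not to run anymore */
        destroy_timer_on_stack(&timer); /* release debug-object tracking */
        return 0;
}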
...@@ -626,8 +626,8 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) ...@@ -626,8 +626,8 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
return -ENOMEM; return -ENOMEM;
if (copy_from_user(ep11_dev_list.targets, if (copy_from_user(ep11_dev_list.targets,
(struct ep11_target_dev *)xcrb->targets, (struct ep11_target_dev __force __user *)
xcrb->targets_num * xcrb->targets, xcrb->targets_num *
sizeof(struct ep11_target_dev))) sizeof(struct ep11_target_dev)))
return -EFAULT; return -EFAULT;
} }
......
...@@ -315,6 +315,10 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, ...@@ -315,6 +315,10 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen; char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
char *function_code; char *function_code;
if (CEIL4(xcRB->request_control_blk_length) <
xcRB->request_control_blk_length)
return -EINVAL; /* overflow after alignment*/
/* length checks */ /* length checks */
ap_msg->length = sizeof(struct type6_hdr) + ap_msg->length = sizeof(struct type6_hdr) +
CEIL4(xcRB->request_control_blk_length) + CEIL4(xcRB->request_control_blk_length) +
...@@ -333,6 +337,10 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, ...@@ -333,6 +337,10 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
return -EINVAL; return -EINVAL;
} }
if (CEIL4(xcRB->reply_control_blk_length) <
xcRB->reply_control_blk_length)
return -EINVAL; /* overflow after alignment*/
replylen = sizeof(struct type86_fmt2_msg) + replylen = sizeof(struct type86_fmt2_msg) +
CEIL4(xcRB->reply_control_blk_length) + CEIL4(xcRB->reply_control_blk_length) +
xcRB->reply_data_length; xcRB->reply_data_length;
...@@ -415,12 +423,18 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, ...@@ -415,12 +423,18 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
unsigned int dom_val; /* domain id */ unsigned int dom_val; /* domain id */
} __packed * payload_hdr; } __packed * payload_hdr;
if (CEIL4(xcRB->req_len) < xcRB->req_len)
return -EINVAL; /* overflow after alignment*/
/* length checks */ /* length checks */
ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len; ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE - if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
(sizeof(struct type6_hdr))) (sizeof(struct type6_hdr)))
return -EINVAL; return -EINVAL;
if (CEIL4(xcRB->resp_len) < xcRB->resp_len)
return -EINVAL; /* overflow after alignment*/
if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE - if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE -
(sizeof(struct type86_fmt2_msg))) (sizeof(struct type86_fmt2_msg)))
return -EINVAL; return -EINVAL;
...@@ -432,7 +446,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, ...@@ -432,7 +446,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
/* Import CPRB data from the ioctl input parameter */ /* Import CPRB data from the ioctl input parameter */
if (copy_from_user(&(msg->cprbx.cprb_len), if (copy_from_user(&(msg->cprbx.cprb_len),
(char *)xcRB->req, xcRB->req_len)) { (char __force __user *)xcRB->req, xcRB->req_len)) {
return -EFAULT; return -EFAULT;
} }
...@@ -645,7 +659,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, ...@@ -645,7 +659,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
return -EINVAL; return -EINVAL;
/* Copy response CPRB to user */ /* Copy response CPRB to user */
if (copy_to_user((char *)xcRB->resp, if (copy_to_user((char __force __user *)xcRB->resp,
data + msg->fmt2.offset1, msg->fmt2.count1)) data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT; return -EFAULT;
xcRB->resp_len = msg->fmt2.count1; xcRB->resp_len = msg->fmt2.count1;
......
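A worked example (not from the patch) of why the new "CEIL4(len) < len" tests catch overflow after alignment: rounding an unsigned length up to the next multiple of four can only produce a smaller value if the addition wrapped around. The CEIL4 definition below is an assumed round-up macro; the real one is defined in the zcrypt msg-type 6 code.

#include <stdio.h>
#include <limits.h>

#define CEIL4(x) (((x) + 3U) & ~3U)     /* assumed round-up-to-4 */

int main(void)
{
        unsigned int ok = 10;                   /* rounds up to 12 */
        unsigned int bad = UINT_MAX - 1;        /* 0xfffffffe wraps around to 0 */

        printf("CEIL4(%u) = %u\n", ok, CEIL4(ok));
        printf("CEIL4(%u) = %u  (smaller than the input, so the aligned\n"
               "length overflowed and the request must be rejected)\n",
               bad, CEIL4(bad));
        return 0;
}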
...@@ -477,7 +477,7 @@ static int __init kvm_devices_init(void) ...@@ -477,7 +477,7 @@ static int __init kvm_devices_init(void)
INIT_WORK(&hotplug_work, hotplug_devices); INIT_WORK(&hotplug_work, hotplug_devices);
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_interrupt(0x2603, kvm_extint_handler); register_external_irq(EXT_IRQ_CP_SERVICE, kvm_extint_handler);
scan_devices(); scan_devices();
return 0; return 0;
......
...@@ -899,6 +899,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, ...@@ -899,6 +899,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
add_timer(&timer); add_timer(&timer);
wait_event(reply->wait_q, reply->received); wait_event(reply->wait_q, reply->received);
del_timer_sync(&timer); del_timer_sync(&timer);
destroy_timer_on_stack(&timer);
LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
rc = reply->rc; rc = reply->rc;
lcs_put_reply(reply); lcs_put_reply(reply);
......
...@@ -2028,7 +2028,7 @@ static int __init iucv_init(void) ...@@ -2028,7 +2028,7 @@ static int __init iucv_init(void)
rc = iucv_query_maxconn(); rc = iucv_query_maxconn();
if (rc) if (rc)
goto out_ctl; goto out_ctl;
rc = register_external_interrupt(0x4000, iucv_external_interrupt); rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
if (rc) if (rc)
goto out_ctl; goto out_ctl;
iucv_root = root_device_register("iucv"); iucv_root = root_device_register("iucv");
...@@ -2078,7 +2078,7 @@ static int __init iucv_init(void) ...@@ -2078,7 +2078,7 @@ static int __init iucv_init(void)
root_device_unregister(iucv_root); root_device_unregister(iucv_root);
out_int: out_int:
unregister_external_interrupt(0x4000, iucv_external_interrupt); unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
out_ctl: out_ctl:
ctl_clear_bit(0, 1); ctl_clear_bit(0, 1);
out: out:
...@@ -2109,7 +2109,7 @@ static void __exit iucv_exit(void) ...@@ -2109,7 +2109,7 @@ static void __exit iucv_exit(void)
cpu_notifier_register_done(); cpu_notifier_register_done();
root_device_unregister(iucv_root); root_device_unregister(iucv_root);
bus_unregister(&iucv_bus); bus_unregister(&iucv_bus);
unregister_external_interrupt(0x4000, iucv_external_interrupt); unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
} }
subsys_initcall(iucv_init); subsys_initcall(iucv_init);
......