Commit afa538f0 authored by Paolo Bonzini

Merge tag 'kvm-s390-20140516' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

1. Correct locking for lazy storage key handling
   A test loop with multiple CPUs triggered a race in the lazy storage
   key handling introduced by commit 934bc131
   (KVM: s390: Allow skeys to be enabled for the current process). This
   race should not happen with Linux guests, but let's fix it anyway.
   The patch touches code outside /kvm/, but comes from the s390 maintainer.

2. Better handling of broken guests
   If we detect a program check loop, we stop the guest instead of
   wasting CPU cycles.

3. Better handling of MVPG emulation
   The move page handling is improved to be architecturally correct.

4. Trace point rework
   Let's rework the kvm trace points to share a common header file (for
   later perf usage) and provide a table-based instruction decoder.

5. Interpretive execution of SIGP external call
   Let the hardware handle most cases of SIGP external call (IPI) and
   wire up the fixup code for the corner cases.

6. Initial preparations for the IBC facility
   Prepare the code to handle instruction blocking.
parents d9f89b88 fda902cb
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -32,8 +32,10 @@
 #define KVM_NR_IRQCHIPS 1
 #define KVM_IRQCHIP_NUM_PINS 4096
 
+#define SIGP_CTRL_C	0x00800000
+
 struct sca_entry {
-	atomic_t scn;
+	atomic_t ctrl;
 	__u32 reserved;
 	__u64 sda;
 	__u64 reserved2[2];
@@ -80,7 +82,9 @@ struct sca_block {
 struct kvm_s390_sie_block {
 	atomic_t cpuflags;		/* 0x0000 */
-	__u32	prefix;			/* 0x0004 */
+	__u32 : 1;			/* 0x0004 */
+	__u32 prefix : 18;
+	__u32 : 13;
 	__u8	reserved08[4];		/* 0x0008 */
 #define PROG_IN_SIE (1<<0)
 	__u32	prog0c;			/* 0x000c */
...
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -66,5 +66,6 @@ int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
 unsigned long sclp_get_hsa_size(void);
 void sclp_early_detect(void);
 int sclp_has_siif(void);
+unsigned int sclp_get_ibc(void);
 
 #endif /* _ASM_S390_SCLP_H */

--- /dev/null
+++ b/arch/s390/include/uapi/asm/sie.h
(new file; contents shown in full below without '+' markers)
#ifndef _UAPI_ASM_S390_SIE_H
#define _UAPI_ASM_S390_SIE_H
#include <asm/sigp.h>
#define diagnose_codes \
{ 0x10, "DIAG (0x10) release pages" }, \
{ 0x44, "DIAG (0x44) time slice end" }, \
{ 0x9c, "DIAG (0x9c) time slice end directed" }, \
{ 0x204, "DIAG (0x204) logical-cpu utilization" }, \
{ 0x258, "DIAG (0x258) page-reference services" }, \
{ 0x308, "DIAG (0x308) ipl functions" }, \
{ 0x500, "DIAG (0x500) KVM virtio functions" }, \
{ 0x501, "DIAG (0x501) KVM breakpoint" }
#define sigp_order_codes \
{ SIGP_SENSE, "SIGP sense" }, \
{ SIGP_EXTERNAL_CALL, "SIGP external call" }, \
{ SIGP_EMERGENCY_SIGNAL, "SIGP emergency signal" }, \
{ SIGP_STOP, "SIGP stop" }, \
{ SIGP_STOP_AND_STORE_STATUS, "SIGP stop and store status" }, \
{ SIGP_SET_ARCHITECTURE, "SIGP set architecture" }, \
{ SIGP_SET_PREFIX, "SIGP set prefix" }, \
{ SIGP_SENSE_RUNNING, "SIGP sense running" }, \
{ SIGP_RESTART, "SIGP restart" }, \
{ SIGP_INITIAL_CPU_RESET, "SIGP initial cpu reset" }, \
{ SIGP_STORE_STATUS_AT_ADDRESS, "SIGP store status at address" }
#define icpt_prog_codes \
{ 0x0001, "Prog Operation" }, \
{ 0x0002, "Prog Privileged Operation" }, \
{ 0x0003, "Prog Execute" }, \
{ 0x0004, "Prog Protection" }, \
{ 0x0005, "Prog Addressing" }, \
{ 0x0006, "Prog Specification" }, \
{ 0x0007, "Prog Data" }, \
{ 0x0008, "Prog Fixedpoint overflow" }, \
{ 0x0009, "Prog Fixedpoint divide" }, \
{ 0x000A, "Prog Decimal overflow" }, \
{ 0x000B, "Prog Decimal divide" }, \
{ 0x000C, "Prog HFP exponent overflow" }, \
{ 0x000D, "Prog HFP exponent underflow" }, \
{ 0x000E, "Prog HFP significance" }, \
{ 0x000F, "Prog HFP divide" }, \
{ 0x0010, "Prog Segment translation" }, \
{ 0x0011, "Prog Page translation" }, \
{ 0x0012, "Prog Translation specification" }, \
{ 0x0013, "Prog Special operation" }, \
{ 0x0015, "Prog Operand" }, \
{ 0x0016, "Prog Trace table" }, \
{ 0x0017, "Prog ASN translation specification" }, \
{ 0x001C, "Prog Space switch event" }, \
{ 0x001D, "Prog HFP square root" }, \
{ 0x001F, "Prog PC translation specification" }, \
{ 0x0020, "Prog AFX translation" }, \
{ 0x0021, "Prog ASX translation" }, \
{ 0x0022, "Prog LX translation" }, \
{ 0x0023, "Prog EX translation" }, \
{ 0x0024, "Prog Primary authority" }, \
{ 0x0025, "Prog Secondary authority" }, \
{ 0x0026, "Prog LFX translation exception" }, \
{ 0x0027, "Prog LSX translation exception" }, \
{ 0x0028, "Prog ALET specification" }, \
{ 0x0029, "Prog ALEN translation" }, \
{ 0x002A, "Prog ALE sequence" }, \
{ 0x002B, "Prog ASTE validity" }, \
{ 0x002C, "Prog ASTE sequence" }, \
{ 0x002D, "Prog Extended authority" }, \
{ 0x002E, "Prog LSTE sequence" }, \
{ 0x002F, "Prog ASTE instance" }, \
{ 0x0030, "Prog Stack full" }, \
{ 0x0031, "Prog Stack empty" }, \
{ 0x0032, "Prog Stack specification" }, \
{ 0x0033, "Prog Stack type" }, \
{ 0x0034, "Prog Stack operation" }, \
{ 0x0039, "Prog Region first translation" }, \
{ 0x003A, "Prog Region second translation" }, \
{ 0x003B, "Prog Region third translation" }, \
{ 0x0040, "Prog Monitor event" }, \
{ 0x0080, "Prog PER event" }, \
{ 0x0119, "Prog Crypto operation" }
#define exit_code_ipa0(ipa0, opcode, mnemonic) \
{ (ipa0 << 8 | opcode), #ipa0 " " mnemonic }
#define exit_code(opcode, mnemonic) \
{ opcode, mnemonic }
#define icpt_insn_codes \
exit_code_ipa0(0x01, 0x01, "PR"), \
exit_code_ipa0(0x01, 0x04, "PTFF"), \
exit_code_ipa0(0x01, 0x07, "SCKPF"), \
exit_code_ipa0(0xAA, 0x00, "RINEXT"), \
exit_code_ipa0(0xAA, 0x01, "RION"), \
exit_code_ipa0(0xAA, 0x02, "TRIC"), \
exit_code_ipa0(0xAA, 0x03, "RIOFF"), \
exit_code_ipa0(0xAA, 0x04, "RIEMIT"), \
exit_code_ipa0(0xB2, 0x02, "STIDP"), \
exit_code_ipa0(0xB2, 0x04, "SCK"), \
exit_code_ipa0(0xB2, 0x05, "STCK"), \
exit_code_ipa0(0xB2, 0x06, "SCKC"), \
exit_code_ipa0(0xB2, 0x07, "STCKC"), \
exit_code_ipa0(0xB2, 0x08, "SPT"), \
exit_code_ipa0(0xB2, 0x09, "STPT"), \
exit_code_ipa0(0xB2, 0x0d, "PTLB"), \
exit_code_ipa0(0xB2, 0x10, "SPX"), \
exit_code_ipa0(0xB2, 0x11, "STPX"), \
exit_code_ipa0(0xB2, 0x12, "STAP"), \
exit_code_ipa0(0xB2, 0x14, "SIE"), \
exit_code_ipa0(0xB2, 0x16, "SETR"), \
exit_code_ipa0(0xB2, 0x17, "STETR"), \
exit_code_ipa0(0xB2, 0x18, "PC"), \
exit_code_ipa0(0xB2, 0x20, "SERVC"), \
exit_code_ipa0(0xB2, 0x28, "PT"), \
exit_code_ipa0(0xB2, 0x29, "ISKE"), \
exit_code_ipa0(0xB2, 0x2a, "RRBE"), \
exit_code_ipa0(0xB2, 0x2b, "SSKE"), \
exit_code_ipa0(0xB2, 0x2c, "TB"), \
exit_code_ipa0(0xB2, 0x2e, "PGIN"), \
exit_code_ipa0(0xB2, 0x2f, "PGOUT"), \
exit_code_ipa0(0xB2, 0x30, "CSCH"), \
exit_code_ipa0(0xB2, 0x31, "HSCH"), \
exit_code_ipa0(0xB2, 0x32, "MSCH"), \
exit_code_ipa0(0xB2, 0x33, "SSCH"), \
exit_code_ipa0(0xB2, 0x34, "STSCH"), \
exit_code_ipa0(0xB2, 0x35, "TSCH"), \
exit_code_ipa0(0xB2, 0x36, "TPI"), \
exit_code_ipa0(0xB2, 0x37, "SAL"), \
exit_code_ipa0(0xB2, 0x38, "RSCH"), \
exit_code_ipa0(0xB2, 0x39, "STCRW"), \
exit_code_ipa0(0xB2, 0x3a, "STCPS"), \
exit_code_ipa0(0xB2, 0x3b, "RCHP"), \
exit_code_ipa0(0xB2, 0x3c, "SCHM"), \
exit_code_ipa0(0xB2, 0x40, "BAKR"), \
exit_code_ipa0(0xB2, 0x48, "PALB"), \
exit_code_ipa0(0xB2, 0x4c, "TAR"), \
exit_code_ipa0(0xB2, 0x50, "CSP"), \
exit_code_ipa0(0xB2, 0x54, "MVPG"), \
exit_code_ipa0(0xB2, 0x58, "BSG"), \
exit_code_ipa0(0xB2, 0x5a, "BSA"), \
exit_code_ipa0(0xB2, 0x5f, "CHSC"), \
exit_code_ipa0(0xB2, 0x74, "SIGA"), \
exit_code_ipa0(0xB2, 0x76, "XSCH"), \
exit_code_ipa0(0xB2, 0x78, "STCKE"), \
exit_code_ipa0(0xB2, 0x7c, "STCKF"), \
exit_code_ipa0(0xB2, 0x7d, "STSI"), \
exit_code_ipa0(0xB2, 0xb0, "STFLE"), \
exit_code_ipa0(0xB2, 0xb1, "STFL"), \
exit_code_ipa0(0xB2, 0xb2, "LPSWE"), \
exit_code_ipa0(0xB2, 0xf8, "TEND"), \
exit_code_ipa0(0xB2, 0xfc, "TABORT"), \
exit_code_ipa0(0xB9, 0x1e, "KMAC"), \
exit_code_ipa0(0xB9, 0x28, "PCKMO"), \
exit_code_ipa0(0xB9, 0x2a, "KMF"), \
exit_code_ipa0(0xB9, 0x2b, "KMO"), \
exit_code_ipa0(0xB9, 0x2d, "KMCTR"), \
exit_code_ipa0(0xB9, 0x2e, "KM"), \
exit_code_ipa0(0xB9, 0x2f, "KMC"), \
exit_code_ipa0(0xB9, 0x3e, "KIMD"), \
exit_code_ipa0(0xB9, 0x3f, "KLMD"), \
exit_code_ipa0(0xB9, 0x8a, "CSPG"), \
exit_code_ipa0(0xB9, 0x8d, "EPSW"), \
exit_code_ipa0(0xB9, 0x8e, "IDTE"), \
exit_code_ipa0(0xB9, 0x8f, "CRDTE"), \
exit_code_ipa0(0xB9, 0x9c, "EQBS"), \
exit_code_ipa0(0xB9, 0xa2, "PTF"), \
exit_code_ipa0(0xB9, 0xab, "ESSA"), \
exit_code_ipa0(0xB9, 0xae, "RRBM"), \
exit_code_ipa0(0xB9, 0xaf, "PFMF"), \
exit_code_ipa0(0xE3, 0x03, "LRAG"), \
exit_code_ipa0(0xE3, 0x13, "LRAY"), \
exit_code_ipa0(0xE3, 0x25, "NTSTG"), \
exit_code_ipa0(0xE5, 0x00, "LASP"), \
exit_code_ipa0(0xE5, 0x01, "TPROT"), \
exit_code_ipa0(0xE5, 0x60, "TBEGIN"), \
exit_code_ipa0(0xE5, 0x61, "TBEGINC"), \
exit_code_ipa0(0xEB, 0x25, "STCTG"), \
exit_code_ipa0(0xEB, 0x2f, "LCTLG"), \
exit_code_ipa0(0xEB, 0x60, "LRIC"), \
exit_code_ipa0(0xEB, 0x61, "STRIC"), \
exit_code_ipa0(0xEB, 0x62, "MRIC"), \
exit_code_ipa0(0xEB, 0x8a, "SQBS"), \
exit_code_ipa0(0xC8, 0x01, "ECTG"), \
exit_code(0x0a, "SVC"), \
exit_code(0x80, "SSM"), \
exit_code(0x82, "LPSW"), \
exit_code(0x83, "DIAG"), \
exit_code(0xae, "SIGP"), \
exit_code(0xac, "STNSM"), \
exit_code(0xad, "STOSM"), \
exit_code(0xb1, "LRA"), \
exit_code(0xb6, "STCTL"), \
exit_code(0xb7, "LCTL"), \
exit_code(0xee, "PLO")
#define sie_intercept_code \
{ 0x00, "Host interruption" }, \
{ 0x04, "Instruction" }, \
{ 0x08, "Program interruption" }, \
{ 0x0c, "Instruction and program interruption" }, \
{ 0x10, "External request" }, \
{ 0x14, "External interruption" }, \
{ 0x18, "I/O request" }, \
{ 0x1c, "Wait state" }, \
{ 0x20, "Validity" }, \
{ 0x28, "Stop request" }, \
{ 0x2c, "Operation exception" }, \
{ 0x38, "Partial-execution" }, \
{ 0x3c, "I/O interruption" }, \
{ 0x40, "I/O instruction" }, \
{ 0x48, "Timing subset" }
/*
 * This is the simple interceptable instructions decoder.
 *
 * It is used as a userspace interface and can be used in places
 * that do not allow the use of general decoder functions,
 * such as trace event declarations.
 *
 * Some userspace tools may want to parse this code
 * and would be confused by switch(), if() and other statements,
 * but they can understand the conditional operator.
 */
#define INSN_DECODE_IPA0(ipa0, insn, rshift, mask) \
(insn >> 56) == (ipa0) ? \
((ipa0 << 8) | ((insn >> rshift) & mask)) :
#define INSN_DECODE(insn) (insn >> 56)
/*
* The macro icpt_insn_decoder() takes an intercepted instruction
* and returns a key, which can be used to find a mnemonic name
* of the instruction in the icpt_insn_codes table.
*/
#define icpt_insn_decoder(insn) \
INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xb9, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xe3, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
INSN_DECODE(insn)
#endif /* _UAPI_ASM_S390_SIE_H */
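
Aside (not part of the patch): because the decoder above is a single
conditional-operator expression, it can be evaluated verbatim outside the
kernel. A minimal userspace sketch, assuming the icpt_insn_codes and
icpt_insn_decoder macros from this header are available (e.g. via an
installed uapi copy); the names in main() are purely illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
/* assume icpt_insn_codes / icpt_insn_decoder from asm/sie.h are visible */

struct insn_code {
	uint64_t key;
	const char *name;
};

static const struct insn_code codes[] = { icpt_insn_codes };

static const char *icpt_insn_name(uint64_t insn)
{
	uint64_t key = icpt_insn_decoder(insn);

	for (size_t i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		if (codes[i].key == key)
			return codes[i].name;
	return "unknown";
}

int main(void)
{
	/* MVPG is IPA0 0xb2, opcode 0x54; the intercepted instruction
	 * sits in the top bytes of the 64-bit value. */
	printf("%s\n", icpt_insn_name(0xb254000000000000ULL));
	return 0;
}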
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -23,7 +23,7 @@
 static int diag_release_pages(struct kvm_vcpu *vcpu)
 {
 	unsigned long start, end;
-	unsigned long prefix = vcpu->arch.sie_block->prefix;
+	unsigned long prefix = kvm_s390_get_prefix(vcpu);
 	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
 	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
...
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -30,7 +30,7 @@
 static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
						 unsigned long gra)
 {
-	unsigned long prefix = vcpu->arch.sie_block->prefix;
+	unsigned long prefix = kvm_s390_get_prefix(vcpu);
 	if (gra < 2 * PAGE_SIZE)
 		gra += prefix;
@@ -99,7 +99,7 @@ static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
 	unsigned long __gpa;					\
								\
 	__gpa = (unsigned long)(gra);				\
-	__gpa += __vcpu->arch.sie_block->prefix;		\
+	__gpa += kvm_s390_get_prefix(__vcpu);			\
 	kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x));	\
 })
@@ -124,7 +124,7 @@ static inline __must_check
 int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		   unsigned long len)
 {
-	unsigned long gpa = gra + vcpu->arch.sie_block->prefix;
+	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
 	return kvm_write_guest(vcpu->kvm, gpa, data, len);
 }
@@ -150,7 +150,7 @@ static inline __must_check
 int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		  unsigned long len)
 {
-	unsigned long gpa = gra + vcpu->arch.sie_block->prefix;
+	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
 }
...
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -195,6 +195,7 @@ static int handle_itdb(struct kvm_vcpu *vcpu)
 static int handle_prog(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_pgm_info pgm_info;
+	psw_t psw;
 	int rc;
 
 	vcpu->stat.exit_program_interruption++;
@@ -207,7 +208,14 @@ static int handle_prog(struct kvm_vcpu *vcpu)
 	}
 
 	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
+	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
+		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
+		if (rc)
+			return rc;
+		/* Avoid endless loops of specification exceptions */
+		if (!is_valid_psw(&psw))
+			return -EOPNOTSUPP;
+	}
 	rc = handle_itdb(vcpu);
 	if (rc)
 		return rc;
@@ -264,6 +272,8 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 		irq.type = KVM_S390_INT_CPU_TIMER;
 		break;
 	case EXT_IRQ_EXTERNAL_CALL:
+		if (kvm_s390_si_ext_call_pending(vcpu))
+			return 0;
 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
 		irq.parm = vcpu->arch.sie_block->extcpuaddr;
 		break;
@@ -284,33 +294,26 @@
  */
 static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 {
-	unsigned long hostaddr, srcaddr, dstaddr;
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
-	struct mm_struct *mm = current->mm;
+	unsigned long srcaddr, dstaddr;
 	int reg1, reg2, rc;
 
 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
-	srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
-	dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
 
 	/* Make sure that the source is paged-in */
-	hostaddr = gmap_fault(srcaddr, vcpu->arch.gmap);
-	if (IS_ERR_VALUE(hostaddr))
+	srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
+	if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-	down_read(&mm->mmap_sem);
-	rc = get_user_pages(current, mm, hostaddr, 1, 0, 0, NULL, NULL);
-	up_read(&mm->mmap_sem);
-	if (rc < 0)
+	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
+	if (rc != 0)
 		return rc;
 
 	/* Make sure that the destination is paged-in */
-	hostaddr = gmap_fault(dstaddr, vcpu->arch.gmap);
-	if (IS_ERR_VALUE(hostaddr))
+	dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
+	if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-	down_read(&mm->mmap_sem);
-	rc = get_user_pages(current, mm, hostaddr, 1, 1, 0, NULL, NULL);
-	up_read(&mm->mmap_sem);
-	if (rc < 0)
+	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
+	if (rc != 0)
 		return rc;
 
 	psw->addr = __rewind_psw(*psw, 4);
@@ -322,6 +325,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
 		return handle_mvpg_pei(vcpu);
+	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
+		return kvm_s390_handle_sigp_pei(vcpu);
 
 	return -EOPNOTSUPP;
 }
...
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -148,8 +148,7 @@ static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_ECALL_PEND |
-		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
@@ -524,6 +523,20 @@ static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
 	}
 }
 
+/* Check whether SIGP interpretation facility has an external call pending */
+int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
+{
+	atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
+
+	if (!psw_extint_disabled(vcpu) &&
+	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
+	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
+	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
+		return 1;
+
+	return 0;
+}
+
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -554,6 +567,9 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	if (!rc && kvm_cpu_has_pending_timer(vcpu))
 		rc = 1;
 
+	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
+		rc = 1;
+
 	return rc;
 }
@@ -610,7 +626,8 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
-		!signal_pending(current)) {
+		!signal_pending(current) &&
+		!kvm_s390_si_ext_call_pending(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
@@ -667,6 +684,11 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	}
 	atomic_set(&li->active, 0);
 	spin_unlock_bh(&li->lock);
+
+	/* clear pending external calls set by sigp interpretation facility */
+	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_clear_mask(SIGP_CTRL_C,
+			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
 }
 
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
...
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -633,7 +633,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
		vcpu->arch.sie_block->ecb |= 0x10;
 
	vcpu->arch.sie_block->ecb2 = 8;
-	vcpu->arch.sie_block->eca  = 0xC1002000U;
+	vcpu->arch.sie_block->eca  = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac  = (int) (long) vfacilities;
@@ -753,7 +753,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
-		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
+		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
@@ -1017,7 +1017,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
-				      vcpu->arch.sie_block->prefix,
+				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
@@ -1045,15 +1045,30 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
	return 0;
 }
 
-static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
+/**
+ * kvm_arch_fault_in_page - fault-in guest page if necessary
+ * @vcpu: The corresponding virtual cpu
+ * @gpa: Guest physical address
+ * @writable: Whether the page should be writable or not
+ *
+ * Make sure that a guest page has been faulted-in on the host.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
 {
-	long rc;
-	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;
+	hva_t hva;
+	long rc;
+
+	hva = gmap_fault(gpa, vcpu->arch.gmap);
+	if (IS_ERR_VALUE(hva))
+		return (long)hva;
	down_read(&mm->mmap_sem);
-	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
+	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
-	return rc;
+	return rc < 0 ? rc : 0;
 }
 
@@ -1191,9 +1206,12 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
-		if (kvm_arch_setup_async_pf(vcpu) ||
-		    (kvm_arch_fault_in_sync(vcpu) >= 0))
+		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
+		} else {
+			gpa_t gpa = current->thread.gmap_addr;
+			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
+		}
	}
 
	if (rc == -1) {
@@ -1320,7 +1338,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
-	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
+	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
 
	if (vcpu->sigset_active)
@@ -1339,6 +1357,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 {
	unsigned char archmode = 1;
+	unsigned int px;
	u64 clkcomp;
	int rc;
@@ -1357,8 +1376,9 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
			     vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
+	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
-			      &vcpu->arch.sie_block->prefix, 4);
+			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
...
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -61,9 +61,15 @@ static inline int kvm_is_ucontrol(struct kvm *kvm)
 #endif
 }
 
+#define GUEST_PREFIX_SHIFT 13
+static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
+}
+
 static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 {
-	vcpu->arch.sie_block->prefix = prefix & 0x7fffe000u;
+	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
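
Aside (not part of the patch): the SIE block now stores the prefix in
hardware format, i.e. right-shifted by GUEST_PREFIX_SHIFT, which is why
every reader must go through kvm_s390_get_prefix(). Because a prefix is
8 KB aligned (the old code masked with 0x7fffe000u), the shift loses no
information. A minimal userspace model of the round trip, with
hypothetical names:

#include <assert.h>
#include <stdint.h>

#define GUEST_PREFIX_SHIFT 13

static uint32_t sie_prefix;	/* models the 18-bit field at offset 0x4 */

static void set_prefix(uint32_t prefix)
{
	sie_prefix = prefix >> GUEST_PREFIX_SHIFT;	/* hardware format */
}

static uint32_t get_prefix(void)
{
	return sie_prefix << GUEST_PREFIX_SHIFT;	/* back to an address */
}

int main(void)
{
	/* Bits 0-12 of a prefix are always zero, so the shift round-trips;
	 * 0x7fffe000 is the largest valid value. */
	set_prefix(0x7fffe000u);
	assert(get_prefix() == 0x7fffe000u);
	return 0;
}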
@@ -142,6 +148,7 @@ void kvm_s390_reinject_io_int(struct kvm *kvm,
 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
 
 /* implemented in priv.c */
+int is_valid_psw(psw_t *psw);
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
@@ -153,8 +160,10 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
 
 /* implemented in sigp.c */
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
+long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
@@ -212,6 +221,7 @@ static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int psw_extint_disabled(struct kvm_vcpu *vcpu);
 void kvm_s390_destroy_adapters(struct kvm *kvm);
+int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
 
 /* implemented in guestdbg.c */
 void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
...
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -119,8 +119,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	address = vcpu->arch.sie_block->prefix;
-	address = address & 0x7fffe000u;
+	address = kvm_s390_get_prefix(vcpu);
 
	/* get the value */
	rc = write_guest(vcpu, operand2, &address, sizeof(address));
@@ -365,7 +364,8 @@ static void handle_new_psw(struct kvm_vcpu *vcpu)
 #define PSW_ADDR_24 0x0000000000ffffffUL
 #define PSW_ADDR_31 0x000000007fffffffUL
 
-static int is_valid_psw(psw_t *psw) {
+int is_valid_psw(psw_t *psw)
+{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
@@ -376,6 +376,8 @@ static int is_valid_psw(psw_t *psw) {
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
+	if (psw->addr & 1)
+		return 0;
	return 1;
 }
...
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -458,3 +458,38 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
 }
+
+/*
+ * Handle SIGP partial execution interception.
+ *
+ * This interception will occur at the source cpu when a source cpu sends an
+ * external call to a target cpu and the target cpu has the WAIT bit set in
+ * its cpuflags. Interception will occur after the interrupt indicator bits at
+ * the target cpu have been set. All error cases will lead to instruction
+ * interception, therefore nothing is to be checked or prepared.
+ */
+int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
+{
+	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
+	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
+	struct kvm_vcpu *dest_vcpu;
+	u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
+
+	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
+
+	if (order_code == SIGP_EXTERNAL_CALL) {
+		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+		BUG_ON(dest_vcpu == NULL);
+
+		spin_lock_bh(&dest_vcpu->arch.local_int.lock);
+		if (waitqueue_active(&dest_vcpu->wq))
+			wake_up_interruptible(&dest_vcpu->wq);
+		dest_vcpu->preempted = true;
+		spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
+
+		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
+		return 0;
+	}
+
+	return -EOPNOTSUPP;
+}
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -2,7 +2,7 @@
 #define _TRACE_KVM_H
 
 #include <linux/tracepoint.h>
-#include <asm/sigp.h>
+#include <asm/sie.h>
 #include <asm/debug.h>
 #include <asm/dis.h>
@@ -125,17 +125,6 @@ TRACE_EVENT(kvm_s390_sie_fault,
	    VCPU_TP_PRINTK("%s", "fault in sie instruction")
	);
 
-#define sie_intercept_code				\
-	{0x04, "Instruction"},				\
-	{0x08, "Program interruption"},			\
-	{0x0C, "Instruction and program interruption"},	\
-	{0x10, "External request"},			\
-	{0x14, "External interruption"},		\
-	{0x18, "I/O request"},				\
-	{0x1C, "Wait state"},				\
-	{0x20, "Validity"},				\
-	{0x28, "Stop request"}
-
 TRACE_EVENT(kvm_s390_sie_exit,
	    TP_PROTO(VCPU_PROTO_COMMON, u8 icptcode),
	    TP_ARGS(VCPU_ARGS_COMMON, icptcode),
@@ -165,7 +154,6 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(__u64, instruction)
-		    __field(char, insn[8])
		    ),
 
	    TP_fast_assign(
@@ -176,10 +164,8 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
	    VCPU_TP_PRINTK("intercepted instruction %016llx (%s)",
			   __entry->instruction,
-			   insn_to_mnemonic((unsigned char *)
-					    &__entry->instruction,
-					    __entry->insn, sizeof(__entry->insn)) ?
-			   "unknown" : __entry->insn)
+			   __print_symbolic(icpt_insn_decoder(__entry->instruction),
+					    icpt_insn_codes))
	);
 
 /*
@@ -227,18 +213,6 @@ TRACE_EVENT(kvm_s390_intercept_validity,
  * Trace points for instructions that are of special interest.
  */
 
-#define sigp_order_codes					\
-	{SIGP_SENSE, "sense"},					\
-	{SIGP_EXTERNAL_CALL, "external call"},			\
-	{SIGP_EMERGENCY_SIGNAL, "emergency signal"},		\
-	{SIGP_STOP, "stop"},					\
-	{SIGP_STOP_AND_STORE_STATUS, "stop and store status"},	\
-	{SIGP_SET_ARCHITECTURE, "set architecture"},		\
-	{SIGP_SET_PREFIX, "set prefix"},			\
-	{SIGP_STORE_STATUS_AT_ADDRESS, "store status at addr"},	\
-	{SIGP_SENSE_RUNNING, "sense running"},			\
-	{SIGP_RESTART, "restart"}
-
 TRACE_EVENT(kvm_s390_handle_sigp,
	    TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr, \
		     __u32 parameter),
@@ -265,12 +239,28 @@ TRACE_EVENT(kvm_s390_handle_sigp,
			   __entry->cpu_addr, __entry->parameter)
	);
 
-#define diagnose_codes				\
-	{0x10, "release pages"},		\
-	{0x44, "time slice end"},		\
-	{0x308, "ipl functions"},		\
-	{0x500, "kvm hypercall"},		\
-	{0x501, "kvm breakpoint"}
+TRACE_EVENT(kvm_s390_handle_sigp_pei,
+	    TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr),
+	    TP_ARGS(VCPU_ARGS_COMMON, order_code, cpu_addr),
+
+	    TP_STRUCT__entry(
+		    VCPU_FIELD_COMMON
+		    __field(__u8, order_code)
+		    __field(__u16, cpu_addr)
+		    ),
+
+	    TP_fast_assign(
+		    VCPU_ASSIGN_COMMON
+		    __entry->order_code = order_code;
+		    __entry->cpu_addr = cpu_addr;
+		    ),
+
+	    VCPU_TP_PRINTK("handle sigp pei order %02x (%s), cpu address %04x",
+			   __entry->order_code,
+			   __print_symbolic(__entry->order_code,
+					    sigp_order_codes),
+			   __entry->cpu_addr)
	);
 
 TRACE_EVENT(kvm_s390_handle_diag,
	    TP_PROTO(VCPU_PROTO_COMMON, __u16 code),
...
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -958,8 +958,10 @@ void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
	unsigned long addr, next;
	pgd_t *pgd;
 
+	down_write(&mm->mmap_sem);
+	if (init_skey && mm_use_skey(mm))
+		goto out_up;
	addr = start;
-	down_read(&mm->mmap_sem);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
@@ -967,7 +969,10 @@ void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
			continue;
		next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
	} while (pgd++, addr = next, addr != end);
-	up_read(&mm->mmap_sem);
+	if (init_skey)
+		current->mm->context.use_skey = 1;
+out_up:
+	up_write(&mm->mmap_sem);
 }
 EXPORT_SYMBOL(page_table_reset_pgste);
@@ -1384,19 +1389,6 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
  */
 void s390_enable_skey(void)
 {
-	/*
-	 * To avoid races between multiple vcpus, ending in calling
-	 * page_table_reset twice or more,
-	 * the page_table_lock is taken for serialization.
-	 */
-	spin_lock(&current->mm->page_table_lock);
-	if (mm_use_skey(current->mm)) {
-		spin_unlock(&current->mm->page_table_lock);
-		return;
-	}
-
-	current->mm->context.use_skey = 1;
-	spin_unlock(&current->mm->page_table_lock);
	page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
 }
 EXPORT_SYMBOL_GPL(s390_enable_skey);
...
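
Aside (not part of the patch): the race fixed here was that two vcpus could
both pass the old page_table_lock check and reset the page tables twice; the
new code makes the "already enabled?" test and the reset atomic under the
mmap_sem write lock. A minimal userspace model of that pattern, with
hypothetical names (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static bool use_skey;
static int resets;

/* Models page_table_reset_pgste(): the test-and-set and the expensive
 * reset now happen under the same write lock. */
static void page_table_reset_pgste(bool init_skey)
{
	pthread_rwlock_wrlock(&mmap_sem);
	if (init_skey && use_skey)
		goto out_up;		/* another vcpu already did it */
	resets++;			/* stands in for the page-table walk */
	if (init_skey)
		use_skey = true;
out_up:
	pthread_rwlock_unlock(&mmap_sem);
}

static void *vcpu_thread(void *arg)
{
	(void)arg;
	page_table_reset_pgste(true);	/* models s390_enable_skey() */
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, vcpu_thread, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("resets = %d\n", resets);	/* always 1 */
	return 0;
}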
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -27,7 +27,9 @@ struct read_info_sccb {
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
-	u8	_reserved2[84 - 56];	/* 56-83 */
+	u8	_reserved2a[76 - 56];	/* 56-75 */
+	u32	ibc;			/* 76-79 */
+	u8	_reserved2b[84 - 80];	/* 80-83 */
	u8	fac84;			/* 84 */
	u8	fac85;			/* 85 */
	u8	_reserved3[91 - 86];	/* 86-90 */
@@ -47,6 +49,7 @@ static unsigned long sclp_hsa_size;
 static unsigned int sclp_max_cpu;
 static struct sclp_ipl_info sclp_ipl_info;
 static unsigned char sclp_siif;
+static u32 sclp_ibc;
 
 u64 sclp_facilities;
 u8 sclp_fac84;
@@ -111,6 +114,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
	sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	sclp_rzm <<= 20;
+	sclp_ibc = sccb->ibc;
 
	if (!sccb->hcpua) {
		if (MACHINE_IS_VM)
@@ -168,6 +172,12 @@ int sclp_has_siif(void)
 }
 EXPORT_SYMBOL(sclp_has_siif);
 
+unsigned int sclp_get_ibc(void)
+{
+	return sclp_ibc;
+}
+EXPORT_SYMBOL(sclp_get_ibc);
+
 /*
  * This function will be called after sclp_facilities_detect(), which gets
  * called from early.c code. The sclp_facilities_detect() function retrieves
...