Commit 8f00067a authored by Paolo Bonzini

Merge tag 'kvm-s390-next-4.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fixes and features for 4.11 (via kvm/next)

- enable some simd extensions for guests
- enable nx for guests
- debug log for cpu model
- PER fixes
- remove bitwise annotation from ar_t
- detect guests in operation exception program check loops
- fix potential null-pointer dereference for ucontrol guests

- also contains merge for fix that went into 4.10 to avoid conflicts
parents d9c0e59f fb7dc1d4
@@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
 		ipte_unlock_simple(vcpu);
 }
 
-static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
 			  enum gacc_mode mode)
 {
 	union alet alet;
@@ -465,7 +465,9 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
 struct trans_exc_code_bits {
 	unsigned long addr : 52; /* Translation-exception Address */
 	unsigned long fsi : 2;	 /* Access Exception Fetch/Store Indication */
-	unsigned long : 6;
+	unsigned long : 2;
+	unsigned long b56 : 1;
+	unsigned long : 3;
 	unsigned long b60 : 1;
 	unsigned long b61 : 1;
 	unsigned long as : 2;	 /* ASCE Identifier */
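A quick bit-accounting note on the reshuffled field (annotation, not part of the patch): the widths still sum to 64, so splitting the old anonymous 6-bit filler only makes bit 56 addressable without moving any named bit:

```c
/*
 * MSB-first layout, bits 0..63:
 *   addr 0-51, fsi 52-53, unused 54-55, b56 56, unused 57-59,
 *   b60 60, b61 61, as 62-63
 * 52 + 2 + 2 + 1 + 3 + 1 + 1 + 2 = 64
 */
```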
@@ -485,7 +487,7 @@ enum prot_type {
 };
 
 static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
-		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
+		     u8 ar, enum gacc_mode mode, enum prot_type prot)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
 	struct trans_exc_code_bits *tec;
@@ -497,14 +499,18 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 	switch (code) {
 	case PGM_PROTECTION:
 		switch (prot) {
+		case PROT_TYPE_LA:
+			tec->b56 = 1;
+			break;
+		case PROT_TYPE_KEYC:
+			tec->b60 = 1;
+			break;
 		case PROT_TYPE_ALC:
 			tec->b60 = 1;
 			/* FALL THROUGH */
 		case PROT_TYPE_DAT:
 			tec->b61 = 1;
 			break;
-		default: /* LA and KEYC set b61 to 0, other params undefined */
-			return code;
 		}
 		/* FALL THROUGH */
 	case PGM_ASCE_TYPE:
@@ -539,7 +545,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 }
 
 static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
-			 unsigned long ga, ar_t ar, enum gacc_mode mode)
+			 unsigned long ga, u8 ar, enum gacc_mode mode)
 {
 	int rc;
 	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
@@ -771,7 +777,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
+static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 			    unsigned long *pages, unsigned long nr_pages,
 			    const union asce asce, enum gacc_mode mode)
 {
@@ -803,7 +809,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
 	return 0;
 }
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -877,7 +883,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * Note: The IPTE lock is not taken during this function, so the caller
  * has to take care of this.
  */
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 			    unsigned long *gpa, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -910,7 +916,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 /**
  * check_gva_range - test a range of guest virtual addresses for accessibility
  */
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode)
 {
 	unsigned long gpa;
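To see what the new cases buy us, here is a hypothetical call site (mine, not from the series) reporting low-address protection on a store; it now records TEID bit 56 instead of taking the removed `default: return code` path:

```c
/* Hypothetical: a store hit low-address protection at gva. */
rc = trans_exc(vcpu, PGM_PROTECTION, gva, ar, GACC_STORE, PROT_TYPE_LA);
```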
...
@@ -162,11 +162,11 @@ enum gacc_mode {
 };
 
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-			    ar_t ar, unsigned long *gpa, enum gacc_mode mode);
+			    u8 ar, unsigned long *gpa, enum gacc_mode mode);
-int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode);
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode);
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -218,7 +218,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * if data has been changed in guest space in case of an exception.
  */
 static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
@@ -238,7 +238,7 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 * data will be copied from guest space to kernel space.
 */
 static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		unsigned long len)
 {
 	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
@@ -247,10 +247,11 @@ int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 /**
  * read_guest_instr - copy instruction data from guest space to kernel space
  * @vcpu: virtual cpu
+ * @ga: guest address
  * @data: destination address in kernel space
  * @len: number of bytes to copy
  *
- * Copy @len bytes from the current psw address (guest space) to @data (kernel
+ * Copy @len bytes from the given address (guest space) to @data (kernel
  * space).
  *
  * The behaviour of read_guest_instr is identical to read_guest, except that
@@ -258,10 +259,10 @@ int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
  * address-space mode.
  */
 static inline __must_check
-int read_guest_instr(struct kvm_vcpu *vcpu, void *data, unsigned long len)
+int read_guest_instr(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+		     unsigned long len)
 {
-	return access_guest(vcpu, vcpu->arch.sie_block->gpsw.addr, 0, data, len,
-			    GACC_IFETCH);
+	return access_guest(vcpu, ga, 0, data, len, GACC_IFETCH);
 }
 
 /**
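For illustration, callers now name the fetch address explicitly; this mirrors the updated call in vcpu_post_run_fault_in_sie() further down (snippet mine):

```c
/* Fetch the first two instruction bytes at the current PSW address. */
u16 opcode;
int rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr,
			  &opcode, sizeof(opcode));
```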
...
@@ -388,14 +388,13 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
 #define per_write_wp_event(code) \
 	(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
 
-static int debug_exit_required(struct kvm_vcpu *vcpu)
+static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
+			       unsigned long peraddr)
 {
-	u8 perc = vcpu->arch.sie_block->perc;
 	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
 	struct kvm_hw_wp_info_arch *wp_info = NULL;
 	struct kvm_hw_bp_info_arch *bp_info = NULL;
 	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
-	unsigned long peraddr = vcpu->arch.sie_block->peraddr;
 
 	if (guestdbg_hw_bp_enabled(vcpu)) {
 		if (per_write_wp_event(perc) &&
@@ -437,36 +436,118 @@ static int debug_exit_required(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
+{
+	u8 exec_ilen = 0;
+	u16 opcode[3];
+	int rc;
+
+	if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
+		/* PER address references the fetched or the execute instr */
+		*addr = vcpu->arch.sie_block->peraddr;
+		/*
+		 * Manually detect if we have an EXECUTE instruction. As
+		 * instructions are always 2 byte aligned we can read the
+		 * first two bytes unconditionally
+		 */
+		rc = read_guest_instr(vcpu, *addr, &opcode, 2);
+		if (rc)
+			return rc;
+		if (opcode[0] >> 8 == 0x44)
+			exec_ilen = 4;
+		if ((opcode[0] & 0xff0f) == 0xc600)
+			exec_ilen = 6;
+	} else {
+		/* instr was suppressed, calculate the responsible instr */
+		*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
+				     kvm_s390_get_ilen(vcpu));
+		if (vcpu->arch.sie_block->icptstatus & 0x01) {
+			exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
+			if (!exec_ilen)
+				exec_ilen = 4;
+		}
+	}
+
+	if (exec_ilen) {
+		/* read the complete EXECUTE instr to detect the fetched addr */
+		rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
+		if (rc)
+			return rc;
+		if (exec_ilen == 6) {
+			/* EXECUTE RELATIVE LONG - RIL-b format */
+			s32 rl = *((s32 *) (opcode + 1));
+
+			/* rl is a _signed_ 32 bit value specifying halfwords */
+			*addr += (u64)(s64) rl * 2;
+		} else {
+			/* EXECUTE - RX-a format */
+			u32 base = (opcode[1] & 0xf000) >> 12;
+			u32 disp = opcode[1] & 0x0fff;
+			u32 index = opcode[0] & 0x000f;
+
+			*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
+			*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
+			*addr += disp;
+		}
+		*addr = kvm_s390_logical_to_effective(vcpu, *addr);
+	}
+	return 0;
+}
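A standalone sketch of the RIL-b arithmetic above (helper name and values are mine): RI2 is a signed halfword count relative to the address of the EXECUTE RELATIVE LONG instruction itself.

```c
#include <stdint.h>

/* Target of EXECUTE RELATIVE LONG: ri2 signed halfwords from exrl_addr. */
static uint64_t exrl_target(uint64_t exrl_addr, int32_t ri2)
{
	return exrl_addr + (int64_t)ri2 * 2;
}
/* e.g. exrl_target(0x10000, -4) == 0xfff8 -- four halfwords back */
```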
+
 #define guest_per_enabled(vcpu) \
 	(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
 
 int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
 {
+	const u64 cr10 = vcpu->arch.sie_block->gcr[10];
+	const u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	const u8 ilen = kvm_s390_get_ilen(vcpu);
 	struct kvm_s390_pgm_info pgm_info = {
 		.code = PGM_PER,
 		.per_code = PER_CODE_IFETCH,
 		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
 	};
+	unsigned long fetched_addr;
+	int rc;
 
 	/*
 	 * The PSW points to the next instruction, therefore the intercepted
 	 * instruction generated a PER i-fetch event. PER address therefore
 	 * points at the previous PSW address (could be an EXECUTE function).
 	 */
-	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+	if (!guestdbg_enabled(vcpu))
+		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+
+	if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
+		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
+
+	if (!guest_per_enabled(vcpu) ||
+	    !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
+		return 0;
+
+	rc = per_fetched_addr(vcpu, &fetched_addr);
+	if (rc < 0)
+		return rc;
+	if (rc)
+		/* instruction-fetching exceptions */
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+	if (in_addr_range(fetched_addr, cr10, cr11))
+		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+	return 0;
 }
 
-static void filter_guest_per_event(struct kvm_vcpu *vcpu)
+static int filter_guest_per_event(struct kvm_vcpu *vcpu)
 {
 	const u8 perc = vcpu->arch.sie_block->perc;
-	u64 peraddr = vcpu->arch.sie_block->peraddr;
 	u64 addr = vcpu->arch.sie_block->gpsw.addr;
 	u64 cr9 = vcpu->arch.sie_block->gcr[9];
 	u64 cr10 = vcpu->arch.sie_block->gcr[10];
 	u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	/* filter all events, demanded by the guest */
 	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
+	unsigned long fetched_addr;
+	int rc;
 
 	if (!guest_per_enabled(vcpu))
 		guest_perc = 0;
@@ -478,9 +559,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 		guest_perc &= ~PER_CODE_BRANCH;
 
 	/* filter "instruction-fetching" events */
-	if (guest_perc & PER_CODE_IFETCH &&
-	    !in_addr_range(peraddr, cr10, cr11))
-		guest_perc &= ~PER_CODE_IFETCH;
+	if (guest_perc & PER_CODE_IFETCH) {
+		rc = per_fetched_addr(vcpu, &fetched_addr);
+		if (rc < 0)
+			return rc;
+		/*
+		 * Don't inject an irq on exceptions. This would make handling
+		 * on icpt code 8 very complex (as PSW was already rewound).
+		 */
+		if (rc || !in_addr_range(fetched_addr, cr10, cr11))
+			guest_perc &= ~PER_CODE_IFETCH;
+	}
 
 	/* All other PER events will be given to the guest */
 	/* TODO: Check altered address/address space */
@@ -489,6 +578,7 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 
 	if (!guest_perc)
 		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
+	return 0;
 }
 
 #define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
@@ -496,14 +586,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 #define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
 #define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
 
-void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
+int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 {
-	int new_as;
+	int rc, new_as;
 
-	if (debug_exit_required(vcpu))
+	if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
+				vcpu->arch.sie_block->peraddr))
 		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
 
-	filter_guest_per_event(vcpu);
+	rc = filter_guest_per_event(vcpu);
+	if (rc)
+		return rc;
 
 	/*
 	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
@@ -532,4 +625,5 @@ void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 		    (pssec(vcpu) || old_ssec(vcpu)))
 			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
 	}
+	return 0;
 }
...
@@ -238,7 +238,9 @@ static int handle_prog(struct kvm_vcpu *vcpu)
 	vcpu->stat.exit_program_interruption++;
 
 	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
-		kvm_s390_handle_per_event(vcpu);
+		rc = kvm_s390_handle_per_event(vcpu);
+		if (rc)
+			return rc;
 		/* the interrupt might have been filtered out completely */
 		if (vcpu->arch.sie_block->iprcc == 0)
 			return 0;
@@ -359,6 +361,9 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 
 static int handle_operexc(struct kvm_vcpu *vcpu)
 {
+	psw_t oldpsw, newpsw;
+	int rc;
+
 	vcpu->stat.exit_operation_exception++;
 	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
 				      vcpu->arch.sie_block->ipb);
@@ -369,6 +374,24 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
 		return -EOPNOTSUPP;
 
+	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
+	if (rc)
+		return rc;
+	/*
+	 * Avoid endless loops of operation exceptions, if the pgm new
+	 * PSW will cause a new operation exception.
+	 * The heuristic checks if the pgm new psw is within 6 bytes before
+	 * the faulting psw address (with same DAT, AS settings) and the
+	 * new psw is not a wait psw and the fault was not triggered by
+	 * problem state.
+	 */
+	oldpsw = vcpu->arch.sie_block->gpsw;
+	if (oldpsw.addr - newpsw.addr <= 6 &&
+	    !(newpsw.mask & PSW_MASK_WAIT) &&
+	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
+	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
+	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
+		return -EOPNOTSUPP;
+
 	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 }
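A worked instance of the heuristic (addresses made up): if the guest's pgm-new PSW points straight back at the failing instruction, every operation exception would immediately raise the next one.

```c
/* oldpsw.addr = 0x20006 (PSW already advanced past the 6-byte instr),
 * newpsw.addr = 0x20000 (pgm new PSW re-executes that instr):
 * 0x20006 - 0x20000 = 6 <= 6, not a wait PSW, same DAT/AS bits, not
 * problem state -> return -EOPNOTSUPP to userspace instead of looping.
 * The subtraction is unsigned, so a new PSW *after* the old address
 * wraps to a huge value and never matches. */
```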
...
@@ -217,7 +217,7 @@ static void allow_cpu_feat(unsigned long nr)
 static inline int plo_test_bit(unsigned char nr)
 {
 	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
-	int cc = 3; /* subfunction not available */
+	int cc;
 
 	asm volatile(
 		/* Parameter registers are ignored for "test bit" */
@@ -442,6 +442,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
+	if (kvm_is_ucontrol(kvm))
+		return -EINVAL;
+
 	mutex_lock(&kvm->slots_lock);
 
 	r = -EINVAL;
@@ -505,6 +508,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		} else if (MACHINE_HAS_VX) {
 			set_kvm_facility(kvm->arch.model.fac_mask, 129);
 			set_kvm_facility(kvm->arch.model.fac_list, 129);
+			if (test_facility(134)) {
+				set_kvm_facility(kvm->arch.model.fac_mask, 134);
+				set_kvm_facility(kvm->arch.model.fac_list, 134);
+			}
+			if (test_facility(135)) {
+				set_kvm_facility(kvm->arch.model.fac_mask, 135);
+				set_kvm_facility(kvm->arch.model.fac_list, 135);
+			}
 			r = 0;
 		} else
 			r = -EINVAL;
@@ -821,6 +832,13 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 		}
 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
+		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
+			 kvm->arch.model.ibc,
+			 kvm->arch.model.cpuid);
+		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+			 kvm->arch.model.fac_list[0],
+			 kvm->arch.model.fac_list[1],
+			 kvm->arch.model.fac_list[2]);
 	} else
 		ret = -EFAULT;
 	kfree(proc);
@@ -894,6 +912,13 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 	proc->ibc = kvm->arch.model.ibc;
 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
+		 kvm->arch.model.ibc,
+		 kvm->arch.model.cpuid);
+	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+		 kvm->arch.model.fac_list[0],
+		 kvm->arch.model.fac_list[1],
+		 kvm->arch.model.fac_list[2]);
 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
 		ret = -EFAULT;
 	kfree(proc);
@@ -916,7 +941,18 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
+	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
+		 kvm->arch.model.ibc,
+		 kvm->arch.model.cpuid);
+	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
+		 mach->fac_mask[0],
+		 mach->fac_mask[1],
+		 mach->fac_mask[2]);
+	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
+		 mach->fac_list[0],
+		 mach->fac_list[1],
+		 mach->fac_list[2]);
 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
 		ret = -EFAULT;
 	kfree(mach);
@@ -1437,7 +1473,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	/* Populate the facility mask initially. */
 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
 		if (i < kvm_s390_fac_list_mask_size())
 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
@@ -1938,6 +1974,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
 		vcpu->arch.sie_block->ecb2 |= 0x08;
+	if (test_kvm_facility(vcpu->kvm, 130))
+		vcpu->arch.sie_block->ecb2 |= 0x20;
 	vcpu->arch.sie_block->eca = 0x1002000U;
 	if (sclp.has_cei)
 		vcpu->arch.sie_block->eca |= 0x80000000U;
@@ -2578,7 +2616,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 	 * to look up the current opcode to get the length of the instruction
 	 * to be able to forward the PSW.
 	 */
-	rc = read_guest_instr(vcpu, &opcode, 1);
+	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
 	ilen = insn_length(opcode);
 	if (rc < 0) {
 		return rc;
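For context, the facility 134/135 block sits in the existing KVM_CAP_S390_VECTOR_REGISTERS handler, so userspace opts in as before and simply sees more facilities when the host has them. A minimal sketch, assuming a plain VM fd:

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Enable guest vector support on the VM fd; with this change the same
 * cap also advertises facilities 134/135 if the host provides them. */
static int enable_vx(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_VECTOR_REGISTERS };

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
```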
...
@@ -86,9 +86,7 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
 
-typedef u8 __bitwise ar_t;
-
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -101,7 +99,7 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 					      u64 *address1, u64 *address2,
-					      ar_t *ar_b1, ar_t *ar_b2)
+					      u8 *ar_b1, u8 *ar_b2)
 {
 	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
 	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -125,7 +123,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
 	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
 }
 
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -140,7 +138,7 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
 }
 
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
@@ -379,7 +377,7 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
 void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
 void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
-void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
 
 /* support for Basic/Extended SCA handling */
 static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
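As a standalone illustration of what these helpers decode (snippet and example values mine): the S format packs a base register and a 12-bit displacement into ipb, and the base field doubles as the reported access-register number.

```c
#include <stdint.h>

/* Mirror of the kvm_s390_get_base_disp_s() decode shown above. */
static uint64_t base_disp_s(uint32_t ipb, const uint64_t *gprs, uint8_t *ar)
{
	uint32_t base2 = ipb >> 28;
	uint32_t disp2 = (ipb & 0x0fff0000) >> 16;

	if (ar)
		*ar = base2; /* base register field is also the AR number */
	return (base2 ? gprs[base2] : 0) + disp2;
}
/* e.g. ipb = 0x51230000 -> gprs[5] + 0x123, access register 5 */
```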
...
@@ -54,7 +54,7 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	ar_t ar;
+	u8 ar;
 	u64 op2, val;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -79,7 +79,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_spx++;
@@ -117,7 +117,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stpx++;
@@ -147,7 +147,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	u16 vcpu_id = vcpu->vcpu_id;
 	u64 ga;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stap++;
@@ -380,7 +380,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 	u32 tpi_data[3];
 	int rc;
 	u64 addr;
-	ar_t ar;
+	u8 ar;
 
 	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (addr & 3)
@@ -548,7 +548,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	psw_compat_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (gpsw->mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -575,7 +575,7 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	psw_t new_psw;
 	u64 addr;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -597,7 +597,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
 	u64 operand2;
 	int rc;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stidp++;
@@ -644,7 +644,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 	ASCEBC(mem->vm[0].cpi, 16);
 }
 
-static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
 				 u8 fc, u8 sel1, u16 sel2)
 {
 	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
@@ -663,7 +663,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	unsigned long mem = 0;
 	u64 operand2;
 	int rc = 0;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
@@ -970,7 +970,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctl++;
@@ -1009,7 +1009,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctl++;
@@ -1043,7 +1043,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_lctlg++;
@@ -1081,7 +1081,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_stctg++;
@@ -1132,7 +1132,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	unsigned long hva, gpa;
 	int ret = 0, cc = 0;
 	bool writable;
-	ar_t ar;
+	u8 ar;
 
 	vcpu->stat.instruction_tprot++;
...
@@ -324,6 +324,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	/* Run-time-Instrumentation */
 	if (test_kvm_facility(vcpu->kvm, 64))
 		scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
+	/* Instruction Execution Prevention */
+	if (test_kvm_facility(vcpu->kvm, 130))
+		scb_s->ecb2 |= scb_o->ecb2 & 0x20U;
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
 		scb_s->eca |= scb_o->eca & 0x00000001U;
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
...
@@ -741,7 +741,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 	pgste_set_unlock(ptep, new);
 	pte_unmap_unlock(ptep, ptl);
-	return 0;
+	return cc;
 }
 EXPORT_SYMBOL(reset_guest_reference_bit);
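This one-liner matters because the RRBE handler in priv.c forwards the return value as the guest's condition code; hard-coding 0 hid the previous reference-bit state. A caller-side sketch (modeled on that handler, details assumed):

```c
/* rc < 0 is an error; otherwise rc is the condition code reflecting
 * the previous reference-bit state, which the guest must observe. */
rc = reset_guest_reference_bit(current->mm, vmaddr);
if (rc < 0)
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
kvm_s390_set_psw_cc(vcpu, rc);
```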
...
@@ -80,6 +80,8 @@ static struct facility_def facility_defs[] = {
 			76, /* msa extension 3 */
 			77, /* msa extension 4 */
 			78, /* enhanced-DAT 2 */
+			130, /* instruction-execution-protection */
+			131, /* enhanced-SOP 2 and side-effect */
 			-1  /* END */
 		}
 	},
...