Commit 0e8bc06a authored by David Hildenbrand, committed by Christian Borntraeger

KVM: s390: PSW forwarding / rewinding / ilc rework

We have some confusion about ilc vs. ilen in our current code. So let's
correctly use the term ilen when dealing with (ilc << 1).
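
(Illustration only, not code from this patch: the ILC is a 2-bit code of
1, 2 or 3, so the instruction length in bytes is simply ilc << 1, i.e.
2, 4 or 6. The helper names below are hypothetical.)

/* hypothetical sketch of the ilc <-> ilen mapping this rework names */
static inline unsigned int ilc_to_ilen(unsigned int ilc)
{
	return ilc << 1;	/* ilc 1/2/3 -> ilen 2/4/6 bytes */
}

static inline unsigned int ilen_to_ilc(unsigned int ilen)
{
	return ilen >> 1;	/* ilen 2/4/6 bytes -> ilc 1/2/3 */
}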

Program irq injection didn't take care of the correct ilc in case of
irqs triggered by EXECUTE functions, so let's provide one function,
kvm_s390_get_ilen(), to take care of all that.
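
(Background sketch, not part of the patch: in the non-EXECUTE case the
length comes straight from the first opcode byte - bits 0-1 select 2, 4
or 6 bytes, which is what insn_length() encodes; only when the
intercepted instruction was the target of an EXECUTE does the ilc from
icptstatus have to be used instead. The helper below is hypothetical.)

/* hypothetical stand-alone version of the opcode-byte -> length rule */
static inline unsigned int opcode_to_ilen(unsigned char opcode)
{
	switch (opcode >> 6) {
	case 0:
		return 2;	/* 00xxxxxx: 2-byte instruction */
	case 1:
	case 2:
		return 4;	/* 01xxxxxx / 10xxxxxx: 4-byte instruction */
	default:
		return 6;	/* 11xxxxxx: 6-byte instruction */
	}
}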

Also, manually specifying the size of the instruction in intercept
handlers (and sometimes overwriting that value internally for EXECUTE)
doesn't make much sense. So also provide the following functions (a short
usage sketch follows the list):
- kvm_s390_retry_instr to retry the currently intercepted instruction
- kvm_s390_rewind_psw to rewind the PSW without internal overwrites
- kvm_s390_forward_psw to forward the PSW
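
(Usage sketch with a hypothetical handler, modelled on the handle_skey()
and handle_ipte_interlock() hunks below: a handler that wants the guest
to re-execute the intercepted instruction no longer hard-codes a 4-byte
rewind.)

/* hypothetical example handler, not part of this patch */
static int handle_example_retry(struct kvm_vcpu *vcpu)
{
	/* rewinds by kvm_s390_get_ilen(), so EXECUTE targets work too */
	kvm_s390_retry_instr(vcpu);
	return 0;
}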
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 6fd8e67d
@@ -38,17 +38,32 @@ static const intercept_handler_t instruction_handlers[256] = {
 	[0xeb] = kvm_s390_handle_eb,
 };
 
-void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
+u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+	u8 ilen = 0;
 
-	/* Use the length of the EXECUTE instruction if necessary */
-	if (sie_block->icptstatus & 1) {
-		ilc = (sie_block->icptstatus >> 4) & 0x6;
-		if (!ilc)
-			ilc = 4;
+	switch (vcpu->arch.sie_block->icptcode) {
+	case ICPT_INST:
+	case ICPT_INSTPROGI:
+	case ICPT_OPEREXC:
+	case ICPT_PARTEXEC:
+	case ICPT_IOINST:
+		/* instruction only stored for these icptcodes */
+		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
+		/* Use the length of the EXECUTE instruction if necessary */
+		if (sie_block->icptstatus & 1) {
+			ilen = (sie_block->icptstatus >> 4) & 0x6;
+			if (!ilen)
+				ilen = 4;
+		}
+		break;
+	case ICPT_PROGI:
+		/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
+		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
+		break;
 	}
-	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc);
+	return ilen;
 }
 
 static int handle_noop(struct kvm_vcpu *vcpu)
@@ -318,7 +333,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 	if (rc != 0)
 		return rc;
 
-	kvm_s390_rewind_psw(vcpu, 4);
+	kvm_s390_retry_instr(vcpu);
 	return 0;
 }
...
@@ -335,23 +335,6 @@ static void set_intercept_indicators(struct kvm_vcpu *vcpu)
 		set_intercept_indicators_stop(vcpu);
 }
 
-static u16 get_ilc(struct kvm_vcpu *vcpu)
-{
-	switch (vcpu->arch.sie_block->icptcode) {
-	case ICPT_INST:
-	case ICPT_INSTPROGI:
-	case ICPT_OPEREXC:
-	case ICPT_PARTEXEC:
-	case ICPT_IOINST:
-		/* last instruction only stored for these icptcodes */
-		return insn_length(vcpu->arch.sie_block->ipa >> 8);
-	case ICPT_PROGI:
-		return vcpu->arch.sie_block->pgmilc;
-	default:
-		return 0;
-	}
-}
-
 static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -588,7 +571,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_pgm_info pgm_info;
 	int rc = 0, nullifying = false;
-	u16 ilc = get_ilc(vcpu);
+	u16 ilen = kvm_s390_get_ilen(vcpu);
 
 	spin_lock(&li->lock);
 	pgm_info = li->irq.pgm;
@@ -596,8 +579,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 	memset(&li->irq.pgm, 0, sizeof(pgm_info));
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
-		   pgm_info.code, ilc);
+	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
+		   pgm_info.code, ilen);
 	vcpu->stat.deliver_program_int++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
 					 pgm_info.code, 0);
@@ -682,9 +665,10 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 	}
 
 	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
-		kvm_s390_rewind_psw(vcpu, ilc);
+		kvm_s390_rewind_psw(vcpu, ilen);
 
-	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
+	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
+	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
 			   (u64 *) __LC_LAST_BREAK);
 	rc |= put_guest_lc(vcpu, pgm_info.code,
...
@@ -2181,7 +2181,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
-	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
+	kvm_s390_forward_psw(vcpu, insn_length(opcode));
 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 }
...
@@ -19,6 +19,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <asm/facility.h>
+#include <asm/processor.h>
 
 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
@@ -212,8 +213,22 @@ int kvm_s390_reinject_io_int(struct kvm *kvm,
 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
 
 /* implemented in intercept.c */
-void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc);
+u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
+static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
+{
+	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+
+	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
+}
+static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
+{
+	kvm_s390_rewind_psw(vcpu, -ilen);
+}
+static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
+{
+	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
+}
 
 /* implemented in priv.c */
 int is_valid_psw(psw_t *psw);
...
@@ -173,7 +173,7 @@ static int handle_skey(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	kvm_s390_rewind_psw(vcpu, 4);
+	kvm_s390_retry_instr(vcpu);
 	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
 	return 0;
 }
@@ -184,7 +184,7 @@ static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
 	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
-	kvm_s390_rewind_psw(vcpu, 4);
+	kvm_s390_retry_instr(vcpu);
 	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
 	return 0;
 }
@@ -759,8 +759,8 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	/* Rewind PSW to repeat the ESSA instruction */
-	kvm_s390_rewind_psw(vcpu, 4);
+	/* Retry the ESSA instruction */
+	kvm_s390_retry_instr(vcpu);
 	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
 	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
 	down_read(&gmap->mm->mmap_sem);
...