Commit 722c2cd7 authored by Paolo Bonzini

Merge tag 'kvm-s390-master-4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fixes and improvements for 4.16

- optimization for the exitless interrupt support that was merged
  in 4.16-rc1
- improve the branch prediction blocking for nested KVM
- replace some jump tables with switch statements to improve
  expoline performance
parents 7928b2cb baabee67
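
The third bullet above accounts for most of the diff below: the function-pointer dispatch tables in the intercept, interrupt-delivery and instruction handlers are replaced by switch statements, because with expolines enabled (the s390 Spectre v2 mitigation) every indirect branch is routed through a thunk. As a rough illustration of the difference, here is a minimal user-space sketch, not kernel code, with made-up opcodes and handler names, contrasting the two dispatch styles:

/*
 * Illustrative user-space sketch only, not kernel code.  In the kernel,
 * with CONFIG_EXPOLINE, an indirect call like the one in dispatch_table()
 * is routed through an expoline thunk; the switch variant compiles to
 * direct, predictable branches and needs no thunk.
 */
#include <stdio.h>

static int handle_a(void) { return 1; }	/* stand-in handler */
static int handle_b(void) { return 2; }	/* stand-in handler */

typedef int (*handler_t)(void);

/* Old style: jump table of function pointers indexed by an opcode byte. */
static const handler_t handlers[256] = {
	[0x01] = handle_a,
	[0x82] = handle_b,
};

static int dispatch_table(unsigned char op)
{
	handler_t h = handlers[op];

	return h ? h() : -1;		/* indirect call through a pointer */
}

/* New style: switch statement with direct calls. */
static int dispatch_switch(unsigned char op)
{
	switch (op) {
	case 0x01:
		return handle_a();	/* direct call */
	case 0x82:
		return handle_b();
	default:
		return -1;
	}
}

int main(void)
{
	printf("%d %d\n", dispatch_table(0x01), dispatch_switch(0x82));
	return 0;
}

Both variants return the same result for valid opcodes; only the kind of branch the compiler emits differs, and that is what the expoline-related rework in this merge targets.
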
@@ -22,22 +22,6 @@
 #include "trace.h"
 #include "trace-s390.h"

-static const intercept_handler_t instruction_handlers[256] = {
-	[0x01] = kvm_s390_handle_01,
-	[0x82] = kvm_s390_handle_lpsw,
-	[0x83] = kvm_s390_handle_diag,
-	[0xaa] = kvm_s390_handle_aa,
-	[0xae] = kvm_s390_handle_sigp,
-	[0xb2] = kvm_s390_handle_b2,
-	[0xb6] = kvm_s390_handle_stctl,
-	[0xb7] = kvm_s390_handle_lctl,
-	[0xb9] = kvm_s390_handle_b9,
-	[0xe3] = kvm_s390_handle_e3,
-	[0xe5] = kvm_s390_handle_e5,
-	[0xeb] = kvm_s390_handle_eb,
-};
-
 u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
@@ -129,16 +113,39 @@ static int handle_validity(struct kvm_vcpu *vcpu)
 static int handle_instruction(struct kvm_vcpu *vcpu)
 {
-	intercept_handler_t handler;
-
 	vcpu->stat.exit_instruction++;
 	trace_kvm_s390_intercept_instruction(vcpu,
 					     vcpu->arch.sie_block->ipa,
 					     vcpu->arch.sie_block->ipb);
-	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
-	if (handler)
-		return handler(vcpu);
-	return -EOPNOTSUPP;
+
+	switch (vcpu->arch.sie_block->ipa >> 8) {
+	case 0x01:
+		return kvm_s390_handle_01(vcpu);
+	case 0x82:
+		return kvm_s390_handle_lpsw(vcpu);
+	case 0x83:
+		return kvm_s390_handle_diag(vcpu);
+	case 0xaa:
+		return kvm_s390_handle_aa(vcpu);
+	case 0xae:
+		return kvm_s390_handle_sigp(vcpu);
+	case 0xb2:
+		return kvm_s390_handle_b2(vcpu);
+	case 0xb6:
+		return kvm_s390_handle_stctl(vcpu);
+	case 0xb7:
+		return kvm_s390_handle_lctl(vcpu);
+	case 0xb9:
+		return kvm_s390_handle_b9(vcpu);
+	case 0xe3:
+		return kvm_s390_handle_e3(vcpu);
+	case 0xe5:
+		return kvm_s390_handle_e5(vcpu);
+	case 0xeb:
+		return kvm_s390_handle_eb(vcpu);
+	default:
+		return -EOPNOTSUPP;
+	}
 }

 static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
...
@@ -187,12 +187,6 @@ static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
 	return kvm_s390_get_cpu_timer(vcpu) >> 63;
 }

-static inline int is_ioirq(unsigned long irq_type)
-{
-	return ((irq_type >= IRQ_PEND_IO_ISC_7) &&
-		(irq_type <= IRQ_PEND_IO_ISC_0));
-}
-
 static uint64_t isc_to_isc_bits(int isc)
 {
 	return (0x80 >> isc) << 24;
@@ -236,10 +230,15 @@ static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gis
 	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
 }

-static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
+static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
 {
 	return vcpu->kvm->arch.float_int.pending_irqs |
-		vcpu->arch.local_int.pending_irqs |
+		vcpu->arch.local_int.pending_irqs;
+}
+
+static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
+{
+	return pending_irqs_no_gisa(vcpu) |
 		kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
 }
@@ -337,7 +336,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
 {
-	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
+	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
 		return;
 	else if (psw_ioint_disabled(vcpu))
 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
@@ -1011,24 +1010,6 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
 	return rc;
 }

-typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
-
-static const deliver_irq_t deliver_irq_funcs[] = {
-	[IRQ_PEND_MCHK_EX] = __deliver_machine_check,
-	[IRQ_PEND_MCHK_REP] = __deliver_machine_check,
-	[IRQ_PEND_PROG] = __deliver_prog,
-	[IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
-	[IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
-	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
-	[IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
-	[IRQ_PEND_RESTART] = __deliver_restart,
-	[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
-	[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
-	[IRQ_PEND_EXT_SERVICE] = __deliver_service,
-	[IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
-	[IRQ_PEND_VIRTIO] = __deliver_virtio,
-};
-
 /* Check whether an external call is pending (deliverable or not) */
 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 {
@@ -1192,7 +1173,6 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	deliver_irq_t func;
 	int rc = 0;
 	unsigned long irq_type;
 	unsigned long irqs;
@@ -1212,16 +1192,57 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
 		/* bits are in the reverse order of interrupt priority */
 		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
-		if (is_ioirq(irq_type)) {
+		switch (irq_type) {
+		case IRQ_PEND_IO_ISC_0:
+		case IRQ_PEND_IO_ISC_1:
+		case IRQ_PEND_IO_ISC_2:
+		case IRQ_PEND_IO_ISC_3:
+		case IRQ_PEND_IO_ISC_4:
+		case IRQ_PEND_IO_ISC_5:
+		case IRQ_PEND_IO_ISC_6:
+		case IRQ_PEND_IO_ISC_7:
 			rc = __deliver_io(vcpu, irq_type);
-		} else {
-			func = deliver_irq_funcs[irq_type];
-			if (!func) {
-				WARN_ON_ONCE(func == NULL);
-				clear_bit(irq_type, &li->pending_irqs);
-				continue;
-			}
-			rc = func(vcpu);
+			break;
+		case IRQ_PEND_MCHK_EX:
+		case IRQ_PEND_MCHK_REP:
+			rc = __deliver_machine_check(vcpu);
+			break;
+		case IRQ_PEND_PROG:
+			rc = __deliver_prog(vcpu);
+			break;
+		case IRQ_PEND_EXT_EMERGENCY:
+			rc = __deliver_emergency_signal(vcpu);
+			break;
+		case IRQ_PEND_EXT_EXTERNAL:
+			rc = __deliver_external_call(vcpu);
+			break;
+		case IRQ_PEND_EXT_CLOCK_COMP:
+			rc = __deliver_ckc(vcpu);
+			break;
+		case IRQ_PEND_EXT_CPU_TIMER:
+			rc = __deliver_cpu_timer(vcpu);
+			break;
+		case IRQ_PEND_RESTART:
+			rc = __deliver_restart(vcpu);
+			break;
+		case IRQ_PEND_SET_PREFIX:
+			rc = __deliver_set_prefix(vcpu);
+			break;
+		case IRQ_PEND_PFAULT_INIT:
+			rc = __deliver_pfault_init(vcpu);
+			break;
+		case IRQ_PEND_EXT_SERVICE:
+			rc = __deliver_service(vcpu);
+			break;
+		case IRQ_PEND_PFAULT_DONE:
+			rc = __deliver_pfault_done(vcpu);
+			break;
+		case IRQ_PEND_VIRTIO:
+			rc = __deliver_virtio(vcpu);
+			break;
+		default:
+			WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
+			clear_bit(irq_type, &li->pending_irqs);
 		}
 	}
@@ -1701,7 +1722,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
+		if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
+			kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
 		break;
 	default:
 		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
...
@@ -19,8 +19,6 @@
 #include <asm/processor.h>
 #include <asm/sclp.h>

-typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
-
 /* Transactional Memory Execution related macros */
 #define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
 #define TDB_FORMAT1		1
...
@@ -795,55 +795,60 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	return rc;
 }

-static const intercept_handler_t b2_handlers[256] = {
-	[0x02] = handle_stidp,
-	[0x04] = handle_set_clock,
-	[0x10] = handle_set_prefix,
-	[0x11] = handle_store_prefix,
-	[0x12] = handle_store_cpu_address,
-	[0x14] = kvm_s390_handle_vsie,
-	[0x21] = handle_ipte_interlock,
-	[0x29] = handle_iske,
-	[0x2a] = handle_rrbe,
-	[0x2b] = handle_sske,
-	[0x2c] = handle_test_block,
-	[0x30] = handle_io_inst,
-	[0x31] = handle_io_inst,
-	[0x32] = handle_io_inst,
-	[0x33] = handle_io_inst,
-	[0x34] = handle_io_inst,
-	[0x35] = handle_io_inst,
-	[0x36] = handle_io_inst,
-	[0x37] = handle_io_inst,
-	[0x38] = handle_io_inst,
-	[0x39] = handle_io_inst,
-	[0x3a] = handle_io_inst,
-	[0x3b] = handle_io_inst,
-	[0x3c] = handle_io_inst,
-	[0x50] = handle_ipte_interlock,
-	[0x56] = handle_sthyi,
-	[0x5f] = handle_io_inst,
-	[0x74] = handle_io_inst,
-	[0x76] = handle_io_inst,
-	[0x7d] = handle_stsi,
-	[0xb1] = handle_stfl,
-	[0xb2] = handle_lpswe,
-};
-
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
 {
-	intercept_handler_t handler;
-
-	/*
-	 * A lot of B2 instructions are priviledged. Here we check for
-	 * the privileged ones, that we can handle in the kernel.
-	 * Anything else goes to userspace.
-	 */
-	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-	if (handler)
-		return handler(vcpu);
-
-	return -EOPNOTSUPP;
+	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+	case 0x02:
+		return handle_stidp(vcpu);
+	case 0x04:
+		return handle_set_clock(vcpu);
+	case 0x10:
+		return handle_set_prefix(vcpu);
+	case 0x11:
+		return handle_store_prefix(vcpu);
+	case 0x12:
+		return handle_store_cpu_address(vcpu);
+	case 0x14:
+		return kvm_s390_handle_vsie(vcpu);
+	case 0x21:
+	case 0x50:
+		return handle_ipte_interlock(vcpu);
+	case 0x29:
+		return handle_iske(vcpu);
+	case 0x2a:
+		return handle_rrbe(vcpu);
+	case 0x2b:
+		return handle_sske(vcpu);
+	case 0x2c:
+		return handle_test_block(vcpu);
+	case 0x30:
+	case 0x31:
+	case 0x32:
+	case 0x33:
+	case 0x34:
+	case 0x35:
+	case 0x36:
+	case 0x37:
+	case 0x38:
+	case 0x39:
+	case 0x3a:
+	case 0x3b:
+	case 0x3c:
+	case 0x5f:
+	case 0x74:
+	case 0x76:
+		return handle_io_inst(vcpu);
+	case 0x56:
+		return handle_sthyi(vcpu);
+	case 0x7d:
+		return handle_stsi(vcpu);
+	case 0xb1:
+		return handle_stfl(vcpu);
+	case 0xb2:
+		return handle_lpswe(vcpu);
+	default:
+		return -EOPNOTSUPP;
+	}
 }

 static int handle_epsw(struct kvm_vcpu *vcpu)
@@ -1105,25 +1110,22 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 	return 0;
 }

-static const intercept_handler_t b9_handlers[256] = {
-	[0x8a] = handle_ipte_interlock,
-	[0x8d] = handle_epsw,
-	[0x8e] = handle_ipte_interlock,
-	[0x8f] = handle_ipte_interlock,
-	[0xab] = handle_essa,
-	[0xaf] = handle_pfmf,
-};
-
 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
 {
-	intercept_handler_t handler;
-
-	/* This is handled just as for the B2 instructions. */
-	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-	if (handler)
-		return handler(vcpu);
-
-	return -EOPNOTSUPP;
+	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+	case 0x8a:
+	case 0x8e:
+	case 0x8f:
+		return handle_ipte_interlock(vcpu);
+	case 0x8d:
+		return handle_epsw(vcpu);
+	case 0xab:
+		return handle_essa(vcpu);
+	case 0xaf:
+		return handle_pfmf(vcpu);
+	default:
+		return -EOPNOTSUPP;
+	}
 }

 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
@@ -1271,22 +1273,20 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
 }

-static const intercept_handler_t eb_handlers[256] = {
-	[0x2f] = handle_lctlg,
-	[0x25] = handle_stctg,
-	[0x60] = handle_ri,
-	[0x61] = handle_ri,
-	[0x62] = handle_ri,
-};
-
 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
 {
-	intercept_handler_t handler;
-
-	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
-	if (handler)
-		return handler(vcpu);
-	return -EOPNOTSUPP;
+	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
+	case 0x25:
+		return handle_stctg(vcpu);
+	case 0x2f:
+		return handle_lctlg(vcpu);
+	case 0x60:
+	case 0x61:
+	case 0x62:
+		return handle_ri(vcpu);
+	default:
+		return -EOPNOTSUPP;
+	}
 }

 static int handle_tprot(struct kvm_vcpu *vcpu)
@@ -1346,10 +1346,12 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
 {
-	/* For e5xx... instructions we only handle TPROT */
-	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
+	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+	case 0x01:
 		return handle_tprot(vcpu);
-	return -EOPNOTSUPP;
+	default:
+		return -EOPNOTSUPP;
+	}
 }

 static int handle_sckpf(struct kvm_vcpu *vcpu)
@@ -1380,17 +1382,14 @@ static int handle_ptff(struct kvm_vcpu *vcpu)
 	return 0;
 }

-static const intercept_handler_t x01_handlers[256] = {
-	[0x04] = handle_ptff,
-	[0x07] = handle_sckpf,
-};
-
 int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
 {
-	intercept_handler_t handler;
-
-	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-	if (handler)
-		return handler(vcpu);
-	return -EOPNOTSUPP;
+	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+	case 0x04:
+		return handle_ptff(vcpu);
+	case 0x07:
+		return handle_sckpf(vcpu);
+	default:
+		return -EOPNOTSUPP;
+	}
 }
@@ -821,6 +821,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+	int guest_bp_isolation;
 	int rc;

 	handle_last_fault(vcpu, vsie_page);
@@ -831,6 +832,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		s390_handle_mcck();
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

+	/* save current guest state of bp isolation override */
+	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
+
+	/*
+	 * The guest is running with BPBC, so we have to force it on for our
+	 * nested guest. This is done by enabling BPBC globally, so the BPBC
+	 * control in the SCB (which the nested guest can modify) is simply
+	 * ignored.
+	 */
+	if (test_kvm_facility(vcpu->kvm, 82) &&
+	    vcpu->arch.sie_block->fpf & FPF_BPBC)
+		set_thread_flag(TIF_ISOLATE_BP_GUEST);
+
 	local_irq_disable();
 	guest_enter_irqoff();
 	local_irq_enable();
@@ -840,6 +855,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	local_irq_disable();
 	guest_exit_irqoff();
 	local_irq_enable();

+	/* restore guest state for bp isolation override */
+	if (!guest_bp_isolation)
+		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
+
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

 	if (rc == -EINTR) {
...