Commit 214ff83d authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: hyperv: implement PV IPI send hypercalls

Using a hypercall for sending IPIs is faster because it allows specifying any
number of vCPUs (even > 64 with a sparse CPU set), and the whole procedure
takes only one VMEXIT.

The current Hyper-V TLFS (v5.0b) claims that the HvCallSendSyntheticClusterIpi
hypercall can't be 'fast' (passing parameters through registers), but
apparently this is not true: Windows always uses it as 'fast', so we need to
support that.
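
For illustration, the guest side of the fast variant looks roughly like this
(a minimal sketch, assuming the hv_do_fast_hypercall16() helper from the
guest's asm/mshyperv.h; the register placement matches the handler below):

	/* Hypothetical guest-side sketch: send an IPI via the fast variant. */
	static u64 hv_send_ipi_fast(u32 vector, u64 cpu_mask)
	{
		/*
		 * hv_do_fast_hypercall16() sets the fast bit in the control
		 * word; the vector travels in the low 32 bits of the first
		 * register parameter (the upper 32 bits must be zero) and
		 * the 64-bit CPU mask in the second.
		 */
		return hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, cpu_mask);
	}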
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2cefc5fe
@@ -4791,3 +4791,10 @@ CPU when the exception is taken. If this virtual SError is taken to EL1 using
AArch64, this value will be reported in the ISS field of ESR_ELx.
See KVM_CAP_VCPU_EVENTS for more details.

8.20 KVM_CAP_HYPERV_SEND_IPI

Architectures: x86

This capability indicates that KVM supports paravirtualized Hyper-V IPI send
hypercalls:
HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
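
Userspace can probe for this in the usual way; a minimal sketch using the
generic KVM_CHECK_EXTENSION ioctl (error handling omitted):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int kvm_fd = open("/dev/kvm", O_RDWR);
	/* Returns 1 when KVM can handle the PV IPI hypercalls. */
	int has_ipi = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV_SEND_IPI);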
@@ -1405,6 +1405,107 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
		((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}

static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
bool ex, bool fast)
{
struct kvm *kvm = current_vcpu->kvm;
struct kvm_hv *hv = &kvm->arch.hyperv;
struct hv_send_ipi_ex send_ipi_ex;
struct hv_send_ipi send_ipi;
struct kvm_vcpu *vcpu;
unsigned long valid_bank_mask;
u64 sparse_banks[64];
int sparse_banks_len, bank, i, sbank;
struct kvm_lapic_irq irq = {.delivery_mode = APIC_DM_FIXED};
bool all_cpus;
if (!ex) {
if (!fast) {
if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
sizeof(send_ipi))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
sparse_banks[0] = send_ipi.cpu_mask;
irq.vector = send_ipi.vector;
} else {
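			/*
			 * 'Fast' variant: the guest passed its parameters in
			 * registers, so 'ingpa' carries the vector in its low
			 * 32 bits and 'outgpa' carries the 64-bit CPU mask.
			 */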
/* 'reserved' part of hv_send_ipi should be 0 */
if (unlikely(ingpa >> 32 != 0))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
sparse_banks[0] = outgpa;
irq.vector = (u32)ingpa;
}
all_cpus = false;
valid_bank_mask = BIT_ULL(0);
trace_kvm_hv_send_ipi(irq.vector, sparse_banks[0]);
} else {
if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
sizeof(send_ipi_ex))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
send_ipi_ex.vp_set.format,
send_ipi_ex.vp_set.valid_bank_mask);
irq.vector = send_ipi_ex.vector;
valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
sizeof(sparse_banks[0]);
all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
if (!sparse_banks_len)
goto ret_success;
if (!all_cpus &&
kvm_read_guest(kvm,
ingpa + offsetof(struct hv_send_ipi_ex,
vp_set.bank_contents),
sparse_banks,
sparse_banks_len))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
}
if ((irq.vector < HV_IPI_LOW_VECTOR) ||
(irq.vector > HV_IPI_HIGH_VECTOR))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
if (all_cpus || atomic_read(&hv->num_mismatched_vp_indexes)) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (all_cpus || hv_vcpu_in_sparse_set(
&vcpu->arch.hyperv, sparse_banks,
valid_bank_mask)) {
/* We fail only when APIC is disabled */
kvm_apic_set_irq(vcpu, &irq, NULL);
}
}
goto ret_success;
}
/*
* num_mismatched_vp_indexes is zero so every vcpu has
* vp_index == vcpu_idx.
*/
sbank = 0;
for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, 64) {
for_each_set_bit(i, (unsigned long *)&sparse_banks[sbank], 64) {
u32 vp_index = bank * 64 + i;
struct kvm_vcpu *vcpu =
get_vcpu_by_vpidx(kvm, vp_index);
/* Unknown vCPU specified */
if (!vcpu)
continue;
/* We fail only when APIC is disabled */
kvm_apic_set_irq(vcpu, &irq, NULL);
}
sbank++;
}
ret_success:
return HV_STATUS_SUCCESS;
}
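
	/*
	 * Illustration (not part of the patch): with valid_bank_mask = 0x5,
	 * banks 0 and 2 are present, so sparse_banks[0] covers VP indices
	 * 0..63, sparse_banks[1] covers VP indices 128..191, and bit i of
	 * packed entry sbank maps to vp_index = bank * 64 + i.
	 */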
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
@@ -1574,6 +1675,20 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
		break;
case HVCALL_SEND_IPI:
if (unlikely(rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
break;
case HVCALL_SEND_IPI_EX:
if (unlikely(fast || rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
break;
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
...
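
For reference, the dispatch above keys off fields that kvm_hv_hypercall() has
already unpacked from the hypercall control word; a sketch of that layout per
the TLFS (variable names follow the existing handler):

	code    = param & 0xffff;	  /* HVCALL_SEND_IPI / HVCALL_SEND_IPI_EX */
	fast    = !!(param & BIT(16));	  /* parameters in registers, not memory */
	rep_cnt = (param >> 32) & 0xfff;  /* must be 0: neither call is 'rep' */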
@@ -1418,6 +1418,48 @@ TRACE_EVENT(kvm_hv_flush_tlb_ex,
		  __entry->valid_bank_mask, __entry->format,
		  __entry->address_space, __entry->flags)
);
/*
* Tracepoints for kvm_hv_send_ipi.
*/
TRACE_EVENT(kvm_hv_send_ipi,
TP_PROTO(u32 vector, u64 processor_mask),
TP_ARGS(vector, processor_mask),
TP_STRUCT__entry(
__field(u32, vector)
__field(u64, processor_mask)
),
TP_fast_assign(
__entry->vector = vector;
__entry->processor_mask = processor_mask;
),
TP_printk("vector %x processor_mask 0x%llx",
__entry->vector, __entry->processor_mask)
);
TRACE_EVENT(kvm_hv_send_ipi_ex,
TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
TP_ARGS(vector, format, valid_bank_mask),
TP_STRUCT__entry(
__field(u32, vector)
__field(u64, format)
__field(u64, valid_bank_mask)
),
TP_fast_assign(
__entry->vector = vector;
__entry->format = format;
__entry->valid_bank_mask = valid_bank_mask;
),
TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
__entry->vector, __entry->format,
__entry->valid_bank_mask)
);
#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
...
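
Once merged, the two new events can be enabled like any other kvm tracepoint,
e.g. via /sys/kernel/debug/tracing/events/kvm/kvm_hv_send_ipi/enable.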
@@ -2912,6 +2912,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_HYPERV_VP_INDEX:
	case KVM_CAP_HYPERV_EVENTFD:
	case KVM_CAP_HYPERV_TLBFLUSH:
	case KVM_CAP_HYPERV_SEND_IPI:
	case KVM_CAP_PCI_SEGMENT:
	case KVM_CAP_DEBUGREGS:
	case KVM_CAP_X86_ROBUST_SINGLESTEP:
...
@@ -955,6 +955,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
#define KVM_CAP_MSR_PLATFORM_INFO 159
#define KVM_CAP_PPC_NESTED_HV 160
#define KVM_CAP_HYPERV_SEND_IPI 161

#ifdef KVM_CAP_IRQ_ROUTING
...