Commit f0a32ee4 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Fixes for interrupt controller emulation in ARM/ARM64 and x86, plus a
  one-liner x86 KVM guest fix"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Update APICv on APIC reset
  KVM: VMX: Do not fully reset PI descriptor on vCPU reset
  kvm: Return -ENODEV from update_persistent_clock
  KVM: arm/arm64: vgic-its: Check GITS_BASER Valid bit before saving tables
  KVM: arm/arm64: vgic-its: Check CBASER/BASER validity before enabling the ITS
  KVM: arm/arm64: vgic-its: Fix vgic_its_restore_collection_table returned value
  KVM: arm/arm64: vgic-its: Fix return value for device table restore
  arm/arm64: kvm: Disable branch profiling in HYP code
  arm/arm64: kvm: Move initialization completion message
  arm/arm64: KVM: set right LR register value for 32 bit guest when inject abort
  KVM: arm64: its: Fix missing dynamic allocation check in scan_its_table
parents b1878b85 b33c8732
@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 	u32 return_offset = (is_thumb) ? 2 : 4;

 	kvm_update_psr(vcpu, UND_MODE);
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

 	/* Branch to exception vector */
 	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
-	u32 return_offset = (is_thumb) ? 4 : 0;
+	u32 return_offset = (is_pabt) ? 4 : 8;
 	bool is_lpae;

 	kvm_update_psr(vcpu, ABT_MODE);
......
@@ -3,7 +3,7 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING

 KVM=../../../../virt/kvm
@@ -3,7 +3,7 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING

 KVM=../../../../virt/kvm
@@ -33,12 +33,26 @@
 #define LOWER_EL_AArch64_VECTOR		0x400
 #define LOWER_EL_AArch32_VECTOR		0x600

+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+	[0] = { 0, 0 },		/* Reset, unused */
+	[1] = { 4, 2 },		/* Undefined */
+	[2] = { 0, 0 },		/* SVC, unused */
+	[3] = { 4, 4 },		/* Prefetch abort */
+	[4] = { 8, 8 },		/* Data abort */
+	[5] = { 0, 0 },		/* HVC, unused */
+	[6] = { 4, 4 },		/* IRQ, unused */
+	[7] = { 4, 4 },		/* FIQ, unused */
+};
+
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
 	unsigned long cpsr;
 	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
 	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
-	u32 return_offset = (is_thumb) ? 4 : 0;
+	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

 	cpsr = mode | COMPAT_PSR_I_BIT;
@@ -79,7 +79,7 @@ static void kvm_get_wallclock(struct timespec *now)

 static int kvm_set_wallclock(const struct timespec *now)
 {
-	return -1;
+	return -ENODEV;
 }

 static u64 kvm_clock_read(void)
@@ -1992,6 +1992,11 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 				     vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
+	if (vcpu->arch.apicv_active) {
+		kvm_x86_ops->apicv_post_state_restore(vcpu);
+		kvm_x86_ops->hwapic_irr_update(vcpu, -1);
+		kvm_x86_ops->hwapic_isr_update(vcpu, -1);
+	}

 	vcpu->arch.apic_arb_prio = 0;
 	vcpu->arch.apic_attention = 0;
@@ -5619,9 +5619,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)

 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

-	if (kvm_vcpu_apicv_active(vcpu))
-		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
-
 	if (vmx->vpid != 0)
 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
@@ -1326,21 +1326,12 @@ static void teardown_hyp_mode(void)
 {
 	int cpu;

-	if (is_kernel_in_hyp_mode())
-		return;
-
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
 	hyp_cpu_pm_exit();
 }

-static int init_vhe_mode(void)
-{
-	kvm_info("VHE mode initialized successfully\n");
-	return 0;
-}
-
 /**
  * Inits Hyp-mode on all online CPUs
  */
@@ -1421,8 +1412,6 @@ static int init_hyp_mode(void)
 		}
 	}

-	kvm_info("Hyp mode initialized successfully\n");
-
 	return 0;

 out_err:
@@ -1456,6 +1445,7 @@ int kvm_arch_init(void *opaque)
 {
 	int err;
 	int ret, cpu;
+	bool in_hyp_mode;

 	if (!is_hyp_mode_available()) {
 		kvm_err("HYP mode not available\n");
@@ -1474,20 +1464,27 @@ int kvm_arch_init(void *opaque)
 	if (err)
 		return err;

-	if (is_kernel_in_hyp_mode())
-		err = init_vhe_mode();
-	else
+	in_hyp_mode = is_kernel_in_hyp_mode();
+
+	if (!in_hyp_mode) {
 		err = init_hyp_mode();
-	if (err)
-		goto out_err;
+		if (err)
+			goto out_err;
+	}

 	err = init_subsystems();
 	if (err)
 		goto out_hyp;

+	if (in_hyp_mode)
+		kvm_info("VHE mode initialized successfully\n");
+	else
+		kvm_info("Hyp mode initialized successfully\n");
+
 	return 0;

 out_hyp:
+	if (!in_hyp_mode)
 	teardown_hyp_mode();
 out_err:
 	teardown_common_resources();
@@ -1466,6 +1466,16 @@ static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
 {
 	mutex_lock(&its->cmd_lock);

+	/*
+	 * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
+	 * device/collection BASER are invalid
+	 */
+	if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
+		(!(its->baser_device_table & GITS_BASER_VALID) ||
+		 !(its->baser_coll_table & GITS_BASER_VALID) ||
+		 !(its->cbaser & GITS_CBASER_VALID)))
+		goto out;
+
 	its->enabled = !!(val & GITS_CTLR_ENABLE);

 	/*
@@ -1474,6 +1484,7 @@ static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
 	 */
 	vgic_its_process_commands(kvm, its);

+out:
 	mutex_unlock(&its->cmd_lock);
 }
@@ -1801,37 +1812,33 @@ typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
 static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
 			  int start_id, entry_fn_t fn, void *opaque)
 {
-	void *entry = kzalloc(esz, GFP_KERNEL);
 	struct kvm *kvm = its->dev->kvm;
 	unsigned long len = size;
 	int id = start_id;
 	gpa_t gpa = base;
+	char entry[esz];
 	int ret;

+	memset(entry, 0, esz);
+
 	while (len > 0) {
 		int next_offset;
 		size_t byte_offset;

 		ret = kvm_read_guest(kvm, gpa, entry, esz);
 		if (ret)
-			goto out;
+			return ret;

 		next_offset = fn(its, id, entry, opaque);
-		if (next_offset <= 0) {
-			ret = next_offset;
-			goto out;
-		}
+		if (next_offset <= 0)
+			return next_offset;

 		byte_offset = next_offset * esz;
 		id += next_offset;
 		gpa += byte_offset;
 		len -= byte_offset;
 	}
-	ret = 1;
-
-out:
-	kfree(entry);
-	return ret;
+	return 1;
 }
/** /**
@@ -1940,6 +1947,14 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
 	return 0;
 }

+/**
+ * vgic_its_restore_itt - restore the ITT of a device
+ *
+ * @its: its handle
+ * @dev: device handle
+ *
+ * Return 0 on success, < 0 on error
+ */
 static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
 {
 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
@@ -1951,6 +1966,10 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
 	ret = scan_its_table(its, base, max_size, ite_esz, 0,
 			     vgic_its_restore_ite, dev);

+	/* scan_its_table returns +1 if all ITEs are invalid */
+	if (ret > 0)
+		ret = 0;
+
 	return ret;
 }
@@ -2048,11 +2067,12 @@ static int vgic_its_device_cmp(void *priv, struct list_head *a,
 static int vgic_its_save_device_tables(struct vgic_its *its)
 {
 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
+	u64 baser = its->baser_device_table;
 	struct its_device *dev;
 	int dte_esz = abi->dte_esz;
-	u64 baser;

-	baser = its->baser_device_table;
+	if (!(baser & GITS_BASER_VALID))
+		return 0;

 	list_sort(NULL, &its->device_list, vgic_its_device_cmp);
@@ -2107,10 +2127,7 @@ static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
 	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
 			     l2_start_id, vgic_its_restore_dte, NULL);

-	if (ret <= 0)
-		return ret;
-
-	return 1;
+	return ret;
 }
/** /**
@@ -2140,8 +2157,9 @@ static int vgic_its_restore_device_tables(struct vgic_its *its)
 					     vgic_its_restore_dte, NULL);
 	}

+	/* scan_its_table returns +1 if all entries are invalid */
 	if (ret > 0)
-		ret = -EINVAL;
+		ret = 0;

 	return ret;
 }
@@ -2198,17 +2216,17 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
 static int vgic_its_save_collection_table(struct vgic_its *its)
 {
 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
+	u64 baser = its->baser_coll_table;
+	gpa_t gpa = BASER_ADDRESS(baser);
 	struct its_collection *collection;
 	u64 val;
-	gpa_t gpa;
 	size_t max_size, filled = 0;
 	int ret, cte_esz = abi->cte_esz;

-	gpa = BASER_ADDRESS(its->baser_coll_table);
-	if (!gpa)
+	if (!(baser & GITS_BASER_VALID))
 		return 0;

-	max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) * SZ_64K;
+	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

 	list_for_each_entry(collection, &its->collection_list, coll_list) {
 		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
@@ -2239,17 +2257,18 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
 static int vgic_its_restore_collection_table(struct vgic_its *its)
 {
 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
+	u64 baser = its->baser_coll_table;
 	int cte_esz = abi->cte_esz;
 	size_t max_size, read = 0;
 	gpa_t gpa;
 	int ret;

-	if (!(its->baser_coll_table & GITS_BASER_VALID))
+	if (!(baser & GITS_BASER_VALID))
 		return 0;

-	gpa = BASER_ADDRESS(its->baser_coll_table);
+	gpa = BASER_ADDRESS(baser);

-	max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) * SZ_64K;
+	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

 	while (read < max_size) {
 		ret = vgic_its_restore_cte(its, gpa, cte_esz);
@@ -2258,6 +2277,10 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
 		gpa += cte_esz;
 		read += cte_esz;
 	}
+
+	if (ret > 0)
+		return 0;
+
 	return ret;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment