Commit bb0cca24 authored by Marc Zyngier

Merge branch kvm-arm64/single-step-async-exception into kvmarm-master/next

* kvm-arm64/single-step-async-exception:
  : .
  : Single-step fixes from Reiji Watanabe:
  :
  : "This series fixes two bugs of single-step execution enabled by
  : userspace, and add a test case for KVM_GUESTDBG_SINGLESTEP to
  : the debug-exception test to verify the single-step behavior."
  : .
  KVM: arm64: selftests: Add a test case for KVM_GUESTDBG_SINGLESTEP
  KVM: arm64: selftests: Refactor debug-exceptions to make it amenable to new test cases
  KVM: arm64: Clear PSTATE.SS when the Software Step state was Active-pending
  KVM: arm64: Preserve PSTATE.SS for the guest while single-step is enabled
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents b04b3315 b18e4d4a
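
For reference, the single-step behaviour exercised by this series is driven entirely from userspace through the KVM_SET_GUEST_DEBUG ioctl, which the new selftest below reaches via vcpu_guest_debug_set(). A minimal sketch of that userspace side follows; the set_single_step() helper and the vcpu_fd descriptor are illustrative only (not part of this series), and error handling is omitted.

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative helper, not part of this series: toggle single-stepping on a
 * vCPU whose file descriptor was obtained from KVM_CREATE_VCPU.  Setting
 * control back to 0 disables guest debug (and with it single-step) again.
 */
static int set_single_step(int vcpu_fd, bool enable)
{
	struct kvm_guest_debug debug = {};

	if (enable)
		debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &debug);
}

With single-step enabled this way, each KVM_RUN returns to userspace with a KVM_EXIT_DEBUG exit once the guest has retired one instruction, which is exactly what the new test case checks between iter_ss_begin and iter_ss_end.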
@@ -393,6 +393,7 @@ struct kvm_vcpu_arch {
 	 */
 	struct {
 		u32 mdscr_el1;
+		bool pstate_ss;
 	} guest_debug_preserved;

 	/* vcpu power state */
@@ -535,6 +536,9 @@ struct kvm_vcpu_arch {
 #define IN_WFIT __vcpu_single_flag(sflags, BIT(3))
 /* vcpu system registers loaded on physical CPU */
 #define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4))
+/* Software step state is Active-pending */
+#define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5))

 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
......
@@ -32,6 +32,10 @@ static DEFINE_PER_CPU(u64, mdcr_el2);
  *
  * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
  * after we have restored the preserved value to the main context.
+ *
+ * When single-step is enabled by userspace, we tweak PSTATE.SS on every
+ * guest entry. Preserve PSTATE.SS so we can restore the original value
+ * for the vcpu after the single-step is disabled.
  */
 static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
 {
@@ -41,6 +45,9 @@ static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
 	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				 vcpu->arch.guest_debug_preserved.mdscr_el1);
+
+	vcpu->arch.guest_debug_preserved.pstate_ss =
+		(*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
 }

 static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
@@ -51,6 +58,11 @@ static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
 	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				 vcpu_read_sys_reg(vcpu, MDSCR_EL1));
+
+	if (vcpu->arch.guest_debug_preserved.pstate_ss)
+		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+	else
+		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
 }

 /**
@@ -188,7 +200,18 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
 	 * debugging the system.
 	 */
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+		/*
+		 * If the software step state at the last guest exit
+		 * was Active-pending, we don't set DBG_SPSR_SS so
+		 * that the state is maintained (to not run another
+		 * single-step until the pending Software Step
+		 * exception is taken).
+		 */
+		if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING))
+			*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+		else
+			*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+
 		mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
 		mdscr |= DBG_MDSCR_SS;
 		vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
@@ -262,6 +285,15 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
 	 * Restore the guest's debug registers if we were using them.
 	 */
 	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
+		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+			if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
+				/*
+				 * Mark the vcpu as ACTIVE_PENDING
+				 * until Software Step exception is taken.
+				 */
+				vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING);
+		}
+
 		restore_guest_debug_regs(vcpu);

 		/*
......
@@ -937,6 +937,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	} else {
 		/* If not enabled clear all flags */
 		vcpu->guest_debug = 0;
+		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
 	}

 out:
......
@@ -152,8 +152,14 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 	run->debug.arch.hsr_high = upper_32_bits(esr);
 	run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;

-	if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
+	switch (ESR_ELx_EC(esr)) {
+	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
+		break;
+	case ESR_ELx_EC_SOFTSTP_LOW:
+		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
+		break;
+	}

 	return 0;
 }
......
@@ -22,6 +22,7 @@
 #define SPSR_SS (1 << 21)

 extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start;
+extern unsigned char iter_ss_begin, iter_ss_end;
 static volatile uint64_t sw_bp_addr, hw_bp_addr;
 static volatile uint64_t wp_addr, wp_data_addr;
 static volatile uint64_t svc_addr;
@@ -238,6 +239,46 @@ static void guest_svc_handler(struct ex_regs *regs)
 	svc_addr = regs->pc;
 }

+enum single_step_op {
+	SINGLE_STEP_ENABLE = 0,
+	SINGLE_STEP_DISABLE = 1,
+};
+
+static void guest_code_ss(int test_cnt)
+{
+	uint64_t i;
+	uint64_t bvr, wvr, w_bvr, w_wvr;
+
+	for (i = 0; i < test_cnt; i++) {
+		/* Bits [1:0] of dbg{b,w}vr are RES0 */
+		w_bvr = i << 2;
+		w_wvr = i << 2;
+
+		/* Enable Single Step execution */
+		GUEST_SYNC(SINGLE_STEP_ENABLE);
+
+		/*
+		 * Userspace will verify that the pc is as expected during
+		 * single step execution between iter_ss_begin and iter_ss_end.
+		 */
+		asm volatile("iter_ss_begin:nop\n");
+
+		write_sysreg(w_bvr, dbgbvr0_el1);
+		write_sysreg(w_wvr, dbgwvr0_el1);
+		bvr = read_sysreg(dbgbvr0_el1);
+		wvr = read_sysreg(dbgwvr0_el1);
+
+		asm volatile("iter_ss_end:\n");
+
+		/* Disable Single Step execution */
+		GUEST_SYNC(SINGLE_STEP_DISABLE);
+
+		GUEST_ASSERT(bvr == w_bvr);
+		GUEST_ASSERT(wvr == w_wvr);
+	}
+	GUEST_DONE();
+}
 static int debug_version(struct kvm_vcpu *vcpu)
 {
 	uint64_t id_aa64dfr0;
@@ -246,7 +287,7 @@ static int debug_version(struct kvm_vcpu *vcpu)
 	return id_aa64dfr0 & 0xf;
 }

-int main(int argc, char *argv[])
+static void test_guest_debug_exceptions(void)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
@@ -259,9 +300,6 @@ int main(int argc, char *argv[])
 	vm_init_descriptor_tables(vm);
 	vcpu_init_descriptor_tables(vcpu);

-	__TEST_REQUIRE(debug_version(vcpu) >= 6,
-		       "Armv8 debug architecture not supported.");
-
 	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
				ESR_EC_BRK_INS, guest_sw_bp_handler);
 	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
@@ -294,5 +332,108 @@ int main(int argc, char *argv[])
 done:
 	kvm_vm_free(vm);
+}
+
+void test_single_step_from_userspace(int test_cnt)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	struct ucall uc;
+	struct kvm_run *run;
+	uint64_t pc, cmd;
+	uint64_t test_pc = 0;
+	bool ss_enable = false;
+	struct kvm_guest_debug debug = {};
+
+	vm = vm_create_with_one_vcpu(&vcpu, guest_code_ss);
+	ucall_init(vm, NULL);
+	run = vcpu->run;
+	vcpu_args_set(vcpu, 1, test_cnt);
+
+	while (1) {
+		vcpu_run(vcpu);
+		if (run->exit_reason != KVM_EXIT_DEBUG) {
+			cmd = get_ucall(vcpu, &uc);
+			if (cmd == UCALL_ABORT) {
+				REPORT_GUEST_ASSERT(uc);
+				/* NOT REACHED */
+			} else if (cmd == UCALL_DONE) {
+				break;
+			}
+
+			TEST_ASSERT(cmd == UCALL_SYNC,
+				    "Unexpected ucall cmd 0x%lx", cmd);
+
+			if (uc.args[1] == SINGLE_STEP_ENABLE) {
+				debug.control = KVM_GUESTDBG_ENABLE |
+						KVM_GUESTDBG_SINGLESTEP;
+				ss_enable = true;
+			} else {
+				/* Keep debug enabled, drop SINGLESTEP */
+				debug.control = KVM_GUESTDBG_ENABLE;
+				ss_enable = false;
+			}
+
+			vcpu_guest_debug_set(vcpu, &debug);
+			continue;
+		}
+
+		TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG");
+
+		/* Check if the current pc is expected. */
+		vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
+		TEST_ASSERT(!test_pc || pc == test_pc,
+			    "Unexpected pc 0x%lx (expected 0x%lx)",
+			    pc, test_pc);
+
+		/*
+		 * If the current pc is between iter_ss_begin and
+		 * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
+		 * be the current pc + 4.
+		 */
+		if ((pc >= (uint64_t)&iter_ss_begin) &&
+		    (pc < (uint64_t)&iter_ss_end))
+			test_pc = pc + 4;
+		else
+			test_pc = 0;
+	}
+
+	kvm_vm_free(vm);
+}
+
+static void help(char *name)
+{
+	puts("");
+	printf("Usage: %s [-h] [-i iterations of the single step test]\n", name);
+	puts("");
+	exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	int opt;
+	int ss_iteration = 10000;
+
+	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+	__TEST_REQUIRE(debug_version(vcpu) >= 6,
+		       "Armv8 debug architecture not supported.");
+	kvm_vm_free(vm);
+
+	while ((opt = getopt(argc, argv, "i:")) != -1) {
+		switch (opt) {
+		case 'i':
+			ss_iteration = atoi(optarg);
+			break;
+		case 'h':
+		default:
+			help(argv[0]);
+			break;
+		}
+	}
+
+	test_guest_debug_exceptions();
+	test_single_step_from_userspace(ss_iteration);
+
 	return 0;
 }