Commit 6165d5dd authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV: add virtual mode handlers for HPT hcalls and page faults

In order to support hash guests in the P9 path (which does not do real-mode
hcalls or page fault handling), these real-mode hash-specific hcalls and
interrupts need to be implemented in virtual mode.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-29-npiggin@gmail.com
parent a9aa86e0
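
Every hcall case added in the hunk below follows the same dispatch pattern: call the HPT handler in virtual mode, fall back to the host if it returns H_TOO_HARD, otherwise hand the return status back to the guest in GPR3 and resume it. A minimal standalone sketch of that control flow follows; the constants and the stub handler are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-in values; the real tokens live in the kernel headers. */
#define H_SUCCESS	0
#define H_TOO_HARD	9999
#define RESUME_GUEST	1
#define RESUME_HOST	2

/* Stub standing in for a handler such as kvmppc_h_enter(): it either
 * completes in virtual mode or reports H_TOO_HARD to ask the host for help. */
static long stub_hpt_hcall(int needs_host_help)
{
	return needs_host_help ? H_TOO_HARD : H_SUCCESS;
}

/* The dispatch pattern each new case uses: H_TOO_HARD escalates to the host,
 * anything else is returned to the guest in GPR3 and the guest is resumed. */
static int do_hcall(int needs_host_help, long *gpr3)
{
	long ret = stub_hpt_hcall(needs_host_help);

	if (ret == H_TOO_HARD)
		return RESUME_HOST;

	*gpr3 = ret;
	return RESUME_GUEST;
}

int main(void)
{
	long gpr3 = -1;

	printf("completed in virt mode: %d (gpr3=%ld)\n", do_hcall(0, &gpr3), gpr3);
	printf("punted to the host:     %d\n", do_hcall(1, &gpr3));
	return 0;
}
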
@@ -939,6 +939,52 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
		return RESUME_HOST;

	switch (req) {
	case H_REMOVE:
		ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_ENTER:
		ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_READ:
		ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_CLEAR_MOD:
		ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_CLEAR_REF:
		ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PROTECT:
		ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_BULK_REMOVE:
		ret = kvmppc_h_bulk_remove(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;

	case H_CEDE:
		break;
	case H_PROD:

@@ -1138,6 +1184,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
	default:
		return RESUME_HOST;
	}
	WARN_ON_ONCE(ret == H_TOO_HARD);
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
@@ -1438,22 +1485,102 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.
	 *
	 * Any other HDSI/HISI interrupts have been handled already for P7/8
	 * guests. For POWER9 hash guests not using rmhandlers, basic hash
	 * fault handling is done here.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
		unsigned long vsid;
		long err;

		if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
			r = RESUME_GUEST; /* Just retry if it's the canary */
			break;
		}

		if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
			/*
			 * Radix doesn't require anything, and pre-ISAv3.0 hash
			 * already attempted to handle this in rmhandlers. The
			 * hash fault handling below is v3 only (it uses ASDR
			 * via fault_gpa).
			 */
			r = RESUME_PAGE_FAULT;
			break;
		}

		if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
			kvmppc_core_queue_data_storage(vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			r = RESUME_GUEST;
			break;
		}

		if (!(vcpu->arch.shregs.msr & MSR_DR))
			vsid = vcpu->kvm->arch.vrma_slb_v;
		else
			vsid = vcpu->arch.fault_gpa;

		err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
				vsid, vcpu->arch.fault_dsisr, true);
		if (err == 0) {
			r = RESUME_GUEST;
		} else if (err == -1 || err == -2) {
			r = RESUME_PAGE_FAULT;
		} else {
			kvmppc_core_queue_data_storage(vcpu,
				vcpu->arch.fault_dar, err);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_H_INST_STORAGE: {
		unsigned long vsid;
		long err;

		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
			DSISR_SRR1_MATCH_64S;
		if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
			/*
			 * Radix doesn't require anything, and pre-ISAv3.0 hash
			 * already attempted to handle this in rmhandlers. The
			 * hash fault handling below is v3 only (it uses ASDR
			 * via fault_gpa).
			 */
			if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
				vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
			r = RESUME_PAGE_FAULT;
			break;
		}

		if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
			kvmppc_core_queue_inst_storage(vcpu,
				vcpu->arch.fault_dsisr);
			r = RESUME_GUEST;
			break;
		}

		if (!(vcpu->arch.shregs.msr & MSR_IR))
			vsid = vcpu->kvm->arch.vrma_slb_v;
		else
			vsid = vcpu->arch.fault_gpa;

		err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
				vsid, vcpu->arch.fault_dsisr, false);
		if (err == 0) {
			r = RESUME_GUEST;
		} else if (err == -1) {
			r = RESUME_PAGE_FAULT;
		} else {
			kvmppc_core_queue_inst_storage(vcpu, err);
			r = RESUME_GUEST;
		}
		break;
	}
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * If the guest debug is disabled, generate a program interrupt
...
@@ -409,6 +409,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
				 vcpu->arch.pgdir, true,
				 &vcpu->arch.regs.gpr[4]);
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

@@ -553,6 +554,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.regs.gpr[4]);
}
EXPORT_SYMBOL_GPL(kvmppc_h_remove);

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{

@@ -671,6 +673,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn)

@@ -741,6 +744,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_protect);

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)

@@ -781,6 +785,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
	}
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_read);

long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)

@@ -829,6 +834,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);

long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)

@@ -876,6 +882,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);

static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
			  unsigned long gpa, int writing, unsigned long *hpa,

@@ -1294,3 +1301,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
	return -1;		/* send fault up to host kernel mode */
}
EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);