Commit 122954ed authored by Ravi Bangoria, committed by Paul Mackerras

KVM: PPC: Book3S HV: Rename current DAWR macros and variables

Power10 is introducing a second DAWR (Data Address Watchpoint
Register). Use real register names (with suffix 0) from ISA for
current macros and variables used by kvm.  One exception is
KVM_REG_PPC_DAWR.  Keep it as it is because it's uapi so changing it
will break userspace.
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent afe75049
...@@ -583,8 +583,8 @@ struct kvm_vcpu_arch { ...@@ -583,8 +583,8 @@ struct kvm_vcpu_arch {
u32 ctrl; u32 ctrl;
u32 dabrx; u32 dabrx;
ulong dabr; ulong dabr;
ulong dawr; ulong dawr0;
ulong dawrx; ulong dawrx0;
ulong ciabr; ulong ciabr;
ulong cfar; ulong cfar;
ulong ppr; ulong ppr;
......
...@@ -526,8 +526,8 @@ int main(void) ...@@ -526,8 +526,8 @@ int main(void)
OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl); OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl);
OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr); OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr);
OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx);
OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr); OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0);
OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx); OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0);
OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
OFFSET(VCPU_DEC, kvm_vcpu, arch.dec); OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
......
...@@ -782,8 +782,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, ...@@ -782,8 +782,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_UNSUPPORTED_FLAG_START; return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP) if (value2 & DABRX_HYP)
return H_P4; return H_P4;
vcpu->arch.dawr = value1; vcpu->arch.dawr0 = value1;
vcpu->arch.dawrx = value2; vcpu->arch.dawrx0 = value2;
return H_SUCCESS; return H_SUCCESS;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
/* KVM does not support mflags=2 (AIL=2) */ /* KVM does not support mflags=2 (AIL=2) */
...@@ -1759,10 +1759,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, ...@@ -1759,10 +1759,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.vcore->vtb); *val = get_reg_val(id, vcpu->arch.vcore->vtb);
break; break;
case KVM_REG_PPC_DAWR: case KVM_REG_PPC_DAWR:
*val = get_reg_val(id, vcpu->arch.dawr); *val = get_reg_val(id, vcpu->arch.dawr0);
break; break;
case KVM_REG_PPC_DAWRX: case KVM_REG_PPC_DAWRX:
*val = get_reg_val(id, vcpu->arch.dawrx); *val = get_reg_val(id, vcpu->arch.dawrx0);
break; break;
case KVM_REG_PPC_CIABR: case KVM_REG_PPC_CIABR:
*val = get_reg_val(id, vcpu->arch.ciabr); *val = get_reg_val(id, vcpu->arch.ciabr);
...@@ -1991,10 +1991,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, ...@@ -1991,10 +1991,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.vcore->vtb = set_reg_val(id, *val); vcpu->arch.vcore->vtb = set_reg_val(id, *val);
break; break;
case KVM_REG_PPC_DAWR: case KVM_REG_PPC_DAWR:
vcpu->arch.dawr = set_reg_val(id, *val); vcpu->arch.dawr0 = set_reg_val(id, *val);
break; break;
case KVM_REG_PPC_DAWRX: case KVM_REG_PPC_DAWRX:
vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
break; break;
case KVM_REG_PPC_CIABR: case KVM_REG_PPC_CIABR:
vcpu->arch.ciabr = set_reg_val(id, *val); vcpu->arch.ciabr = set_reg_val(id, *val);
...@@ -3449,8 +3449,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, ...@@ -3449,8 +3449,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
int trap; int trap;
unsigned long host_hfscr = mfspr(SPRN_HFSCR); unsigned long host_hfscr = mfspr(SPRN_HFSCR);
unsigned long host_ciabr = mfspr(SPRN_CIABR); unsigned long host_ciabr = mfspr(SPRN_CIABR);
unsigned long host_dawr = mfspr(SPRN_DAWR0); unsigned long host_dawr0 = mfspr(SPRN_DAWR0);
unsigned long host_dawrx = mfspr(SPRN_DAWRX0); unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0);
unsigned long host_psscr = mfspr(SPRN_PSSCR); unsigned long host_psscr = mfspr(SPRN_PSSCR);
unsigned long host_pidr = mfspr(SPRN_PID); unsigned long host_pidr = mfspr(SPRN_PID);
...@@ -3489,8 +3489,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, ...@@ -3489,8 +3489,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_SPURR, vcpu->arch.spurr); mtspr(SPRN_SPURR, vcpu->arch.spurr);
if (dawr_enabled()) { if (dawr_enabled()) {
mtspr(SPRN_DAWR0, vcpu->arch.dawr); mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
mtspr(SPRN_DAWRX0, vcpu->arch.dawrx); mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
} }
mtspr(SPRN_CIABR, vcpu->arch.ciabr); mtspr(SPRN_CIABR, vcpu->arch.ciabr);
mtspr(SPRN_IC, vcpu->arch.ic); mtspr(SPRN_IC, vcpu->arch.ic);
...@@ -3542,8 +3542,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, ...@@ -3542,8 +3542,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
(local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
mtspr(SPRN_HFSCR, host_hfscr); mtspr(SPRN_HFSCR, host_hfscr);
mtspr(SPRN_CIABR, host_ciabr); mtspr(SPRN_CIABR, host_ciabr);
mtspr(SPRN_DAWR0, host_dawr); mtspr(SPRN_DAWR0, host_dawr0);
mtspr(SPRN_DAWRX0, host_dawrx); mtspr(SPRN_DAWRX0, host_dawrx0);
mtspr(SPRN_PID, host_pidr); mtspr(SPRN_PID, host_pidr);
/* /*
......
...@@ -33,8 +33,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) ...@@ -33,8 +33,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->dpdes = vc->dpdes; hr->dpdes = vc->dpdes;
hr->hfscr = vcpu->arch.hfscr; hr->hfscr = vcpu->arch.hfscr;
hr->tb_offset = vc->tb_offset; hr->tb_offset = vc->tb_offset;
hr->dawr0 = vcpu->arch.dawr; hr->dawr0 = vcpu->arch.dawr0;
hr->dawrx0 = vcpu->arch.dawrx; hr->dawrx0 = vcpu->arch.dawrx0;
hr->ciabr = vcpu->arch.ciabr; hr->ciabr = vcpu->arch.ciabr;
hr->purr = vcpu->arch.purr; hr->purr = vcpu->arch.purr;
hr->spurr = vcpu->arch.spurr; hr->spurr = vcpu->arch.spurr;
...@@ -151,8 +151,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) ...@@ -151,8 +151,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
vc->pcr = hr->pcr | PCR_MASK; vc->pcr = hr->pcr | PCR_MASK;
vc->dpdes = hr->dpdes; vc->dpdes = hr->dpdes;
vcpu->arch.hfscr = hr->hfscr; vcpu->arch.hfscr = hr->hfscr;
vcpu->arch.dawr = hr->dawr0; vcpu->arch.dawr0 = hr->dawr0;
vcpu->arch.dawrx = hr->dawrx0; vcpu->arch.dawrx0 = hr->dawrx0;
vcpu->arch.ciabr = hr->ciabr; vcpu->arch.ciabr = hr->ciabr;
vcpu->arch.purr = hr->purr; vcpu->arch.purr = hr->purr;
vcpu->arch.spurr = hr->spurr; vcpu->arch.spurr = hr->spurr;
......
...@@ -52,8 +52,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) ...@@ -52,8 +52,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define STACK_SLOT_PID (SFS-32) #define STACK_SLOT_PID (SFS-32)
#define STACK_SLOT_IAMR (SFS-40) #define STACK_SLOT_IAMR (SFS-40)
#define STACK_SLOT_CIABR (SFS-48) #define STACK_SLOT_CIABR (SFS-48)
#define STACK_SLOT_DAWR (SFS-56) #define STACK_SLOT_DAWR0 (SFS-56)
#define STACK_SLOT_DAWRX (SFS-64) #define STACK_SLOT_DAWRX0 (SFS-64)
#define STACK_SLOT_HFSCR (SFS-72) #define STACK_SLOT_HFSCR (SFS-72)
#define STACK_SLOT_AMR (SFS-80) #define STACK_SLOT_AMR (SFS-80)
#define STACK_SLOT_UAMOR (SFS-88) #define STACK_SLOT_UAMOR (SFS-88)
...@@ -711,8 +711,8 @@ BEGIN_FTR_SECTION ...@@ -711,8 +711,8 @@ BEGIN_FTR_SECTION
mfspr r7, SPRN_DAWRX0 mfspr r7, SPRN_DAWRX0
mfspr r8, SPRN_IAMR mfspr r8, SPRN_IAMR
std r5, STACK_SLOT_CIABR(r1) std r5, STACK_SLOT_CIABR(r1)
std r6, STACK_SLOT_DAWR(r1) std r6, STACK_SLOT_DAWR0(r1)
std r7, STACK_SLOT_DAWRX(r1) std r7, STACK_SLOT_DAWRX0(r1)
std r8, STACK_SLOT_IAMR(r1) std r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
...@@ -801,8 +801,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) ...@@ -801,8 +801,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
lbz r5, 0(r5) lbz r5, 0(r5)
cmpdi r5, 0 cmpdi r5, 0
beq 1f beq 1f
ld r5, VCPU_DAWR(r4) ld r5, VCPU_DAWR0(r4)
ld r6, VCPU_DAWRX(r4) ld r6, VCPU_DAWRX0(r4)
mtspr SPRN_DAWR0, r5 mtspr SPRN_DAWR0, r5
mtspr SPRN_DAWRX0, r6 mtspr SPRN_DAWRX0, r6
1: 1:
...@@ -1759,8 +1759,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) ...@@ -1759,8 +1759,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/* Restore host values of some registers */ /* Restore host values of some registers */
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
ld r5, STACK_SLOT_CIABR(r1) ld r5, STACK_SLOT_CIABR(r1)
ld r6, STACK_SLOT_DAWR(r1) ld r6, STACK_SLOT_DAWR0(r1)
ld r7, STACK_SLOT_DAWRX(r1) ld r7, STACK_SLOT_DAWRX0(r1)
mtspr SPRN_CIABR, r5 mtspr SPRN_CIABR, r5
/* /*
* If the DAWR doesn't work, it's ok to write these here as * If the DAWR doesn't work, it's ok to write these here as
...@@ -2574,8 +2574,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -2574,8 +2574,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
rlwimi r5, r4, 2, DAWRX_WT rlwimi r5, r4, 2, DAWRX_WT
clrrdi r4, r4, 3 clrrdi r4, r4, 3
std r4, VCPU_DAWR(r3) std r4, VCPU_DAWR0(r3)
std r5, VCPU_DAWRX(r3) std r5, VCPU_DAWRX0(r3)
/* /*
* If came in through the real mode hcall handler then it is necessary * If came in through the real mode hcall handler then it is necessary
* to write the registers since the return path won't. Otherwise it is * to write the registers since the return path won't. Otherwise it is
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment