Commit a78b55d1 authored by Aneesh Kumar K.V, committed by Alexander Graf

kvm: powerpc: book3s: drop is_hv_enabled

drop is_hv_enabled, because that should not be a callback property: whether a VM runs in HV mode can be derived from whether its kvm_ops points to kvmppc_hv_ops.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent cbbc58d4
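The rationale, in short: whether a VM is HV is already encoded in which ops table (kvmppc_hv_ops vs. kvmppc_pr_ops) the VM is bound to, so carrying a separate boolean in every kvmppc_ops instance is redundant. Below is a minimal standalone sketch of that pattern, not part of the commit; the struct and function names are simplified stand-ins for illustration, not the real kernel types.

/* Sketch: derive a mode predicate from the identity of the ops table
 * instead of duplicating it as a boolean member (simplified names,
 * not the actual kvmppc_ops/kvm structures). */
#include <stdbool.h>
#include <stdio.h>

struct ops { const char *name; };

static struct ops hv_ops = { "hv" };
static struct ops pr_ops = { "pr" };

struct vm { struct ops *ops; };

/* Analogue of is_kvmppc_hv_enabled(): the ops pointer is the single
 * source of truth, so no separate flag needs to be kept in sync. */
static inline bool is_hv(struct vm *vm)
{
	return vm->ops == &hv_ops;
}

int main(void)
{
	struct vm a = { &hv_ops };
	struct vm b = { &pr_ops };

	printf("a is HV: %d\n", is_hv(&a)); /* prints 1 */
	printf("b is HV: %d\n", is_hv(&b)); /* prints 0 */
	return 0;
}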
@@ -183,7 +183,6 @@ union kvmppc_one_reg {
 
 struct kvmppc_ops {
 	struct module *owner;
-	bool is_hv_enabled;
 	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
@@ -232,6 +231,11 @@ struct kvmppc_ops {
 extern struct kvmppc_ops *kvmppc_hv_ops;
 extern struct kvmppc_ops *kvmppc_pr_ops;
 
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
...
@@ -72,7 +72,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->kvm->arch.kvm_ops->is_hv_enabled)
+	if (!is_kvmppc_hv_enabled(vcpu->kvm))
 		return to_book3s(vcpu)->hior;
 	return 0;
 }
@@ -80,7 +80,7 @@ static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 			unsigned long pending_now, unsigned long old_pending)
 {
-	if (vcpu->kvm->arch.kvm_ops->is_hv_enabled)
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
 		return;
 	if (pending_now)
 		vcpu->arch.shared->int_pending = 1;
@@ -94,7 +94,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 	ulong crit_r1;
 	bool crit;
 
-	if (vcpu->kvm->arch.kvm_ops->is_hv_enabled)
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
 		return false;
 
 	crit_raw = vcpu->arch.shared->critical;
...
@@ -2160,7 +2160,6 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
 }
 
 static struct kvmppc_ops kvm_ops_hv = {
-	.is_hv_enabled = true,
 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
 	.get_one_reg = kvmppc_get_one_reg_hv,
...
@@ -1526,7 +1526,6 @@ static long kvm_arch_vm_ioctl_pr(struct file *filp,
 }
 
 static struct kvmppc_ops kvm_ops_pr = {
-	.is_hv_enabled = false,
 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
 	.get_one_reg = kvmppc_get_one_reg_pr,
...
@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
 	}
 
 	/* Check for real mode returning too hard */
-	if (xics->real_mode && vcpu->kvm->arch.kvm_ops->is_hv_enabled)
+	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
 		return kvmppc_xics_rm_complete(vcpu, req);
 
 	switch (req) {
...
@@ -200,7 +200,7 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
 		goto out;
 
 	/* HV KVM can only do PAPR mode for now */
-	if (!vcpu->arch.papr_enabled && vcpu->kvm->arch.kvm_ops->is_hv_enabled)
+	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
 		goto out;
 
 #ifdef CONFIG_KVM_BOOKE_HV
...