Commit 1b151ce4 authored by Paul Mackerras

KVM: PPC: Book3S HV: Rename hpte_setup_done to mmu_ready

This renames the kvm->arch.hpte_setup_done field to mmu_ready because
we will want to use it for radix guests too -- both for setting things
up before vcpu execution, and for excluding vcpus from executing while
MMU-related things are changed, such as a future switch of the MMU
from radix to HPT mode or vice versa.

This also moves the call to kvmppc_setup_partition_table() that was
done in kvmppc_hv_setup_htab_rma() for HPT guests, and the setting
of mmu_ready, into the caller in kvmppc_vcpu_run_hv().
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 8dc6cca5
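Before the diff itself, it may help to spell out the protocol this flag implements. mmu_ready pairs with the vcpus_running counter in a full-barrier handshake: a writer that needs exclusive access to MMU state clears the flag, executes smp_mb(), then checks vcpus_running; the vcpu entry path increments vcpus_running, executes smp_mb(), then checks the flag. Below is a minimal sketch of that handshake, with field names taken from the hunks that follow but the surrounding code elided; mmu_writer_enter() and vcpu_entry_check() are invented names for illustration, and kernel context is assumed (this is not standalone code).

/*
 * Sketch only: the shape of the mmu_ready / vcpus_running handshake
 * used throughout this commit.
 */

/* Writer side: take the MMU "offline" before reshaping it. */
static int mmu_writer_enter(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm->arch.mmu_ready = 0;
	smp_mb();	/* order clearing mmu_ready vs. reading vcpus_running */
	if (atomic_read(&kvm->arch.vcpus_running)) {
		kvm->arch.mmu_ready = 1;	/* a vcpu raced in; back off */
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	return 0;	/* caller may now change MMU state, then unlock */
}

/* Reader side: the vcpu entry path. */
static void vcpu_entry_check(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.vcpus_running);
	smp_mb();	/* order the increment vs. reading mmu_ready */
	if (!kvm->arch.mmu_ready) {
		/* (re)initialize the MMU under kvm->lock before entry */
	}
}

The barriers pair so that either the writer observes the vcpu's increment and backs off, or the vcpu observes mmu_ready == 0 and takes the slow path; the two sides can never both proceed.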
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -276,7 +276,7 @@ struct kvm_arch {
 	int tlbie_lock;
 	unsigned long lpcr;
 	unsigned long vrma_slb_v;
-	int hpte_setup_done;
+	int mmu_ready;
 	atomic_t vcpus_running;
 	u32 online_vcores;
 	atomic_t hpte_mod_interest;
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -140,12 +140,12 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 		return -EINVAL;
 
 	mutex_lock(&kvm->lock);
-	if (kvm->arch.hpte_setup_done) {
-		kvm->arch.hpte_setup_done = 0;
-		/* order hpte_setup_done vs. vcpus_running */
+	if (kvm->arch.mmu_ready) {
+		kvm->arch.mmu_ready = 0;
+		/* order mmu_ready vs. vcpus_running */
 		smp_mb();
 		if (atomic_read(&kvm->arch.vcpus_running)) {
-			kvm->arch.hpte_setup_done = 1;
+			kvm->arch.mmu_ready = 1;
 			goto out;
 		}
 	}
@@ -1533,15 +1533,15 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
 
 	/* This shouldn't be possible */
 	ret = -EIO;
-	if (WARN_ON(!kvm->arch.hpte_setup_done))
+	if (WARN_ON(!kvm->arch.mmu_ready))
 		goto out_no_hpt;
 
 	/* Stop VCPUs from running while we mess with the HPT */
-	kvm->arch.hpte_setup_done = 0;
+	kvm->arch.mmu_ready = 0;
 	smp_mb();
 
 	/* Boot all CPUs out of the guest so they re-read
-	 * hpte_setup_done */
+	 * mmu_ready */
 	on_each_cpu(resize_hpt_boot_vcpu, NULL, 1);
 
 	ret = -ENXIO;
@@ -1564,7 +1564,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
 
 out:
 	/* Let VCPUs run again */
-	kvm->arch.hpte_setup_done = 1;
+	kvm->arch.mmu_ready = 1;
 	smp_mb();
 out_no_hpt:
 	resize_hpt_release(kvm, resize);
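A note on the on_each_cpu(resize_hpt_boot_vcpu, NULL, 1) call in the previous hunk: the IPI delivery alone is what boots each CPU out of the guest and back through the entry path where mmu_ready is re-read, so the callback itself needs no body. If memory serves, the callback in this file is essentially just the following (shown as a sketch, not quoted from this diff):

/* Sketch: the IPI forces a guest exit; nothing else is needed. */
static void resize_hpt_boot_vcpu(void *opaque)
{
	/* Nothing to do, just force a KVM exit */
}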
@@ -1802,7 +1802,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 	unsigned long tmp[2];
 	ssize_t nb;
 	long int err, ret;
-	int hpte_setup;
+	int mmu_ready;
 
 	if (!access_ok(VERIFY_READ, buf, count))
 		return -EFAULT;
@@ -1811,13 +1811,13 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 
 	/* lock out vcpus from running while we're doing this */
 	mutex_lock(&kvm->lock);
-	hpte_setup = kvm->arch.hpte_setup_done;
-	if (hpte_setup) {
-		kvm->arch.hpte_setup_done = 0;	/* temporarily */
-		/* order hpte_setup_done vs. vcpus_running */
+	mmu_ready = kvm->arch.mmu_ready;
+	if (mmu_ready) {
+		kvm->arch.mmu_ready = 0;	/* temporarily */
+		/* order mmu_ready vs. vcpus_running */
 		smp_mb();
 		if (atomic_read(&kvm->arch.vcpus_running)) {
-			kvm->arch.hpte_setup_done = 1;
+			kvm->arch.mmu_ready = 1;
 			mutex_unlock(&kvm->lock);
 			return -EBUSY;
 		}
@@ -1870,7 +1870,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 				       "r=%lx\n", ret, i, v, r);
 				goto out;
 			}
-			if (!hpte_setup && is_vrma_hpte(v)) {
+			if (!mmu_ready && is_vrma_hpte(v)) {
 				unsigned long psize = hpte_base_page_size(v, r);
 				unsigned long senc = slb_pgsize_encoding(psize);
 				unsigned long lpcr;
@@ -1879,7 +1879,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 					(VRMA_VSID << SLB_VSID_SHIFT_1T);
 				lpcr = senc << (LPCR_VRMASD_SH - 4);
 				kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
-				hpte_setup = 1;
+				mmu_ready = 1;
 			}
 			++i;
 			hptp += 2;
@@ -1895,9 +1895,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 	}
 
  out:
-	/* Order HPTE updates vs. hpte_setup_done */
+	/* Order HPTE updates vs. mmu_ready */
 	smp_wmb();
-	kvm->arch.hpte_setup_done = hpte_setup;
+	kvm->arch.mmu_ready = mmu_ready;
 	mutex_unlock(&kvm->lock);
 
 	if (err)
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -115,6 +115,7 @@ MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
+static void kvmppc_setup_partition_table(struct kvm *kvm);
 
 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
 		int *ip)
@@ -3198,6 +3199,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	unsigned long ebb_regs[3] = {};	/* shut up GCC */
 	unsigned long user_tar = 0;
 	unsigned int user_vrsave;
+	struct kvm *kvm;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -3235,13 +3237,25 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		return -EINTR;
 	}
 
-	atomic_inc(&vcpu->kvm->arch.vcpus_running);
-	/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
+	kvm = vcpu->kvm;
+	atomic_inc(&kvm->arch.vcpus_running);
+	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	/* On the first time here, set up HTAB and VRMA */
-	if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
-		r = kvmppc_hv_setup_htab_rma(vcpu);
+	/* On the first time here, set up MMU if necessary */
+	if (!vcpu->kvm->arch.mmu_ready) {
+		mutex_lock(&kvm->lock);
+		r = 0;
+		if (!kvm->arch.mmu_ready) {
+			if (!kvm_is_radix(vcpu->kvm))
+				r = kvmppc_hv_setup_htab_rma(vcpu);
+			if (!r) {
+				if (cpu_has_feature(CPU_FTR_ARCH_300))
+					kvmppc_setup_partition_table(kvm);
+				kvm->arch.mmu_ready = 1;
+			}
+		}
+		mutex_unlock(&kvm->lock);
 		if (r)
 			goto out;
 	}
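The rewritten entry path above is a double-checked one-time initialization: mmu_ready is tested once without the lock for the common case, then re-tested under kvm->lock so only one vcpu performs the setup. Reduced to its skeleton it looks like the following sketch, where one_time_mmu_setup() is a hypothetical stand-in for the HPT and partition-table work:

	int r = 0;

	if (!kvm->arch.mmu_ready) {	/* unlocked fast-path test */
		mutex_lock(&kvm->lock);
		if (!kvm->arch.mmu_ready) {	/* authoritative re-test */
			r = one_time_mmu_setup(kvm);	/* hypothetical helper */
			if (!r)
				kvm->arch.mmu_ready = 1;	/* publish only on success */
		}
		mutex_unlock(&kvm->lock);
		if (r)
			goto out;
	}

The unlocked test is only an optimization, since mmu_ready transitions only under kvm->lock; the smp_wmb() retained in kvmppc_hv_setup_htab_rma() ("Order updates to kvm->arch.lpcr etc. vs. mmu_ready") keeps the MMU state visible before the flag is.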
@@ -3530,6 +3544,10 @@ static void kvmppc_setup_partition_table(struct kvm *kvm)
 	mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1);
 }
 
+/*
+ * Set up HPT (hashed page table) and RMA (real-mode area).
+ * Must be called with kvm->lock held.
+ */
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
@@ -3541,10 +3559,6 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	unsigned long psize, porder;
 	int srcu_idx;
 
-	mutex_lock(&kvm->lock);
-	if (kvm->arch.hpte_setup_done)
-		goto out;	/* another vcpu beat us to it */
-
 	/* Allocate hashed page table (if not done already) and reset it */
 	if (!kvm->arch.hpt.virt) {
 		int order = KVM_DEFAULT_HPT_ORDER;
@@ -3603,18 +3617,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		/* the -4 is to account for senc values starting at 0x10 */
 		lpcr = senc << (LPCR_VRMASD_SH - 4);
 		kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
-	} else {
-		kvmppc_setup_partition_table(kvm);
 	}
 
-	/* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
+	/* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */
 	smp_wmb();
-	kvm->arch.hpte_setup_done = 1;
 	err = 0;
  out_srcu:
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
  out:
-	mutex_unlock(&kvm->lock);
 	return err;
 
  up_out:
@@ -3769,6 +3779,7 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 	 */
 	if (radix_enabled()) {
 		kvm->arch.radix = 1;
+		kvm->arch.mmu_ready = 1;
 		lpcr &= ~LPCR_VPM1;
 		lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
 		ret = kvmppc_init_vm_radix(kvm);