Commit 9a04b0fe authored by Michael Ellerman

Merge branch 'topic/ppc-kvm' into next

Merge our KVM topic branch.
parents a9ffb8ee 6c645b01
@@ -1014,6 +1014,18 @@ static inline void kvmppc_fix_ee_before_entry(void)
 #endif
 }
 
+static inline void kvmppc_fix_ee_after_exit(void)
+{
+#ifdef CONFIG_PPC64
+	/* Only need to enable IRQs by hard enabling them after this */
+	local_paca->irq_happened = PACA_IRQ_HARD_DIS;
+	irq_soft_mask_set(IRQS_ALL_DISABLED);
+#endif
+
+	trace_hardirqs_off();
+}
+
 static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
 {
 	ulong ea;
...
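The helper added above is the exit-side counterpart of kvmppc_fix_ee_before_entry(): on 64-bit it re-marks interrupts as soft- and hard-disabled in the paca, so the lazy-interrupt bookkeeping matches the hardware EE state when control returns from the guest. As a rough sketch of the intended pairing around a guest run, assuming a hypothetical low-level entry routine __enter_guest() (only the two fix_ee helpers are from this commit):

static int run_vcpu_once(struct kvm_vcpu *vcpu)
{
	int exit_nr;

	hard_irq_disable();		/* hardware EE off, soft state disabled */
	kvmppc_fix_ee_before_entry();	/* software state: IRQs "enabled" for the run */

	exit_nr = __enter_guest(vcpu);	/* hypothetical low-level guest entry */

	kvmppc_fix_ee_after_exit();	/* back with EE hard-off: re-mark IRQs disabled */

	return exit_nr;
}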
@@ -1202,7 +1202,7 @@ static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
 	if (rc < 0)
 		return rc;
 
-	resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n",
+	resize_hpt_debug(resize, "%s(): HPT @ 0x%lx\n", __func__,
 			 resize->hpt.virt);
 
 	return 0;
@@ -1443,7 +1443,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 	 */
 	mutex_unlock(&kvm->arch.mmu_setup_lock);
 
-	resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
+	resize_hpt_debug(resize, "%s(): order = %d\n", __func__,
 			 resize->order);
 
 	err = resize_hpt_allocate(resize);
@@ -1887,8 +1887,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
 							 tmp);
 			if (ret != H_SUCCESS) {
-				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
-				       "r=%lx\n", ret, i, v, r);
+				pr_err("%s ret %ld i=%ld v=%lx r=%lx\n", __func__, ret, i, v, r);
 				goto out;
 			}
 			if (!mmu_ready && is_vrma_hpte(v)) {
...
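The resize_hpt_debug() and pr_err() changes in the hunks above are one mechanical cleanup: drop the hard-coded function name from the format string and let the compiler supply it via "%s" and __func__, so the message stays correct if the function is ever renamed. A generic example of the idiom, not taken from this commit:

static int frobnicate_widget(struct widget *w)	/* illustrative function */
{
	int ret = widget_reset(w);		/* illustrative callee */

	if (ret)
		pr_err("%s: reset failed, ret=%d\n", __func__, ret);

	return ret;
}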
@@ -294,14 +294,14 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 	struct kvmppc_spapr_tce_table *stt = NULL;
 	struct kvmppc_spapr_tce_table *siter;
 	struct mm_struct *mm = kvm->mm;
-	unsigned long npages, size = args->size;
+	unsigned long npages;
 	int ret;
 
 	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
 		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
 		return -EINVAL;
 
-	npages = kvmppc_tce_pages(size);
+	npages = kvmppc_tce_pages(args->size);
 	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
 	if (ret)
 		return ret;
@@ -314,7 +314,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 	stt->liobn = args->liobn;
 	stt->page_shift = args->page_shift;
 	stt->offset = args->offset;
-	stt->size = size;
+	stt->size = args->size;
 	stt->kvm = kvm;
 	mutex_init(&stt->alloc_lock);
 	INIT_LIST_HEAD_RCU(&stt->iommu_tables);
...
@@ -1190,8 +1190,7 @@ int kvmppc_uvmem_init(void)
 	pfn_first = res->start >> PAGE_SHIFT;
 	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
 
-	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
-				      sizeof(unsigned long), GFP_KERNEL);
+	kvmppc_uvmem_bitmap = bitmap_zalloc(pfn_last - pfn_first, GFP_KERNEL);
 	if (!kvmppc_uvmem_bitmap) {
 		ret = -ENOMEM;
 		goto out_unmap;
@@ -1215,5 +1214,5 @@ void kvmppc_uvmem_free(void)
 	memunmap_pages(&kvmppc_uvmem_pgmap);
 	release_mem_region(kvmppc_uvmem_pgmap.range.start,
 			   range_len(&kvmppc_uvmem_pgmap.range));
-	kfree(kvmppc_uvmem_bitmap);
+	bitmap_free(kvmppc_uvmem_bitmap);
 }
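The two uvmem hunks above replace an open-coded allocation of a word array with the dedicated bitmap helpers: bitmap_zalloc() takes a size in bits, hides the BITS_TO_LONGS()/sizeof(unsigned long) arithmetic, and returns zeroed storage, while bitmap_free() is its matching release function. A minimal sketch of the pairing (everything except the bitmap API is illustrative):

static unsigned long *alloc_pfn_bitmap(unsigned long nr_pfns)
{
	/* size is in bits; the returned storage is zero-filled */
	return bitmap_zalloc(nr_pfns, GFP_KERNEL);
}

static void free_pfn_bitmap(unsigned long *map)
{
	bitmap_free(map);	/* pairs with bitmap_zalloc() */
}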
@@ -539,7 +539,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 	if (irq == XICS_IPI || irq == 0) {
 		/*
 		 * This barrier orders the setting of xc->cppr vs.
-		 * subsquent test of xc->mfrr done inside
+		 * subsequent test of xc->mfrr done inside
 		 * scan_interrupts and push_pending_to_hw
 		 */
 		smp_mb();
@@ -563,7 +563,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 		/*
 		 * This barrier orders both setting of in_eoi above vs,
 		 * subsequent test of guest_priority, and the setting
-		 * of xc->cppr vs. subsquent test of xc->mfrr done inside
+		 * of xc->cppr vs. subsequent test of xc->mfrr done inside
 		 * scan_interrupts and push_pending_to_hw
 		 */
 		smp_mb();
@@ -2390,7 +2390,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
 	/*
 	 * Now, we select a target if we have one. If we don't we
 	 * leave the interrupt untargetted. It means that an interrupt
-	 * can become "untargetted" accross migration if it was masked
+	 * can become "untargetted" across migration if it was masked
 	 * by set_xive() but there is little we can do about it.
 	 */
 
...
@@ -1015,6 +1015,9 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	u32 last_inst = KVM_INST_FETCH_FAILED;
 	enum emulation_result emulated = EMULATE_DONE;
 
+	/* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */
+	kvmppc_fix_ee_after_exit();
+
 	/* update before a new last_exit_type is rewritten */
 	kvmppc_update_timing_stats(vcpu);
...
@@ -424,15 +424,6 @@ _GLOBAL(kvmppc_resume_host)
 	mtspr	SPRN_EPCR, r3
 	isync
 
-#ifdef CONFIG_64BIT
-	/*
-	 * We enter with interrupts disabled in hardware, but
-	 * we need to call RECONCILE_IRQ_STATE to ensure
-	 * that the software state is kept in sync.
-	 */
-	RECONCILE_IRQ_STATE(r3,r5)
-#endif
-
 	/* Switch to kernel stack and jump to handler. */
 	mr	r3, r4
 	mr	r5, r14 /* intno */
...