Commit 5656374b authored by Paolo Bonzini

Merge branch 'gpc-fixes' of git://git.infradead.org/users/dwmw2/linux into HEAD

Pull Xen-for-KVM changes from David Woodhouse:

* add support for 32-bit guests in SCHEDOP_poll

* the rest of the gfn-to-pfn cache API cleanup

"I still haven't reinstated the last of those patches to make gpc->len
immutable."
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parents 74bee0ca 06e155c4
@@ -2311,13 +2311,11 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
 	kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 
 	/* we verify if the enable bit is set... */
-	if (system_time & 1) {
-		kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
-				 KVM_HOST_USES_PFN, system_time & ~1ULL,
+	if (system_time & 1)
+		kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
 				 sizeof(struct pvclock_vcpu_time_info));
-	} else {
-		kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
-	}
+	else
+		kvm_gpc_deactivate(&vcpu->arch.pv_time);
 
 	return;
 }
@@ -3047,12 +3045,10 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
 	unsigned long flags;
 
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
-			      offset + sizeof(*guest_hv_clock))) {
+	while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
-		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
-				    offset + sizeof(*guest_hv_clock)))
+		if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
 			return;
 
 		read_lock_irqsave(&gpc->lock, flags);
@@ -3401,7 +3397,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
+	kvm_gpc_deactivate(&vcpu->arch.pv_time);
 	vcpu->arch.time = 0;
 }
@@ -11559,7 +11555,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs_avail = ~0;
 	vcpu->arch.regs_dirty = ~0;
 
-	kvm_gpc_init(&vcpu->arch.pv_time);
+	kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
 
 	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
[diff for one file in this merge is collapsed in the web view]
@@ -207,4 +207,11 @@ struct compat_vcpu_runstate_info {
 	uint64_t time[4];
 } __attribute__((packed));
 
+struct compat_sched_poll {
+	/* This is actually a guest virtual address which points to ports. */
+	uint32_t ports;
+	unsigned int nr_ports;
+	uint64_t timeout;
+};
+
 #endif /* __ARCH_X86_KVM_XEN_H__ */
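The compat layout above exists because a 32-bit guest passes "ports" as a 32-bit guest virtual address, whereas the native 64-bit ABI uses a full-width pointer. The actual SCHEDOP_poll handler sits in the collapsed portion of this diff, so the snippet below is only a rough sketch of the widening step; the native struct layout and the helper sched_poll_from_compat() are illustrative assumptions, not code from this series.

#include <stdint.h>

/* Assumed native 64-bit layout for illustration: ports is a full-width
 * guest virtual address rather than a 32-bit one. */
struct sched_poll {
	uint64_t ports;
	unsigned int nr_ports;
	uint64_t timeout;
};

struct compat_sched_poll {
	/* This is actually a guest virtual address which points to ports. */
	uint32_t ports;
	unsigned int nr_ports;
	uint64_t timeout;
};

/* Hypothetical helper: widen the 32-bit guest pointer before handing the
 * request to common SCHEDOP_poll handling. */
static void sched_poll_from_compat(struct sched_poll *dst,
				   const struct compat_sched_poll *src)
{
	dst->ports = src->ports;	/* zero-extend the 32-bit address */
	dst->nr_ports = src->nr_ports;
	dst->timeout = src->timeout;
}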
@@ -1260,18 +1260,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * kvm_gpc_init - initialize gfn_to_pfn_cache.
  *
  * @gpc:	   struct gfn_to_pfn_cache object.
- *
- * This sets up a gfn_to_pfn_cache by initializing locks. Note, the cache must
- * be zero-allocated (or zeroed by the caller before init).
- */
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
-
-/**
- * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
- *		      physical address.
- *
  * @kvm:	   pointer to kvm instance.
- * @gpc:	   struct gfn_to_pfn_cache object.
  * @vcpu:	   vCPU to be used for marking pages dirty and to be woken on
  *		   invalidation.
  * @usage:	   indicates if the resulting host physical PFN is used while
@@ -1280,27 +1269,36 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
  *		   changes!---will also force @vcpu to exit the guest and
  *		   refresh the cache); and/or if the PFN used directly
  *		   by KVM (and thus needs a kernel virtual mapping).
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
+ * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
+ * the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
+		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ *		      physical address.
+ *
+ * @gpc:	   struct gfn_to_pfn_cache object.
  * @gpa:	   guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
  *
  * @return:	   0 for success.
  *		   -EINVAL for a mapping which would cross a page boundary.
  *		   -EFAULT for an untranslatable guest physical address.
  *
- * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
+ * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
  * invalidations to be processed. Callers are required to use kvm_gpc_check()
  * to ensure that the cache is valid before accessing the target page.
  */
-int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-		     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
-		     gpa_t gpa, unsigned long len);
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
 
 /**
  * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
  *
- * @kvm:	   pointer to kvm instance.
  * @gpc:	   struct gfn_to_pfn_cache object.
- * @gpa:	   current guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
  *
  * @return:	   %true if the cache is still valid and the address matches.
@@ -1313,52 +1311,35 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * Callers in IN_GUEST_MODE may do so without locking, although they should
  * still hold a read lock on kvm->scru for the memslot checks.
  */
-bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		   unsigned long len);
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
 
 /**
  * kvm_gpc_refresh - update a previously initialized cache.
  *
- * @kvm:	   pointer to kvm instance.
  * @gpc:	   struct gfn_to_pfn_cache object.
- * @gpa:	   updated guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
  *
  * @return:	   0 for success.
  *		   -EINVAL for a mapping which would cross a page boundary.
  *		   -EFAULT for an untranslatable guest physical address.
  *
  * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
- * returm from this function does not mean the page can be immediately
+ * return from this function does not mean the page can be immediately
  * accessed because it may have raced with an invalidation. Callers must
  * still lock and check the cache status, as this function does not return
  * with the lock still held to permit access.
  */
-int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		    unsigned long len);
-
-/**
- * kvm_gpc_unmap - temporarily unmap a gfn_to_pfn_cache.
- *
- * @kvm:	   pointer to kvm instance.
- * @gpc:	   struct gfn_to_pfn_cache object.
- *
- * This unmaps the referenced page. The cache is left in the invalid state
- * but at least the mapping from GPA to userspace HVA will remain cached
- * and can be reused on a subsequent refresh.
- */
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
-
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
 
 /**
  * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
  *
- * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 *
- * This removes a cache from the @kvm's list to be processed on MMU notifier
+ * This removes a cache from the VM's list to be processed on MMU notifier
 * invocation.
 */
-void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
 
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
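Taken together, the new declarations bind the immutable attributes (kvm, vcpu, usage) at kvm_gpc_init() time and drop the gpa argument from the check/refresh helpers, which now operate on gpc->gpa. A rough lifecycle sketch under the new API, modeled on the kvmclock usage earlier in this diff; the wrapper use_gpc() and the u64-sized mapping are illustrative only:

/* Rough lifecycle sketch under the reworked API; gpc must be zero-allocated
 * before init, as the kvm_gpc_init() comment requires.  use_gpc() itself is
 * hypothetical. */
static int use_gpc(struct kvm_vcpu *vcpu, struct gfn_to_pfn_cache *gpc,
		   gpa_t gpa)
{
	unsigned long flags;
	int ret;

	/* kvm, vcpu and usage are bound once, at init time. */
	kvm_gpc_init(gpc, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);

	/* Activation takes only the GPA and length now. */
	ret = kvm_gpc_activate(gpc, gpa, sizeof(u64));
	if (ret)
		return ret;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(u64))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Refresh reuses gpc->gpa; there is no @gpa argument. */
		ret = kvm_gpc_refresh(gpc, sizeof(u64));
		if (ret)
			goto out;

		read_lock_irqsave(&gpc->lock, flags);
	}

	*(u64 *)gpc->khva = 0;		/* access the cached mapping */
	read_unlock_irqrestore(&gpc->lock, flags);
out:
	kvm_gpc_deactivate(gpc);
	return ret;
}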
@@ -67,6 +67,7 @@ struct gfn_to_pfn_cache {
 	gpa_t gpa;
 	unsigned long uhva;
 	struct kvm_memory_slot *memslot;
+	struct kvm *kvm;
 	struct kvm_vcpu *vcpu;
 	struct list_head list;
 	rwlock_t lock;
@@ -76,19 +76,17 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 	}
 }
 
-bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		   unsigned long len)
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 
 	if (!gpc->active)
 		return false;
 
-	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
+	if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
 		return false;
 
-	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
-	    kvm_is_error_hva(gpc->uhva))
+	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
 		return false;
 
 	if (!gpc->valid)
@@ -139,7 +137,7 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
 	return kvm->mmu_invalidate_seq != mmu_seq;
 }
 
-static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 {
 	/* Note, the new page offset may be different than the old! */
 	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
@@ -159,7 +157,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 	gpc->valid = false;
 
 	do {
-		mmu_seq = kvm->mmu_invalidate_seq;
+		mmu_seq = gpc->kvm->mmu_invalidate_seq;
 		smp_rmb();
 
 		write_unlock_irq(&gpc->lock);
@@ -217,7 +215,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 		 * attempting to refresh.
 		 */
 		WARN_ON_ONCE(gpc->valid);
-	} while (mmu_notifier_retry_cache(kvm, mmu_seq));
+	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));
 
 	gpc->valid = true;
 	gpc->pfn = new_pfn;
@@ -238,10 +236,10 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 	return -EFAULT;
 }
 
-int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
-		    unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+			     unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 	unsigned long page_offset = gpa & ~PAGE_MASK;
 	bool unmap_old = false;
 	unsigned long old_uhva;
@@ -295,7 +293,7 @@ int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 	 * drop the lock and do the HVA to PFN lookup again.
 	 */
 	if (!gpc->valid || old_uhva != gpc->uhva) {
-		ret = hva_to_pfn_retry(kvm, gpc);
+		ret = hva_to_pfn_retry(gpc);
 	} else {
 		/*
 		 * If the HVA→PFN mapping was already valid, don't unmap it.
@@ -303,9 +301,8 @@ int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 		 * may have changed.
 		 */
 		gpc->khva = old_khva + page_offset;
-		old_pfn = KVM_PFN_ERR_FAULT;
-		old_khva = NULL;
 		ret = 0;
+		goto out_unlock;
 	}
 
  out:
@@ -333,55 +330,37 @@ int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
-	void *old_khva;
-	kvm_pfn_t old_pfn;
-
-	mutex_lock(&gpc->refresh_lock);
-	write_lock_irq(&gpc->lock);
-
-	gpc->valid = false;
-
-	old_khva = gpc->khva - offset_in_page(gpc->khva);
-	old_pfn = gpc->pfn;
-
-	/*
-	 * We can leave the GPA → uHVA map cache intact but the PFN
-	 * lookup will need to be redone even for the same page.
-	 */
-	gpc->khva = NULL;
-	gpc->pfn = KVM_PFN_ERR_FAULT;
-
-	write_unlock_irq(&gpc->lock);
-	mutex_unlock(&gpc->refresh_lock);
-
-	gpc_unmap_khva(old_pfn, old_khva);
+	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
 }
-EXPORT_SYMBOL_GPL(kvm_gpc_unmap);
+EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
+		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
 {
+	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+	WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
+
 	rwlock_init(&gpc->lock);
 	mutex_init(&gpc->refresh_lock);
+
+	gpc->kvm = kvm;
+	gpc->vcpu = vcpu;
+	gpc->usage = usage;
+	gpc->pfn = KVM_PFN_ERR_FAULT;
+	gpc->uhva = KVM_HVA_ERR_BAD;
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_init);
 
-int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-		     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
-		     gpa_t gpa, unsigned long len)
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 {
-	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+	struct kvm *kvm = gpc->kvm;
 
 	if (!gpc->active) {
-		gpc->khva = NULL;
-		gpc->pfn = KVM_PFN_ERR_FAULT;
-		gpc->uhva = KVM_HVA_ERR_BAD;
-		gpc->vcpu = vcpu;
-		gpc->usage = usage;
-		gpc->valid = false;
+		if (KVM_BUG_ON(gpc->valid, kvm))
+			return -EIO;
 
 		spin_lock(&kvm->gpc_lock);
 		list_add(&gpc->list, &kvm->gpc_list);
@@ -396,12 +375,16 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->active = true;
 		write_unlock_irq(&gpc->lock);
 	}
-	return kvm_gpc_refresh(kvm, gpc, gpa, len);
+	return __kvm_gpc_refresh(gpc, gpa, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
-void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 {
+	struct kvm *kvm = gpc->kvm;
+	kvm_pfn_t old_pfn;
+	void *old_khva;
+
 	if (gpc->active) {
 		/*
 		 * Deactivate the cache before removing it from the list, KVM
@@ -410,13 +393,26 @@ void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 		 */
 		write_lock_irq(&gpc->lock);
 		gpc->active = false;
+		gpc->valid = false;
+
+		/*
+		 * Leave the GPA => uHVA cache intact, it's protected by the
+		 * memslot generation. The PFN lookup needs to be redone every
+		 * time as mmu_notifier protection is lost when the cache is
+		 * removed from the VM's gpc_list.
+		 */
+		old_khva = gpc->khva - offset_in_page(gpc->khva);
+		gpc->khva = NULL;
+
+		old_pfn = gpc->pfn;
+		gpc->pfn = KVM_PFN_ERR_FAULT;
 		write_unlock_irq(&gpc->lock);
 
 		spin_lock(&kvm->gpc_lock);
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
-		kvm_gpc_unmap(kvm, gpc);
+		gpc_unmap_khva(old_pfn, old_khva);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
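With kvm_gpc_unmap() removed, a caller that previously unmapped a cache temporarily now deactivates it and re-activates it before the next use; deactivation unlinks the cache and unmaps the PFN itself, while the GPA => uHVA mapping stays cached. A minimal sketch with made-up helper names:

/* Illustrative only: the temporary-unmap pattern after kvm_gpc_unmap()'s
 * removal.  park_gpc()/unpark_gpc() are hypothetical helpers. */
static void park_gpc(struct gfn_to_pfn_cache *gpc)
{
	/* Deactivation now unlinks the cache and unmaps the PFN in one step. */
	kvm_gpc_deactivate(gpc);
}

static int unpark_gpc(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
	/* Re-activating re-primes the mapping; the GPA => uHVA cache is reused. */
	return kvm_gpc_activate(gpc, gpa, len);
}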