Commit 4a4c9924 authored by Anthony Liguori, committed by Avi Kivity

KVM: MMU: More struct kvm_vcpu -> struct kvm cleanups

The biggest change this time is gpa_to_hpa. Unlike GVA-to-GPA translation, translating a GPA to an HPA does not depend on VCPU state, so there is no need to pass in the kvm_vcpu.
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent f67a46f4
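The pattern in the diff below is mechanical: a function that consults only VM-wide state (the memslots) takes a struct kvm directly, and VCPU-level callers hand it vcpu->kvm. A minimal C sketch of the idea, with placeholder types and an identity translation standing in for the real memslot lookup (gva_to_hpa_sketch is a hypothetical caller, not kernel code):

/* Sketch only: simplified stand-ins for the kernel's kvm types. */
#include <stdint.h>

typedef uint64_t gpa_t;   /* guest physical address */
typedef uint64_t hpa_t;   /* host physical address */

struct kvm { int nmemslots; };          /* VM-wide state (placeholder) */
struct kvm_vcpu { struct kvm *kvm; };   /* per-VCPU state (placeholder) */

/* After the cleanup: translation needs only the VM, not a VCPU. */
static hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
	(void)kvm;   /* the real code resolves the page via gfn_to_page() */
	return gpa;  /* identity mapping as a stand-in for the lookup */
}

/* VCPU-level callers simply reach through to the VM. */
static hpa_t gva_to_hpa_sketch(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return gpa_to_hpa(vcpu->kvm, gpa);
}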
...
@@ -554,7 +554,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
-hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
+hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa);
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
 #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
...
@@ -451,14 +451,14 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	}
 }
 
-static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	struct kvm_rmap_desc *desc;
 	unsigned long *rmapp;
 	u64 *spte;
 
-	gfn = unalias_gfn(vcpu->kvm, gfn);
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn);
+	rmapp = gfn_to_rmap(kvm, gfn);
 
 	while (*rmapp) {
 		if (!(*rmapp & 1))
...
@@ -471,9 +471,9 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		BUG_ON(!(*spte & PT_WRITABLE_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		rmap_remove(vcpu->kvm, spte);
+		rmap_remove(kvm, spte);
 		set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
-		kvm_flush_remote_tlbs(vcpu->kvm);
+		kvm_flush_remote_tlbs(kvm);
 	}
 }
...
@@ -670,7 +670,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_add_head(&page->hash_link, bucket);
 	vcpu->mmu.prefetch_page(vcpu, page);
 	if (!metaphysical)
-		rmap_write_protect(vcpu, gfn);
+		rmap_write_protect(vcpu->kvm, gfn);
 	return page;
 }
...
@@ -823,19 +823,19 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
 	__set_bit(slot, &page_head->slot_bitmap);
 }
 
-hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 {
-	hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+	hpa_t hpa = gpa_to_hpa(kvm, gpa);
 
 	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
 }
 
-hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 {
 	struct page *page;
 
 	ASSERT((gpa & HPA_ERR_MASK) == 0);
-	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
 	if (!page)
 		return gpa | HPA_ERR_MASK;
 	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
...
@@ -848,7 +848,7 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
 	if (gpa == UNMAPPED_GVA)
 		return UNMAPPED_GVA;
-	return gpa_to_hpa(vcpu, gpa);
+	return gpa_to_hpa(vcpu->kvm, gpa);
 }
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
...
@@ -857,7 +857,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
-	return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
+	return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
...
@@ -1012,7 +1012,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 
-	paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
+	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
 	if (is_error_hpa(paddr))
 		return 1;
...
@@ -103,7 +103,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
 		 walker->level - 1, table_gfn);
 	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
-	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
+	hpa = safe_gpa_to_hpa(vcpu->kvm, root & PT64_BASE_ADDR_MASK);
 	walker->page = pfn_to_page(hpa >> PAGE_SHIFT);
 	walker->table = kmap_atomic(walker->page, KM_USER0);
...
@@ -159,7 +159,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 		walker->inherited_ar &= walker->table[index];
 		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
 		kunmap_atomic(walker->table, KM_USER0);
-		paddr = safe_gpa_to_hpa(vcpu, table_gfn << PAGE_SHIFT);
+		paddr = safe_gpa_to_hpa(vcpu->kvm, table_gfn << PAGE_SHIFT);
 		walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
 		walker->table = kmap_atomic(walker->page, KM_USER0);
 		--walker->level;
...
@@ -248,7 +248,7 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
 	if (!dirty)
 		access_bits &= ~PT_WRITABLE_MASK;
 
-	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
+	paddr = gpa_to_hpa(vcpu->kvm, gaddr & PT64_BASE_ADDR_MASK);
 
 	spte |= PT_PRESENT_MASK;
 	if (access_bits & PT_USER_MASK)
...