Commit 70fd1906 authored by Suzuki K Poulose, committed by Christoffer Dall

kvm-arm: Use explicit stage2 helper routines

We have stage2 page table helpers for both arm and arm64. Switch to
the stage2 helpers for routines that only deal with stage2 page table.

Cc: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
parent 66f877fa
...@@ -319,9 +319,9 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, ...@@ -319,9 +319,9 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
pmd_t *pmd; pmd_t *pmd;
phys_addr_t next; phys_addr_t next;
pmd = pmd_offset(pud, addr); pmd = stage2_pmd_offset(pud, addr);
do { do {
next = kvm_pmd_addr_end(addr, end); next = stage2_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) { if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd)) if (pmd_thp_or_huge(*pmd))
kvm_flush_dcache_pmd(*pmd); kvm_flush_dcache_pmd(*pmd);
...@@ -337,11 +337,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, ...@@ -337,11 +337,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
pud_t *pud; pud_t *pud;
phys_addr_t next; phys_addr_t next;
pud = pud_offset(pgd, addr); pud = stage2_pud_offset(pgd, addr);
do { do {
next = kvm_pud_addr_end(addr, end); next = stage2_pud_addr_end(addr, end);
if (!pud_none(*pud)) { if (!stage2_pud_none(*pud)) {
if (pud_huge(*pud)) if (stage2_pud_huge(*pud))
kvm_flush_dcache_pud(*pud); kvm_flush_dcache_pud(*pud);
else else
stage2_flush_pmds(kvm, pud, addr, next); stage2_flush_pmds(kvm, pud, addr, next);
...@@ -357,9 +357,9 @@ static void stage2_flush_memslot(struct kvm *kvm, ...@@ -357,9 +357,9 @@ static void stage2_flush_memslot(struct kvm *kvm,
phys_addr_t next; phys_addr_t next;
pgd_t *pgd; pgd_t *pgd;
pgd = kvm->arch.pgd + kvm_pgd_index(addr); pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do { do {
next = kvm_pgd_addr_end(addr, end); next = stage2_pgd_addr_end(addr, end);
stage2_flush_puds(kvm, pgd, addr, next); stage2_flush_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end); } while (pgd++, addr = next, addr != end);
} }
...@@ -807,16 +807,16 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache ...@@ -807,16 +807,16 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
pgd_t *pgd; pgd_t *pgd;
pud_t *pud; pud_t *pud;
pgd = kvm->arch.pgd + kvm_pgd_index(addr); pgd = kvm->arch.pgd + stage2_pgd_index(addr);
if (WARN_ON(pgd_none(*pgd))) { if (WARN_ON(stage2_pgd_none(*pgd))) {
if (!cache) if (!cache)
return NULL; return NULL;
pud = mmu_memory_cache_alloc(cache); pud = mmu_memory_cache_alloc(cache);
pgd_populate(NULL, pgd, pud); stage2_pgd_populate(pgd, pud);
get_page(virt_to_page(pgd)); get_page(virt_to_page(pgd));
} }
return pud_offset(pgd, addr); return stage2_pud_offset(pgd, addr);
} }
static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
...@@ -826,15 +826,15 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache ...@@ -826,15 +826,15 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
pmd_t *pmd; pmd_t *pmd;
pud = stage2_get_pud(kvm, cache, addr); pud = stage2_get_pud(kvm, cache, addr);
if (pud_none(*pud)) { if (stage2_pud_none(*pud)) {
if (!cache) if (!cache)
return NULL; return NULL;
pmd = mmu_memory_cache_alloc(cache); pmd = mmu_memory_cache_alloc(cache);
pud_populate(NULL, pud, pmd); stage2_pud_populate(pud, pmd);
get_page(virt_to_page(pud)); get_page(virt_to_page(pud));
} }
return pmd_offset(pud, addr); return stage2_pmd_offset(pud, addr);
} }
static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
...@@ -1042,10 +1042,10 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) ...@@ -1042,10 +1042,10 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
pmd_t *pmd; pmd_t *pmd;
phys_addr_t next; phys_addr_t next;
pmd = pmd_offset(pud, addr); pmd = stage2_pmd_offset(pud, addr);
do { do {
next = kvm_pmd_addr_end(addr, end); next = stage2_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) { if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd)) { if (pmd_thp_or_huge(*pmd)) {
if (!kvm_s2pmd_readonly(pmd)) if (!kvm_s2pmd_readonly(pmd))
...@@ -1070,12 +1070,12 @@ static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end) ...@@ -1070,12 +1070,12 @@ static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
pud_t *pud; pud_t *pud;
phys_addr_t next; phys_addr_t next;
pud = pud_offset(pgd, addr); pud = stage2_pud_offset(pgd, addr);
do { do {
next = kvm_pud_addr_end(addr, end); next = stage2_pud_addr_end(addr, end);
if (!pud_none(*pud)) { if (!stage2_pud_none(*pud)) {
/* TODO:PUD not supported, revisit later if supported */ /* TODO:PUD not supported, revisit later if supported */
BUG_ON(pud_huge(*pud)); BUG_ON(stage2_pud_huge(*pud));
stage2_wp_pmds(pud, addr, next); stage2_wp_pmds(pud, addr, next);
} }
} while (pud++, addr = next, addr != end); } while (pud++, addr = next, addr != end);
...@@ -1092,7 +1092,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) ...@@ -1092,7 +1092,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
pgd_t *pgd; pgd_t *pgd;
phys_addr_t next; phys_addr_t next;
pgd = kvm->arch.pgd + kvm_pgd_index(addr); pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do { do {
/* /*
* Release kvm_mmu_lock periodically if the memory region is * Release kvm_mmu_lock periodically if the memory region is
...@@ -1104,8 +1104,8 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) ...@@ -1104,8 +1104,8 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) if (need_resched() || spin_needbreak(&kvm->mmu_lock))
cond_resched_lock(&kvm->mmu_lock); cond_resched_lock(&kvm->mmu_lock);
next = kvm_pgd_addr_end(addr, end); next = stage2_pgd_addr_end(addr, end);
if (pgd_present(*pgd)) if (stage2_pgd_present(*pgd))
stage2_wp_puds(pgd, addr, next); stage2_wp_puds(pgd, addr, next);
} while (pgd++, addr = next, addr != end); } while (pgd++, addr = next, addr != end);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment