Commit bc224df1 authored by Quentin Perret, committed by Marc Zyngier

KVM: arm64: Introduce KVM_PGTABLE_S2_NOFWB stage 2 flag

In order to further configure stage 2 page-tables, pass flags to the
init function using a new enum.

The first of these flags allows FWB to be disabled even if the hardware
supports it, as we will need to do so for the host stage 2.
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-33-qperret@google.com
parent 2fcb3a59
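
For context, a minimal sketch (not part of this patch) of how a host stage-2
setup path could opt out of FWB with the new flag; the host_pgt, host_arch and
host_mm_ops names are hypothetical, only the API is the one introduced here:

	/* Sketch only: "host_pgt", "host_arch" and "host_mm_ops" are
	 * hypothetical names; the function and flag are from this patch. */
	static struct kvm_pgtable host_pgt;

	static int host_stage2_setup(struct kvm_arch *host_arch,
				     struct kvm_pgtable_mm_ops *host_mm_ops)
	{
		/* Disable FWB for the host stage 2 even if the CPUs support it. */
		return kvm_pgtable_stage2_init_flags(&host_pgt, host_arch,
						     host_mm_ops,
						     KVM_PGTABLE_S2_NOFWB);
	}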
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -56,6 +56,15 @@ struct kvm_pgtable_mm_ops {
 	phys_addr_t		(*virt_to_phys)(void *addr);
 };
 
+/**
+ * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
+ * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
+ *				ARM64_HAS_STAGE2_FWB.
+ */
+enum kvm_pgtable_stage2_flags {
+	KVM_PGTABLE_S2_NOFWB			= BIT(0),
+};
+
 /**
  * struct kvm_pgtable - KVM page-table.
  * @ia_bits:		Maximum input address size, in bits.
@@ -72,6 +81,7 @@ struct kvm_pgtable {
 
 	/* Stage-2 only */
 	struct kvm_s2_mmu			*mmu;
+	enum kvm_pgtable_stage2_flags		flags;
 };
@@ -196,20 +206,25 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
 
 /**
- * kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
+ * kvm_pgtable_stage2_init_flags() - Initialise a guest stage-2 page-table.
  * @pgt:	Uninitialised page-table structure to initialise.
  * @arch:	Arch-specific KVM structure representing the guest virtual
  *		machine.
  * @mm_ops:	Memory management callbacks.
+ * @flags:	Stage-2 configuration flags.
  *
  * Return: 0 on success, negative error code on failure.
  */
-int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
-			    struct kvm_pgtable_mm_ops *mm_ops);
+int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+				  struct kvm_pgtable_mm_ops *mm_ops,
+				  enum kvm_pgtable_stage2_flags flags);
+
+#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
+	kvm_pgtable_stage2_init_flags(pgt, arch, mm_ops, 0)
 
 /**
  * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  *
  * The page-table is assumed to be unreachable by any hardware walkers prior
  * to freeing and therefore no TLB invalidation is performed.
@@ -218,7 +233,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
 /**
  * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address at which to place the mapping.
  * @size:	Size of the mapping.
  * @phys:	Physical address of the memory to map.
@@ -251,7 +266,7 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
 /**
  * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
  *				    track ownership.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Base intermediate physical address to annotate.
  * @size:	Size of the annotated range.
  * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
@@ -270,7 +285,7 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
 /**
  * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address from which to remove the mapping.
  * @size:	Size of the mapping.
  *
@@ -290,7 +305,7 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
 /**
  * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
  *				    without TLB invalidation.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address from which to write-protect,
  * @size:	Size of the range.
  *
@@ -307,7 +322,7 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
 /**
  * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address to identify the page-table entry.
  *
  * The offset of @addr within a page is ignored.
@@ -321,7 +336,7 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
 /**
  * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address to identify the page-table entry.
  *
  * The offset of @addr within a page is ignored.
@@ -340,7 +355,7 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
 /**
  * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
  *				      page-table entry.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address to identify the page-table entry.
  * @prot:	Additional permissions to grant for the mapping.
  *
@@ -359,7 +374,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
 /**
  * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
  *				   access flag set.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address to identify the page-table entry.
  *
  * The offset of @addr within a page is ignored.
@@ -372,7 +387,7 @@ bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
  * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
  *					of Coherency for guest stage-2 address
  *					range.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address from which to flush.
  * @size:	Size of the range.
  *
@@ -411,7 +426,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
  * kvm_pgtable_stage2_find_range() - Find a range of Intermediate Physical
  *				     Addresses with compatible permission
  *				     attributes.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Address that must be covered by the range.
  * @prot:	Protection attributes that the range must be compatible with.
  * @range:	Range structure used to limit the search space at call time and
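
Existing callers are unaffected by the rename above: the
kvm_pgtable_stage2_init() compatibility macro forwards a zero flags value. A
sketch of an unchanged guest-side call (the kvm_s2_mm_ops name is
illustrative):

	/* Guest stage-2 tables keep FWB whenever the CPUs support it. */
	err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);
	/* expands to:
	 * kvm_pgtable_stage2_init_flags(pgt, &kvm->arch, &kvm_s2_mm_ops, 0);
	 */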
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -71,10 +71,10 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
 
-#define PAGE_S2_MEMATTR(attr)						\
+#define PAGE_S2_MEMATTR(attr, has_fwb)					\
 	({								\
 		u64 __val;						\
-		if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))		\
+		if (has_fwb)						\
 			__val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr);	\
 		else							\
 			__val = PTE_S2_MEMATTR(MT_S2_ ## attr);		\
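
To illustrate the new has_fwb parameter: the macro now selects the FWB or the
legacy memory-attribute encoding based on its argument rather than on the CPU
capability directly (a sketch; the token-pasted MT_S2_* names come from the
existing encoding):

	kvm_pte_t a = PAGE_S2_MEMATTR(NORMAL, true);  /* PTE_S2_MEMATTR(MT_S2_FWB_NORMAL) */
	kvm_pte_t b = PAGE_S2_MEMATTR(NORMAL, false); /* PTE_S2_MEMATTR(MT_S2_NORMAL) */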
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -508,11 +508,22 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
 	return vtcr;
 }
 
-static int stage2_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
+static bool stage2_has_fwb(struct kvm_pgtable *pgt)
+{
+	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+		return false;
+
+	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
+}
+
+#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
+
+static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
+				kvm_pte_t *ptep)
 {
 	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
-	kvm_pte_t attr = device ? PAGE_S2_MEMATTR(DEVICE_nGnRE) :
-			    PAGE_S2_MEMATTR(NORMAL);
+	kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
+			    KVM_S2_MEMATTR(pgt, NORMAL);
 	u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
 
 	if (!(prot & KVM_PGTABLE_PROT_X))
@@ -749,7 +760,7 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		.arg		= &map_data,
 	};
 
-	ret = stage2_set_prot_attr(prot, &map_data.attr);
+	ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
 	if (ret)
 		return ret;
@@ -784,18 +795,10 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
 	return ret;
 }
 
-static void stage2_flush_dcache(void *addr, u64 size)
-{
-	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
-		return;
-
-	__flush_dcache_area(addr, size);
-}
-
-static bool stage2_pte_cacheable(kvm_pte_t pte)
+static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
 {
 	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
 
-	return memattr == PAGE_S2_MEMATTR(NORMAL);
+	return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
 }
 
 static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
@@ -821,8 +824,8 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 		if (mm_ops->page_count(childp) != 1)
 			return 0;
-	} else if (stage2_pte_cacheable(pte)) {
-		need_flush = true;
+	} else if (stage2_pte_cacheable(pgt, pte)) {
+		need_flush = !stage2_has_fwb(pgt);
 	}
 
 	/*
@@ -833,7 +836,7 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	stage2_put_pte(ptep, mmu, addr, level, mm_ops);
 
 	if (need_flush) {
-		stage2_flush_dcache(kvm_pte_follow(pte, mm_ops),
-				    kvm_granule_size(level));
+		__flush_dcache_area(kvm_pte_follow(pte, mm_ops),
+				    kvm_granule_size(level));
 	}
@@ -979,13 +982,14 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 			       enum kvm_pgtable_walk_flags flag,
 			       void * const arg)
 {
-	struct kvm_pgtable_mm_ops *mm_ops = arg;
+	struct kvm_pgtable *pgt = arg;
+	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
 	kvm_pte_t pte = *ptep;
 
-	if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pte))
+	if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
 		return 0;
 
-	stage2_flush_dcache(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
+	__flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
 	return 0;
 }
@@ -994,17 +998,18 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
 	struct kvm_pgtable_walker walker = {
 		.cb	= stage2_flush_walker,
 		.flags	= KVM_PGTABLE_WALK_LEAF,
-		.arg	= pgt->mm_ops,
+		.arg	= pgt,
 	};
 
-	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+	if (stage2_has_fwb(pgt))
 		return 0;
 
 	return kvm_pgtable_walk(pgt, addr, size, &walker);
 }
 
-int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
-			    struct kvm_pgtable_mm_ops *mm_ops)
+int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+				  struct kvm_pgtable_mm_ops *mm_ops,
+				  enum kvm_pgtable_stage2_flags flags)
 {
 	size_t pgd_sz;
 	u64 vtcr = arch->vtcr;
@@ -1021,6 +1026,7 @@ int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
 	pgt->start_level	= start_level;
 	pgt->mm_ops		= mm_ops;
 	pgt->mmu		= &arch->mmu;
+	pgt->flags		= flags;
 
 	/* Ensure zeroed PGD pages are visible to the hardware walker */
 	dsb(ishst);
@@ -1101,7 +1107,7 @@ int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
 	u32 level;
 	int ret;
 
-	ret = stage2_set_prot_attr(prot, &attr);
+	ret = stage2_set_prot_attr(pgt, prot, &attr);
 	if (ret)
 		return ret;
 
 	attr &= KVM_PTE_LEAF_S2_COMPAT_MASK;
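
The net effect on cache maintenance, derived from the hunks above:

	ARM64_HAS_STAGE2_FWB | KVM_PGTABLE_S2_NOFWB | stage2_has_fwb() | D-cache CMOs
	absent               | any                  | false            | performed
	present              | clear                | true             | elided
	present              | set                  | false            | performed

That is, when stage2_has_fwb() returns true the walkers skip data-cache
maintenance (need_flush stays false and kvm_pgtable_stage2_flush() returns
early); otherwise __flush_dcache_area() is called as before.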