Commit 4155539b authored by Marc Zyngier

KVM: arm64: nv: Enforce S2 alignment when contiguous bit is set

Despite KVM not using the contiguous bit for anything related to
TLBs, the spec does require that the alignment defined by the
contiguous bit for the page size and the level be enforced.

Add the required checks to offset the point where PA and IPA merge.

Fixes: 61e30b9e ("KVM: arm64: nv: Implement nested Stage-2 page table walk logic")
Reported-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
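
For reference, here is a minimal userspace sketch (illustrative only, not kernel code) of the extra address bits a contiguous group of translations spans, in the spirit of the Arm ARM's StageOA() pseudocode. cont_shift() is a made-up helper for this example; only the shift values mirror the contiguous_bit_shift() macro added below.

/*
 * Illustrative sketch: extra address bits covered when the contiguous
 * bit is set, per granule size and level (StageOA()-style adjustment).
 */
#include <stdio.h>

static unsigned int cont_shift(unsigned long granule, int level)
{
	switch (granule) {
	case 0x1000:		/* 4K granule: 16 contiguous entries */
		return 4;
	case 0x4000:		/* 16K granule: 32 entries at L2, 128 at L3 */
		return (level == 2) ? 5 : 7;
	case 0x10000:		/* 64K granule: 32 contiguous entries */
		return 5;
	default:
		return 0;
	}
}

int main(void)
{
	/* A 4K level-3 leaf with the contiguous bit set: 2^(12 + 4) = 64KiB alignment */
	printf("4K/L3: %lu bytes\n", 1UL << (12 + cont_shift(0x1000, 3)));
	/* A 16K level-3 leaf: 2^(14 + 7) = 2MiB alignment */
	printf("16K/L3: %lu bytes\n", 1UL << (14 + cont_shift(0x4000, 3)));
	return 0;
}
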
parent 5fddf9ab
@@ -205,4 +205,26 @@ static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
 	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
 }
 
+/* Adjust alignment for the contiguous bit as per StageOA() */
+#define contiguous_bit_shift(d, wi, l) \
+	({ \
+		u8 shift = 0; \
+		\
+		if ((d) & PTE_CONT) { \
+			switch (BIT((wi)->pgshift)) { \
+			case SZ_4K: \
+				shift = 4; \
+				break; \
+			case SZ_16K: \
+				shift = (l) == 2 ? 5 : 7; \
+				break; \
+			case SZ_64K: \
+				shift = 5; \
+				break; \
+			} \
+		} \
+		\
+		shift; \
+	})
+
 #endif /* __ARM64_KVM_NESTED_H */
@@ -282,11 +282,6 @@ static int walk_nested_s2_pgd(phys_addr_t ipa,
 		return 1;
 	}
 
-	/*
-	 * We don't use the contiguous bit in the stage-2 ptes, so skip check
-	 * for misprogramming of the contiguous bit.
-	 */
-
 	if (check_output_size(wi, desc)) {
 		out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
 		out->upper_attr = desc;
@@ -299,6 +294,8 @@ static int walk_nested_s2_pgd(phys_addr_t ipa,
 		return 1;
 	}
 
+	addr_bottom += contiguous_bit_shift(desc, wi, level);
+
 	/* Calculate and return the result */
 	paddr = (desc & GENMASK_ULL(47, addr_bottom)) |
 		(ipa & GENMASK_ULL(addr_bottom - 1, 0));
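
As a worked example of how the extra shift offsets the PA/IPA merge point, the standalone sketch below (illustrative only; GENMASK_ULL is redefined locally and the addresses are made up) mimics the masking the walker now performs. With a 4K granule, level-3 leaf and PTE_CONT set, addr_bottom moves from 12 to 16, so PA bits [15:12] come from the IPA and any misalignment in the descriptor's output address is discarded.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK_ULL(h, l) */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t desc = 0x40003000ULL;		/* descriptor OA, not 64KiB aligned */
	uint64_t ipa  = 0x8001f123ULL;		/* example faulting IPA */
	unsigned int addr_bottom = 12 + 4;	/* pgshift + contiguous shift */

	/* Bits [15:12] of the final PA come from the IPA, not the descriptor */
	uint64_t paddr = (desc & GENMASK_ULL(47, addr_bottom)) |
			 (ipa & GENMASK_ULL(addr_bottom - 1, 0));

	printf("paddr = 0x%llx\n", (unsigned long long)paddr);	/* 0x4000f123 */
	return 0;
}
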