Commit 8884fd12 authored by Marc Zyngier

Merge branch kvm-arm64/mmu-misc-6.12 into kvmarm-master/next

* kvm-arm64/mmu-misc-6.12:
  : .
  : Various minor MMU improvements and bug-fixes:
  :
  : - Prevent MTE tags being restored by userspace if we are actively
  :   logging writes, as that's a recipe for disaster
  :
  : - Correct the refcount on a page that is not considered for MTE
  :   tag copying (such as a device)
  :
  : - When walking a page table to split blocks, keep the DSB at the end
  :   of the walk, as there is no need to perform it on every store.
  :
  : - Fix boundary check when transferring memory using FFA
  : .
  KVM: arm64: Add memory length checks and remove inline in do_ffa_mem_xfer
  KVM: arm64: Disallow copying MTE to guest memory while KVM is dirty logging
  KVM: arm64: Release pfn, i.e. put page, if copying MTE tags hits ZONE_DEVICE
  KVM: arm64: Move data barrier to end of split walk
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 0d56099e f26a525b
@@ -1045,6 +1045,11 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 
 	mutex_lock(&kvm->slots_lock);
 
+	if (write && atomic_read(&kvm->nr_memslots_dirty_logging)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	while (length > 0) {
 		kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
 		void *maddr;
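
A hedged userspace-side sketch of what the -EBUSY added above means for a
VMM (illustrative only; the struct and ioctl are the documented
KVM_ARM_MTE_COPY_TAGS uAPI, but the helper name is made up): tag
restoration has to be ordered before dirty logging is enabled, e.g. before
the final stage of a live migration.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <errno.h>

	/* Returns 0, or -EBUSY if dirty logging is active on any memslot. */
	static int restore_mte_tags(int vm_fd, __u64 ipa, __u64 len, void *tags)
	{
		struct kvm_arm_copy_mte_tags copy = {
			.guest_ipa = ipa,
			.length    = len,
			.addr      = tags,
			.flags     = KVM_ARM_TAGS_TO_GUEST,
		};

		if (ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &copy) < 0)
			return -errno;	/* -EBUSY: stop dirty logging first */

		return 0;
	}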
@@ -1059,6 +1064,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 		page = pfn_to_online_page(pfn);
 		if (!page) {
 			/* Reject ZONE_DEVICE memory */
+			kvm_release_pfn_clean(pfn);
 			ret = -EFAULT;
 			goto out;
 		}
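
The hunk above restores the usual get/put pairing. A minimal sketch of the
invariant (illustrative only; the helper name is hypothetical): every pfn
obtained from gfn_to_pfn_prot() must be released on every exit path,
including the early ZONE_DEVICE rejection.

	static int get_tagged_page(struct kvm *kvm, gfn_t gfn, bool write,
				   struct page **pagep)
	{
		kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
		struct page *page;

		if (is_error_noslot_pfn(pfn))
			return -EFAULT;		/* nothing to release */

		page = pfn_to_online_page(pfn);
		if (!page) {
			/* ZONE_DEVICE etc.: we still hold a reference, drop it */
			kvm_release_pfn_clean(pfn);
			return -EFAULT;
		}

		*pagep = page;
		return 0;
	}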
@@ -426,9 +426,9 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
 		return;
 }
 
-static __always_inline void do_ffa_mem_xfer(const u64 func_id,
-					    struct arm_smccc_res *res,
-					    struct kvm_cpu_context *ctxt)
+static void __do_ffa_mem_xfer(const u64 func_id,
+			      struct arm_smccc_res *res,
+			      struct kvm_cpu_context *ctxt)
 {
 	DECLARE_REG(u32, len, ctxt, 1);
 	DECLARE_REG(u32, fraglen, ctxt, 2);
@@ -440,9 +440,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 	u32 offset, nr_ranges;
 	int ret = 0;
 
-	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
-		     func_id != FFA_FN64_MEM_LEND);
-
 	if (addr_mbz || npages_mbz || fraglen > len ||
 	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
 		ret = FFA_RET_INVALID_PARAMETERS;
@@ -461,6 +458,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 		goto out_unlock;
 	}
 
+	if (len > ffa_desc_buf.len) {
+		ret = FFA_RET_NO_MEMORY;
+		goto out_unlock;
+	}
+
 	buf = hyp_buffers.tx;
 	memcpy(buf, host_buffers.tx, fraglen);
 
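
The new check above closes the boundary hole named in the merge
description: fraglen only bounds the first fragment (already checked
against the mailbox size earlier), while len is the total descriptor
length spanning later FFA_MEM_FRAG_TX fragments, so it must fit in the
hypervisor's staging buffer before any fragment is appended. A hedged
sketch mirroring both checks (the helper is hypothetical, not kernel
code):

	/* Hypothetical condensation of the two bounds checks */
	static bool ffa_desc_fits(u32 len, u32 fraglen, size_t buf_len)
	{
		/* The first fragment must fit in the mailbox... */
		if (fraglen > len || fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
			return false;
		/* ...and the full descriptor must fit in the staging buffer. */
		return len <= buf_len;
	}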
@@ -512,6 +514,13 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 		goto out_unlock;
 }
 
+#define do_ffa_mem_xfer(fid, res, ctxt)				\
+	do {							\
+		BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE &&	\
+			     (fid) != FFA_FN64_MEM_LEND);	\
+		__do_ffa_mem_xfer((fid), (res), (ctxt));	\
+	} while (0);
+
 static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
 			       struct kvm_cpu_context *ctxt)
 {
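
Why the macro: BUILD_BUG_ON() needs a condition the compiler can evaluate
at build time. That held while do_ffa_mem_xfer() was __always_inline
(func_id folded to a constant at each call site), but not once the body is
out of line. Wrapping the out-of-line function in a macro keeps the check
at the call sites. A generic sketch of the pattern (hypothetical names):

	void __frob(int id);			/* out-of-line worker */

	#define frob(id)						\
		do {							\
			/* id must be a compile-time constant here */	\
			BUILD_BUG_ON(!__builtin_constant_p(id));	\
			__frob(id);					\
		} while (0)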
@@ -1547,7 +1547,6 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
 	 */
 	new = kvm_init_table_pte(childp, mm_ops);
 	stage2_make_pte(ctx, new);
-	dsb(ishst);
 	return 0;
 }
 
@@ -1559,8 +1558,11 @@ int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		.flags	= KVM_PGTABLE_WALK_LEAF,
 		.arg	= mc,
 	};
+	int ret;
 
-	return kvm_pgtable_walk(pgt, addr, size, &walker);
+	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
+	dsb(ishst);
+	return ret;
 }
 
 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
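
The two hunks above hoist the DSB out of the per-PTE visitor: the new
tables only need to be visible before the effects of the split walk are
consumed, so a single barrier after the whole walk suffices. A hedged
sketch of the transformation (install_table(), ptes and nr_ptes are
hypothetical, not the kernel walker):

	/* Before: every visited block mapping paid for its own barrier */
	for (i = 0; i < nr_ptes; i++) {
		install_table(&ptes[i]);
		dsb(ishst);		/* O(n) barriers, one per store */
	}

	/* After: plain stores during the walk, one DSB once it completes */
	for (i = 0; i < nr_ptes; i++)
		install_table(&ptes[i]);
	dsb(ishst);			/* publish all table stores at once */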