Commit f020e290 authored by Ian Campbell, committed by Jeremy Fitzhardinge

privcmd: MMAPBATCH: Fix error handling/reporting

On error IOCTL_PRIVCMD_MMAPBATCH is expected to set the top nibble of
the affected MFN and return 0. Currently it leaves the MFN unmodified
and returns the number of failures. Therefore:

- reimplement remap_domain_mfn_range() using direct
  HYPERVISOR_mmu_update() calls and small batches. The xen_set_domain_pte()
  interface does not report errors, and since some failures are
  expected/normal, using the multicall infrastructure is too noisy.
- return 0 as expected
- write the updated MFN list back to mmapbatch->arr rather than over
  mmapbatch itself, which smashed the caller's stack.
- remap_domain_mfn_range can be static.

With this change I am able to start an HVM domain.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
parent 8e3e9991
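
For context, here is a minimal userspace sketch of the error-reporting convention this commit restores: the ioctl returns 0 even when individual frames fail to map, and the caller detects per-frame failures by checking the top nibble of each returned MFN (the 0xf0000000U mask written by the driver in the diff below). The header path, the privcmd_mmapbatch_t field names and the map_batch() helper are assumptions for illustration only, not part of this commit.

/* Illustrative only: header path and struct layout are assumed, not
 * taken from this commit. */
#include <sys/ioctl.h>
#include <xen/sys/privcmd.h>	/* privcmd_mmapbatch_t, IOCTL_PRIVCMD_MMAPBATCH */

#define MMAPBATCH_ERR_MASK 0xf0000000U	/* driver sets the top nibble of a failed MFN */

/* Returns the number of frames that failed to map, or -1 if the ioctl
 * itself failed. */
static int map_batch(int privcmd_fd, privcmd_mmapbatch_t *batch)
{
	int i, failed = 0;

	/* With this fix the ioctl returns 0 even when some frames fail;
	 * per-frame errors come back in-band through batch->arr. */
	if (ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAPBATCH, batch) < 0)
		return -1;

	for (i = 0; i < batch->num; i++)
		if (batch->arr[i] & MMAPBATCH_ERR_MASK)
			failed++;

	return failed;
}
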
@@ -32,14 +32,16 @@
 #include <xen/features.h>
 #include <xen/page.h>
 
+#define REMAP_BATCH_SIZE 16
+
 #ifndef HAVE_ARCH_PRIVCMD_MMAP
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
 #endif
 
 struct remap_data {
 	unsigned long mfn;
-	unsigned domid;
 	pgprot_t prot;
+	struct mmu_update *mmu_update;
 };
 
 static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
@@ -48,17 +50,23 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 	struct remap_data *rmd = data;
 	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
 
-	xen_set_domain_pte(ptep, pte, rmd->domid);
+	rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+	rmd->mmu_update->val = pte_val_ma(pte);
+	rmd->mmu_update++;
 
 	return 0;
 }
 
-int remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long addr,
-			   unsigned long mfn, unsigned long size,
-			   pgprot_t prot, unsigned domid)
+static int remap_domain_mfn_range(struct vm_area_struct *vma,
+				  unsigned long addr,
+				  unsigned long mfn, int nr,
+				  pgprot_t prot, unsigned domid)
 {
 	struct remap_data rmd;
-	int err;
+	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+	int batch;
+	unsigned long range;
+	int err = 0;
 
 	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 
@@ -66,10 +74,29 @@ int remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long addr,
 	rmd.mfn = mfn;
 	rmd.prot = prot;
-	rmd.domid = domid;
 
-	err = apply_to_page_range(vma->vm_mm, addr, size,
-				  remap_area_mfn_pte_fn, &rmd);
+	while (nr) {
+		batch = min(REMAP_BATCH_SIZE, nr);
+		range = (unsigned long)batch << PAGE_SHIFT;
+
+		rmd.mmu_update = mmu_update;
+		err = apply_to_page_range(vma->vm_mm, addr, range,
+					  remap_area_mfn_pte_fn, &rmd);
+		if (err)
+			goto out;
+
+		err = -EFAULT;
+		if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+			goto out;
+
+		nr -= batch;
+		addr += range;
+	}
+
+	err = 0;
+
+out:
+	flush_tlb_all();
 
 	return err;
 }
 
@@ -158,7 +185,7 @@ static int traverse_pages(unsigned nelem, size_t size,
 {
 	void *pagedata;
 	unsigned pageidx;
-	int ret;
+	int ret = 0;
 
 	BUG_ON(size > PAGE_SIZE);
 
@@ -208,8 +235,7 @@ static int mmap_mfn_range(void *data, void *state)
 
 	rc = remap_domain_mfn_range(vma,
 				    msg->va & PAGE_MASK,
-				    msg->mfn,
-				    msg->npages << PAGE_SHIFT,
+				    msg->mfn, msg->npages,
 				    vma->vm_page_prot,
 				    st->domain);
 	if (rc < 0)
@@ -290,7 +316,7 @@ static int mmap_batch_fn(void *data, void *state)
 	struct mmap_batch_state *st = state;
 
 	if (remap_domain_mfn_range(st->vma, st->va & PAGE_MASK,
-				   *mfnp, PAGE_SIZE,
+				   *mfnp, 1,
 				   st->vma->vm_page_prot, st->domain) < 0) {
 		*mfnp |= 0xf0000000U;
 		st->err++;
@@ -362,9 +388,9 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
 	up_write(&mm->mmap_sem);
 
 	if (state.err > 0) {
-		ret = state.err;
+		ret = 0;
 
-		state.user = udata;
+		state.user = m.arr;
 		traverse_pages(m.num, sizeof(xen_pfn_t),
 			       &pagelist,
 			       mmap_return_errors, &state);