Commit ceb90fa0 authored by Andres Lagar-Cavilla, committed by Konrad Rzeszutek Wilk

xen/privcmd: add PRIVCMD_MMAPBATCH_V2 ioctl

PRIVCMD_MMAPBATCH_V2 extends PRIVCMD_MMAPBATCH with an additional
field for reporting the error code for every frame that could not be
mapped.  libxc prefers PRIVCMD_MMAPBATCH_V2 over PRIVCMD_MMAPBATCH.

Also expand PRIVCMD_MMAPBATCH to return the appropriate error-encoding
top nibble in the mfn array.
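
For illustration only, a minimal user-space sketch of how a libxc-style caller
might prefer the V2 ioctl and fall back to V1 on kernels without it. The helper
name, the already-open privcmd file descriptor, and the errno check for an
unrecognized ioctl are assumptions, not part of this patch:

	#include <stdint.h>
	#include <errno.h>
	#include <sys/ioctl.h>
	#include <xen/privcmd.h>   /* struct privcmd_mmapbatch{,_v2}, IOCTL_PRIVCMD_* */
	/* domid_t and xen_pfn_t come from the Xen public headers. */

	/* Hypothetical helper: map 'num' foreign frames of domain 'dom' at 'addr',
	 * which is assumed to be backed by an mmap() of the privcmd device fd. */
	static int mmapbatch(int fd, domid_t dom, void *addr,
			     xen_pfn_t *arr, int *err, unsigned int num)
	{
		struct privcmd_mmapbatch_v2 m2 = {
			.num  = num,
			.dom  = dom,
			.addr = (__u64)(uintptr_t)addr,
			.arr  = arr,
			.err  = err,	/* per-frame error codes (V2 only) */
		};
		int rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m2);

		if (rc < 0 && (errno == EINVAL || errno == ENOTTY)) {
			/* Kernel without V2: fall back to V1, which flags failed
			 * frames in the top nibble of each mfn in arr. */
			struct privcmd_mmapbatch m1 = {
				.num  = num,
				.dom  = dom,
				.addr = (__u64)(uintptr_t)addr,
				.arr  = arr,
			};
			rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &m1);
		}
		return rc;	/* with V2, -1/ENOENT can still mean "frame paged out" */
	}
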
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 69870a84
@@ -76,7 +76,7 @@ static void free_page_list(struct list_head *pages)
  */
 static int gather_array(struct list_head *pagelist,
 			unsigned nelem, size_t size,
-			void __user *data)
+			const void __user *data)
 {
 	unsigned pageidx;
 	void *pagedata;
@@ -246,61 +246,117 @@ struct mmap_batch_state {
 	domid_t domain;
 	unsigned long va;
 	struct vm_area_struct *vma;
-	int err;
-
-	xen_pfn_t __user *user;
+	/* A tristate:
+	 *      0 for no errors
+	 *      1 if at least one error has happened (and no
+	 *          -ENOENT errors have happened)
+	 *      -ENOENT if at least 1 -ENOENT has happened.
+	 */
+	int global_error;
+	/* An array for individual errors */
+	int *err;
+
+	/* User-space mfn array to store errors in the second pass for V1. */
+	xen_pfn_t __user *user_mfn;
 };
 
 static int mmap_batch_fn(void *data, void *state)
 {
 	xen_pfn_t *mfnp = data;
 	struct mmap_batch_state *st = state;
+	int ret;
 
-	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
-				       st->vma->vm_page_prot, st->domain) < 0) {
-		*mfnp |= 0xf0000000U;
-		st->err++;
+	ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
+					 st->vma->vm_page_prot, st->domain);
+
+	/* Store error code for second pass. */
+	*(st->err++) = ret;
+
+	/* And see if it affects the global_error. */
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			st->global_error = -ENOENT;
+		else {
+			/* Record that at least one error has happened. */
+			if (st->global_error == 0)
+				st->global_error = 1;
+		}
 	}
 	st->va += PAGE_SIZE;
 
 	return 0;
 }
 
-static int mmap_return_errors(void *data, void *state)
+static int mmap_return_errors_v1(void *data, void *state)
 {
 	xen_pfn_t *mfnp = data;
 	struct mmap_batch_state *st = state;
+	int err = *(st->err++);
 
-	return put_user(*mfnp, st->user++);
+	/*
+	 * V1 encodes the error codes in the 32bit top nibble of the
+	 * mfn (with its known limitations vis-a-vis 64 bit callers).
+	 */
+	*mfnp |= (err == -ENOENT) ?
+				PRIVCMD_MMAPBATCH_PAGED_ERROR :
+				PRIVCMD_MMAPBATCH_MFN_ERROR;
+	return __put_user(*mfnp, st->user_mfn++);
 }
 
 static struct vm_operations_struct privcmd_vm_ops;
 
-static long privcmd_ioctl_mmap_batch(void __user *udata)
+static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 {
 	int ret;
-	struct privcmd_mmapbatch m;
+	struct privcmd_mmapbatch_v2 m;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long nr_pages;
 	LIST_HEAD(pagelist);
+	int *err_array = NULL;
 	struct mmap_batch_state state;
 
 	if (!xen_initial_domain())
 		return -EPERM;
 
-	if (copy_from_user(&m, udata, sizeof(m)))
-		return -EFAULT;
+	switch (version) {
+	case 1:
+		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
+			return -EFAULT;
+		/* Returns per-frame error in m.arr. */
+		m.err = NULL;
+		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
+			return -EFAULT;
+		break;
+	case 2:
+		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
+			return -EFAULT;
+		/* Returns per-frame error code in m.err. */
+		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
+			return -EFAULT;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	nr_pages = m.num;
 	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
 		return -EINVAL;
 
-	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
-			   m.arr);
+	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
+	if (ret)
+		goto out;
 
-	if (ret || list_empty(&pagelist))
+	if (list_empty(&pagelist)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
+	if (err_array == NULL) {
+		ret = -ENOMEM;
 		goto out;
+	}
 
 	down_write(&mm->mmap_sem);
@@ -315,24 +371,34 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
 		goto out;
 	}
 
 	state.domain = m.dom;
 	state.vma = vma;
 	state.va = m.addr;
-	state.err = 0;
+	state.global_error = 0;
+	state.err = err_array;
 
-	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-			     &pagelist, mmap_batch_fn, &state);
+	/* mmap_batch_fn guarantees ret == 0 */
+	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
+			      &pagelist, mmap_batch_fn, &state));
 
 	up_write(&mm->mmap_sem);
 
-	if (state.err > 0) {
-		state.user = m.arr;
+	if (state.global_error && (version == 1)) {
+		/* Write back errors in second pass. */
+		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.err = err_array;
 		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-				     &pagelist,
-				     mmap_return_errors, &state);
-	}
+				     &pagelist, mmap_return_errors_v1, &state);
+	} else
+		ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
+
+	/* If we have not had any EFAULT-like global errors then set the global
+	 * error to -ENOENT if necessary. */
+	if ((ret == 0) && (state.global_error == -ENOENT))
+		ret = -ENOENT;
 
 out:
+	kfree(err_array);
 	free_page_list(&pagelist);
 	return ret;
@@ -354,7 +420,11 @@ static long privcmd_ioctl(struct file *file,
 		break;
 
 	case IOCTL_PRIVCMD_MMAPBATCH:
-		ret = privcmd_ioctl_mmap_batch(udata);
+		ret = privcmd_ioctl_mmap_batch(udata, 1);
+		break;
+
+	case IOCTL_PRIVCMD_MMAPBATCH_V2:
+		ret = privcmd_ioctl_mmap_batch(udata, 2);
 		break;
 
 	default:
@@ -58,13 +58,33 @@ struct privcmd_mmapbatch {
 	int num;     /* number of pages to populate */
 	domid_t dom; /* target domain */
 	__u64 addr;  /* virtual address */
-	xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
+	xen_pfn_t __user *arr; /* array of mfns - or'd with
+				  PRIVCMD_MMAPBATCH_*_ERROR on err */
+};
+
+#define PRIVCMD_MMAPBATCH_MFN_ERROR	0xf0000000U
+#define PRIVCMD_MMAPBATCH_PAGED_ERROR	0x80000000U
+
+struct privcmd_mmapbatch_v2 {
+	unsigned int num; /* number of pages to populate */
+	domid_t dom;      /* target domain */
+	__u64 addr;       /* virtual address */
+	const xen_pfn_t __user *arr; /* array of mfns */
+	int __user *err;  /* array of error codes */
 };
 
 /*
  * @cmd: IOCTL_PRIVCMD_HYPERCALL
  * @arg: &privcmd_hypercall_t
  * Return: Value returned from execution of the specified hypercall.
+ *
+ * @cmd: IOCTL_PRIVCMD_MMAPBATCH_V2
+ * @arg: &struct privcmd_mmapbatch_v2
+ * Return: 0 on success (i.e., arg->err contains valid error codes for
+ * each frame).  On an error other than a failed frame remap, -1 is
+ * returned and errno is set to EINVAL, EFAULT etc.  As an exception,
+ * if the operation was otherwise successful but any frame failed with
+ * -ENOENT, then -1 is returned and errno is set to ENOENT.
  */
 
 #define IOCTL_PRIVCMD_HYPERCALL					\
 	_IOC(_IOC_NONE, 'P', 0, sizeof(struct privcmd_hypercall))
@@ -72,5 +92,7 @@ struct privcmd_mmapbatch {
 	_IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap))
 #define IOCTL_PRIVCMD_MMAPBATCH					\
 	_IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch))
+#define IOCTL_PRIVCMD_MMAPBATCH_V2				\
+	_IOC(_IOC_NONE, 'P', 4, sizeof(struct privcmd_mmapbatch_v2))
 
 #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
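
To make the two error-reporting conventions above concrete, here is a hedged
sketch of how a caller might post-process the results. The helper and its
parameters are hypothetical; it only assumes the PRIVCMD_MMAPBATCH_*_ERROR
definitions from the header hunk above:

	/* Hypothetical helper: count frames that failed to map, under either
	 * convention.  The V1 top-nibble test is only meaningful for mfns that
	 * fit in 32 bits, as the driver comment about 64-bit callers notes. */
	static unsigned int count_failed_frames(int version, unsigned int num,
						const xen_pfn_t *arr, const int *err)
	{
		unsigned int i, failed = 0;

		for (i = 0; i < num; i++) {
			if (version == 2) {
				/* V2: errno-style code per frame in err[]. */
				if (err[i] < 0)
					failed++;
			} else {
				/* V1: the kernel or'd an error flag into the mfn. */
				xen_pfn_t top = arr[i] & 0xf0000000U;

				if (top == PRIVCMD_MMAPBATCH_PAGED_ERROR ||
				    top == PRIVCMD_MMAPBATCH_MFN_ERROR)
					failed++;
			}
		}
		return failed;
	}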