Commit 99beae6c authored by Andres Lagar-Cavilla, committed by Konrad Rzeszutek Wilk

xen/privcmd: Fix mmap batch ioctl.

1. If any individual mapping error happens, the V1 case will mark *all*
operations as failed. Fixed.

2. The err_array was allocated with kcalloc, resulting in potentially O(n) page
allocations. Refactor code to not use this array.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 7bcc1ec0
...@@ -258,11 +258,12 @@ struct mmap_batch_state { ...@@ -258,11 +258,12 @@ struct mmap_batch_state {
* -ENOENT if at least 1 -ENOENT has happened. * -ENOENT if at least 1 -ENOENT has happened.
*/ */
int global_error; int global_error;
/* An array for individual errors */ int version;
int *err;
/* User-space mfn array to store errors in the second pass for V1. */ /* User-space mfn array to store errors in the second pass for V1. */
xen_pfn_t __user *user_mfn; xen_pfn_t __user *user_mfn;
/* User-space int array to store errors in the second pass for V2. */
int __user *user_err;
}; };
/* auto translated dom0 note: if domU being created is PV, then mfn is /* auto translated dom0 note: if domU being created is PV, then mfn is
...@@ -285,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state) ...@@ -285,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state)
&cur_page); &cur_page);
/* Store error code for second pass. */ /* Store error code for second pass. */
*(st->err++) = ret; if (st->version == 1) {
if (ret < 0) {
/*
* V1 encodes the error codes in the 32bit top nibble of the
* mfn (with its known limitations vis-a-vis 64 bit callers).
*/
*mfnp |= (ret == -ENOENT) ?
PRIVCMD_MMAPBATCH_PAGED_ERROR :
PRIVCMD_MMAPBATCH_MFN_ERROR;
}
} else { /* st->version == 2 */
*((int *) mfnp) = ret;
}
/* And see if it affects the global_error. */ /* And see if it affects the global_error. */
if (ret < 0) { if (ret < 0) {
...@@ -302,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state) ...@@ -302,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state)
return 0; return 0;
} }
static int mmap_return_errors_v1(void *data, void *state) static int mmap_return_errors(void *data, void *state)
{ {
xen_pfn_t *mfnp = data;
struct mmap_batch_state *st = state; struct mmap_batch_state *st = state;
int err = *(st->err++);
/* if (st->version == 1) {
* V1 encodes the error codes in the 32bit top nibble of the xen_pfn_t mfnp = *((xen_pfn_t *) data);
* mfn (with its known limitations vis-a-vis 64 bit callers). if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
*/ return __put_user(mfnp, st->user_mfn++);
*mfnp |= (err == -ENOENT) ? else
PRIVCMD_MMAPBATCH_PAGED_ERROR : st->user_mfn++;
PRIVCMD_MMAPBATCH_MFN_ERROR; } else { /* st->version == 2 */
return __put_user(*mfnp, st->user_mfn++); int err = *((int *) data);
if (err)
return __put_user(err, st->user_err++);
else
st->user_err++;
}
return 0;
} }
/* Allocate pfns that are then mapped with gmfns from foreign domid. Update /* Allocate pfns that are then mapped with gmfns from foreign domid. Update
...@@ -354,7 +372,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) ...@@ -354,7 +372,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
struct vm_area_struct *vma; struct vm_area_struct *vma;
unsigned long nr_pages; unsigned long nr_pages;
LIST_HEAD(pagelist); LIST_HEAD(pagelist);
int *err_array = NULL;
struct mmap_batch_state state; struct mmap_batch_state state;
switch (version) { switch (version) {
...@@ -390,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) ...@@ -390,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
goto out; goto out;
} }
err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL); if (version == 2) {
if (err_array == NULL) { /* Zero error array now to only copy back actual errors. */
ret = -ENOMEM; if (clear_user(m.err, sizeof(int) * m.num)) {
goto out; ret = -EFAULT;
goto out;
}
} }
down_write(&mm->mmap_sem); down_write(&mm->mmap_sem);
...@@ -421,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) ...@@ -421,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
state.va = m.addr; state.va = m.addr;
state.index = 0; state.index = 0;
state.global_error = 0; state.global_error = 0;
state.err = err_array; state.version = version;
/* mmap_batch_fn guarantees ret == 0 */ /* mmap_batch_fn guarantees ret == 0 */
BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t), BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
...@@ -429,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) ...@@ -429,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
up_write(&mm->mmap_sem); up_write(&mm->mmap_sem);
if (version == 1) { if (state.global_error) {
if (state.global_error) { /* Write back errors in second pass. */
/* Write back errors in second pass. */ state.user_mfn = (xen_pfn_t *)m.arr;
state.user_mfn = (xen_pfn_t *)m.arr; state.user_err = m.err;
state.err = err_array; ret = traverse_pages(m.num, sizeof(xen_pfn_t),
ret = traverse_pages(m.num, sizeof(xen_pfn_t), &pagelist, mmap_return_errors, &state);
&pagelist, mmap_return_errors_v1, &state); } else
} else ret = 0;
ret = 0;
} else if (version == 2) {
ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
if (ret)
ret = -EFAULT;
}
/* If we have not had any EFAULT-like global errors then set the global /* If we have not had any EFAULT-like global errors then set the global
* error to -ENOENT if necessary. */ * error to -ENOENT if necessary. */
...@@ -451,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) ...@@ -451,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
ret = -ENOENT; ret = -ENOENT;
out: out:
kfree(err_array);
free_page_list(&pagelist); free_page_list(&pagelist);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment