Commit a13d7201 authored by Julien Grall's avatar Julien Grall

xen/privcmd: Further s/MFN/GFN/ clean-up

The privcmd code mixes the usage of GFN and MFN within the same
functions, which makes the code difficult to understand when you only
work with auto-translated guests.

The privcmd driver only deals with GFNs, so replace all mentions of
MFN with GFN.

The ioctl structure used to map foreign frames has been left unchanged
given that userspace is using it. Nonetheless, add a comment to
explain the expected value within the "mfn" field.
Signed-off-by: default avatarJulien Grall <julien.grall@citrix.com>
Reviewed-by: default avatarDavid Vrabel <david.vrabel@citrix.com>
Signed-off-by: default avatarDavid Vrabel <david.vrabel@citrix.com>
parent 859e3267
...@@ -49,35 +49,35 @@ static __read_mostly unsigned int xen_events_irq; ...@@ -49,35 +49,35 @@ static __read_mostly unsigned int xen_events_irq;
static __initdata struct device_node *xen_node; static __initdata struct device_node *xen_node;
int xen_remap_domain_mfn_array(struct vm_area_struct *vma, int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
xen_pfn_t *mfn, int nr, xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot, int *err_ptr, pgprot_t prot,
unsigned domid, unsigned domid,
struct page **pages) struct page **pages)
{ {
return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
prot, domid, pages); prot, domid, pages);
} }
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
/* Not used by XENFEAT_auto_translated guests. */ /* Not used by XENFEAT_auto_translated guests. */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma, int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
xen_pfn_t mfn, int nr, xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid, pgprot_t prot, unsigned domid,
struct page **pages) struct page **pages)
{ {
return -ENOSYS; return -ENOSYS;
} }
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int nr, struct page **pages) int nr, struct page **pages)
{ {
return xen_xlate_unmap_gfn_range(vma, nr, pages); return xen_xlate_unmap_gfn_range(vma, nr, pages);
} }
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
static void xen_percpu_init(void) static void xen_percpu_init(void)
{ {
......
...@@ -2812,9 +2812,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, ...@@ -2812,9 +2812,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
return 0; return 0;
} }
static int do_remap_mfn(struct vm_area_struct *vma, static int do_remap_gfn(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
xen_pfn_t *mfn, int nr, xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot, int *err_ptr, pgprot_t prot,
unsigned domid, unsigned domid,
struct page **pages) struct page **pages)
...@@ -2830,14 +2830,14 @@ static int do_remap_mfn(struct vm_area_struct *vma, ...@@ -2830,14 +2830,14 @@ static int do_remap_mfn(struct vm_area_struct *vma,
if (xen_feature(XENFEAT_auto_translated_physmap)) { if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_XEN_PVH #ifdef CONFIG_XEN_PVH
/* We need to update the local page tables and the xen HAP */ /* We need to update the local page tables and the xen HAP */
return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
prot, domid, pages); prot, domid, pages);
#else #else
return -EINVAL; return -EINVAL;
#endif #endif
} }
rmd.mfn = mfn; rmd.mfn = gfn;
rmd.prot = prot; rmd.prot = prot;
/* We use the err_ptr to indicate if there we are doing a contigious /* We use the err_ptr to indicate if there we are doing a contigious
* mapping or a discontigious mapping. */ * mapping or a discontigious mapping. */
...@@ -2865,8 +2865,8 @@ static int do_remap_mfn(struct vm_area_struct *vma, ...@@ -2865,8 +2865,8 @@ static int do_remap_mfn(struct vm_area_struct *vma,
batch_left, &done, domid); batch_left, &done, domid);
/* /*
* @err_ptr may be the same buffer as @mfn, so * @err_ptr may be the same buffer as @gfn, so
* only clear it after each chunk of @mfn is * only clear it after each chunk of @gfn is
* used. * used.
*/ */
if (err_ptr) { if (err_ptr) {
...@@ -2896,19 +2896,19 @@ static int do_remap_mfn(struct vm_area_struct *vma, ...@@ -2896,19 +2896,19 @@ static int do_remap_mfn(struct vm_area_struct *vma,
return err < 0 ? err : mapped; return err < 0 ? err : mapped;
} }
int xen_remap_domain_mfn_range(struct vm_area_struct *vma, int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
xen_pfn_t mfn, int nr, xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid, pgprot_t prot, unsigned domid,
struct page **pages) struct page **pages)
{ {
return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages); return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
} }
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
int xen_remap_domain_mfn_array(struct vm_area_struct *vma, int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
xen_pfn_t *mfn, int nr, xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot, int *err_ptr, pgprot_t prot,
unsigned domid, struct page **pages) unsigned domid, struct page **pages)
{ {
...@@ -2917,13 +2917,13 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma, ...@@ -2917,13 +2917,13 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
* cause of "wrong memory was mapped in". * cause of "wrong memory was mapped in".
*/ */
BUG_ON(err_ptr == NULL); BUG_ON(err_ptr == NULL);
return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages); return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
} }
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
/* Returns: 0 success */ /* Returns: 0 success */
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages) int numpgs, struct page **pages)
{ {
if (!pages || !xen_feature(XENFEAT_auto_translated_physmap)) if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
...@@ -2935,4 +2935,4 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, ...@@ -2935,4 +2935,4 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
return -EINVAL; return -EINVAL;
#endif #endif
} }
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
...@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size, ...@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
return ret; return ret;
} }
struct mmap_mfn_state { struct mmap_gfn_state {
unsigned long va; unsigned long va;
struct vm_area_struct *vma; struct vm_area_struct *vma;
domid_t domain; domid_t domain;
}; };
static int mmap_mfn_range(void *data, void *state) static int mmap_gfn_range(void *data, void *state)
{ {
struct privcmd_mmap_entry *msg = data; struct privcmd_mmap_entry *msg = data;
struct mmap_mfn_state *st = state; struct mmap_gfn_state *st = state;
struct vm_area_struct *vma = st->vma; struct vm_area_struct *vma = st->vma;
int rc; int rc;
...@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state) ...@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
return -EINVAL; return -EINVAL;
rc = xen_remap_domain_mfn_range(vma, rc = xen_remap_domain_gfn_range(vma,
msg->va & PAGE_MASK, msg->va & PAGE_MASK,
msg->mfn, msg->npages, msg->mfn, msg->npages,
vma->vm_page_prot, vma->vm_page_prot,
...@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata) ...@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
struct vm_area_struct *vma; struct vm_area_struct *vma;
int rc; int rc;
LIST_HEAD(pagelist); LIST_HEAD(pagelist);
struct mmap_mfn_state state; struct mmap_gfn_state state;
/* We only support privcmd_ioctl_mmap_batch for auto translated. */ /* We only support privcmd_ioctl_mmap_batch for auto translated. */
if (xen_feature(XENFEAT_auto_translated_physmap)) if (xen_feature(XENFEAT_auto_translated_physmap))
...@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata) ...@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry), rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
&pagelist, &pagelist,
mmap_mfn_range, &state); mmap_gfn_range, &state);
out_up: out_up:
...@@ -299,18 +299,18 @@ struct mmap_batch_state { ...@@ -299,18 +299,18 @@ struct mmap_batch_state {
int global_error; int global_error;
int version; int version;
/* User-space mfn array to store errors in the second pass for V1. */ /* User-space gfn array to store errors in the second pass for V1. */
xen_pfn_t __user *user_mfn; xen_pfn_t __user *user_gfn;
/* User-space int array to store errors in the second pass for V2. */ /* User-space int array to store errors in the second pass for V2. */
int __user *user_err; int __user *user_err;
}; };
/* auto translated dom0 note: if domU being created is PV, then mfn is /* auto translated dom0 note: if domU being created is PV, then gfn is
* mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP). * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
*/ */
static int mmap_batch_fn(void *data, int nr, void *state) static int mmap_batch_fn(void *data, int nr, void *state)
{ {
xen_pfn_t *mfnp = data; xen_pfn_t *gfnp = data;
struct mmap_batch_state *st = state; struct mmap_batch_state *st = state;
struct vm_area_struct *vma = st->vma; struct vm_area_struct *vma = st->vma;
struct page **pages = vma->vm_private_data; struct page **pages = vma->vm_private_data;
...@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state) ...@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
cur_pages = &pages[st->index]; cur_pages = &pages[st->index];
BUG_ON(nr < 0); BUG_ON(nr < 0);
ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr, ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
(int *)mfnp, st->vma->vm_page_prot, (int *)gfnp, st->vma->vm_page_prot,
st->domain, cur_pages); st->domain, cur_pages);
/* Adjust the global_error? */ /* Adjust the global_error? */
...@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st) ...@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
if (st->version == 1) { if (st->version == 1) {
if (err) { if (err) {
xen_pfn_t mfn; xen_pfn_t gfn;
ret = get_user(mfn, st->user_mfn); ret = get_user(gfn, st->user_gfn);
if (ret < 0) if (ret < 0)
return ret; return ret;
/* /*
* V1 encodes the error codes in the 32bit top * V1 encodes the error codes in the 32bit top
* nibble of the mfn (with its known * nibble of the gfn (with its known
* limitations vis-a-vis 64 bit callers). * limitations vis-a-vis 64 bit callers).
*/ */
mfn |= (err == -ENOENT) ? gfn |= (err == -ENOENT) ?
PRIVCMD_MMAPBATCH_PAGED_ERROR : PRIVCMD_MMAPBATCH_PAGED_ERROR :
PRIVCMD_MMAPBATCH_MFN_ERROR; PRIVCMD_MMAPBATCH_MFN_ERROR;
return __put_user(mfn, st->user_mfn++); return __put_user(gfn, st->user_gfn++);
} else } else
st->user_mfn++; st->user_gfn++;
} else { /* st->version == 2 */ } else { /* st->version == 2 */
if (err) if (err)
return __put_user(err, st->user_err++); return __put_user(err, st->user_err++);
...@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state) ...@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
return 0; return 0;
} }
/* Allocate pfns that are then mapped with gmfns from foreign domid. Update /* Allocate pfns that are then mapped with gfns from foreign domid. Update
* the vma with the page info to use later. * the vma with the page info to use later.
* Returns: 0 if success, otherwise -errno * Returns: 0 if success, otherwise -errno
*/ */
...@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) ...@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
if (state.global_error) { if (state.global_error) {
/* Write back errors in second pass. */ /* Write back errors in second pass. */
state.user_mfn = (xen_pfn_t *)m.arr; state.user_gfn = (xen_pfn_t *)m.arr;
state.user_err = m.err; state.user_err = m.err;
ret = traverse_pages_block(m.num, sizeof(xen_pfn_t), ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_return_errors, &state); &pagelist, mmap_return_errors, &state);
...@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma) ...@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
return; return;
rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
if (rc == 0) if (rc == 0)
free_xenballooned_pages(numpgs, pages); free_xenballooned_pages(numpgs, pages);
else else
......
...@@ -38,8 +38,8 @@ ...@@ -38,8 +38,8 @@
#include <xen/interface/xen.h> #include <xen/interface/xen.h>
#include <xen/interface/memory.h> #include <xen/interface/memory.h>
/* map fgmfn of domid to lpfn in the current domain */ /* map fgfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
unsigned int domid) unsigned int domid)
{ {
int rc; int rc;
...@@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, ...@@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
.size = 1, .size = 1,
.space = XENMAPSPACE_gmfn_foreign, .space = XENMAPSPACE_gmfn_foreign,
}; };
xen_ulong_t idx = fgmfn; xen_ulong_t idx = fgfn;
xen_pfn_t gpfn = lpfn; xen_pfn_t gpfn = lpfn;
int err = 0; int err = 0;
...@@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, ...@@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
} }
struct remap_data { struct remap_data {
xen_pfn_t *fgmfn; /* foreign domain's gmfn */ xen_pfn_t *fgfn; /* foreign domain's gfn */
pgprot_t prot; pgprot_t prot;
domid_t domid; domid_t domid;
struct vm_area_struct *vma; struct vm_area_struct *vma;
int index; int index;
struct page **pages; struct page **pages;
struct xen_remap_mfn_info *info; struct xen_remap_gfn_info *info;
int *err_ptr; int *err_ptr;
int mapped; int mapped;
}; };
...@@ -82,20 +82,20 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, ...@@ -82,20 +82,20 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
int rc; int rc;
rc = map_foreign_page(pfn, *info->fgmfn, info->domid); rc = map_foreign_page(pfn, *info->fgfn, info->domid);
*info->err_ptr++ = rc; *info->err_ptr++ = rc;
if (!rc) { if (!rc) {
set_pte_at(info->vma->vm_mm, addr, ptep, pte); set_pte_at(info->vma->vm_mm, addr, ptep, pte);
info->mapped++; info->mapped++;
} }
info->fgmfn++; info->fgfn++;
return 0; return 0;
} }
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
xen_pfn_t *mfn, int nr, xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot, int *err_ptr, pgprot_t prot,
unsigned domid, unsigned domid,
struct page **pages) struct page **pages)
...@@ -108,7 +108,7 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, ...@@ -108,7 +108,7 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
x86 PVOPS */ x86 PVOPS */
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
data.fgmfn = mfn; data.fgfn = gfn;
data.prot = prot; data.prot = prot;
data.domid = domid; data.domid = domid;
data.vma = vma; data.vma = vma;
......
...@@ -44,6 +44,10 @@ struct privcmd_hypercall { ...@@ -44,6 +44,10 @@ struct privcmd_hypercall {
struct privcmd_mmap_entry { struct privcmd_mmap_entry {
__u64 va; __u64 va;
/*
* This should be a GFN. It's not possible to change the name because
* it's exposed to the user-space.
*/
__u64 mfn; __u64 mfn;
__u64 npages; __u64 npages;
}; };
......
...@@ -30,7 +30,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); ...@@ -30,7 +30,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
struct vm_area_struct; struct vm_area_struct;
/* /*
* xen_remap_domain_mfn_array() - map an array of foreign frames * xen_remap_domain_gfn_array() - map an array of foreign frames
* @vma: VMA to map the pages into * @vma: VMA to map the pages into
* @addr: Address at which to map the pages * @addr: Address at which to map the pages
* @gfn: Array of GFNs to map * @gfn: Array of GFNs to map
...@@ -46,14 +46,14 @@ struct vm_area_struct; ...@@ -46,14 +46,14 @@ struct vm_area_struct;
* Returns the number of successfully mapped frames, or a -ve error * Returns the number of successfully mapped frames, or a -ve error
* code. * code.
*/ */
int xen_remap_domain_mfn_array(struct vm_area_struct *vma, int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
xen_pfn_t *gfn, int nr, xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot, int *err_ptr, pgprot_t prot,
unsigned domid, unsigned domid,
struct page **pages); struct page **pages);
/* xen_remap_domain_mfn_range() - map a range of foreign frames /* xen_remap_domain_gfn_range() - map a range of foreign frames
* @vma: VMA to map the pages into * @vma: VMA to map the pages into
* @addr: Address at which to map the pages * @addr: Address at which to map the pages
* @gfn: First GFN to map. * @gfn: First GFN to map.
...@@ -65,12 +65,12 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma, ...@@ -65,12 +65,12 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
* Returns the number of successfully mapped frames, or a -ve error * Returns the number of successfully mapped frames, or a -ve error
* code. * code.
*/ */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma, int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
xen_pfn_t gfn, int nr, xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid, pgprot_t prot, unsigned domid,
struct page **pages); struct page **pages);
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages); int numpgs, struct page **pages);
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment