Commit 82fd5ee9 authored by Linus Torvalds

Merge tag 'for-linus-6.8-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - update some Xen PV interface related headers

 - fix some kernel-doc comments in the xenbus driver

 - fix the Xen gntdev driver to not access the struct page of pages
   imported from a DMA-buf

* tag 'for-linus-6.8-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/gntdev: Fix the abuse of underlying struct page in DMA-buf import
  xen/xenbus: client: fix kernel-doc comments
  xen: update PV-device interface headers
parents 09d1c6a8 2d2db7d4
drivers/xen/gntdev-dmabuf.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
@@ -50,7 +51,7 @@ struct gntdev_dmabuf {
 	/* Number of pages this buffer has. */
 	int nr_pages;
-	/* Pages of this buffer. */
+	/* Pages of this buffer (only for dma-buf export). */
 	struct page **pages;
 };
@@ -484,7 +485,7 @@ static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
 /* DMA buffer import support. */
 static int
-dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
 				int count, int domid)
 {
 	grant_ref_t priv_gref_head;
@@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
 		}
 		gnttab_grant_foreign_access_ref(cur_ref, domid,
-						xen_page_to_gfn(pages[i]), 0);
+						gfns[i], 0);
 		refs[i] = cur_ref;
 	}
@@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
 {
-	kfree(gntdev_dmabuf->pages);
 	kfree(gntdev_dmabuf->u.imp.refs);
 	kfree(gntdev_dmabuf);
 }
@@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
 	if (!gntdev_dmabuf->u.imp.refs)
 		goto fail;
-	gntdev_dmabuf->pages = kcalloc(count,
-				       sizeof(gntdev_dmabuf->pages[0]),
-				       GFP_KERNEL);
-	if (!gntdev_dmabuf->pages)
-		goto fail;
 	gntdev_dmabuf->nr_pages = count;
 	for (i = 0; i < count; i++)
@@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 	struct dma_buf *dma_buf;
 	struct dma_buf_attachment *attach;
 	struct sg_table *sgt;
-	struct sg_page_iter sg_iter;
+	struct sg_dma_page_iter sg_iter;
+	unsigned long *gfns;
 	int i;
 	dma_buf = dma_buf_get(fd);
@@ -624,26 +619,31 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 	gntdev_dmabuf->u.imp.sgt = sgt;
-	/* Now convert sgt to array of pages and check for page validity. */
-	i = 0;
-	for_each_sgtable_page(sgt, &sg_iter, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		/*
-		 * Check if page is valid: this can happen if we are given
-		 * a page from VRAM or other resources which are not backed
-		 * by a struct page.
-		 */
-		if (!pfn_valid(page_to_pfn(page))) {
-			ret = ERR_PTR(-EINVAL);
-			goto fail_unmap;
-		}
-		gntdev_dmabuf->pages[i++] = page;
+	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
+	if (!gfns) {
+		ret = ERR_PTR(-ENOMEM);
+		goto fail_unmap;
+	}
+	/*
+	 * Now convert sgt to array of gfns without accessing underlying pages.
+	 * It is not allowed to access the underlying struct page of an sg table
+	 * exported by DMA-buf, but since we deal with special Xen dma device here
+	 * (not a normal physical one) look at the dma addresses in the sg table
+	 * and then calculate gfns directly from them.
+	 */
+	i = 0;
+	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
+		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
+		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
+		gfns[i++] = pfn_to_gfn(pfn);
 	}
-	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
+	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
 						      gntdev_dmabuf->u.imp.refs,
 						      count, domid));
+	kfree(gfns);
 	if (IS_ERR(ret))
 		goto fail_end_access;
...
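The heart of the import fix is the address-translation chain inside the new loop: the sg table exported by the DMA-buf is walked by DMA address only, and each address is turned into a guest frame number without ever dereferencing the underlying struct page. Below is a minimal sketch of that chain pulled out into a standalone helper; the name dma_addr_to_gfn is hypothetical (the driver does this inline), and the includes assume the same kernel context as gntdev-dmabuf.c:

#include <linux/dma-direct.h>	/* dma_to_phys() */
#include <xen/page.h>		/* XEN_PFN_DOWN, bfn_to_pfn, pfn_to_gfn */

/*
 * Illustrative helper, not part of the driver: convert one DMA address from
 * a DMA-buf sg table into a frame number usable in grant-table operations.
 */
static unsigned long dma_addr_to_gfn(struct device *dev, dma_addr_t addr)
{
	/*
	 * Translate the device-visible DMA address back to a physical
	 * address, then shift it down to a frame number. On Xen this frame
	 * is a bus frame number (bfn).
	 */
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, addr));

	/* bfn -> local pseudo-physical frame -> guest frame number. */
	return pfn_to_gfn(bfn_to_pfn(bfn));
}

The key design point is that no call in this chain touches a struct page, so it also works for buffers (e.g. VRAM) that are not backed by one.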
drivers/xen/xenbus/xenbus_client.c
@@ -119,11 +119,13 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
  * @callback: callback to register
  *
  * Register a @watch on the given path, using the given xenbus_watch structure
- * for storage, and the given @callback function as the callback. Return 0 on
- * success, or -errno on error. On success, the given @path will be saved as
- * @watch->node, and remains the caller's to free. On error, @watch->node will
+ * for storage, and the given @callback function as the callback. On success,
+ * the given @path will be saved as @watch->node, and remains the
+ * caller's to free. On error, @watch->node will
  * be NULL, the device will switch to %XenbusStateClosing, and the error will
  * be saved in the store.
+ *
+ * Returns: %0 on success or -errno on error
  */
 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
 		      struct xenbus_watch *watch,
@@ -160,12 +162,14 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
  * @pathfmt: format of path to watch
  *
  * Register a watch on the given @path, using the given xenbus_watch
- * structure for storage, and the given @callback function as the callback.
- * Return 0 on success, or -errno on error. On success, the watched path
- * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
- * kfree(). On error, watch->node will be NULL, so the caller has nothing to
+ * structure for storage, and the given @callback function as the
+ * callback. On success, the watched path (@path/@path2) will be saved
+ * as @watch->node, and becomes the caller's to kfree().
+ * On error, watch->node will be NULL, so the caller has nothing to
  * free, the device will switch to %XenbusStateClosing, and the error will be
  * saved in the store.
+ *
+ * Returns: %0 on success or -errno on error
  */
 int xenbus_watch_pathfmt(struct xenbus_device *dev,
 			 struct xenbus_watch *watch,
@@ -255,13 +259,15 @@ __xenbus_switch_state(struct xenbus_device *dev,
 }
 /**
- * xenbus_switch_state
+ * xenbus_switch_state - save the new state of a driver
  * @dev: xenbus device
  * @state: new state
  *
  * Advertise in the store a change of the given driver to the given new_state.
- * Return 0 on success, or -errno on error. On error, the device will switch
- * to XenbusStateClosing, and the error will be saved in the store.
+ * On error, the device will switch to XenbusStateClosing, and the error
+ * will be saved in the store.
+ *
+ * Returns: %0 on success or -errno on error
  */
 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
 {
@@ -305,7 +311,7 @@ static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
 }
 /**
- * xenbus_dev_error
+ * xenbus_dev_error - place an error message into the store
  * @dev: xenbus device
  * @err: error to report
  * @fmt: error message format
@@ -324,7 +330,7 @@ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
 EXPORT_SYMBOL_GPL(xenbus_dev_error);
 /**
- * xenbus_dev_fatal
+ * xenbus_dev_fatal - put an error messages into the store and then shutdown
  * @dev: xenbus device
  * @err: error to report
  * @fmt: error message format
@@ -346,7 +352,7 @@ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
-/**
+/*
  * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
  * avoiding recursion within xenbus_switch_state.
  */
@@ -453,7 +459,7 @@ void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
 }
 EXPORT_SYMBOL_GPL(xenbus_teardown_ring);
-/**
+/*
  * Allocate an event channel for the given xenbus_device, assigning the newly
  * created local port to *port. Return 0 on success, or -errno on error. On
  * error, the device will switch to XenbusStateClosing, and the error will be
@@ -479,7 +485,7 @@ int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
 EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
-/**
+/*
  * Free an existing event channel. Returns 0 on success or -errno on error.
  */
 int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
@@ -499,7 +505,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 /**
- * xenbus_map_ring_valloc
+ * xenbus_map_ring_valloc - allocate & map pages of VA space
  * @dev: xenbus device
  * @gnt_refs: grant reference array
  * @nr_grefs: number of grant references
@@ -507,10 +513,11 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  *
  * Map @nr_grefs pages of memory into this domain from another
  * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
- * pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address. Returns 0 on success, and -errno on
- * error. If an error is returned, device will switch to
+ * pages of virtual address space, maps the pages to that address, and sets
+ * *vaddr to that address. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
+ *
+ * Returns: %0 on success or -errno on error
  */
 int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
 			   unsigned int nr_grefs, void **vaddr)
@@ -599,14 +606,15 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
 }
 /**
- * xenbus_unmap_ring
+ * xenbus_unmap_ring - unmap memory from another domain
  * @dev: xenbus device
  * @handles: grant handle array
  * @nr_handles: number of handles in the array
  * @vaddrs: addresses to unmap
  *
  * Unmap memory in this domain that was imported from another domain.
- * Returns 0 on success and returns GNTST_* on error
+ *
+ * Returns: %0 on success or GNTST_* on error
  * (see xen/include/interface/grant_table.h).
  */
 static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
@@ -712,7 +720,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
 }
 /**
- * xenbus_unmap_ring_vfree
+ * xenbus_unmap_ring_vfree - unmap a page of memory from another domain
  * @dev: xenbus device
  * @vaddr: addr to unmap
  *
@@ -720,7 +728,8 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
  * Unmap a page of memory in this domain that was imported from another domain.
  * Use xenbus_unmap_ring_vfree if you mapped in your memory with
  * xenbus_map_ring_valloc (it will free the virtual address space).
- * Returns 0 on success and returns GNTST_* on error
+ *
+ * Returns: %0 on success or GNTST_* on error
  * (see xen/include/interface/grant_table.h).
  */
 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
@@ -916,10 +925,10 @@ static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
 }
 /**
- * xenbus_read_driver_state
+ * xenbus_read_driver_state - read state from a store path
  * @path: path for driver
  *
- * Return the state of the driver rooted at the given store path, or
+ * Returns: the state of the driver rooted at the given store path, or
  * XenbusStateUnknown if no state can be read.
  */
 enum xenbus_state xenbus_read_driver_state(const char *path)
...
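All of the xenbus_client.c hunks apply the same kernel-doc conventions: the opening line gains a short description after a dash, the free-text "Return 0 on success, or -errno" sentence moves into a dedicated "Returns:" section, and comments that do not document a specific exported function are demoted from "/**" to a plain "/*". A sketch of the resulting shape for a made-up function (xenbus_example_op does not exist; it only illustrates the convention):

/**
 * xenbus_example_op - one-line summary of what the function does
 * @dev: xenbus device
 * @arg: argument being documented
 *
 * Longer description of the behaviour, including what happens on error
 * (for instance the device switching to %XenbusStateClosing and the error
 * being saved in the store).
 *
 * Returns: %0 on success or -errno on error
 */
int xenbus_example_op(struct xenbus_device *dev, int arg);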
include/xen/interface/io/displif.h
@@ -537,7 +537,7 @@ struct xendispl_dbuf_create_req {
 struct xendispl_page_directory {
 	grant_ref_t gref_dir_next_page;
-	grant_ref_t gref[1]; /* Variable length */
+	grant_ref_t gref[];
 };
 /*
...
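Replacing the one-element gref[1] array with a C99 flexible array member changes how users of this header must size the structure: sizeof() no longer accounts for any gref entries, so the trailing array has to be sized explicitly at allocation time. A simplified sketch of how a frontend might build such a directory in kernel context, assuming struct_size() from <linux/overflow.h>; the helper name and the kzalloc-based allocation are illustrative only (in a real frontend the directory lives in a granted shared page):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <xen/interface/io/displif.h>

/* Illustrative only: allocate a page directory carrying nr_grefs references. */
static struct xendispl_page_directory *alloc_page_dir(unsigned int nr_grefs,
						      const grant_ref_t *grefs)
{
	struct xendispl_page_directory *dir;
	unsigned int i;

	/* struct_size() = sizeof(*dir) + nr_grefs * sizeof(dir->gref[0]). */
	dir = kzalloc(struct_size(dir, gref, nr_grefs), GFP_KERNEL);
	if (!dir)
		return NULL;

	dir->gref_dir_next_page = 0; /* no further page in the directory chain */
	for (i = 0; i < nr_grefs; i++)
		dir->gref[i] = grefs[i];

	return dir;
}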
include/xen/interface/io/ring.h
@@ -95,7 +95,7 @@ struct __name##_sring { \
 	RING_IDX req_prod, req_event; \
 	RING_IDX rsp_prod, rsp_event; \
 	uint8_t __pad[48]; \
-	union __name##_sring_entry ring[1]; /* variable-length */ \
+	union __name##_sring_entry ring[]; \
 }; \
 \
 /* "Front" end's private variables */ \
...
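The shared-ring macro gets the same treatment: the trailing ring[1] becomes ring[]. This is safe because the number of ring entries is never derived from sizeof() the sring structure; it is computed from the byte size passed to the init macros. A minimal sketch of a frontend setting up such a ring, assuming a DEFINE_RING_TYPES(myif, struct myif_request, struct myif_response) instantiation elsewhere (myif and its request/response types are made up for illustration):

#include <linux/gfp.h>
#include <xen/interface/io/ring.h>

static int myif_setup_ring(struct myif_front_ring *front_ring)
{
	struct myif_sring *sring;

	/* One shared page; the entry count is derived from this size. */
	sring = (struct myif_sring *)get_zeroed_page(GFP_KERNEL);
	if (!sring)
		return -ENOMEM;

	SHARED_RING_INIT(sring);
	/* PAGE_SIZE here is what determines the ring size, not sizeof(*sring). */
	FRONT_RING_INIT(front_ring, sring, PAGE_SIZE);

	return 0;
}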
include/xen/interface/io/sndif.h
@@ -659,7 +659,7 @@ struct xensnd_open_req {
 struct xensnd_page_directory {
 	grant_ref_t gref_dir_next_page;
-	grant_ref_t gref[1]; /* Variable length */
+	grant_ref_t gref[];
 };
 /*
...