Commit 035e3a43 authored by Oleksandr Tyshchenko, committed by Juergen Gross

xen/virtio: Optimize the setup of "xen-grant-dma" devices

This is needed to avoid having to parse the same device-tree
several times for a given device.

For this to work we need to install the xen_virtio_restricted_mem_acc
callback in Arm's xen_guest_init() (the same callback that x86's
PV and HVM modes already use) and remove the manual assignment in
xen_setup_dma_ops(). We also need to split the code that initializes
backend_domid into a separate function.

Prior to this patch we parsed the device-tree three times:
1. xen_setup_dma_ops()->...->xen_is_dt_grant_dma_device()
2. xen_setup_dma_ops()->...->xen_dt_grant_init_backend_domid()
3. xen_virtio_mem_acc()->...->xen_is_dt_grant_dma_device()

With this patch we parse the device-tree only once, in
xen_virtio_restricted_mem_acc()->...->xen_dt_grant_init_backend_domid()

Other benefits are:
- No divergence from x86 when setting up Xen grant DMA ops
- Several global functions can be dropped
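
For reference, the device-tree layout that xen_dt_grant_init_backend_domid()
parses follows the "xen,grant-dma" IOMMU binding, where the single
iommu-cell carries the ID of the domain running the backend. An
illustrative fragment (node names, addresses and the domid below are
made up, not taken from this patch):

    xen_grant_dma: xen-grant-dma {
            compatible = "xen,grant-dma";
            #iommu-cells = <1>;
    };

    virtio@2000000 {
            compatible = "virtio,mmio";
            reg = <0x2000000 0x200>;
            /* The single cell is the backend domain ID (here domid 1) */
            iommus = <&xen_grant_dma 1>;
    };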
Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Reviewed-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Link: https://lore.kernel.org/r/20221025162004.8501-2-olekstysh@gmail.com
Signed-off-by: Juergen Gross <jgross@suse.com>
parent 76dcd734
arch/arm/xen/enlighten.c
@@ -445,7 +445,7 @@ static int __init xen_guest_init(void)
 		return 0;
 
 	if (IS_ENABLED(CONFIG_XEN_VIRTIO))
-		virtio_set_mem_acc_cb(xen_virtio_mem_acc);
+		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
 
 	if (!acpi_disabled)
 		xen_acpi_guest_init();
drivers/xen/grant-dma-ops.c
@@ -292,50 +292,20 @@ static const struct dma_map_ops xen_grant_dma_ops = {
 	.dma_supported = xen_grant_dma_supported,
 };
 
-static bool xen_is_dt_grant_dma_device(struct device *dev)
-{
-	struct device_node *iommu_np;
-	bool has_iommu;
-
-	iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
-	has_iommu = iommu_np &&
-		    of_device_is_compatible(iommu_np, "xen,grant-dma");
-	of_node_put(iommu_np);
-
-	return has_iommu;
-}
-
-bool xen_is_grant_dma_device(struct device *dev)
-{
-	/* XXX Handle only DT devices for now */
-	if (dev->of_node)
-		return xen_is_dt_grant_dma_device(dev);
-
-	return false;
-}
-
-bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
-	if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
-		return true;
-
-	return xen_is_grant_dma_device(dev->dev.parent);
-}
-
 static int xen_dt_grant_init_backend_domid(struct device *dev,
-					   struct xen_grant_dma_data *data)
+					   domid_t *backend_domid)
 {
 	struct of_phandle_args iommu_spec;
 
 	if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
				       0, &iommu_spec)) {
-		dev_err(dev, "Cannot parse iommus property\n");
+		dev_dbg(dev, "Cannot parse iommus property\n");
 		return -ESRCH;
 	}
 
 	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
	    iommu_spec.args_count != 1) {
-		dev_err(dev, "Incompatible IOMMU node\n");
+		dev_dbg(dev, "Incompatible IOMMU node\n");
 		of_node_put(iommu_spec.np);
 		return -ESRCH;
 	}
@@ -346,12 +316,28 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
 	 * The endpoint ID here means the ID of the domain where the
 	 * corresponding backend is running
 	 */
-	data->backend_domid = iommu_spec.args[0];
+	*backend_domid = iommu_spec.args[0];
 
 	return 0;
 }
 
-void xen_grant_setup_dma_ops(struct device *dev)
+static int xen_grant_init_backend_domid(struct device *dev,
+					domid_t *backend_domid)
+{
+	int ret = -ENODEV;
+
+	if (dev->of_node) {
+		ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
+	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
+		dev_info(dev, "Using dom0 as backend\n");
+		*backend_domid = 0;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
 {
 	struct xen_grant_dma_data *data;
@@ -365,16 +351,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
 	if (!data)
 		goto err;
 
-	if (dev->of_node) {
-		if (xen_dt_grant_init_backend_domid(dev, data))
-			goto err;
-	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
-		dev_info(dev, "Using dom0 as backend\n");
-		data->backend_domid = 0;
-	} else {
-		/* XXX ACPI device unsupported for now */
-		goto err;
-	}
+	data->backend_domid = backend_domid;
 
 	if (store_xen_grant_dma_data(dev, data)) {
 		dev_err(dev, "Cannot store Xen grant DMA data\n");
@@ -392,12 +369,14 @@ void xen_grant_setup_dma_ops(struct device *dev)
 
 bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
 {
-	bool ret = xen_virtio_mem_acc(dev);
+	domid_t backend_domid;
 
-	if (ret)
-		xen_grant_setup_dma_ops(dev->dev.parent);
+	if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
+		xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
+		return true;
+	}
 
-	return ret;
+	return false;
 }
 
 MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
include/xen/arm/xen-ops.h
@@ -8,9 +8,7 @@
 static inline void xen_setup_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_XEN
-	if (xen_is_grant_dma_device(dev))
-		xen_grant_setup_dma_ops(dev);
-	else if (xen_swiotlb_detect())
+	if (xen_swiotlb_detect())
 		dev->dma_ops = &xen_swiotlb_dma_ops;
 #endif
 }
include/xen/xen-ops.h
@@ -216,26 +216,10 @@ static inline void xen_preemptible_hcall_end(void) { }
 #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
 
 #ifdef CONFIG_XEN_GRANT_DMA_OPS
-void xen_grant_setup_dma_ops(struct device *dev);
-bool xen_is_grant_dma_device(struct device *dev);
-bool xen_virtio_mem_acc(struct virtio_device *dev);
 bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
 #else
-static inline void xen_grant_setup_dma_ops(struct device *dev)
-{
-}
-static inline bool xen_is_grant_dma_device(struct device *dev)
-{
-	return false;
-}
-
 struct virtio_device;
-static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
-	return false;
-}
 static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
 {
 	return false;