Commit c18a2a28 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-misc-fixes-2022-04-22' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Two fixes for the raspberrypi panel initialisation, one fix for a logic
inversion in radeon, a build and pm refcounting fix for vc4, two reverts
for drm_of_get_bridge that caused a number of regressions and a locking
regression for amdgpu.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20220422084403.2xrhf3jusdej5yo4@houat
parents 70da382e 94f4c496
...@@ -128,6 +128,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs ...@@ -128,6 +128,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk; goto free_chunk;
} }
mutex_lock(&p->ctx->lock);
/* skip guilty context job */ /* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) { if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED; ret = -ECANCELED;
...@@ -709,6 +711,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, ...@@ -709,6 +711,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence); dma_fence_put(parser->fence);
if (parser->ctx) { if (parser->ctx) {
mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx); amdgpu_ctx_put(parser->ctx);
} }
if (parser->bo_list) if (parser->bo_list)
...@@ -1157,6 +1160,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, ...@@ -1157,6 +1160,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
{ {
int i, r; int i, r;
/* TODO: Investigate why we still need the context lock */
mutex_unlock(&p->ctx->lock);
for (i = 0; i < p->nchunks; ++i) { for (i = 0; i < p->nchunks; ++i) {
struct amdgpu_cs_chunk *chunk; struct amdgpu_cs_chunk *chunk;
...@@ -1167,32 +1173,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, ...@@ -1167,32 +1173,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk); r = amdgpu_cs_process_fence_dep(p, chunk);
if (r) if (r)
return r; goto out;
break; break;
case AMDGPU_CHUNK_ID_SYNCOBJ_IN: case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk); r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r) if (r)
return r; goto out;
break; break;
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk); r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r) if (r)
return r; goto out;
break; break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk); r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
if (r) if (r)
return r; goto out;
break; break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk); r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
if (r) if (r)
return r; goto out;
break; break;
} }
} }
return 0; out:
mutex_lock(&p->ctx->lock);
return r;
} }
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
...@@ -1368,6 +1376,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ...@@ -1368,6 +1376,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out; goto out;
r = amdgpu_cs_submit(&parser, cs); r = amdgpu_cs_submit(&parser, cs);
out: out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers); amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
......
...@@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ...@@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
kref_init(&ctx->refcount); kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock); spin_lock_init(&ctx->ring_lock);
mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter; ctx->reset_counter_query = ctx->reset_counter;
...@@ -357,6 +358,7 @@ static void amdgpu_ctx_fini(struct kref *ref) ...@@ -357,6 +358,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
drm_dev_exit(idx); drm_dev_exit(idx);
} }
mutex_destroy(&ctx->lock);
kfree(ctx); kfree(ctx);
} }
......
...@@ -49,6 +49,7 @@ struct amdgpu_ctx { ...@@ -49,6 +49,7 @@ struct amdgpu_ctx {
bool preamble_presented; bool preamble_presented;
int32_t init_priority; int32_t init_priority;
int32_t override_priority; int32_t override_priority;
struct mutex lock;
atomic_t guilty; atomic_t guilty;
unsigned long ras_counter_ce; unsigned long ras_counter_ce;
unsigned long ras_counter_ue; unsigned long ras_counter_ue;
......
...@@ -214,29 +214,6 @@ int drm_of_encoder_active_endpoint(struct device_node *node, ...@@ -214,29 +214,6 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
} }
EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint); EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
static int find_panel_or_bridge(struct device_node *node,
struct drm_panel **panel,
struct drm_bridge **bridge)
{
if (panel) {
*panel = of_drm_find_panel(node);
if (!IS_ERR(*panel))
return 0;
/* Clear the panel pointer in case of error. */
*panel = NULL;
}
/* No panel found yet, check for a bridge next. */
if (bridge) {
*bridge = of_drm_find_bridge(node);
if (*bridge)
return 0;
}
return -EPROBE_DEFER;
}
/** /**
* drm_of_find_panel_or_bridge - return connected panel or bridge device * drm_of_find_panel_or_bridge - return connected panel or bridge device
* @np: device tree node containing encoder output ports * @np: device tree node containing encoder output ports
...@@ -259,44 +236,49 @@ int drm_of_find_panel_or_bridge(const struct device_node *np, ...@@ -259,44 +236,49 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_panel **panel, struct drm_panel **panel,
struct drm_bridge **bridge) struct drm_bridge **bridge)
{ {
struct device_node *node; int ret = -EPROBE_DEFER;
int ret; struct device_node *remote;
if (!panel && !bridge) if (!panel && !bridge)
return -EINVAL; return -EINVAL;
if (panel) if (panel)
*panel = NULL; *panel = NULL;
if (bridge)
*bridge = NULL;
/* Check for a graph on the device node first. */
if (of_graph_is_present(np)) {
node = of_graph_get_remote_node(np, port, endpoint);
if (node) {
ret = find_panel_or_bridge(node, panel, bridge);
of_node_put(node);
if (!ret)
return 0;
}
}
/* Otherwise check for any child node other than port/ports. */ /*
for_each_available_child_of_node(np, node) { * of_graph_get_remote_node() produces a noisy error message if port
if (of_node_name_eq(node, "port") || * node isn't found and the absence of the port is a legit case here,
of_node_name_eq(node, "ports")) * so at first we silently check whether graph presents in the
continue; * device-tree node.
*/
if (!of_graph_is_present(np))
return -ENODEV;
ret = find_panel_or_bridge(node, panel, bridge); remote = of_graph_get_remote_node(np, port, endpoint);
of_node_put(node); if (!remote)
return -ENODEV;
if (panel) {
*panel = of_drm_find_panel(remote);
if (!IS_ERR(*panel))
ret = 0;
else
*panel = NULL;
}
/* No panel found yet, check for a bridge next. */
if (bridge) {
if (ret) {
*bridge = of_drm_find_bridge(remote);
if (*bridge)
ret = 0;
} else {
*bridge = NULL;
}
/* Stop at the first found occurrence. */
if (!ret)
return 0;
} }
return -EPROBE_DEFER; of_node_put(remote);
return ret;
} }
EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge); EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
......
...@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts, ...@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
ret = i2c_smbus_write_byte_data(ts->i2c, reg, val); ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
if (ret) if (ret)
dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret); dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
} }
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val) static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
...@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel) ...@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
return 0; return 0;
} }
static int rpi_touchscreen_enable(struct drm_panel *panel) static int rpi_touchscreen_prepare(struct drm_panel *panel)
{ {
struct rpi_touchscreen *ts = panel_to_ts(panel); struct rpi_touchscreen *ts = panel_to_ts(panel);
int i; int i;
...@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel) ...@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01); rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
msleep(100); msleep(100);
return 0;
}
static int rpi_touchscreen_enable(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
/* Turn on the backlight. */ /* Turn on the backlight. */
rpi_touchscreen_i2c_write(ts, REG_PWM, 255); rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
...@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel, ...@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
static const struct drm_panel_funcs rpi_touchscreen_funcs = { static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.disable = rpi_touchscreen_disable, .disable = rpi_touchscreen_disable,
.unprepare = rpi_touchscreen_noop, .unprepare = rpi_touchscreen_noop,
.prepare = rpi_touchscreen_noop, .prepare = rpi_touchscreen_prepare,
.enable = rpi_touchscreen_enable, .enable = rpi_touchscreen_enable,
.get_modes = rpi_touchscreen_get_modes, .get_modes = rpi_touchscreen_get_modes,
}; };
......
...@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev, ...@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
struct dma_fence *f; struct dma_fence *f;
int r = 0; int r = 0;
dma_resv_for_each_fence(&cursor, resv, shared, f) { dma_resv_for_each_fence(&cursor, resv, !shared, f) {
fence = to_radeon_fence(f); fence = to_radeon_fence(f);
if (fence && fence->rdev == rdev) if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence); radeon_sync_fence(sync, fence);
......
...@@ -2,6 +2,9 @@ ...@@ -2,6 +2,9 @@
config DRM_VC4 config DRM_VC4
tristate "Broadcom VC4 Graphics" tristate "Broadcom VC4 Graphics"
depends on ARCH_BCM || ARCH_BCM2835 || COMPILE_TEST depends on ARCH_BCM || ARCH_BCM2835 || COMPILE_TEST
# Make sure not 'y' when RASPBERRYPI_FIRMWARE is 'm'. This can only
# happen when COMPILE_TEST=y, hence the added !RASPBERRYPI_FIRMWARE.
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
depends on DRM depends on DRM
depends on SND && SND_SOC depends on SND && SND_SOC
depends on COMMON_CLK depends on COMMON_CLK
......
...@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) ...@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
unsigned long phy_clock; unsigned long phy_clock;
int ret; int ret;
ret = pm_runtime_get_sync(dev); ret = pm_runtime_resume_and_get(dev);
if (ret) { if (ret) {
DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port); DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return; return;
......
...@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo) ...@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
return container_of(bo, struct vmw_buffer_object, base); return container_of(bo, struct vmw_buffer_object, base);
} }
/**
* bo_is_vmw - check if the buffer object is a &vmw_buffer_object
* @bo: ttm buffer object to be checked
*
* Uses destroy function associated with the object to determine if this is
* a &vmw_buffer_object.
*
* Returns:
* true if the object is of &vmw_buffer_object type, false if not.
*/
static bool bo_is_vmw(struct ttm_buffer_object *bo)
{
return bo->destroy == &vmw_bo_bo_free ||
bo->destroy == &vmw_gem_destroy;
}
/** /**
* vmw_bo_pin_in_placement - Validate a buffer to placement. * vmw_bo_pin_in_placement - Validate a buffer to placement.
...@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, ...@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo); vmw_bo_unreference(&vbo);
if (unlikely(ret != 0 && ret != -ERESTARTSYS && if (unlikely(ret != 0)) {
ret != -EBUSY)) { if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
(unsigned int) arg->handle); (unsigned int) arg->handle);
return ret; return ret;
...@@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv, ...@@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
void vmw_bo_swap_notify(struct ttm_buffer_object *bo) void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{ {
/* Is @bo embedded in a struct vmw_buffer_object? */ /* Is @bo embedded in a struct vmw_buffer_object? */
if (vmw_bo_is_vmw_bo(bo)) if (!bo_is_vmw(bo))
return; return;
/* Kill any cached kernel maps before swapout */ /* Kill any cached kernel maps before swapout */
...@@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo, ...@@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct vmw_buffer_object *vbo; struct vmw_buffer_object *vbo;
/* Make sure @bo is embedded in a struct vmw_buffer_object? */ /* Make sure @bo is embedded in a struct vmw_buffer_object? */
if (vmw_bo_is_vmw_bo(bo)) if (!bo_is_vmw(bo))
return; return;
vbo = container_of(bo, struct vmw_buffer_object, base); vbo = container_of(bo, struct vmw_buffer_object, base);
...@@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo, ...@@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB) if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo); vmw_resource_unbind_list(vbo);
} }
/**
* vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
* @bo: buffer object to be checked
*
* Uses destroy function associated with the object to determine if this is
* a &vmw_buffer_object.
*
* Returns:
* true if the object is of &vmw_buffer_object type, false if not.
*/
bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &vmw_bo_bo_free ||
bo->destroy == &vmw_gem_destroy)
return true;
return false;
}
...@@ -998,13 +998,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) ...@@ -998,13 +998,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
goto out_no_fman; goto out_no_fman;
} }
drm_vma_offset_manager_init(&dev_priv->vma_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver, ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
dev_priv->drm.dev, dev_priv->drm.dev,
dev_priv->drm.anon_inode->i_mapping, dev_priv->drm.anon_inode->i_mapping,
&dev_priv->vma_manager, dev_priv->drm.vma_offset_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent, dev_priv->map_mode == vmw_dma_alloc_coherent,
false); false);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
...@@ -1174,7 +1171,6 @@ static void vmw_driver_unload(struct drm_device *dev) ...@@ -1174,7 +1171,6 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_devcaps_destroy(dev_priv); vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv); vmw_vram_manager_fini(dev_priv);
ttm_device_fini(&dev_priv->bdev); ttm_device_fini(&dev_priv->bdev);
drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv); vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman); vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
...@@ -1398,7 +1394,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr, ...@@ -1398,7 +1394,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
return drm_get_unmapped_area(file, uaddr, len, pgoff, flags, return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
&dev_priv->vma_manager); dev_priv->drm.vma_offset_manager);
} }
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
......
...@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) ...@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base); container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res; struct vmw_resource *res = &user_srf->srf.res;
if (base->shareable && res && res->backup)
drm_gem_object_put(&res->backup->base.base);
*p_base = NULL; *p_base = NULL;
vmw_resource_unreference(&res); vmw_resource_unreference(&res);
} }
...@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock; goto out_unlock;
} }
vmw_bo_reference(res->backup); vmw_bo_reference(res->backup);
drm_gem_object_get(&res->backup->base.base);
} }
tmp = vmw_resource_reference(&srf->res); tmp = vmw_resource_reference(&srf->res);
...@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, ...@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
&res->backup); &res->backup);
if (ret == 0) if (ret == 0)
vmw_bo_reference(res->backup); vmw_bo_reference(res->backup);
} }
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
...@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev, ...@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vma_node_offset_addr(&res->backup->base.base.vma_node); drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.base.size; rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle; rep->buffer_handle = backup_handle;
if (user_srf->prime.base.shareable)
drm_gem_object_get(&res->backup->base.base);
} else { } else {
rep->buffer_map_handle = 0; rep->buffer_map_handle = 0;
rep->buffer_size = 0; rep->buffer_size = 0;
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Pointer to dma-buf-mapped memory, plus helpers.
*/
#ifndef __DMA_BUF_MAP_H__
#define __DMA_BUF_MAP_H__
#include <linux/io.h>
#include <linux/string.h>
/**
* DOC: overview
*
* Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
* Depending on the location of the buffer, users may have to access it with
* I/O operations or memory load/store operations. For example, copying to
* system memory could be done with memcpy(), copying to I/O memory would be
* done with memcpy_toio().
*
* .. code-block:: c
*
* void *vaddr = ...; // pointer to system memory
* memcpy(vaddr, src, len);
*
* void *vaddr_iomem = ...; // pointer to I/O memory
 *		memcpy_toio(vaddr_iomem, src, len);
*
* When using dma-buf's vmap operation, the returned pointer is encoded as
* :c:type:`struct dma_buf_map <dma_buf_map>`.
* :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
* system or I/O memory and a flag that signals the required method of
* accessing the buffer. Use the returned instance and the helper functions
* to access the buffer's memory in the correct way.
*
* The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
* actually independent from the dma-buf infrastructure. When sharing buffers
* among devices, drivers have to know the location of the memory to access
* the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
* solves this problem for dma-buf and its users. If other drivers or
* sub-systems require similar functionality, the type could be generalized
* and moved to a more prominent header file.
*
* Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
* considered bad style. Rather then accessing its fields directly, use one
* of the provided helper functions, or implement your own. For example,
* instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
* statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
* dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
*
* .. code-block:: c
*
* struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
*
* dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
*
* To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem().
*
* .. code-block:: c
*
* dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf);
*
* Instances of struct dma_buf_map do not have to be cleaned up, but
* can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
* always refer to system memory.
*
* .. code-block:: c
*
* dma_buf_map_clear(&map);
*
* Test if a mapping is valid with either dma_buf_map_is_set() or
* dma_buf_map_is_null().
*
* .. code-block:: c
*
* if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
* // always true
*
* Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
 * for equality with dma_buf_map_is_equal(). Mappings that point to different
* memory spaces, system or I/O, are never equal. That's even true if both
* spaces are located in the same address space, both mappings contain the
* same address value, or both mappings refer to NULL.
*
* .. code-block:: c
*
* struct dma_buf_map sys_map; // refers to system memory
* struct dma_buf_map io_map; // refers to I/O memory
*
* if (dma_buf_map_is_equal(&sys_map, &io_map))
* // always false
*
* A set up instance of struct dma_buf_map can be used to access or manipulate
* the buffer memory. Depending on the location of the memory, the provided
* helpers will pick the correct operations. Data can be copied into the memory
* with dma_buf_map_memcpy_to(). The address can be manipulated with
* dma_buf_map_incr().
*
* .. code-block:: c
*
* const void *src = ...; // source buffer
* size_t len = ...; // length of src
*
* dma_buf_map_memcpy_to(&map, src, len);
* dma_buf_map_incr(&map, len); // go to first byte after the memcpy
*/
/**
 * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
 * @vaddr_iomem: The buffer's address if in I/O memory
 * @vaddr: The buffer's address if in system memory
 * @is_iomem: True if the dma-buf memory is located in I/O
 *	memory, or false otherwise.
 *
 * The two address members share storage in a union; @is_iomem acts as the
 * tag that selects which one is valid. Use the dma_buf_map_*() helpers
 * rather than reading the union members directly.
 */
struct dma_buf_map {
	union {
		void __iomem *vaddr_iomem;	/* valid when is_iomem is true */
		void *vaddr;			/* valid when is_iomem is false */
	};
	bool is_iomem;
};
/**
 * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
 * @vaddr_: A system-memory address
 *
 * Static initializer for a mapping that refers to system memory: the
 * I/O-memory flag is cleared and @vaddr_ is stored in the system-memory
 * member of the union.
 */
#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
	{ \
		.vaddr = (vaddr_), \
		.is_iomem = false, \
	}
/**
 * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
 * @map: The dma-buf mapping structure
 * @vaddr: A system-memory address
 *
 * Stores @vaddr in @map and marks the mapping as system memory by
 * clearing the I/O-memory flag.
 */
static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
{
	map->is_iomem = false;
	map->vaddr = vaddr;
}
/**
 * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory
 * @map: The dma-buf mapping structure
 * @vaddr_iomem: An I/O-memory address
 *
 * Stores @vaddr_iomem in @map and marks the mapping as I/O memory by
 * setting the I/O-memory flag.
 */
static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map,
					       void __iomem *vaddr_iomem)
{
	map->is_iomem = true;
	map->vaddr_iomem = vaddr_iomem;
}
/**
 * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
 * @lhs: The dma-buf mapping structure
 * @rhs: A dma-buf mapping structure to compare with
 *
 * Two mappings are equal when they refer to the same memory space (both
 * system memory, or both I/O memory) and hold the same address within it.
 * Mappings in different memory spaces are never equal, regardless of the
 * stored address values.
 *
 * Returns:
 * True if both structures are equal, or false otherwise.
 */
static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
					const struct dma_buf_map *rhs)
{
	if (lhs->is_iomem != rhs->is_iomem)
		return false;

	return lhs->is_iomem ? lhs->vaddr_iomem == rhs->vaddr_iomem
			     : lhs->vaddr == rhs->vaddr;
}
/**
 * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
 * @map: The dma-buf mapping structure
 *
 * Tests the address member selected by struct dma_buf_map.is_iomem
 * against NULL.
 *
 * Returns:
 * True if the mapping is NULL, or false otherwise.
 */
static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
{
	return map->is_iomem ? !map->vaddr_iomem : !map->vaddr;
}
/**
* dma_buf_map_is_set - Tests is the dma-buf mapping has been set
* @map: The dma-buf mapping structure
*
* Depending on the state of struct dma_buf_map.is_iomem, tests if the
* mapping has been set.
*
* Returns:
* True if the mapping is been set, or false otherwise.
*/
static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
{
return !dma_buf_map_is_null(map);
}
/**
 * dma_buf_map_clear - Clears a dma-buf mapping structure
 * @map: The dma-buf mapping structure
 *
 * Resets all fields to zero, including struct dma_buf_map.is_iomem, so
 * mappings that pointed to I/O memory become system-memory mappings
 * again. The stored address is cleared to NULL, which is the default.
 */
static inline void dma_buf_map_clear(struct dma_buf_map *map)
{
	/* The union members share storage, so one NULL store clears both. */
	map->vaddr = NULL;
	map->is_iomem = false;
}
/**
 * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping
 * @dst: The dma-buf mapping structure
 * @src: The source buffer
 * @len: The number of bytes in src
 *
 * Copies @len bytes from the system-memory buffer @src into the mapped
 * buffer, using memcpy_toio() when the destination lives in I/O memory
 * and a plain memcpy() otherwise.
 */
static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len)
{
	if (dst->is_iomem) {
		memcpy_toio(dst->vaddr_iomem, src, len);
		return;
	}

	memcpy(dst->vaddr, src, len);
}
/**
 * dma_buf_map_incr - Increments the address stored in a dma-buf mapping
 * @map: The dma-buf mapping structure
 * @incr: The number of bytes to increment
 *
 * Advances whichever address member is active, as selected by
 * struct dma_buf_map.is_iomem, by @incr bytes.
 */
static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr)
{
	if (map->is_iomem) {
		map->vaddr_iomem += incr;
		return;
	}

	map->vaddr += incr;
}
#endif /* __DMA_BUF_MAP_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment